[pve-devel] [PATCH v10 qemu-server/cloudinit 4/8] cloudinit: use qcow2 for future snapshot support
Wolfgang Bumiller
w.bumiller at proxmox.com
Tue Aug 11 15:51:51 CEST 2015
The config disk is now generated into a qcow2 image located on a
configured storage.
It is also storage-managed, so live migration and live snapshotting
should work as they do for regular hard drives.
Config drives are recognized by a volume name of the form:
VMID/vm-VMID-cloudinit.qcow2
Example:
ahci0: local:112/vm-112-cloudinit.qcow2,media=cdrom
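For illustration, a minimal standalone sketch (assuming the same match
as the find_cloudinit_storage() helper added below) of how such a drive
entry yields the storage id:

    my $vmid  = 112;
    my $drive = 'local:112/vm-112-cloudinit.qcow2,media=cdrom';
    if ($drive =~ m@^(?:volume=)?([^:]+):\Q$vmid\E/vm-\Q$vmid\E-cloudinit\.qcow2@) {
        print "cloudinit storage: $1\n"; # prints "cloudinit storage: local"
    }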
FIXME: Requiring the VMID in the volume name was the easier approach
to implement. Ideally the VMID wouldn't be needed at all, but that
requires further changes, so I'm leaving this open for comments for now.
---
PVE/QemuServer.pm | 85 +++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 0786c49..2e9717c 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -400,13 +400,6 @@ EODESCR
type => 'string',
description => "Ssh keys for root",
},
- cloudinit => {
- optional => 1,
- type => 'boolean',
- description => "Enable cloudinit config generation.",
- default => 0,
- },
-
};
# what about other qemu settings ?
@@ -2070,11 +2063,6 @@ sub write_vm_config {
delete $conf->{cdrom};
}
- if ($conf->{cloudinit}) {
- die "option cloudinit conflicts with ide3\n" if $conf->{ide3};
- delete $conf->{cloudinit};
- }
-
# we do not use 'smp' any longer
if ($conf->{sockets}) {
delete $conf->{smp};
@@ -4909,6 +4897,7 @@ sub print_pci_addr {
'net29' => { bus => 1, addr => 24 },
'net30' => { bus => 1, addr => 25 },
'net31' => { bus => 1, addr => 26 },
+ 'ahci1' => { bus => 1, addr => 27 },
'virtio6' => { bus => 2, addr => 1 },
'virtio7' => { bus => 2, addr => 2 },
'virtio8' => { bus => 2, addr => 3 },
@@ -6431,10 +6420,70 @@ sub scsihw_infos {
return ($maxdev, $controller, $controller_prefix);
}
+# FIXME: Reasonable size? qcow2 shouldn't grow if the space isn't used anyway?
+my $cloudinit_iso_size = 5; # in MB
+
+sub prepare_cloudinit_disk {
+ my ($vmid, $storeid) = @_;
+
+ my $storecfg = PVE::Storage::config();
+ my $imagedir = PVE::Storage::get_image_dir($storecfg, $storeid, $vmid);
+ my $iso_name = "vm-$vmid-cloudinit.qcow2";
+ my $iso_path = "$imagedir/$iso_name";
+ if (!-e $iso_path) {
+ # vdisk_alloc size is in K
+ PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, 'qcow2', $iso_name, $cloudinit_iso_size*1024);
+ }
+ return ($iso_path, 'qcow2');
+}
+
+# FIXME: also in LXCCreate.pm => move to pve-common
+sub next_free_nbd_dev {
+
+ for(my $i = 0;;$i++) {
+ my $dev = "/dev/nbd$i";
+ last if ! -b $dev;
+ next if -f "/sys/block/nbd$i/pid"; # busy
+ return $dev;
+ }
+ die "unable to find free nbd device\n";
+}
+
+sub commit_cloudinit_disk {
+ my ($file_path, $iso_path, $format) = @_;
+
+ my $nbd_dev = next_free_nbd_dev();
+ run_command(['qemu-nbd', '-c', $nbd_dev, $iso_path, '-f', $format]);
+
+ eval {
+ run_command(['genisoimage',
+ '-R',
+ '-V', 'config-2',
+ '-o', $nbd_dev,
+ $file_path]);
+ };
+ my $err = $@;
+ eval { run_command(['qemu-nbd', '-d', $nbd_dev]); };
+ warn $@ if $@;
+ die $err if $err;
+}
+
+sub find_cloudinit_storage {
+ my ($conf, $vmid) = @_;
+ foreach my $ds (keys %$conf) {
+ next if !valid_drivename($ds);
+ if ($conf->{$ds} =~ m@^(?:volume=)?([^:]+):\Q$vmid\E/vm-\Q$vmid\E-cloudinit\.qcow2@) {
+ return $1;
+ }
+ }
+ return undef;
+}
+
sub generate_cloudinitconfig {
my ($conf, $vmid) = @_;
- return if !$conf->{cloudinit};
+ my $storeid = find_cloudinit_storage($conf, $vmid);
+ return if !$storeid;
my $path = "/tmp/cloudinit/$vmid";
@@ -6448,14 +6497,8 @@ sub generate_cloudinitconfig {
. generate_cloudinit_network($conf, $path);
generate_cloudinit_metadata($conf, $path, $digest_data);
- my $cmd = [];
- push @$cmd, 'genisoimage';
- push @$cmd, '-R';
- push @$cmd, '-V', 'config-2';
- push @$cmd, '-o', "$path/configdrive.iso";
- push @$cmd, "$path/drive";
-
- run_command($cmd);
+ my ($iso_path, $format) = prepare_cloudinit_disk($vmid, $storeid);
+ commit_cloudinit_disk("$path/drive", $iso_path, $format);
rmtree("$path/drive");
}
--
2.1.4