[pve-devel] [PATCH 3/5] cloudinit: use qcow2 for snapshot support

Alexandre Derumier aderumier at odiso.com
Tue Jun 30 16:01:48 CEST 2015


From: Wolfgang Bumiller <w.bumiller at proxmox.com>

The config-disk is now generated into a qcow2 image located on a
configured storage. It is now also storage-managed, and so
live-migration and live-snapshotting should work as they do for
regular hard drives.

Signed-off-by: Alexandre Derumier <aderumier at odiso.com>
---
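
Note (not part of the patch, for review only): a rough sketch of the
intended flow, with assumed values. Given "cloudinit: local" in the
config of VM 100, starting the VM now does roughly:

    my $conf    = PVE::QemuServer::load_config(100);
    my $storeid = $conf->{cloudinit};   # 'local', a pve-storage-id

    # build the config drive tree under /tmp/cloudinit/100 and commit
    # it into the storage-managed qcow2 via qemu-nbd + genisoimage
    PVE::QemuServer::generate_cloudinitconfig($conf, 100);

    # afterwards foreach_drive() reports the volume like a regular disk:
    #   ide3 -> local:100/vm-100-cloudinit.qcow2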
 PVE/API2/Qemu.pm         |  16 ++---
 PVE/QemuMigrate.pm       |   8 +--
 PVE/QemuServer.pm        | 158 ++++++++++++++++++++++++++++++-----------------
 PVE/VZDump/QemuServer.pm |   2 +-
 4 files changed, 113 insertions(+), 71 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index fae2872..285acc1 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -57,7 +57,7 @@ my $test_deallocate_drive = sub {
 my $check_storage_access = sub {
    my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
 
-   PVE::QemuServer::foreach_drive($settings, sub {
+   PVE::QemuServer::foreach_drive($settings, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
@@ -79,11 +79,11 @@ my $check_storage_access = sub {
 };
 
 my $check_storage_access_clone = sub {
-   my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_;
+   my ($rpcenv, $authuser, $storecfg, $conf, $vmid, $storage) = @_;
 
    my $sharedvm = 1;
 
-   PVE::QemuServer::foreach_drive($conf, sub {
+   PVE::QemuServer::foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
@@ -123,7 +123,7 @@ my $create_disks = sub {
     my $vollist = [];
 
     my $res = {};
-    PVE::QemuServer::foreach_drive($settings, sub {
+    PVE::QemuServer::foreach_drive($settings, $vmid, sub {
 	my ($ds, $disk) = @_;
 
 	my $volid = $disk->{file};
@@ -2052,8 +2052,8 @@ __PACKAGE__->register_method({
 	}
 	my $storecfg = PVE::Storage::config();
 
-	my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
-	my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+	my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg, $vmid);
+	my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $vmid, $storecfg, $snapname, $running);
 
 	return {
 	    hasFeature => $hasFeature,
@@ -2205,7 +2205,7 @@ __PACKAGE__->register_method({
 
 	    my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
 
-	    my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
+	    my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $vmid, $storage);
 
 	    die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
 
@@ -2563,7 +2563,7 @@ __PACKAGE__->register_method({
 	}
 
 	my $storecfg = PVE::Storage::config();
-	PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+	PVE::QemuServer::check_storage_availability($storecfg, $conf, $vmid, $target);
 
 	if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
 
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 264a2a7..0f77745 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -153,7 +153,7 @@ sub prepare {
     }
 
     # activate volumes
-    my $vollist = PVE::QemuServer::get_vm_volumes($conf);
+    my $vollist = PVE::QemuServer::get_vm_volumes($conf, $vmid);
     PVE::Storage::activate_volumes($self->{storecfg}, $vollist);
 
     # fixme: check if storage is available on both nodes
@@ -192,7 +192,7 @@ sub sync_disks {
 
             # get list from PVE::Storage (for unused volumes)
             my $dl = PVE::Storage::vdisk_list($self->{storecfg}, $storeid, $vmid);
-            PVE::Storage::foreach_volid($dl, sub {
+            PVE::Storage::foreach_volid($dl, $vmid, sub {
                 my ($volid, $sid, $volname) = @_;
 
                 # check if storage is available on target node
@@ -205,7 +205,7 @@ sub sync_disks {
 
 	# and add used, owned/non-shared disks (just to be sure we have all)
 
-	PVE::QemuServer::foreach_volid($conf, sub {
+	PVE::QemuServer::foreach_volid($conf, $vmid, sub {
 	    my ($volid, $is_cdrom) = @_;
 
 	    return if !$volid;
@@ -629,7 +629,7 @@ sub phase3_cleanup {
 
     # always deactivate volumes - avoid lvm LVs to be active on several nodes
     eval {
-	my $vollist = PVE::QemuServer::get_vm_volumes($conf);
+	my $vollist = PVE::QemuServer::get_vm_volumes($conf, $vmid);
 	PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
     };
     if (my $err = $@) {
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 51f1277..b13162e 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -402,8 +402,11 @@ EODESCR
     },
     cloudinit => {
 	optional => 1,
-	type => 'boolean',
-	description => "Enable cloudinit config generation.",
+	type => 'string',
+	# FIXME: for template support this will become a formatted string
+	#	 like: storage=mynfs,template=sometemplate
+	format => 'pve-storage-id',
+	description => "Enable cloudinit config generation on a specified storage.",
 	default => 0,
     },
 
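Note (not part of the patch): the option value is now a plain storage
ID, e.g. in the VM config:

    # /etc/pve/qemu-server/<vmid>.conf (assumed example)
    cloudinit: local

Per the FIXME above, this may later become a formatted string such as
"storage=mynfs,template=sometemplate".
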
@@ -752,8 +755,6 @@ sub get_iso_path {
 	return get_cdrom_path();
     } elsif ($cdrom eq 'none') {
 	return '';
-    } elsif ($cdrom eq 'cloudinit') {
-	return "/tmp/cloudinit/$vmid/configdrive.iso";
     } elsif ($cdrom =~ m|^/|) {
 	return $cdrom;
     } else {
@@ -765,7 +766,7 @@ sub get_iso_path {
 sub filename_to_volume_id {
     my ($vmid, $file, $media) = @_;
 
-     if (!($file eq 'none' || $file eq 'cdrom' || $file eq 'cloudinit' ||
+     if (!($file eq 'none' || $file eq 'cdrom' ||
 	  $file =~ m|^/dev/.+| || $file =~ m/^([^:]+):(.+)$/)) {
 
 	return undef if $file =~ m|/|;
@@ -1913,7 +1914,7 @@ sub destroy_vm {
     check_lock($conf);
 
     # only remove disks owned by this VM
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
  	return if drive_is_cdrom($drive);
@@ -2067,8 +2068,8 @@ sub write_vm_config {
 	delete $conf->{cdrom};
     }
 
-    if ($conf->{cloudinit}) {
-	die "option cloudinit conflicts with ide3\n" if $conf->{ide3};
+    if ($conf->{cloudinit} && $conf->{ide3}) {
+	die "option cloudinit conflicts with ide3\n";
 	delete $conf->{cloudinit};
     }
 
@@ -2227,9 +2228,9 @@ sub check_local_resources {
 
 # check if used storages are available on all nodes (used by migrate)
 sub check_storage_availability {
-    my ($storecfg, $conf, $node) = @_;
+    my ($storecfg, $conf, $vmid, $node) = @_;
 
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	my $volid = $drive->{file};
@@ -2246,13 +2247,13 @@ sub check_storage_availability {
 
 # list nodes where all VM images are available (used by has_feature API)
 sub shared_nodes {
-    my ($conf, $storecfg) = @_;
+    my ($conf, $storecfg, $vmid) = @_;
 
     my $nodelist = PVE::Cluster::get_nodelist();
     my $nodehash = { map { $_ => 1 } @$nodelist };
     my $nodename = PVE::INotify::nodename();
 
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	my $volid = $drive->{file};
@@ -2604,7 +2605,7 @@ sub foreach_dimm {
 }
 
 sub foreach_drive {
-    my ($conf, $func) = @_;
+    my ($conf, $vmid, $func) = @_;
 
     foreach my $ds (keys %$conf) {
 	next if !valid_drivename($ds);
@@ -2614,10 +2615,25 @@ sub foreach_drive {
 
 	&$func($ds, $drive);
     }
+
+    if (my $storeid = $conf->{cloudinit}) {
+	my $storecfg = PVE::Storage::config();
+	my $imagedir = PVE::Storage::get_image_dir($storecfg, $storeid, $vmid);
+	my $iso_name = "vm-$vmid-cloudinit.qcow2";
+	my $iso_path = "$imagedir/$iso_name";
+	# Only snapshot it if it has already been created.
+	# (Which is not the case if the VM has never been started before with
+	# cloud-init enabled.)
+	if (-e $iso_path) {
+	    my $ds = 'ide3';
+	    my $drive = parse_drive($ds, "$storeid:$vmid/vm-$vmid-cloudinit.qcow2");
+	    &$func($ds, $drive) if $drive;
+	}
+    }
 }
 
 sub foreach_volid {
-    my ($conf, $func) = @_;
+    my ($conf, $vmid, $func) = @_;
 
     my $volhash = {};
 
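Note (not part of the patch): all drive/volid iterators now take the
vmid, since the cloudinit drive is not a config entry but is derived
from the 'cloudinit' option plus the on-disk image. A call site now
looks like:

    PVE::QemuServer::foreach_drive($conf, $vmid, sub {
	my ($ds, $drive) = @_;
	# once the image exists, this also yields the synthesized entry
	#   ide3 -> <storeid>:<vmid>/vm-<vmid>-cloudinit.qcow2
	print "$ds: $drive->{file}\n";
    });
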
@@ -2629,7 +2645,7 @@ sub foreach_volid {
 	$volhash->{$volid} = $is_cdrom || 0;
     };
 
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 	&$test_volid($drive->{file}, drive_is_cdrom($drive));
     });
@@ -2637,7 +2653,7 @@ sub foreach_volid {
     foreach my $snapname (keys %{$conf->{snapshots}}) {
 	my $snap = $conf->{snapshots}->{$snapname};
 	&$test_volid($snap->{vmstate}, 0);
-	foreach_drive($snap, sub {
+	foreach_drive($snap, $vmid, sub {
 	    my ($ds, $drive) = @_;
 	    &$test_volid($drive->{file}, drive_is_cdrom($drive));
         });
@@ -3133,7 +3149,7 @@ sub config_to_command {
 	push @$devices, '-iscsi', "initiator-name=$initiator";
     }
 
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	if (PVE::Storage::parse_volume_id($drive->{file}, 1)) {
@@ -3192,8 +3208,6 @@ sub config_to_command {
 	push @$devices, '-device', print_drivedevice_full($storecfg, $conf, $vmid, $drive, $bridges);
     });
 
-    generate_cloudinit_command($conf, $vmid, $storecfg, $bridges, $devices);
-
     for (my $i = 0; $i < $MAX_NETS; $i++) {
          next if !$conf->{"net$i"};
          my $d = parse_net($conf->{"net$i"});
@@ -4450,10 +4464,10 @@ sub vm_reset {
 }
 
 sub get_vm_volumes {
-    my ($conf) = @_;
+    my ($conf, $vmid) = @_;
 
     my $vollist = [];
-    foreach_volid($conf, sub {
+    foreach_volid($conf, $vmid, sub {
 	my ($volid, $is_cdrom) = @_;
 
 	return if $volid =~ m|^/|;
@@ -4473,7 +4487,7 @@ sub vm_stop_cleanup {
     eval {
 
 	if (!$keepActive) {
-	    my $vollist = get_vm_volumes($conf);
+	    my $vollist = get_vm_volumes($conf, $vmid);
 	    PVE::Storage::deactivate_volumes($storecfg, $vollist);
 	}
 
@@ -5302,7 +5316,7 @@ sub restore_vma_archive {
 	# create empty/temp config
 	if ($oldconf) {
 	    PVE::Tools::file_set_contents($conffile, "memory: 128\n");
-	    foreach_drive($oldconf, sub {
+	    foreach_drive($oldconf, $vmid, sub {
 		my ($ds, $drive) = @_;
 
 		return if drive_is_cdrom($drive);
@@ -5676,7 +5690,7 @@ my $snapshot_prepare = sub {
 	    if defined($conf->{snapshots}->{$snapname});
 
 	my $storecfg = PVE::Storage::config();
-	die "snapshot feature is not available" if !has_feature('snapshot', $conf, $storecfg);
+	die "snapshot feature is not available" if !has_feature('snapshot', $conf, $vmid, $storecfg);
 
 	$snap = $conf->{snapshots}->{$snapname} = {};
 
@@ -5758,7 +5772,7 @@ sub snapshot_rollback {
 
     my $snap = &$get_snapshot_config();
 
-    foreach_drive($snap, sub {
+    foreach_drive($snap, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	return if drive_is_cdrom($drive);
@@ -5819,7 +5833,7 @@ sub snapshot_rollback {
 
     lock_config($vmid, $updatefn);
 
-    foreach_drive($snap, sub {
+    foreach_drive($snap, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	return if drive_is_cdrom($drive);
@@ -5904,7 +5918,7 @@ sub snapshot_create {
  	    }
 	};
 
-	foreach_drive($snap, sub {
+	foreach_drive($snap, $vmid, sub {
 	    my ($ds, $drive) = @_;
 
 	    return if drive_is_cdrom($drive);
@@ -6036,7 +6050,7 @@ sub snapshot_delete {
     };
 
     # now remove all internal snapshots
-    foreach_drive($snap, sub {
+    foreach_drive($snap, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	return if drive_is_cdrom($drive);
@@ -6063,10 +6077,10 @@ sub snapshot_delete {
 }
 
 sub has_feature {
-    my ($feature, $conf, $storecfg, $snapname, $running) = @_;
+    my ($feature, $conf, $vmid, $storecfg, $snapname, $running) = @_;
 
     my $err;
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	return if drive_is_cdrom($drive);
@@ -6082,7 +6096,7 @@ sub template_create {
 
     my $storecfg = PVE::Storage::config();
 
-    foreach_drive($conf, sub {
+    foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	return if drive_is_cdrom($drive);
@@ -6363,46 +6377,73 @@ sub scsihw_infos {
     return ($maxdev, $controller, $controller_prefix);
 }
 
+# FIXME: Reasonable size? qcow2 shouldn't grow if the space isn't used anyway?
+my $cloudinit_iso_size = 5; # in MB
+
+sub prepare_cloudinit_disk {
+    my ($vmid, $storeid) = @_;
+
+    my $storecfg = PVE::Storage::config();
+    my $imagedir = PVE::Storage::get_image_dir($storecfg, $storeid, $vmid);
+    my $iso_name = "vm-$vmid-cloudinit.qcow2";
+    my $iso_path = "$imagedir/$iso_name";
+    return $iso_path if -e $iso_path;
+
+    # vdisk_alloc size is in K
+    my $iso_volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, 'qcow2', $iso_name, $cloudinit_iso_size*1024);
+    return $iso_path;
+}
+
+# FIXME: also in LXCCreate.pm => move to pve-common
+sub next_free_nbd_dev {
+
+    for (my $i = 0;; $i++) {
+	my $dev = "/dev/nbd$i";
+	last if ! -b $dev;
+	next if -f "/sys/block/nbd$i/pid"; # busy
+	return $dev;
+    }
+    die "unable to find free nbd device\n";
+}
+
+sub commit_cloudinit_disk {
+    my ($file_path, $iso_path) = @_;
+
+    my $nbd_dev = next_free_nbd_dev();
+    run_command(['qemu-nbd', '-c', $nbd_dev, $iso_path]);
+
+    eval {
+	run_command(['genisoimage',
+		     '-R',
+		     '-V', 'config-2',
+		     '-o', $nbd_dev,
+		     $file_path]);
+    };
+    my $err = $@;
+    eval { run_command(['qemu-nbd', '-d', $nbd_dev]); };
+    warn $@ if $@;
+    die $err if $err;
+}
+
 sub generate_cloudinitconfig {
     my ($conf, $vmid) = @_;
 
-    return if !$conf->{cloudinit};
+    my $storeid = $conf->{cloudinit};
+    return if !$storeid;
 
     my $path = "/tmp/cloudinit/$vmid";
 
-    mkdir "/tmp/cloudinit";
-    mkdir $path;
-    mkdir "$path/drive";
-    mkdir "$path/drive/openstack";
-    mkdir "$path/drive/openstack/latest";
-    mkdir "$path/drive/openstack/content";
+    mkpath "$path/drive/openstack/latest";
+    mkpath "$path/drive/openstack/content";
     my $digest_data = generate_cloudinit_userdata($conf, $path)
 		    . generate_cloudinit_network($conf, $path);
     generate_cloudinit_metadata($conf, $path, $digest_data);
 
-    my $cmd = [];
-    push @$cmd, 'genisoimage';
-    push @$cmd, '-R';
-    push @$cmd, '-V', 'config-2';
-    push @$cmd, '-o', "$path/configdrive.iso";
-    push @$cmd, "$path/drive";
-
-    run_command($cmd);
+    my $iso_path = prepare_cloudinit_disk($vmid, $storeid);
+    commit_cloudinit_disk("$path/drive", $iso_path);
     rmtree("$path/drive");
 }
 
-sub generate_cloudinit_command {
-    my ($conf, $vmid, $storecfg, $bridges, $devices) = @_;
-
-    return if !$conf->{cloudinit};
-
-    my $path = "/tmp/cloudinit/$vmid/configdrive.iso";
-    my $drive = parse_drive('ide3', 'cloudinit,media=cdrom');
-    my $drive_cmd = print_drive_full($storecfg, $vmid, $drive);
-    push @$devices, '-drive', $drive_cmd;
-    push @$devices, '-device', print_drivedevice_full($storecfg, $conf, $vmid, $drive, $bridges);
-}
-
 sub generate_cloudinit_userdata {
     my ($conf, $path) = @_;
 
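Note (not part of the patch): the commit path writes the generated ISO
straight onto the qcow2 through an NBD device, so no intermediate raw
file is needed. Standalone, with assumed paths, it amounts to:

    my $iso_path = PVE::QemuServer::prepare_cloudinit_disk($vmid, $storeid);
    PVE::QemuServer::commit_cloudinit_disk("/tmp/cloudinit/$vmid/drive", $iso_path);

    # which is roughly equivalent to:
    #   qemu-nbd -c /dev/nbdX vm-<vmid>-cloudinit.qcow2
    #   genisoimage -R -V config-2 -o /dev/nbdX /tmp/cloudinit/<vmid>/drive
    #   qemu-nbd -d /dev/nbdX
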
@@ -6441,6 +6482,7 @@ sub generate_cloudinit_metadata {
     file_write($fn, $content);
 }
 
+# FIXME: also in LXC.pm => move to pve-common
 my $ipv4_reverse_mask = [
     '0.0.0.0',
     '128.0.0.0',
diff --git a/PVE/VZDump/QemuServer.pm b/PVE/VZDump/QemuServer.pm
index 314538f..9f7e68a 100644
--- a/PVE/VZDump/QemuServer.pm
+++ b/PVE/VZDump/QemuServer.pm
@@ -59,7 +59,7 @@ sub prepare {
 
     my $vollist = [];
     my $drivehash = {};
-    PVE::QemuServer::foreach_drive($conf, sub {
+    PVE::QemuServer::foreach_drive($conf, $vmid, sub {
 	my ($ds, $drive) = @_;
 
 	return if PVE::QemuServer::drive_is_cdrom($drive);
-- 
2.1.4



