[pve-devel] [PATCH qemu-server v3 2/5] implement suspend to disk for running vms

Dominik Csapak d.csapak at proxmox.com
Thu Mar 14 17:04:47 CET 2019


the idea is to use the same logic as with snapshots, but without
snapshotting the disks: after saving the vm state (including memory),
we hard shut off the guest.

this way the guest will not touch the disks anymore

to prevent any alteration of the vm (including migration, hw changes,
etc.) we add the config locks 'suspending' (while the state is being
written) and 'suspended' (afterwards)
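
for illustration, a minimal usage sketch of the new vm_suspend
signature (the vmid and flag values are assumptions for the example,
not part of this patch):

    use strict;
    use warnings;
    use PVE::QemuServer;

    # suspend VM 100 to disk: write RAM + device state to a state
    # volume, then hard shut off the guest
    my ($vmid, $skiplock, $includestate) = (100, 0, 1);
    PVE::QemuServer::vm_suspend($vmid, $skiplock, $includestate);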

Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
changes from v2:
* leave __snapshot_save_vmstate in QemuConfig
* better error logging/handling
* stop only when not using 'todisk' since savevm-start handles that already
* use 2 locks: 'suspending' while the state is saved, 'suspended'
  afterwards (a short sketch of what they signal follows below)
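
as a sketch of what the two locks signal to other code paths
(illustrative only, not part of this patch; the config access is
simplified):

    my $conf = PVE::QemuConfig->load_config($vmid);
    my $lock = $conf->{lock} // '';
    if ($lock eq 'suspended') {
        # state was written successfully and qemu has quit
        print "vm state volume: $conf->{vmstate}\n";
    } elsif ($lock eq 'suspending') {
        # savevm failed or was interrupted; the lock is kept to flag
        # that something went wrong
        warn "suspend to disk did not complete for vm $vmid\n";
    }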

 PVE/QemuConfig.pm | 22 +++++++++++-----
 PVE/QemuServer.pm | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 87 insertions(+), 11 deletions(-)

diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm
index 61a018e..6693585 100644
--- a/PVE/QemuConfig.pm
+++ b/PVE/QemuConfig.pm
@@ -116,9 +116,7 @@ sub get_replicatable_volumes {
 }
 
 sub __snapshot_save_vmstate {
-    my ($class, $vmid, $conf, $snapname, $storecfg) = @_;
-
-    my $snap = $conf->{snapshots}->{$snapname};
+    my ($class, $vmid, $conf, $snapname, $storecfg, $suspend) = @_;
 
     # first, use explicitly configured storage
     my $target = $conf->{vmstatestorage};
@@ -140,12 +138,24 @@ sub __snapshot_save_vmstate {
 
     my $driver_state_size = 500; # assume 500MB is enough to save all driver state;
     my $size = $conf->{memory}*2 + $driver_state_size;
+    my $scfg = PVE::Storage::storage_config($storecfg, $target);
 
     my $name = "vm-$vmid-state-$snapname";
-    my $scfg = PVE::Storage::storage_config($storecfg, $target);
     $name .= ".raw" if $scfg->{path}; # add filename extension for file base storage
-    $snap->{vmstate} = PVE::Storage::vdisk_alloc($storecfg, $target, $vmid, 'raw', $name, $size*1024);
-    $snap->{runningmachine} = PVE::QemuServer::get_current_qemu_machine($vmid);
+
+    my $statefile = PVE::Storage::vdisk_alloc($storecfg, $target, $vmid, 'raw', $name, $size*1024);
+    my $runningmachine = PVE::QemuServer::get_current_qemu_machine($vmid);
+
+    if ($suspend) {
+	$conf->{vmstate} = $statefile;
+	$conf->{runningmachine} = $runningmachine;
+    } else {
+	my $snap = $conf->{snapshots}->{$snapname};
+	$snap->{vmstate} = $statefile;
+	$snap->{runningmachine} = $runningmachine;
+    }
+
+    return $statefile;
 }
 
 sub __snapshot_check_running {
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index deac8e2..1f12f37 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -300,7 +300,7 @@ my $confdesc = {
 	optional => 1,
 	type => 'string',
 	description => "Lock/unlock the VM.",
-	enum => [qw(backup clone create migrate rollback snapshot snapshot-delete)],
+	enum => [qw(backup clone create migrate rollback snapshot snapshot-delete suspending suspended)],
     },
     cpulimit => {
 	optional => 1,
@@ -5658,17 +5658,83 @@ sub vm_stop {
 }
 
 sub vm_suspend {
-    my ($vmid, $skiplock) = @_;
+    my ($vmid, $skiplock, $includestate) = @_;
+
+    my $conf;
+    my $path;
+    my $storecfg;
+    my $vmstate;
 
     PVE::QemuConfig->lock_config($vmid, sub {
 
-	my $conf = PVE::QemuConfig->load_config($vmid);
+	$conf = PVE::QemuConfig->load_config($vmid);
 
+	my $is_backing_up = PVE::QemuConfig->has_lock($conf, 'backup');
 	PVE::QemuConfig->check_lock($conf)
-	    if !($skiplock || PVE::QemuConfig->has_lock($conf, 'backup'));
+	    if !($skiplock || $is_backing_up);
 
-	vm_mon_cmd($vmid, "stop");
+	die "cannot suspend to disk during backup\n"
+	    if $is_backing_up && $includestate;
+
+	if ($includestate) {
+	    $conf->{lock} = 'suspending';
+	    my $date = strftime("%Y-%m-%d", localtime(time()));
+	    $storecfg = PVE::Storage::config();
+	    $vmstate = PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, "suspend-$date", $storecfg, 1);
+	    $path = PVE::Storage::path($storecfg, $vmstate);
+	    PVE::QemuConfig->write_config($vmid, $conf);
+	} else {
+	    vm_mon_cmd($vmid, "stop");
+	}
     });
+
+    if ($includestate) {
+	# save vm state
+	PVE::Storage::activate_volumes($storecfg, [$vmstate]);
+
+	eval {
+	    vm_mon_cmd($vmid, "savevm-start", statefile => $path);
+	    for(;;) {
+		my $state = vm_mon_cmd_nocheck($vmid, "query-savevm");
+		if (!$state->{status}) {
+		    die "savevm not active\n";
+		} elsif ($state->{status} eq 'active') {
+		    sleep(1);
+		    next;
+		} elsif ($state->{status} eq 'completed') {
+		    last;
+		} elsif ($state->{status} eq 'failed' && $state->{error}) {
+		    die "query-savevm failed with error '$state->{error}'\n";
+		} else {
+		    die "query-savevm returned status '$state->{status}'\n";
+		}
+	    }
+	};
+	my $err = $@;
+
+	PVE::QemuConfig->lock_config($vmid, sub {
+	    $conf = PVE::QemuConfig->load_config($vmid);
+	    if ($err) {
+		# cleanup, but leave suspending lock, to indicate something went wrong
+		eval {
+		    vm_mon_cmd($vmid, "savevm-end");
+		    PVE::Storage::deactivate_volumes($storecfg, [$vmstate]);
+		    PVE::Storage::vdisk_free($storecfg, $vmstate);
+		    delete $conf->@{qw(vmstate runningmachine)};
+		    PVE::QemuConfig->write_config($vmid, $conf);
+		};
+		warn $@ if $@;
+		die $err;
+	    }
+
+	    die "lock changed unexpectedly\n"
+		if !PVE::QemuConfig->has_lock($conf, 'suspending');
+
+	    vm_qmp_command($vmid, { execute => "quit" });
+	    $conf->{lock} = 'suspended';
+	    PVE::QemuConfig->write_config($vmid, $conf);
+	});
+    }
 }
 
 sub vm_resume {
-- 
2.11.0