[pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint
Hannes Laimer
h.laimer at proxmox.com
Tue Feb 9 11:31:23 CET 2021
Handles both pause and hibernation; the reason for not splitting this into two
endpoints was to mirror the behaviour of the already existing suspend endpoint
for single VMs.
Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
---
Endpoint code is mostly taken from the already existing endpoints, namely
stopall and startall.
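
Once applied, the endpoint should also be callable through pvesh, since the
CLI options are generated from the schema below (the node name 'pve1' is only
a placeholder for the example):

    # pause all guests with ID 100 and 101; add --todisk 1 to hibernate instead
    pvesh create /nodes/pve1/suspendall --vms "100,101"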
PVE/API2/Nodes.pm | 119 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 119 insertions(+)
diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm
index 8172231e..3e6e9fa2 100644
--- a/PVE/API2/Nodes.pm
+++ b/PVE/API2/Nodes.pm
@@ -1943,6 +1943,125 @@ __PACKAGE__->register_method ({
         return $rpcenv->fork_worker('stopall', undef, $authuser, $code);
     }});
+my $create_suspend_worker = sub {
+    my ($nodename, $type, $vmid, $todisk) = @_;
+
+    my $upid;
+    if ($type eq 'qemu') {
+        return if !PVE::QemuServer::check_running($vmid, 1);
+        # note: unlike vm_shutdown, vm_suspend takes no timeout
+        print STDERR "Suspending VM $vmid\n";
+        $upid = PVE::API2::Qemu->vm_suspend({ node => $nodename, vmid => $vmid, todisk => $todisk });
+    } else {
+        die "suspension is only supported on VMs, not on '$type'\n";
+    }
+
+    return $upid;
+};
+
+__PACKAGE__->register_method ({
+    name => 'suspendall',
+    path => 'suspendall',
+    method => 'POST',
+    protected => 1,
+    permissions => {
+        check => ['perm', '/', [ 'VM.PowerMgmt' ]],
+    },
+    proxyto => 'node',
+    description => "Suspend all VMs.",
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            node => get_standard_option('pve-node'),
+            vms => {
+                description => "Only consider Guests with these IDs.",
+                type => 'string', format => 'pve-vmid-list',
+                optional => 1,
+            },
+            todisk => {
+                type => 'boolean',
+                default => 0,
+                optional => 1,
+                description => 'If set, suspends the VM to disk. Will be resumed on next VM start.',
+            },
+        },
+    },
+    returns => {
+        type => 'string',
+    },
+    code => sub {
+        my ($param) = @_;
+
+        my $rpcenv = PVE::RPCEnvironment::get();
+        my $authuser = $rpcenv->get_user();
+
+        my $nodename = $param->{node};
+        $nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
+
+        my $code = sub {
+
+            $rpcenv->{type} = 'priv'; # to start tasks in background
+
+            my $stopList = &$get_start_stop_list($nodename, undef, $param->{vms});
+
+            my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
+            my $datacenterconfig = cfs_read_file('datacenter.cfg');
+            # if not set by the user, spawn one worker per CPU
+            my $maxWorkers = $datacenterconfig->{max_workers} || $cpuinfo->{cpus};
+
+            foreach my $order (sort {$b <=> $a} keys %$stopList) {
+                my $vmlist = $stopList->{$order};
+                my $workers = {};
+
+                my $finish_worker = sub {
+                    my $pid = shift;
+                    my $d = $workers->{$pid};
+                    return if !$d;
+                    delete $workers->{$pid};
+
+                    syslog('info', "end task $d->{upid}");
+                };
+
+                foreach my $vmid (sort {$b <=> $a} keys %$vmlist) {
+                    my $d = $vmlist->{$vmid};
+                    my $upid;
+                    eval { $upid = &$create_suspend_worker($nodename, $d->{type}, $vmid, $param->{todisk}); };
+                    warn $@ if $@;
+                    next if !$upid;
+
+                    my $res = PVE::Tools::upid_decode($upid, 1);
+                    next if !$res;
+
+                    my $pid = $res->{pid};
+
+                    $workers->{$pid} = { type => $d->{type}, upid => $upid, vmid => $vmid };
+                    while (scalar(keys %$workers) >= $maxWorkers) {
+                        foreach my $p (keys %$workers) {
+                            if (!PVE::ProcFSTools::check_process_running($p)) {
+                                &$finish_worker($p);
+                            }
+                        }
+                        sleep(1);
+                    }
+                }
+                while (scalar(keys %$workers)) {
+                    foreach my $p (keys %$workers) {
+                        if (!PVE::ProcFSTools::check_process_running($p)) {
+                            &$finish_worker($p);
+                        }
+                    }
+                    sleep(1);
+                }
+            }
+
+            syslog('info', "all VMs suspended");
+
+            return;
+        };
+
+        return $rpcenv->fork_worker('suspendall', undef, $authuser, $code);
+    }});
+
 my $create_migrate_worker = sub {
     my ($nodename, $type, $vmid, $target, $with_local_disks) = @_;
--
2.20.1
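
As with startall/stopall, the call returns the UPID of the umbrella
'suspendall' task, so completion can be tracked through the usual task
status endpoint, e.g. (node name again a placeholder):

    pvesh get /nodes/pve1/tasks/<upid>/status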