[pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint

Thomas Lamprecht t.lamprecht at proxmox.com
Fri Feb 19 17:41:27 CET 2021


On 09.02.21 11:31, Hannes Laimer wrote:
> Handles pause and hibernation; the reason for not splitting it was to mirror
> the behaviour of the already existing suspend endpoint for single VMs.
> 
> Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
> ---
> 
> Endpoint code is mostly taken from already existing endpoints, namely
> stopall and startall.
> 
>  PVE/API2/Nodes.pm | 119 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 119 insertions(+)
> 
> diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm
> index 8172231e..3e6e9fa2 100644
> --- a/PVE/API2/Nodes.pm
> +++ b/PVE/API2/Nodes.pm
> @@ -1943,6 +1943,125 @@ __PACKAGE__->register_method ({
>  	return $rpcenv->fork_worker('stopall', undef, $authuser, $code);
>      }});
>  
> +my $create_suspend_worker = sub {
> +    my ($nodename, $type, $vmid, $down_timeout, $todisk) = @_;
> +
> +    my $upid;
> +    if ($type eq 'qemu') {
> +	return if !PVE::QemuServer::check_running($vmid, 1);
> +	my $timeout =  defined($down_timeout) ? int($down_timeout) : 60*3;
> +	print STDERR "Suspending VM $vmid (timeout = $timeout seconds)\n";
> +	$upid = PVE::API2::Qemu->vm_suspend({node => $nodename, vmid => $vmid, todisk => $todisk});
> +    } else {
> +	die "suspension is only supported on VMs, not on '$type'\n";
> +    }
> +
> +    return $upid;
> +};
> +
> +__PACKAGE__->register_method ({
> +    name => 'suspendall',
> +    path => 'suspendall',
> +    method => 'POST',
> +    protected => 1,
> +    permissions => {
> +	check => ['perm', '/', [ 'VM.PowerMgmt' ]],

permissions are still unchanged?

From:
https://pve.proxmox.com/pve-docs/api-viewer/index.html#/nodes/{node}/qemu/{vmid}/status/suspend

"You need 'VM.PowerMgmt' on /vms/{vmid}, and if you have set 'todisk', you need also
'VM.Config.Disk' on /vms/{vmid} and 'Datastore.AllocateSpace' on the storage for the
vmstate."

But you call PVE::API2::Qemu->vm_suspend directly, so all schema-based checks, i.e., those not
done explicitly in its code, get circumvented.

Did you check that this is OK?
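
If the direct call stays, those checks would need replicating by hand, roughly like the
following (untested sketch; $statestorage would first have to be resolved from the VM config):

    $rpcenv->check($authuser, "/vms/$vmid", ['VM.PowerMgmt']);
    if ($param->{todisk}) {
        $rpcenv->check($authuser, "/vms/$vmid", ['VM.Config.Disk']);
        # 'Datastore.AllocateSpace' on the storage that will hold the vmstate
        $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']);
    }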


> +    },
> +    proxyto => 'node',
> +    description => "Suspend all VMs.",
> +    parameters => {
> +    	additionalProperties => 0,
> +	properties => {
> +	    node => get_standard_option('pve-node'),
> +	    vms => {
> +		description => "Only consider Guests with these IDs.",
> +		type => 'string',  format => 'pve-vmid-list',
> +		optional => 1,
> +	    },
> +	    todisk => {
> +		type => 'boolean',
> +		default => 0,

I'd even enable this by default, as that's the prime use case.
Alternatively, it could be defaulted to true via the GUI.

> +		optional => 1,
> +		description => 'If set, suspends the VM to disk. Will be resumed on next VM start.',
> +	    },

A state storage parameter may also be useful; some may even want a mapping per VM. That could be
overkill though, as people can already configure the state storage in the VM options.
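
Something like the single-VM suspend endpoint's parameter could be reused here (sketch; assuming
the 'pve-storage-id' standard option fits):

    statestorage => get_standard_option('pve-storage-id', {
        description => "The storage for the VM state.",
        requires => 'todisk',
        optional => 1,
    }),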

> +	},
> +    },
> +    returns => {
> +	type => 'string',
> +    },
> +    code => sub {
> +	my ($param) = @_;
> +
> +	my $rpcenv = PVE::RPCEnvironment::get();
> +	my $authuser = $rpcenv->get_user();
> +
> +	my $nodename = $param->{node};
> +	$nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
> +
> +	my $code = sub {
> +
> +	    $rpcenv->{type} = 'priv'; # to start tasks in background
> +
> +	    my $stopList = &$get_start_stop_list($nodename, undef, $param->{vms});
> +
> +	    my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
> +	    my $datacenterconfig = cfs_read_file('datacenter.cfg');
> +	    # if not set by user spawn max cpu count number of workers
> +	    my $maxWorkers =  $datacenterconfig->{max_workers} || $cpuinfo->{cpus};
> +
> +	    foreach my $order (sort {$b <=> $a} keys %$stopList) {
> +		my $vmlist = $stopList->{$order};
> +		my $workers = {};
> +
> +		my $finish_worker = sub {
> +		    my $pid = shift;
> +		    my $d = $workers->{$pid};
> +		    return if !$d;
> +		    delete $workers->{$pid};
> +
> +		    syslog('info', "end task $d->{upid}");
> +		};

still not factored out

> +
> +		foreach my $vmid (sort {$b <=> $a} keys %$vmlist) {
> +		    my $d = $vmlist->{$vmid};
> +		    my $upid;
> +		    eval { $upid = &$create_suspend_worker($nodename, $d->{type}, $vmid, $d->{down}, $param->{todisk}); };
> +		    warn $@ if $@;
> +		    next if !$upid;
> +
> +		    my $res = PVE::Tools::upid_decode($upid, 1);
> +		    next if !$res;
> +
> +		    my $pid = $res->{pid};
> +
> +		    $workers->{$pid} = { type => $d->{type}, upid => $upid, vmid => $vmid };
> +		    while (scalar(keys %$workers) >= $maxWorkers) {
> +			foreach my $p (keys %$workers) {
> +			    if (!PVE::ProcFSTools::check_process_running($p)) {
> +				&$finish_worker($p);
> +			    }
> +			}
> +			sleep(1);

still not factored out

> +		    }
> +		}
> +		while (scalar(keys %$workers)) {
> +		    foreach my $p (keys %$workers) {
> +			if (!PVE::ProcFSTools::check_process_running($p)) {
> +			    &$finish_worker($p);
> +			}
> +		    }
> +		    sleep(1);
> +		}

still not factored out...

did you even read my last review?!
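
To spell it out once more: a single helper defined next to the other $create_*_worker subs would
cover both polling loops here, plus the copies in startall/stopall. Roughly (untested sketch, the
name is made up):

    my $wait_for_workers = sub {
        my ($workers, $max_active) = @_; # $max_active == 0: wait for all to exit
        while (scalar(keys %$workers) > $max_active) {
            foreach my $pid (keys %$workers) {
                next if PVE::ProcFSTools::check_process_running($pid);
                my $d = delete $workers->{$pid};
                syslog('info', "end task $d->{upid}") if $d;
            }
            sleep(1) if scalar(keys %$workers) > $max_active;
        }
    };

with the spawn loop calling $wait_for_workers->($workers, $maxWorkers - 1) and the final wait
becoming $wait_for_workers->($workers, 0).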

> +	    }
> +
> +	    syslog('info', "all VMs suspended");
> +
> +	    return;
> +	};
> +
> +	return $rpcenv->fork_worker('suspendall', undef, $authuser, $code);
> +    }});
> +
>  my $create_migrate_worker = sub {
>      my ($nodename, $type, $vmid, $target, $with_local_disks) = @_;
>  
> 