[pve-devel] [PATCH v2 qemu-server 05/11] refactor: Move check_running to QemuConfig
Fabian Grünbichler
f.gruenbichler at proxmox.com
Wed Oct 30 11:39:37 CET 2019
On October 28, 2019 12:59 pm, Stefan Reiter wrote:
> Also move check_cmdline, since check_running is its only user. Changes
> all uses of check_running in QemuServer, including mocking in snapshot
> tests.
>
> Signed-off-by: Stefan Reiter <s.reiter at proxmox.com>
> ---
> PVE/API2/Qemu.pm | 32 +++++++-------
> PVE/CLI/qm.pm | 13 +++---
> PVE/QemuConfig.pm | 65 ++++++++++++++++++++++++++-
> PVE/QemuMigrate.pm | 3 +-
> PVE/QemuServer.pm | 85 ++++++------------------------------
> PVE/QemuServer/Agent.pm | 3 +-
> PVE/QemuServer/ImportDisk.pm | 3 +-
> PVE/QemuServer/Memory.pm | 3 +-
> PVE/VZDump/QemuServer.pm | 7 +--
> test/snapshot-test.pm | 7 +--
> 10 files changed, 116 insertions(+), 105 deletions(-)
>
> diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
> index b2c0b0d..9912e4d 100644
> --- a/PVE/API2/Qemu.pm
> +++ b/PVE/API2/Qemu.pm
> @@ -556,7 +556,7 @@ __PACKAGE__->register_method({
>
> PVE::QemuConfig->check_protection($conf, $emsg);
>
> - die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
> + die "$emsg vm is running\n" if PVE::QemuConfig::check_running($vmid);
>
> my $realcmd = sub {
> PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
> @@ -1220,7 +1220,7 @@ my $update_vm_api = sub {
>
> return if !scalar(keys %{$conf->{pending}});
>
> - my $running = PVE::QemuServer::check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
>
> # apply pending changes
>
> @@ -1439,7 +1439,7 @@ __PACKAGE__->register_method({
>
> # early tests (repeat after locking)
> die "VM $vmid is running - destroy failed\n"
> - if PVE::QemuServer::check_running($vmid);
> + if PVE::QemuConfig::check_running($vmid);
>
> my $realcmd = sub {
> my $upid = shift;
> @@ -1447,7 +1447,7 @@ __PACKAGE__->register_method({
> syslog('info', "destroy VM $vmid: $upid\n");
> PVE::QemuConfig->lock_config($vmid, sub {
> die "VM $vmid is running - destroy failed\n"
> - if (PVE::QemuServer::check_running($vmid));
> + if (PVE::QemuConfig::check_running($vmid));
>
> PVE::QemuServer::destroy_vm($storecfg, $vmid, 1, $skiplock);
>
> @@ -2179,7 +2179,7 @@ __PACKAGE__->register_method({
> raise_param_exc({ skiplock => "Only root may use this option." })
> if $skiplock && $authuser ne 'root at pam';
>
> - die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
> + die "VM $vmid not running\n" if !PVE::QemuConfig::check_running($vmid);
>
> my $realcmd = sub {
> my $upid = shift;
> @@ -2349,7 +2349,7 @@ __PACKAGE__->register_method({
> die "VM is paused - cannot shutdown\n";
> }
>
> - die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
> + die "VM $vmid not running\n" if !PVE::QemuConfig::check_running($vmid);
>
> my $realcmd = sub {
> my $upid = shift;
> @@ -2413,7 +2413,7 @@ __PACKAGE__->register_method({
> raise_param_exc({ skiplock => "Only root may use this option." })
> if $skiplock && $authuser ne 'root at pam';
>
> - die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
> + die "VM $vmid not running\n" if !PVE::QemuConfig::check_running($vmid);
>
> die "Cannot suspend HA managed VM to disk\n"
> if $todisk && PVE::HA::Config::vm_is_ha_managed($vmid);
> @@ -2482,7 +2482,7 @@ __PACKAGE__->register_method({
> };
>
> die "VM $vmid not running\n"
> - if !$to_disk_suspended && !PVE::QemuServer::check_running($vmid, $nocheck);
> + if !$to_disk_suspended && !PVE::QemuConfig::check_running($vmid, $nocheck);
>
> my $realcmd = sub {
> my $upid = shift;
> @@ -2592,7 +2592,7 @@ __PACKAGE__->register_method({
>
> my $feature = extract_param($param, 'feature');
>
> - my $running = PVE::QemuServer::check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
>
> my $conf = PVE::QemuConfig->load_config($vmid);
>
> @@ -2739,7 +2739,7 @@ __PACKAGE__->register_method({
>
> PVE::Cluster::check_cfs_quorum();
>
> - my $running = PVE::QemuServer::check_running($vmid) || 0;
> + my $running = PVE::QemuConfig::check_running($vmid) || 0;
>
> # exclusive lock if VM is running - else shared lock is enough;
> my $shared_lock = $running ? 0 : 1;
> @@ -2753,7 +2753,7 @@ __PACKAGE__->register_method({
>
> PVE::QemuConfig->check_lock($conf);
>
> - my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
> + my $verify_running = PVE::QemuConfig::check_running($vmid) || 0;
>
> die "unexpected state change\n" if $verify_running != $running;
>
> @@ -3059,7 +3059,7 @@ __PACKAGE__->register_method({
>
> PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
>
> - my $running = PVE::QemuServer::check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
>
> PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
>
> @@ -3228,7 +3228,7 @@ __PACKAGE__->register_method({
> # try to detect errors early
> PVE::QemuConfig->check_lock($vmconf);
>
> - $res->{running} = PVE::QemuServer::check_running($vmid) ? 1:0;
> + $res->{running} = PVE::QemuConfig::check_running($vmid) ? 1:0;
>
> # if vm is not running, return target nodes where local storage is available
> # for offline migration
> @@ -3358,7 +3358,7 @@ __PACKAGE__->register_method({
>
> PVE::QemuConfig->check_lock($conf);
>
> - if (PVE::QemuServer::check_running($vmid)) {
> + if (PVE::QemuConfig::check_running($vmid)) {
> die "can't migrate running VM without --online\n" if !$param->{online};
> } else {
> warn "VM isn't running. Doing offline migration instead\n." if $param->{online};
> @@ -3654,7 +3654,7 @@ __PACKAGE__->register_method({
> push @$res, $item;
> }
>
> - my $running = PVE::QemuServer::check_running($vmid, 1) ? 1 : 0;
> + my $running = PVE::QemuConfig::check_running($vmid, 1) ? 1 : 0;
> my $current = {
> name => 'current',
> digest => $conf->{digest},
> @@ -4003,7 +4003,7 @@ __PACKAGE__->register_method({
> if PVE::QemuConfig->is_template($conf) && !$disk;
>
> die "you can't convert a VM to template if VM is running\n"
> - if PVE::QemuServer::check_running($vmid);
> + if PVE::QemuConfig::check_running($vmid);
>
> my $realcmd = sub {
> PVE::QemuServer::template_create($vmid, $conf, $disk);
> diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
> index 44beac9..4de0ae2 100755
> --- a/PVE/CLI/qm.pm
> +++ b/PVE/CLI/qm.pm
> @@ -21,6 +21,7 @@ use PVE::RPCEnvironment;
> use PVE::Exception qw(raise_param_exc);
> use PVE::Network;
> use PVE::GuestHelpers;
> +use PVE::QemuConfig;
> use PVE::QemuSchema;
> use PVE::QemuServer;
> use PVE::QemuServer::ImportDisk;
> @@ -310,7 +311,7 @@ __PACKAGE__->register_method ({
> last;
> } elsif ($line =~ /^resume (\d+)$/) {
> my $vmid = $1;
> - if (PVE::QemuServer::check_running($vmid, 1)) {
> + if (PVE::QemuConfig::check_running($vmid, 1)) {
> eval { PVE::QemuServer::vm_resume($vmid, 1, 1); };
> if ($@) {
> $tunnel_write->("ERR: resume failed - $@");
> @@ -350,18 +351,18 @@ __PACKAGE__->register_method ({
> my $vmid = $param->{vmid};
> my $timeout = $param->{timeout};
>
> - my $pid = PVE::QemuServer::check_running ($vmid);
> + my $pid = PVE::QemuConfig::check_running ($vmid);
> return if !$pid;
>
> print "waiting until VM $vmid stopps (PID $pid)\n";
>
> my $count = 0;
> - while ((!$timeout || ($count < $timeout)) && PVE::QemuServer::check_running ($vmid)) {
> + while ((!$timeout || ($count < $timeout)) && PVE::QemuConfig::check_running ($vmid)) {
> $count++;
> sleep 1;
> }
>
> - die "wait failed - got timeout\n" if PVE::QemuServer::check_running ($vmid);
> + die "wait failed - got timeout\n" if PVE::QemuConfig::check_running ($vmid);
>
> return undef;
> }});
> @@ -558,7 +559,7 @@ __PACKAGE__->register_method ({
> die "unable to find a serial interface\n" if !$iface;
> }
>
> - die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
> + die "VM $vmid not running\n" if !PVE::QemuConfig::check_running($vmid);
>
> my $socket = "/var/run/qemu-server/${vmid}.$iface";
>
> @@ -774,7 +775,7 @@ __PACKAGE__->register_method({
>
> PVE::QemuConfig->lock_config($vmid, sub {
> my $conf = PVE::QemuConfig->load_config ($vmid);
> - my $pid = PVE::QemuServer::check_running ($vmid);
> + my $pid = PVE::QemuConfig::check_running ($vmid);
> die "vm still running\n" if $pid;
>
> if (!$clean) {
> diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm
> index b63e57c..08cac38 100644
> --- a/PVE/QemuConfig.pm
> +++ b/PVE/QemuConfig.pm
> @@ -3,8 +3,12 @@ package PVE::QemuConfig;
> use strict;
> use warnings;
>
> +use File::stat;
> +use IO::File;
> +
> use PVE::AbstractConfig;
> use PVE::INotify;
> +use PVE::ProcFSTools;
> use PVE::QemuSchema;
> use PVE::QemuServer;
> use PVE::Storage;
> @@ -161,7 +165,7 @@ sub __snapshot_save_vmstate {
>
> sub __snapshot_check_running {
> my ($class, $vmid) = @_;
> - return PVE::QemuServer::check_running($vmid);
> + return check_running($vmid);
> }
>
> sub __snapshot_check_freeze_needed {
> @@ -392,4 +396,63 @@ sub __snapshot_foreach_volume {
> }
> # END implemented abstract methods from PVE::AbstractConfig
>
> +sub check_cmdline {
> + my ($pidfile, $pid) = @_;
> +
> + my $fh = IO::File->new("/proc/$pid/cmdline", "r");
> + if (defined($fh)) {
> + my $line = <$fh>;
> + $fh->close;
> + return undef if !$line;
> + my @param = split(/\0/, $line);
> +
> + my $cmd = $param[0];
> + return if !$cmd || ($cmd !~ m|kvm$| && $cmd !~ m@(?:^|/)qemu-system-[^/]+$@);
> +
> + for (my $i = 0; $i < scalar (@param); $i++) {
> + my $p = $param[$i];
> + next if !$p;
> + if (($p eq '-pidfile') || ($p eq '--pidfile')) {
> + my $p = $param[$i+1];
> + return 1 if $p && ($p eq $pidfile);
> + return undef;
> + }
> + }
> + }
> + return undef;
> +}
> +
> +sub check_running {
> + my ($vmid, $nocheck, $node) = @_;
> +
> + my $filename = PVE::QemuConfig->config_file($vmid, $node);
> +
> + die "unable to find configuration file for VM $vmid - no such machine\n"
> + if !$nocheck && ! -f $filename;
> +
> + my $pidfile = PVE::QemuSchema::pidfile_name($vmid);
maybe it would make more sense to keep a short version of check_running
in QemuServer (it's not really config related), and merge everything
after this point into check_cmdline (maybe with a better name), which
could then be put into the same file as the stuff from patch #4?

this would avoid a lot of churn, and check_running is more a method
operating on a (running) VM instance (which, for better or worse, is
basically what QemuServer.pm represents) than on its config.

also see comment on patch #5
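
to make that a bit more concrete, roughly something like the following
(completely untested sketch - "Helpers" and "vm_running_locally" are just
placeholder names for wherever/whatever the code from patch #4 ends up as,
and it assumes the same 'use File::stat; use IO::File;' as the current code):

# stays in PVE/QemuServer.pm - only the config existence check remains here
sub check_running {
    my ($vmid, $nocheck, $node) = @_;

    my $filename = PVE::QemuConfig->config_file($vmid, $node);

    die "unable to find configuration file for VM $vmid - no such machine\n"
	if !$nocheck && ! -f $filename;

    return PVE::QemuServer::Helpers::vm_running_locally($vmid);
}

# goes into the helper module from patch #4, merging the old check_cmdline
# with the pidfile handling that currently lives in check_running
sub vm_running_locally {
    my ($vmid) = @_;

    my $pidfile = PVE::QemuSchema::pidfile_name($vmid);

    my $fd = IO::File->new("<$pidfile") or return undef;
    my $st = stat($fd);
    my $line = <$fd>;
    close($fd);

    warn "pidfile '$pidfile' modified in future\n" if $st->mtime > time();

    return undef if !defined($line) || $line !~ m/^(\d+)$/;
    my $pid = $1;

    # only trust the pid if /proc/$pid/cmdline looks like a kvm/qemu-system
    # invocation pointing at exactly this pidfile
    return undef if !check_cmdline($pidfile, $pid);
    return undef if !PVE::ProcFSTools::check_process_running($pid);

    return $pid;
}
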
> +
> + if (my $fd = IO::File->new("<$pidfile")) {
> + my $st = stat($fd);
> + my $line = <$fd>;
> + close($fd);
> +
> + my $mtime = $st->mtime;
> + if ($mtime > time()) {
> + warn "file '$filename' modified in future\n";
> + }
> +
> + if ($line =~ m/^(\d+)$/) {
> + my $pid = $1;
> + if (check_cmdline($pidfile, $pid)) {
> + if (my $pinfo = PVE::ProcFSTools::check_process_running($pid)) {
> + return $pid;
> + }
> + }
> + }
> + }
> +
> + return undef;
> +}
> +
> 1;
> diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
> index 65f39b6..e6993d4 100644
> --- a/PVE/QemuMigrate.pm
> +++ b/PVE/QemuMigrate.pm
> @@ -10,6 +10,7 @@ use PVE::INotify;
> use PVE::Tools;
> use PVE::Cluster;
> use PVE::Storage;
> +use PVE::QemuConfig;
> use PVE::QemuServer;
> use Time::HiRes qw( usleep );
> use PVE::RPCEnvironment;
> @@ -211,7 +212,7 @@ sub prepare {
> PVE::QemuConfig->check_lock($conf);
>
> my $running = 0;
> - if (my $pid = PVE::QemuServer::check_running($vmid)) {
> + if (my $pid = PVE::QemuConfig::check_running($vmid)) {
> die "can't migrate running VM without --online\n" if !$online;
> $running = $pid;
>
> diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
> index 817394e..d5eba39 100644
> --- a/PVE/QemuServer.pm
> +++ b/PVE/QemuServer.pm
> @@ -2952,65 +2952,6 @@ sub check_local_storage_availability {
> return $nodehash
> }
>
> -sub check_cmdline {
> - my ($pidfile, $pid) = @_;
> -
> - my $fh = IO::File->new("/proc/$pid/cmdline", "r");
> - if (defined($fh)) {
> - my $line = <$fh>;
> - $fh->close;
> - return undef if !$line;
> - my @param = split(/\0/, $line);
> -
> - my $cmd = $param[0];
> - return if !$cmd || ($cmd !~ m|kvm$| && $cmd !~ m@(?:^|/)qemu-system-[^/]+$@);
> -
> - for (my $i = 0; $i < scalar (@param); $i++) {
> - my $p = $param[$i];
> - next if !$p;
> - if (($p eq '-pidfile') || ($p eq '--pidfile')) {
> - my $p = $param[$i+1];
> - return 1 if $p && ($p eq $pidfile);
> - return undef;
> - }
> - }
> - }
> - return undef;
> -}
> -
> -sub check_running {
> - my ($vmid, $nocheck, $node) = @_;
> -
> - my $filename = PVE::QemuConfig->config_file($vmid, $node);
> -
> - die "unable to find configuration file for VM $vmid - no such machine\n"
> - if !$nocheck && ! -f $filename;
> -
> - my $pidfile = PVE::QemuSchema::pidfile_name($vmid);
> -
> - if (my $fd = IO::File->new("<$pidfile")) {
> - my $st = stat($fd);
> - my $line = <$fd>;
> - close($fd);
> -
> - my $mtime = $st->mtime;
> - if ($mtime > time()) {
> - warn "file '$filename' modified in future\n";
> - }
> -
> - if ($line =~ m/^(\d+)$/) {
> - my $pid = $1;
> - if (check_cmdline($pidfile, $pid)) {
> - if (my $pinfo = PVE::ProcFSTools::check_process_running($pid)) {
> - return $pid;
> - }
> - }
> - }
> - }
> -
> - return undef;
> -}
> -
> sub vzlist {
>
> my $vzlist = config_list();
> @@ -3021,7 +2962,7 @@ sub vzlist {
> next if $de !~ m/^(\d+)\.pid$/;
> my $vmid = $1;
> next if !defined($vzlist->{$vmid});
> - if (my $pid = check_running($vmid)) {
> + if (my $pid = PVE::QemuConfig::check_running($vmid)) {
> $vzlist->{$vmid}->{pid} = $pid;
> }
> }
> @@ -4640,7 +4581,7 @@ sub qemu_block_set_io_throttle {
> $bps_max_length, $bps_rd_max_length, $bps_wr_max_length,
> $iops_max_length, $iops_rd_max_length, $iops_wr_max_length) = @_;
>
> - return if !check_running($vmid) ;
> + return if !PVE::QemuConfig::check_running($vmid) ;
>
> vm_mon_cmd($vmid, "block_set_io_throttle", device => $deviceid,
> bps => int($bps),
> @@ -4701,7 +4642,7 @@ sub __read_avail {
> sub qemu_block_resize {
> my ($vmid, $deviceid, $storecfg, $volid, $size) = @_;
>
> - my $running = check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
>
> $size = 0 if !PVE::Storage::volume_resize($storecfg, $volid, $size, $running);
>
> @@ -4714,7 +4655,7 @@ sub qemu_block_resize {
> sub qemu_volume_snapshot {
> my ($vmid, $deviceid, $storecfg, $volid, $snap) = @_;
>
> - my $running = check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
>
> if ($running && do_snapshots_with_qemu($storecfg, $volid)){
> vm_mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => $deviceid, name => $snap);
> @@ -4726,7 +4667,7 @@ sub qemu_volume_snapshot {
> sub qemu_volume_snapshot_delete {
> my ($vmid, $deviceid, $storecfg, $volid, $snap) = @_;
>
> - my $running = check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
>
> if($running) {
>
> @@ -5243,7 +5184,7 @@ sub vm_start {
> PVE::QemuConfig->check_lock($conf)
> if !($skiplock || $is_suspended);
>
> - die "VM $vmid already running\n" if check_running($vmid, undef, $migratedfrom);
> + die "VM $vmid already running\n" if PVE::QemuConfig::check_running($vmid, undef, $migratedfrom);
>
> # clean up leftover reboot request files
> eval { clear_reboot_request($vmid); };
> @@ -5555,7 +5496,7 @@ sub vm_qmp_command {
> }
>
> eval {
> - die "VM $vmid not running\n" if !check_running($vmid, $nocheck);
> + die "VM $vmid not running\n" if !PVE::QemuConfig::check_running($vmid, $nocheck);
> my $sname = PVE::QemuSchema::qmp_socket($vmid);
> if (-e $sname) { # test if VM is reasonambe new and supports qmp/qga
> my $qmpclient = PVE::QMPClient->new();
> @@ -5680,7 +5621,7 @@ sub vm_stop_cleanup {
> sub _do_vm_stop {
> my ($storecfg, $vmid, $skiplock, $nocheck, $timeout, $shutdown, $force, $keepActive) = @_;
>
> - my $pid = check_running($vmid, $nocheck);
> + my $pid = PVE::QemuConfig::check_running($vmid, $nocheck);
> return if !$pid;
>
> my $conf;
> @@ -5714,7 +5655,7 @@ sub _do_vm_stop {
> $timeout = 60 if !defined($timeout);
>
> my $count = 0;
> - while (($count < $timeout) && check_running($vmid, $nocheck)) {
> + while (($count < $timeout) && PVE::QemuConfig::check_running($vmid, $nocheck)) {
> $count++;
> sleep 1;
> }
> @@ -5743,7 +5684,7 @@ sub _do_vm_stop {
> $timeout = 10;
>
> my $count = 0;
> - while (($count < $timeout) && check_running($vmid, $nocheck)) {
> + while (($count < $timeout) && PVE::QemuConfig::check_running($vmid, $nocheck)) {
> $count++;
> sleep 1;
> }
> @@ -5766,7 +5707,7 @@ sub vm_stop {
> $force = 1 if !defined($force) && !$shutdown;
>
> if ($migratedfrom){
> - my $pid = check_running($vmid, $nocheck, $migratedfrom);
> + my $pid = PVE::QemuConfig::check_running($vmid, $nocheck, $migratedfrom);
> kill 15, $pid if $pid;
> my $conf = PVE::QemuConfig->load_config($vmid, $migratedfrom);
> vm_stop_cleanup($storecfg, $vmid, $conf, $keepActive, 0);
> @@ -5784,7 +5725,7 @@ sub vm_reboot {
> PVE::QemuConfig->lock_config($vmid, sub {
>
> # only reboot if running, as qmeventd starts it again on a stop event
> - return if !check_running($vmid);
> + return if !PVE::QemuConfig::check_running($vmid);
>
> create_reboot_request($vmid);
>
> @@ -5919,7 +5860,7 @@ sub vm_destroy {
>
> my $conf = PVE::QemuConfig->load_config($vmid);
>
> - if (!check_running($vmid)) {
> + if (!PVE::QemuConfig::check_running($vmid)) {
> destroy_vm($storecfg, $vmid, undef, $skiplock);
> } else {
> die "VM $vmid is running - destroy failed\n";
> diff --git a/PVE/QemuServer/Agent.pm b/PVE/QemuServer/Agent.pm
> index 586ac3a..8ffe3bc 100644
> --- a/PVE/QemuServer/Agent.pm
> +++ b/PVE/QemuServer/Agent.pm
> @@ -3,6 +3,7 @@ package PVE::QemuServer::Agent;
> use strict;
> use warnings;
>
> +use PVE::QemuConfig;
> use PVE::QemuServer;
> use MIME::Base64 qw(decode_base64);
> use JSON;
> @@ -40,7 +41,7 @@ sub agent_available {
>
> eval {
> die "No QEMU guest agent configured\n" if !defined($conf->{agent});
> - die "VM $vmid is not running\n" if !PVE::QemuServer::check_running($vmid);
> + die "VM $vmid is not running\n" if !PVE::QemuConfig::check_running($vmid);
> die "QEMU guest agent is not running\n" if !PVE::QemuServer::qga_check_running($vmid, 1);
> };
>
> diff --git a/PVE/QemuServer/ImportDisk.pm b/PVE/QemuServer/ImportDisk.pm
> index 5d391e6..fc2110f 100755
> --- a/PVE/QemuServer/ImportDisk.pm
> +++ b/PVE/QemuServer/ImportDisk.pm
> @@ -4,6 +4,7 @@ use strict;
> use warnings;
>
> use PVE::Storage;
> +use PVE::QemuConfig;
> use PVE::QemuServer;
> use PVE::Tools qw(run_command extract_param);
>
> @@ -52,7 +53,7 @@ sub do_import {
> $vm_conf->{pending}->{$drive_name} = $dst_volid;
> PVE::QemuConfig->write_config($vmid, $vm_conf);
>
> - my $running = PVE::QemuServer::check_running($vmid);
> + my $running = PVE::QemuConfig::check_running($vmid);
> if ($running) {
> my $errors = {};
> PVE::QemuServer::vmconfig_hotplug_pending($vmid, $vm_conf, $storecfg, $modified, $errors);
> diff --git a/PVE/QemuServer/Memory.pm b/PVE/QemuServer/Memory.pm
> index 5aceabf..3f4088b 100644
> --- a/PVE/QemuServer/Memory.pm
> +++ b/PVE/QemuServer/Memory.pm
> @@ -2,6 +2,7 @@ package PVE::QemuServer::Memory;
>
> use strict;
> use warnings;
> +use PVE::QemuConfig;
> use PVE::QemuServer;
> use PVE::Tools qw(run_command lock_file lock_file_full file_read_firstline dir_glob_foreach);
> use PVE::Exception qw(raise raise_param_exc);
> @@ -104,7 +105,7 @@ sub foreach_reverse_dimm {
> sub qemu_memory_hotplug {
> my ($vmid, $conf, $defaults, $opt, $value) = @_;
>
> - return $value if !PVE::QemuServer::check_running($vmid);
> + return $value if !PVE::QemuConfig::check_running($vmid);
>
> my $sockets = 1;
> $sockets = $conf->{sockets} if $conf->{sockets};
> diff --git a/PVE/VZDump/QemuServer.pm b/PVE/VZDump/QemuServer.pm
> index e02a069..91768de 100644
> --- a/PVE/VZDump/QemuServer.pm
> +++ b/PVE/VZDump/QemuServer.pm
> @@ -17,6 +17,7 @@ use PVE::Storage;
> use PVE::Tools;
> use PVE::VZDump;
>
> +use PVE::QemuConfig;
> use PVE::QemuServer;
>
> use base qw (PVE::VZDump::Plugin);
> @@ -56,7 +57,7 @@ sub prepare {
> if defined($conf->{name});
>
> $self->{vm_was_running} = 1;
> - if (!PVE::QemuServer::check_running($vmid)) {
> + if (!PVE::QemuConfig::check_running($vmid)) {
> $self->{vm_was_running} = 0;
> }
>
> @@ -137,7 +138,7 @@ sub prepare {
> sub vm_status {
> my ($self, $vmid) = @_;
>
> - my $running = PVE::QemuServer::check_running($vmid) ? 1 : 0;
> + my $running = PVE::QemuConfig::check_running($vmid) ? 1 : 0;
>
> return wantarray ? ($running, $running ? 'running' : 'stopped') : $running;
> }
> @@ -312,7 +313,7 @@ sub archive {
> my $resume_on_backup;
>
> my $skiplock = 1;
> - my $vm_is_running = PVE::QemuServer::check_running($vmid);
> + my $vm_is_running = PVE::QemuConfig::check_running($vmid);
> if (!$vm_is_running) {
> eval {
> $self->loginfo("starting kvm to execute backup task");
> diff --git a/test/snapshot-test.pm b/test/snapshot-test.pm
> index c95e7f3..53d08e6 100644
> --- a/test/snapshot-test.pm
> +++ b/test/snapshot-test.pm
> @@ -297,14 +297,14 @@ sub __snapshot_save_vmstate {
> $snap->{vmstate} = "somestorage:state-volume";
> $snap->{runningmachine} = "somemachine"
> }
> -# END mocked PVE::QemuConfig methods
> -
> -# BEGIN redefine PVE::QemuServer methods
>
> sub check_running {
> return $running;
> }
>
> +# END mocked PVE::QemuConfig methods
> +
> +# BEGIN redefine PVE::QemuServer methods
>
> sub do_snapshots_with_qemu {
> return 0;
> @@ -376,6 +376,7 @@ $qemu_config_module->mock('load_config', \&load_config);
> $qemu_config_module->mock('write_config', \&write_config);
> $qemu_config_module->mock('has_feature', \&has_feature);
> $qemu_config_module->mock('__snapshot_save_vmstate', \&__snapshot_save_vmstate);
> +$qemu_config_module->mock('check_running', \&check_running);
>
> # ignore existing replication config
> my $repl_config_module = new Test::MockModule('PVE::ReplicationConfig');
> --
> 2.20.1
>
>