[pve-devel] [PATCH v4 qemu-server 2/12] qemuserver: foreach_volid: include pending volumes

Aaron Lauterer a.lauterer at proxmox.com
Fri Jun 16 11:56:58 CEST 2023


Make it possible to optionally iterate over disks in the pending section
of VMs, similar to how snapshots are already handled.

This is useful for migration, for example: since the storages aren't
scanned anymore, we need to pick up disk images that are only referenced
in the pending section.
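
For illustration, a rough sketch of how a caller can make use of the new
parameter and the resulting attribute ($vmid and the surrounding setup
are placeholders; the $attr keys are the ones set by foreach_volid):

    use PVE::QemuConfig;
    use PVE::QemuServer;

    my $vmid = 100; # example VMID
    my $conf = PVE::QemuConfig->load_config($vmid);

    # the second parameter enables iteration over the pending section
    PVE::QemuServer::foreach_volid($conf, 1, sub {
        my ($volid, $attr) = @_;

        # disks only present in the pending section are flagged with
        # referenced_in_pending instead of referenced_in_config
        if ($attr->{referenced_in_pending} && !$attr->{referenced_in_config}) {
            # e.g. migration also has to handle this image
        }
    });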

All calling sites are adapted and enable it, except for
QemuConfig::get_replicatable_volumes, as including pending disks would
change the behavior of replication.

The following lists the calling sites and whether they should be fine
with the change (source [0]):

1. QemuMigrate: scan_local_volumes(): needed to include pending disk
   images
2. API2/Qemu.pm: check_vm_disks_local() for migration precondition:
   related to migration, so more consistent with pending
3. QemuConfig.pm: get_replicatable_volumes(): would change the behavior
   of replication, so it is not enabled for now.
4. QemuServer.pm: get_vm_volumes(): is used multiple times by:
4a. vm_stop_cleanup() to deactivate/unmap: should also be fine with
    including pending
4b. QemuMigrate.pm: in prepare(): part of migration, so more consistent
    with pending
4c. QemuMigrate.pm: in phase3_cleanup() for deactivation: part of
    migration, so more consistent with pending

[0] https://lists.proxmox.com/pipermail/pve-devel/2023-May/056868.html

Signed-off-by: Aaron Lauterer <a.lauterer at proxmox.com>
---
changes since v3:

added as an intermediate patch for a better git history, as suggested
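
One note on the new pending hunk in PVE/QemuServer.pm: like the existing
snapshot loop, which hands $snapname through foreach_volume_full to
$test_volid, the pending loop hands through the trailing `undef, 1`, so
the callback sees an undefined snapshot name and a true $pending flag:

    # pending section: no snapshot name, but the pending flag set, i.e.
    # $test_volid gets called as ($key, $drive, undef, 1)
    PVE::QemuConfig->foreach_volume_full(
        $conf->{pending}, $include_opts, $test_volid, undef, 1);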

 PVE/API2/Qemu.pm   |  2 +-
 PVE/QemuConfig.pm  |  2 +-
 PVE/QemuMigrate.pm |  2 +-
 PVE/QemuServer.pm  | 15 +++++++++++----
 4 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c92734a..37f78fe 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -4164,7 +4164,7 @@ my $check_vm_disks_local = sub {
     my $local_disks = {};
 
     # add some more information to the disks e.g. cdrom
-    PVE::QemuServer::foreach_volid($vmconf, sub {
+    PVE::QemuServer::foreach_volid($vmconf, 1, sub {
 	my ($volid, $attr) = @_;
 
 	my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm
index 10e6929..5e46db2 100644
--- a/PVE/QemuConfig.pm
+++ b/PVE/QemuConfig.pm
@@ -161,7 +161,7 @@ sub get_replicatable_volumes {
 	$volhash->{$volid} = 1;
     };
 
-    PVE::QemuServer::foreach_volid($conf, $test_volid);
+    PVE::QemuServer::foreach_volid($conf, undef, $test_volid);
 
     return $volhash;
 }
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 5f4f402..d979211 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -413,7 +413,7 @@ sub scan_local_volumes {
 		if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
 	};
 
-	PVE::QemuServer::foreach_volid($conf, sub {
+	PVE::QemuServer::foreach_volid($conf, 1, sub {
 	    my ($volid, $attr) = @_;
 	    eval { $test_volid->($volid, $attr); };
 	    if (my $err = $@) {
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 6cbaf87..33acef6 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4855,12 +4855,12 @@ sub set_migration_caps {
 }
 
 sub foreach_volid {
-    my ($conf, $func, @param) = @_;
+    my ($conf, $include_pending, $func, @param) = @_;
 
     my $volhash = {};
 
     my $test_volid = sub {
-	my ($key, $drive, $snapname) = @_;
+	my ($key, $drive, $snapname, $pending) = @_;
 
 	my $volid = $drive->{file};
 	return if !$volid;
@@ -4876,11 +4876,13 @@ sub foreach_volid {
 	$volhash->{$volid}->{shared} = 1 if $drive->{shared};
 
 	$volhash->{$volid}->{referenced_in_config} //= 0;
-	$volhash->{$volid}->{referenced_in_config} = 1 if !defined($snapname);
+	$volhash->{$volid}->{referenced_in_config} = 1 if !defined($snapname) && !defined($pending);
 
 	$volhash->{$volid}->{referenced_in_snapshot}->{$snapname} = 1
 	    if defined($snapname);
 
+	$volhash->{$volid}->{referenced_in_pending} = 1 if defined($pending);
+
 	my $size = $drive->{size};
 	$volhash->{$volid}->{size} //= $size if $size;
 
@@ -4902,6 +4904,11 @@ sub foreach_volid {
     };
 
     PVE::QemuConfig->foreach_volume_full($conf, $include_opts, $test_volid);
+
+    if ($include_pending && defined($conf->{pending}) && $conf->{pending}->%*) {
+	PVE::QemuConfig->foreach_volume_full($conf->{pending}, $include_opts, $test_volid, undef, 1);
+    }
+
     foreach my $snapname (keys %{$conf->{snapshots}}) {
 	my $snap = $conf->{snapshots}->{$snapname};
 	PVE::QemuConfig->foreach_volume_full($snap, $include_opts, $test_volid, $snapname);
@@ -6149,7 +6156,7 @@ sub get_vm_volumes {
     my ($conf) = @_;
 
     my $vollist = [];
-    foreach_volid($conf, sub {
+    foreach_volid($conf, 1, sub {
 	my ($volid, $attr) = @_;
 
 	return if $volid =~ m|^/|;
-- 
2.39.2