[pve-devel] [PATCH v3 qemu-server 1/7] migration: only migrate disks used by the guest

Aaron Lauterer a.lauterer at proxmox.com
Thu Jun 1 15:53:36 CEST 2023


When scanning all configured storages for disk images belonging to the
VM, the migration could easily fail if a storage is enabled but not
available, even though the VM might not use that storage at all.

By skipping that scan and only looking at the disk images referenced in
the VM config, we can avoid those failures.
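
In code terms, this means the local volumes are now collected via
QemuServer::foreach_volid() on the VM config instead of listing every
storage. A minimal sketch (not the full hunk below):

    # volumes come from the VM config (incl. snapshots and pending),
    # not from PVE::Storage::vdisk_list() across all enabled storages
    PVE::QemuServer::foreach_volid($conf, sub {
        my ($volid, $attr) = @_;
        # per volume: decide ref type, target storage, bwlimit, ...
    });
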
Extra handling is needed for disk images currently in the 'pending'
section of the VM config, since those used to be picked up by the
storage scan as well. The information about each disk image (size,
format) that the scan provided now needs to be fetched explicitly.
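
Concretely, pending volumes get flagged in foreach_volid() and the
size/format lookup moves to volume_size_info(); abridged from the diff
below:

    # mark volumes from the pending section with an extra flag
    if (defined($conf->{pending}) && $conf->{pending}->%*) {
        PVE::QemuConfig->foreach_volume_full(
            $conf->{pending}, $include_opts, $test_volid, undef, 1);
    }

    # size and format used to come from vdisk_list(), now queried directly
    my ($size, $format) = PVE::Storage::volume_size_info($storecfg, $volid);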

The big behavioral change is that disk images not referenced in the VM
config file will be ignored. They are already orphans and used to be
migrated as well, but are now left where they are. The tests have been
adapted to the changed behavior.

Signed-off-by: Aaron Lauterer <a.lauterer at proxmox.com>
---
changes since v2:
- move handling of pending changes into QemuServer::foreach_volid
    This seems to not have any bad side effects
- style fixes
- use 'volume_size_info()' to get format and size of the image

 PVE/QemuMigrate.pm                    | 88 ++++++++-------------------
 PVE/QemuServer.pm                     |  9 ++-
 test/MigrationTest/QemuMigrateMock.pm |  9 +++
 test/run_qemu_migrate_tests.pl        | 12 ++--
 4 files changed, 50 insertions(+), 68 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 09cc1d8..163a721 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -149,6 +149,22 @@ sub lock_vm {
     return PVE::QemuConfig->lock_config($vmid, $code, @param);
 }
 
+sub target_storage_check_available {
+    my ($self, $storecfg, $targetsid, $volid) = @_;
+
+    if (!$self->{opts}->{remote}) {
+	# check if storage is available on target node
+	my $target_scfg = PVE::Storage::storage_check_enabled(
+	    $storecfg,
+	    $targetsid,
+	    $self->{node},
+	);
+	my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
+	die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
+	    if !$target_scfg->{content}->{$vtype};
+    }
+}
+
 sub prepare {
     my ($self, $vmid) = @_;
 
@@ -236,18 +252,7 @@ sub prepare {
 
 	$storages->{$targetsid} = 1;
 
-	if (!$self->{opts}->{remote}) {
-	    # check if storage is available on target node
-	    my $target_scfg = PVE::Storage::storage_check_enabled(
-		$storecfg,
-		$targetsid,
-		$self->{node},
-	    );
-	    my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
-
-	    die "$volid: content type '$vtype' is not available on storage '$targetsid'\n"
-		if !$target_scfg->{content}->{$vtype};
-	}
+	$self->target_storage_check_available($storecfg, $targetsid, $volid);
 
 	if ($scfg->{shared}) {
 	    # PVE::Storage::activate_storage checks this for non-shared storages
@@ -312,49 +317,6 @@ sub scan_local_volumes {
 	    $abort = 1;
 	};
 
-	my @sids = PVE::Storage::storage_ids($storecfg);
-	foreach my $storeid (@sids) {
-	    my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
-	    next if $scfg->{shared} && !$self->{opts}->{remote};
-	    next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1);
-
-	    # get list from PVE::Storage (for unused volumes)
-	    my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid, undef, 'images');
-
-	    next if @{$dl->{$storeid}} == 0;
-
-	    my $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $storeid);
-	    if (!$self->{opts}->{remote}) {
-		# check if storage is available on target node
-		my $target_scfg = PVE::Storage::storage_check_enabled(
-		    $storecfg,
-		    $targetsid,
-		    $self->{node},
-		);
-
-		die "content type 'images' is not available on storage '$targetsid'\n"
-		    if !$target_scfg->{content}->{images};
-
-	    }
-
-	    my $bwlimit = $self->get_bwlimit($storeid, $targetsid);
-
-	    PVE::Storage::foreach_volid($dl, sub {
-		my ($volid, $sid, $volinfo) = @_;
-
-		$local_volumes->{$volid}->{ref} = 'storage';
-		$local_volumes->{$volid}->{size} = $volinfo->{size};
-		$local_volumes->{$volid}->{targetsid} = $targetsid;
-		$local_volumes->{$volid}->{bwlimit} = $bwlimit;
-
-		# If with_snapshots is not set for storage migrate, it tries to use
-		# a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
-		# back to qcow2 is currently not possible.
-		$local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/);
-		$local_volumes->{$volid}->{format} = $volinfo->{format};
-	    });
-	}
-
 	my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
 	    : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
 	foreach my $volid (keys %{$replicatable_volumes}) {
@@ -396,17 +358,21 @@ sub scan_local_volumes {
 		$targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
 	    }
 
-	    # check target storage on target node if intra-cluster migration
-	    if (!$self->{opts}->{remote}) {
-		PVE::Storage::storage_check_enabled($storecfg, $targetsid, $self->{node});
-
-		return if $scfg->{shared};
-	    }
+	    return if $scfg->{shared} && !$self->{opts}->{remote};
+	    $self->target_storage_check_available($storecfg, $targetsid, $volid);
 
 	    $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
 	    $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_unused};
+	    $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_pending};
 	    $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};
 
+	    my $bwlimit = $self->get_bwlimit($sid, $targetsid);
+	    $local_volumes->{$volid}->{targetsid} = $targetsid;
+	    $local_volumes->{$volid}->{bwlimit} = $bwlimit;
+
+	    ($local_volumes->{$volid}->{size}, $local_volumes->{$volid}->{format})
+		= PVE::Storage::volume_size_info($storecfg, $volid);
+
 	    $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;
 
 	    $local_volumes->{$volid}->{drivename} = $attr->{drivename}
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index ab33aa3..f88d695 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4855,7 +4855,7 @@ sub foreach_volid {
     my $volhash = {};
 
     my $test_volid = sub {
-	my ($key, $drive, $snapname) = @_;
+	my ($key, $drive, $snapname, $pending) = @_;
 
 	my $volid = $drive->{file};
 	return if !$volid;
@@ -4888,6 +4888,8 @@ sub foreach_volid {
 	$volhash->{$volid}->{is_unused} //= 0;
 	$volhash->{$volid}->{is_unused} = 1 if $key =~ /^unused\d+$/;
 
+	$volhash->{$volid}->{is_pending} = 1 if $pending;
+
 	$volhash->{$volid}->{drivename} = $key if is_valid_drivename($key);
     };
 
@@ -4897,6 +4899,11 @@ sub foreach_volid {
     };
 
     PVE::QemuConfig->foreach_volume_full($conf, $include_opts, $test_volid);
+
+    if (defined($conf->{pending}) && $conf->{pending}->%*) {
+	PVE::QemuConfig->foreach_volume_full($conf->{pending}, $include_opts, $test_volid, undef, 1);
+    }
+
     foreach my $snapname (keys %{$conf->{snapshots}}) {
 	my $snap = $conf->{snapshots}->{$snapname};
 	PVE::QemuConfig->foreach_volume_full($snap, $include_opts, $test_volid, $snapname);
diff --git a/test/MigrationTest/QemuMigrateMock.pm b/test/MigrationTest/QemuMigrateMock.pm
index 94fe686..cec34b7 100644
--- a/test/MigrationTest/QemuMigrateMock.pm
+++ b/test/MigrationTest/QemuMigrateMock.pm
@@ -230,6 +230,15 @@ $MigrationTest::Shared::storage_module->mock(
 	}
 	return $res;
     },
+    volume_size_info => sub {
+	my ($scfg, $volid) = @_;
+	my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+
+	for my $v ($source_vdisks->{$storeid}->@*) {
+	    return wantarray ? ($v->{size}, $v->{format}, $v->{used}, $v->{parent}) : $v->{size}
+		if $v->{volid} eq $volid;
+	}
+    },
     vdisk_free => sub {
 	my ($scfg, $volid) = @_;
 
diff --git a/test/run_qemu_migrate_tests.pl b/test/run_qemu_migrate_tests.pl
index 090449f..fedbc32 100755
--- a/test/run_qemu_migrate_tests.pl
+++ b/test/run_qemu_migrate_tests.pl
@@ -708,7 +708,6 @@ my $tests = [
 	},
     },
     {
-	# FIXME: Maybe add orphaned drives as unused?
 	name => '149_running_orphaned_disk_targetstorage_zfs',
 	target => 'pve1',
 	vmid => 149,
@@ -729,10 +728,11 @@ my $tests = [
 	},
 	expected_calls => $default_expected_calls_online,
 	expected => {
-	    source_volids => {},
+	    source_volids => {
+		'local-dir:149/vm-149-disk-0.qcow2' => 1,
+	    },
 	    target_volids => {
 		'local-zfs:vm-149-disk-10' => 1,
-		'local-zfs:vm-149-disk-0' => 1,
 	    },
 	    vm_config => get_patched_config(149, {
 		scsi0 => 'local-zfs:vm-149-disk-10,format=raw,size=4G',
@@ -745,7 +745,6 @@ my $tests = [
 	},
     },
     {
-	# FIXME: Maybe add orphaned drives as unused?
 	name => '149_running_orphaned_disk',
 	target => 'pve1',
 	vmid => 149,
@@ -765,10 +764,11 @@ my $tests = [
 	},
 	expected_calls => $default_expected_calls_online,
 	expected => {
-	    source_volids => {},
+	    source_volids => {
+		'local-dir:149/vm-149-disk-0.qcow2' => 1,
+	    },
 	    target_volids => {
 		'local-lvm:vm-149-disk-10' => 1,
-		'local-dir:149/vm-149-disk-0.qcow2' => 1,
 	    },
 	    vm_config => get_patched_config(149, {
 		scsi0 => 'local-lvm:vm-149-disk-10,format=raw,size=4G',
-- 
2.30.2
