[pve-devel] [PATCH 6/8] add live storage migration with vm migration

Alexandre Derumier aderumier at odiso.com
Tue Jan 3 15:03:17 CET 2017


This allows migrating disks on local storage to a storage on the remote node.

When the VM starts on the target node, new volumes are created and exposed through qemu's embedded NBD server.
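
For illustration, the export on the target side boils down to qemu's
nbd-server-start / nbd-server-add QMP commands, roughly like below
(a sketch only; the vm_mon_cmd wrapper exists in PVE::QemuServer, but
host, port and drive names are made-up values, and the real code picks
a free migration port):

    use JSON;
    use PVE::QemuServer;

    my ($vmid, $host, $port) = (100, '192.168.0.2', 60000);

    # start the embedded NBD server inside the target qemu process
    PVE::QemuServer::vm_mon_cmd($vmid, 'nbd-server-start',
        addr => { type => 'inet', data => { host => $host, port => "$port" } });

    # export each freshly created volume; writable, since drive-mirror
    # on the source node will write into it
    foreach my $device ('drive-virtio0', 'drive-scsi0') {
        PVE::QemuServer::vm_mon_cmd($vmid, 'nbd-server-add',
            device => $device, writable => JSON::true);
    }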

qemu drive-mirror is launched on the source VM for each disk, with the NBD server as target.

When drive-mirror reaches 100% for a disk, we do not complete the block job yet and begin mirroring the next disk.
(The mirror jobs run in parallel, but we start them one by one to avoid storage and network overload.)
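
On the source side this translates to one drive-mirror QMP command per
disk, pointed at the NBD export (again a sketch; the argument names
follow the QMP schema, the URI is a made-up example):

    # mirror one local disk to the NBD export on the target node
    my $nbd_uri = 'nbd:192.168.0.2:60000:exportname=drive-virtio0';

    PVE::QemuServer::vm_mon_cmd($vmid, 'drive-mirror',
        device => 'drive-virtio0',
        target => $nbd_uri,
        format => 'nbd',        # target is an NBD URI
        sync   => 'full',       # copy the whole disk, not just dirty blocks
        mode   => 'existing');  # the target volume was already allocated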

Then we live-migrate the VM to the destination node (drive-mirror keeps running at the same time).

When the VM is live-migrated (source VM paused, target VM paused), we complete the mirror block jobs.

When that is done, we stop the source VM and resume the target VM.
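
Completing the jobs then amounts to one block-job-complete per disk once
both sides are paused (sketch; $jobs stands for the hash that
qemu_drive_mirror() fills, see the diff below):

    # pivot each drive to its NBD target; qemu_drive_mirror_monitor()
    # then polls query-block-jobs until the jobs have disappeared
    foreach my $device (sort keys %$jobs) {
        PVE::QemuServer::vm_mon_cmd($vmid, 'block-job-complete',
            device => $device);
    }

With this series applied, the whole flow can be triggered with something
like:

    qm migrate 100 targetnode --online --targetstorage local-lvm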

Signed-off-by: Alexandre Derumier <aderumier at odiso.com>
---
 PVE/API2/Qemu.pm   |   7 +++
 PVE/QemuMigrate.pm | 122 +++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 121 insertions(+), 8 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 3e4e7f7..90d31f6 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -2724,6 +2724,10 @@ __PACKAGE__->register_method({
 		description => "CIDR of the (sub) network that is used for migration.",
 		optional => 1,
 	    },
+	    targetstorage => get_standard_option('pve-storage-id', {
+		description => "Target storage.",
+		optional => 1,
+	    }),
 	},
     },
     returns => {
@@ -2750,6 +2754,9 @@ __PACKAGE__->register_method({
 
 	my $vmid = extract_param($param, 'vmid');
 
+	raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+	    if !$param->{online} && $param->{targetstorage};
+
 	raise_param_exc({ force => "Only root may use this option." })
 	    if $param->{force} && $authuser ne 'root at pam';
 
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 76ae55e..e099a42 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -186,8 +186,10 @@ sub prepare {
 	my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
 
 	# check if storage is available on both nodes
+	my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+
 	my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
-	PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+	PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
 
 	if ($scfg->{shared}) {
 	    # PVE::Storage::activate_storage checks this for non-shared storages
@@ -214,8 +216,6 @@ sub prepare {
 sub sync_disks {
     my ($self, $vmid) = @_;
 
-    $self->log('info', "copying disk images");
-
     my $conf = $self->{vmconf};
 
     # local volumes which have been copied
@@ -290,6 +290,7 @@ sub sync_disks {
 
 	    my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
 
+	    my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
 	    # check if storage is available on both nodes
 	    my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
 	    PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
@@ -312,7 +313,7 @@ sub sync_disks {
 		# exceptions: 'zfspool' or 'qcow2' files (on directory storage)
 
 		my $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
-
+		die "online storage migration not possible if snapshot exists\n" if $self->{running};
 		if (!($scfg->{type} eq 'zfspool' || $format eq 'qcow2')) {
 		    die "non-migratable snapshot exists\n";
 		}
@@ -362,8 +363,8 @@ sub sync_disks {
 	    $self->log('warn', "$err");
 	}
 
-	if ($self->{running} && !$sharedvm) {
-	    die "can't do online migration - VM uses local disks\n";
+	if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
+	    $self->{opts}->{targetstorage} = 1; # '1' is a sentinel: use the same storage id on the remote node
 	}
 
 	if ($abort) {
@@ -387,15 +388,39 @@ sub sync_disks {
 	    }
 	}
 
+	$self->log('info', "copying disk images");
+
 	foreach my $volid (keys %$local_volumes) {
 	    my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
-	    push @{$self->{volumes}}, $volid;
-	    PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
+	    if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
+		push @{$self->{online_local_volumes}}, $volid;
+	    } else {
+		push @{$self->{volumes}}, $volid;
+		PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid);
+	    }
 	}
     };
     die "Failed to sync data - $@" if $@;
 }
 
+sub cleanup_remotedisks {
+    my ($self) = @_;
+
+    foreach my $target_drive (keys %{$self->{target_drive}}) {
+
+	my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+	my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
+
+	my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
+
+	eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+	if (my $err = $@) {
+	    $self->log('err', $err);
+	    $self->{errors} = 1;
+	}
+    }
+}
+
 sub phase1 {
     my ($self, $vmid) = @_;
 
@@ -482,6 +507,10 @@ sub phase2 {
 	push @$cmd, '--machine', $self->{forcemachine};
     }
 
+    if ($self->{opts}->{targetstorage}) {
+	push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+    }
+
     my $spice_port;
 
     # Note: We try to keep $spice_ticket secret (do not pass via command line parameter)
@@ -507,6 +536,16 @@ sub phase2 {
         elsif ($line =~ m/^spice listens on port (\d+)$/) {
 	    $spice_port = int($1);
 	}
+        elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
+	    my $volid = $4;
+	    my $nbd_uri = "nbd:$1:$2:exportname=$3";
+	    my $targetdrive = $3;
+	    $targetdrive =~ s/drive-//g;
+
+	    $self->{target_drive}->{$targetdrive}->{volid} = $volid;
+	    $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
+
+	}
     }, errfunc => sub {
 	my $line = shift;
 	$self->log('info', $line);
@@ -551,6 +590,19 @@ sub phase2 {
     }
 
     my $start = time();
+
+    if ($self->{opts}->{targetstorage}) {
+	$self->{storage_migration} = 1;
+	$self->{storage_migration_jobs} = {};
+	$self->log('info', "starting storage migration");
+
+	die "the number of destination local disk is not equal to number of source local disk" if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
+	foreach my $drive (keys %{$self->{target_drive}}){
+	    $self->log('info', "$drive: start migration to to $self->{target_drive}->{$drive}->{nbd_uri}");
+	    PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $self->{target_drive}->{$drive}->{nbd_uri}, $vmid, undef, $self->{storage_migration_jobs}, 1);
+	}
+    }
+
     $self->log('info', "starting online/live migration on $ruri");
     $self->{livemigration} = 1;
 
@@ -750,6 +802,19 @@ sub phase2_cleanup {
     }
 
     # cleanup ressources on target host
+    if ( $self->{storage_migration} ) {
+
+	eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
+	if (my $err = $@) {
+	    $self->log('err', $err);
+	}
+
+	eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+	if (my $err = $@) {
+	    $self->log('err', $err);
+	}
+    }
+
     my $nodename = PVE::INotify::nodename();
  
     my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
@@ -791,6 +856,24 @@ sub phase3_cleanup {
     my $conf = $self->{vmconf};
     return if $self->{phase2errors};
 
+    if ($self->{storage_migration}) {
+
+	eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); }; #finish block-job
+
+	if (my $err = $@) {
+	    eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
+	    eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
+	    die "Failed to completed storage migration\n";
+	} else {
+
+	    foreach my $target_drive (keys %{$self->{target_drive}}) {
+		my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+		$conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
+		PVE::QemuConfig->write_config($vmid, $conf);
+	    }
+	}
+    }
+
     # move config to remote node
     my $conffile = PVE::QemuConfig->config_file($vmid);
     my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
@@ -845,6 +928,29 @@ sub phase3_cleanup {
 	$self->{errors} = 1;
     }
 
+    if($self->{storage_migration}) {
+	# destroy local copies
+	my $volids = $self->{online_local_volumes};
+
+	foreach my $volid (@$volids) {
+	    eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
+	    if (my $err = $@) {
+		$self->log('err', "removing local copy of '$volid' failed - $err");
+		$self->{errors} = 1;
+		last if $err =~ /^interrupted by signal$/;
+	    }
+	}
+
+	# stop the nbd server on the remote vm
+	my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
+
+	eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+	if (my $err = $@) {
+	    $self->log('err', $err);
+	    $self->{errors} = 1;
+	}
+    }
+
     # clear migrate lock
     my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
     $self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
-- 
2.1.4
