[pve-devel] [PATCH 7/7] add live storage migration with vm migration
Alexandre Derumier
aderumier at odiso.com
Thu Oct 20 02:35:16 CEST 2016
This allows migrating disks on local storage to storage on a remote node.
When the VM is started on the target node, new volumes are created and exposed through qemu's embedded NBD server.
qemu drive-mirror is launched on the source VM for each disk, with the NBD server as target.
When drive-mirror reaches 100% for each disk, we do not complete the block jobs yet.
Then we live migrate the VM to the destination node (drive-mirror keeps running at the same time).
Once the VM is live migrated (source VM paused, target VM paused), we complete the mirror block jobs.
When that is done, we stop the source VM and resume the target VM.
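For context, here is a minimal sketch of the QMP sequence this flow relies on (illustration only, not part of the patch; it assumes PVE::QemuServer::vm_mon_cmd as the monitor helper, and $host, $port and the drive name 'drive-scsi0' are placeholders):

    use JSON;  # for JSON::true

    # on the target VM: start the embedded NBD server and export the new
    # volume (done by 'qm start --targetstorage' on the remote node)
    PVE::QemuServer::vm_mon_cmd($vmid, 'nbd-server-start',
        addr => { type => 'inet', data => { host => $host, port => "$port" } });
    PVE::QemuServer::vm_mon_cmd($vmid, 'nbd-server-add',
        device => 'drive-scsi0', writable => JSON::true);

    # on the source VM: mirror each local disk to the NBD export
    PVE::QemuServer::vm_mon_cmd($vmid, 'drive-mirror',
        device => 'drive-scsi0',
        target => "nbd:$host:$port:exportname=drive-scsi0",
        sync   => 'full',
        format => 'nbd',
        mode   => 'existing');

    # only once all mirror jobs are ready and the RAM migration has
    # converged do we complete the jobs and switch over
    PVE::QemuServer::vm_mon_cmd($vmid, 'block-job-complete', device => 'drive-scsi0');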
Signed-off-by: Alexandre Derumier <aderumier at odiso.com>
---
PVE/API2/Qemu.pm | 7 ++++
PVE/QemuMigrate.pm | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 97 insertions(+), 6 deletions(-)
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 69ad938..6949412 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -2663,6 +2663,10 @@ __PACKAGE__->register_method({
description => "Allow to migrate VMs which use local devices. Only root may use this option.",
optional => 1,
},
+ targetstorage => get_standard_option('pve-storage-id', {
+ description => "Target storage.",
+ optional => 1,
+ }),
},
},
returns => {
@@ -2689,6 +2693,9 @@ __PACKAGE__->register_method({
my $vmid = extract_param($param, 'vmid');
+ raise_param_exc({ targetstorage => "Live storage migration can only be done online" })
+ if !$param->{online} && $param->{targetstorage};
+
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root at pam';
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 22a49ef..e058729 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -186,8 +186,10 @@ sub prepare {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
# check if storage is available on both nodes
+ my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
- PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
+ PVE::Storage::storage_check_node($self->{storecfg}, $targetsid, $self->{node});
if ($scfg->{shared}) {
# PVE::Storage::activate_storage checks this for non-shared storages
@@ -214,8 +216,6 @@ sub prepare {
sub sync_disks {
my ($self, $vmid) = @_;
- $self->log('info', "copying disk images");
-
my $conf = $self->{vmconf};
# local volumes which have been copied
@@ -290,6 +290,7 @@ sub sync_disks {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
# check if storage is available on both nodes
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $sid);
PVE::Storage::storage_check_node($self->{storecfg}, $sid, $self->{node});
@@ -362,14 +363,16 @@ sub sync_disks {
$self->log('warn', "$err");
}
- if ($self->{running} && !$sharedvm) {
- die "can't do online migration - VM uses local disks\n";
+ if ($self->{running} && !$sharedvm && !$self->{opts}->{targetstorage}) {
+ $self->{opts}->{targetstorage} = 'auto'; # use the same storage id on the remote node
}
if ($abort) {
die "can't migrate VM - check log\n";
}
+ return if $self->{running} && $self->{opts}->{targetstorage};
+
# additional checks for local storage
foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
@@ -387,6 +390,8 @@ sub sync_disks {
}
}
+ $self->log('info', "copying disk images");
+
foreach my $volid (keys %$local_volumes) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
push @{$self->{volumes}}, $volid;
@@ -396,6 +401,24 @@ sub sync_disks {
die "Failed to sync data - $@" if $@;
}
+sub cleanup_remotedisks {
+ my ($self) = @_;
+
+ foreach my $target_drive (keys %{$self->{target_drive}}) {
+
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
+
+ my $cmd = [@{$self->{rem_ssh}}, 'pvesm', 'free', "$storeid:$volname"];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+}
+
sub phase1 {
my ($self, $vmid) = @_;
@@ -452,7 +475,7 @@ sub phase2 {
$spice_ticket = $res->{ticket};
}
- push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
+ push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename, '--targetstorage', $self->{opts}->{targetstorage};
# we use TCP only for insecure migrations, as TCP ssh forward tunnels often
# appeared too late (they are hard, if not impossible, to check for)
@@ -496,6 +519,16 @@ sub phase2 {
elsif ($line =~ m/^spice listens on port (\d+)$/) {
$spice_port = int($1);
}
+ elsif ($line =~ m/^storage migration listens on nbd:(localhost|[\d\.]+|\[[\d\.:a-fA-F]+\]):(\d+):exportname=(\S+) volume:(\S+)$/) {
+ my $volid = $4;
+ my $nbd_uri = "nbd:$1:$2:exportname=$3";
+ my $targetdrive = $3;
+ $targetdrive =~ s/drive-//g;
+
+ $self->{target_drive}->{$targetdrive}->{volid} = $volid;
+ $self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
+
+ }
}, errfunc => sub {
my $line = shift;
$self->log('info', $line);
@@ -540,6 +573,18 @@ sub phase2 {
}
my $start = time();
+
+ if ($self->{opts}->{targetstorage}) {
+ $self->{storage_migration} = 1;
+
+ foreach my $drive (keys %{$self->{target_drive}}){
+ $self->log('info', "starting storage migration of drive $drive to $self->{target_drive}->{$drive}->{nbd_uri}");
+ PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $self->{target_drive}->{$drive}->{nbd_uri}, $vmid);
+ }
+
+ PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, 1); #wait until all drives are ready
+ }
+
$self->log('info', "starting online/live migration on $ruri");
$self->{livemigration} = 1;
@@ -739,6 +784,10 @@ sub phase2_cleanup {
}
# cleanup resources on target host
+ if ( $self->{storage_migration} ) {
+ PVE::QemuMigrate::cleanup_remotedisks($self);
+ }
+
my $nodename = PVE::INotify::nodename();
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
@@ -755,6 +804,9 @@ sub phase2_cleanup {
$self->{errors} = 1;
}
}
+
+
+
}
sub phase3 {
@@ -780,6 +832,23 @@ sub phase3_cleanup {
my $conf = $self->{vmconf};
return if $self->{phase2errors};
+ if ($self->{storage_migration}) {
+
+ eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid); }; # finish block jobs
+
+ if (my $err = $@) {
+ # what do we do? abort the migration?
+ PVE::QemuMigrate::cleanup_remotedisks($self);
+ } else {
+
+ foreach my $target_drive (keys %{$self->{target_drive}}) {
+ my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
+ $conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ }
+ }
+
# move config to remote node
my $conffile = PVE::QemuConfig->config_file($vmid);
my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
@@ -787,6 +856,7 @@ sub phase3_cleanup {
die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
if !rename($conffile, $newconffile);
+
if ($self->{livemigration}) {
# now that the config file has been moved, we can resume the vm on the target if live-migrating
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
@@ -834,6 +904,20 @@ sub phase3_cleanup {
$self->{errors} = 1;
}
+ # TODO: improve me
+ if($self->{storage_migration}) {
+ # delete the source volid?
+
+ # stop the nbd server on the remote vm
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+ }
+
# clear migrate lock
my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
$self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
--
2.1.4
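For reference, with this patch applied the feature would be triggered like this (CLI sketch; the vmid, node name and storage name are placeholders):

    qm migrate 100 targetnode --online --targetstorage targetstore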