[pve-devel] [PATCH qemu-server] PVE/QemuMigrate.pm: use replication job, transfer replication state

Fabian Grünbichler f.gruenbichler at proxmox.com
Tue Jun 20 11:31:57 CEST 2017


On Tue, Jun 20, 2017 at 11:01:24AM +0200, Dietmar Maurer wrote:
> Signed-off-by: Dietmar Maurer <dietmar at proxmox.com>
> ---
>  PVE/QemuMigrate.pm | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 53 insertions(+), 1 deletion(-)
> 
> diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
> index bf0bb44..85fa6ff 100644
> --- a/PVE/QemuMigrate.pm
> +++ b/PVE/QemuMigrate.pm
> @@ -13,6 +13,9 @@ use PVE::Storage;
>  use PVE::QemuServer;
>  use Time::HiRes qw( usleep );
>  use PVE::RPCEnvironment;
> +use PVE::ReplicationConfig;
> +use PVE::ReplicationState;
> +use JSON;
>  
>  use base qw(PVE::AbstractMigrate);
>  
> @@ -379,16 +382,30 @@ sub sync_disks {
>  	    }
>  	}
>  
> +	my $rep_volumes;
> +
>  	$self->log('info', "copying disk images");
>  
> +	my $rep_cfg = PVE::ReplicationConfig->new();
> +
> +	if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
> +	    my $start_time = time();
> +	    my $logfunc = sub { my ($msg) = @_;  $self->log('info', $msg); };
> +	    $rep_volumes = PVE::Replication::run_replication(
> +	       'PVE::QemuConfig', $jobcfg, $start_time, $start_time, $logfunc);
> +	}
> +
>  	foreach my $volid (keys %$local_volumes) {
>  	    my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
>  	    if ($self->{running} && $self->{opts}->{targetstorage} && $local_volumes->{$volid} eq 'config') {
> +		die "can't live migrate replicated volume - feature not implemented\n" if $rep_volumes->{$volid};

this check should be before the loop, and before run_replication is
called. I think we probably need to get the replicatable volumes before
the loop in line 340 and compare them there (the config is locked and we
hold the replicate<->migrate lock as well, so nothing can change in
between anyway).

while you are there, the die in line 344 should be downgraded to a
warning that sets $abort ;)
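
something like the following, untested sketch - note that
get_replicatable_volumes and its exact signature are an assumption
based on the current PVE::AbstractConfig code, and $conf/$abort are the
variables already used by the checks around line 340 (the $rep_cfg
setup would have to move up accordingly):

    my $replicatable_volumes = {};
    if ($rep_cfg->find_local_replication_job($vmid, $self->{node})) {
        $replicatable_volumes = PVE::QemuConfig->get_replicatable_volumes(
            $self->{storecfg}, $vmid, $conf);
    }

    foreach my $volid (keys %$local_volumes) {
        next if !$replicatable_volumes->{$volid};
        if ($self->{running} && $self->{opts}->{targetstorage}
            && $local_volumes->{$volid} eq 'config') {
            # warn and set $abort instead of dying inside the copy loop
            $self->log('warn', "can't live migrate replicated volume"
                . " '$volid' - feature not implemented");
            $abort = 1;
        }
    }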

>  		push @{$self->{online_local_volumes}}, $volid;
>  	    } else {
> +		next if $rep_volumes->{$volid};
>  		push @{$self->{volumes}}, $volid;
>  		my $insecure = $self->{opts}->{migration_type} eq 'insecure';
> -		PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid, undef, undef, undef, undef, $insecure);
> +		PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{ssh_info}, $sid,
> +					      undef, undef, undef, undef, $insecure);

this formatting change is actually not related ;)

>  	    }
>  	}
>      };
> @@ -833,6 +850,35 @@ sub phase3 {
>      }
>  }
>  
> +
> +# transfer replication state for vmid to migration target node.
> +my $transfer_replication_state = sub {
> +    my ($self, $vmid) = @_;
> +
> +    my $stateobj = PVE::ReplicationState::read_state();
> +
> +    if (defined($stateobj->{$vmid})) {
> +	# This has to be quoted when we run it over ssh.
> +	my $state = PVE::Tools::shellquote(encode_json($stateobj->{$vmid}));
> +
> +	my $cmd = [ @{$self->{rem_ssh}}, 'pvesr', 'set-state', $vmid, $state];
> +	$self->cmd($cmd);
> +    }
> +
> +    my $transfer_job = sub {
> +	my $rep_cfg = PVE::ReplicationConfig->new();
> +	my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node});
> +
> +	return if !$jobcfg;
> +
> +	$jobcfg->{target} = PVE::INotify::nodename();
> +
> +	$rep_cfg->write();
> +    };
> +
> +    PVE::ReplicationConfig::lock($transfer_job);
> +};
> +
>  sub phase3_cleanup {
>      my ($self, $vmid, $err) = @_;
>  
> @@ -863,6 +909,12 @@ sub phase3_cleanup {
>      die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
>          if !rename($conffile, $newconffile);
>  
> +    eval { $transfer_replication_state->($self, $vmid); };
> +    if (my $err = $@) {
> +	$self->log('err', "transfer replication state/job failed - $err");
> +	$self->{errors} = 1;
> +    }
> +

shouldn't this happen before moving the config file to the other node?
we don't hold the guest_migration_lock on the target node AFAICT, and as
soon as the config is moved, a replication job could be triggered on the
target node (because replication does not check_lock).
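
i.e., something like this ordering instead (sketch - just the existing
code reordered):

    # transfer replication state/job while we still own the config
    eval { $transfer_replication_state->($self, $vmid); };
    if (my $err = $@) {
        $self->log('err', "transfer replication state/job failed - $err");
        $self->{errors} = 1;
    }

    # only now hand the config over to the target node
    die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
        if !rename($conffile, $newconffile);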

>      if ($self->{livemigration}) {
>  	if ($self->{storage_migration}) {
>  	    # remove drives referencing the nbd server from source
> -- 
> 2.11.0
> 