[pve-devel] [PATCH qemu-server v7 7/7] qm: add remote-migrate command
Fabian Grünbichler
f.gruenbichler at proxmox.com
Thu Nov 17 14:33:46 CET 2022
which wraps the remote_migrate_vm API endpoint, but itself performs the
precondition checks that can be done up front.
this now just leaves the fingerprint retrieval and target node name lookup
to the synchronous part of the API endpoint, which should be doable in <30s.
an example invocation:
$ qm remote-migrate 1234 4321 'host=123.123.123.123,apitoken=PVEAPIToken=user at pve!incoming=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee,fingerprint=aa:bb:cc:dd:ee:ff:aa:bb:cc:dd:ee:ff:aa:bb:cc:dd:ee:ff:aa:bb:cc:dd:ee:ff:aa:bb:cc:dd:ee:ff:aa:bb' --target-bridge vmbr0 --target-storage zfs-a:rbd-b,nfs-c:dir-d,zfs-e --online
will migrate the local VM 1234 to the host 123.123.123.123 using the
given API token, mapping the VMID to 4321 on the target cluster, all its
virtual NICs to the target bridge 'vmbr0', any volumes on storage
zfs-a to storage rbd-b, any on storage nfs-c to storage dir-d, and any
other volumes to storage zfs-e. the source VM will be stopped, but will
remain on the source node/cluster after the migration has finished.
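for illustration, a rough sketch of how the storage map from the example
above ends up parsed on the client side (assuming the usual
PVE::JSONSchema::parse_idmap semantics, cf. the hunk in PVE/CLI/qm.pm below):

    use PVE::JSONSchema;

    # parse the --target-storage value from the example invocation above
    my $storagemap = PVE::JSONSchema::parse_idmap(
        'zfs-a:rbd-b,nfs-c:dir-d,zfs-e', 'pve-storage-id');

    # expected structure (sketch):
    #   $storagemap->{entries} = { 'zfs-a' => 'rbd-b', 'nfs-c' => 'dir-d' }
    #   $storagemap->{default} = 'zfs-e'
    # passing just '1' instead would set $storagemap->{identity}, which
    # remote migration rejects ("remote migration requires explicit
    # storage mapping!")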
Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
---
Notes:
v7:
- fix example in commit message
- rebase on top of PVE::CLI::qm changes
v6:
- mark as experimental
- drop `with-local-disks` parameter from API, always set to true
- add example invocation to commit message
v5: rename to 'remote-migrate'
 PVE/API2/Qemu.pm |  31 -------------
 PVE/CLI/qm.pm    | 113 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 113 insertions(+), 31 deletions(-)
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 6836c557..b0c40fa5 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -4543,17 +4543,6 @@ __PACKAGE__->register_method({
$param->{online} = 0;
}
- # FIXME: fork worker hear to avoid timeout? or poll these periodically
- # in pvestatd and access cached info here? all of the below is actually
- # checked at the remote end anyway once we call the mtunnel endpoint,
- # we could also punt it to the client and not do it here at all..
- my $resources = $api_client->get("/cluster/resources", { type => 'vm' });
- if (grep { defined($_->{vmid}) && $_->{vmid} eq $target_vmid } @$resources) {
- raise_param_exc({ target_vmid => "Guest with ID '$target_vmid' already exists on remote cluster" });
- }
-
- my $storages = $api_client->get("/nodes/localhost/storage", { enabled => 1 });
-
my $storecfg = PVE::Storage::config();
my $target_storage = extract_param($param, 'target-storage');
my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') };
@@ -4565,26 +4554,6 @@ __PACKAGE__->register_method({
raise_param_exc({ 'target-bridge' => "failed to parse bridge map: $@" })
if $@;
- my $check_remote_storage = sub {
- my ($storage) = @_;
- my $found = [ grep { $_->{storage} eq $storage } @$storages ];
- die "remote: storage '$storage' does not exist!\n"
- if !@$found;
-
- $found = @$found[0];
-
- my $content_types = [ PVE::Tools::split_list($found->{content}) ];
- die "remote: storage '$storage' cannot store images\n"
- if !grep { $_ eq 'images' } @$content_types;
- };
-
- foreach my $target_sid (values %{$storagemap->{entries}}) {
- $check_remote_storage->($target_sid);
- }
-
- $check_remote_storage->($storagemap->{default})
- if $storagemap->{default};
-
die "remote migration requires explicit storage mapping!\n"
if $storagemap->{identity};
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index 6655842e..66feecce 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -15,6 +15,7 @@ use POSIX qw(strftime);
use Term::ReadLine;
use URI::Escape;
+use PVE::APIClient::LWP;
use PVE::Cluster;
use PVE::Exception qw(raise_param_exc);
use PVE::GuestHelpers;
@@ -159,6 +160,117 @@ __PACKAGE__->register_method ({
return;
}});
+
+__PACKAGE__->register_method({
+ name => 'remote_migrate_vm',
+ path => 'remote_migrate_vm',
+ method => 'POST',
+ description => "Migrate virtual machine to a remote cluster. Creates a new migration task. EXPERIMENTAL feature!",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ 'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }),
+ 'target-endpoint' => get_standard_option('proxmox-remote', {
+ description => "Remote target endpoint",
+ }),
+ online => {
+ type => 'boolean',
+ description => "Use online/live migration if VM is running. Ignored if VM is stopped.",
+ optional => 1,
+ },
+ delete => {
+ type => 'boolean',
+ description => "Delete the original VM and related data after successful migration. By default the original VM is kept on the source cluster in a stopped state.",
+ optional => 1,
+ default => 0,
+ },
+ 'target-storage' => get_standard_option('pve-targetstorage', {
+ completion => \&PVE::QemuServer::complete_migration_storage,
+ optional => 0,
+ }),
+ 'target-bridge' => {
+ type => 'string',
+ description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. Providing the special value '1' will map each source bridge to itself.",
+ format => 'bridge-pair-list',
+ },
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $source_vmid = $param->{vmid};
+ my $target_endpoint = $param->{'target-endpoint'};
+ my $target_vmid = $param->{'target-vmid'} // $source_vmid;
+
+ my $remote = PVE::JSONSchema::parse_property_string('proxmox-remote', $target_endpoint);
+
+ # TODO: move this as helper somewhere appropriate?
+ my $conn_args = {
+ protocol => 'https',
+ host => $remote->{host},
+ port => $remote->{port} // 8006,
+ apitoken => $remote->{apitoken},
+ };
+
+ $conn_args->{cached_fingerprints} = { uc($remote->{fingerprint}) => 1 }
+ if defined($remote->{fingerprint});
+
+ my $api_client = PVE::APIClient::LWP->new(%$conn_args);
+ my $resources = $api_client->get("/cluster/resources", { type => 'vm' });
+ if (grep { defined($_->{vmid}) && $_->{vmid} eq $target_vmid } @$resources) {
+ raise_param_exc({ target_vmid => "Guest with ID '$target_vmid' already exists on remote cluster" });
+ }
+
+ my $storages = $api_client->get("/nodes/localhost/storage", { enabled => 1 });
+
+ my $storecfg = PVE::Storage::config();
+ my $target_storage = $param->{'target-storage'};
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') };
+ raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+ if $@;
+
+ my $check_remote_storage = sub {
+ my ($storage) = @_;
+ my $found = [ grep { $_->{storage} eq $storage } @$storages ];
+ die "remote: storage '$storage' does not exist!\n"
+ if !@$found;
+
+ $found = @$found[0];
+
+ my $content_types = [ PVE::Tools::split_list($found->{content}) ];
+ die "remote: storage '$storage' cannot store images\n"
+ if !grep { $_ eq 'images' } @$content_types;
+ };
+
+ foreach my $target_sid (values %{$storagemap->{entries}}) {
+ $check_remote_storage->($target_sid);
+ }
+
+ $check_remote_storage->($storagemap->{default})
+ if $storagemap->{default};
+
+ return PVE::API2::Qemu->remote_migrate_vm($param);
+ }});
+
__PACKAGE__->register_method ({
name => 'status',
path => 'status',
@@ -900,6 +1012,7 @@ our $cmddef = {
clone => [ "PVE::API2::Qemu", 'clone_vm', ['vmid', 'newid'], { %node }, $upid_exit ],
migrate => [ "PVE::API2::Qemu", 'migrate_vm', ['vmid', 'target'], { %node }, $upid_exit ],
+ 'remote-migrate' => [ __PACKAGE__, 'remote_migrate_vm', ['vmid', 'target-vmid', 'target-endpoint'], { %node }, $upid_exit ],
set => [ "PVE::API2::Qemu", 'update_vm', ['vmid'], { %node } ],
--
2.30.2