[pve-devel] [PATCH v3 pve-manager 10/23] pvesr prepare-local-job: new helper
Dietmar Maurer
dietmar at proxmox.com
Tue May 30 15:20:07 CEST 2017
Prepare for starting a replication job. This is called on the target
node before replication starts. This call is for internal use, and
returns a JSON object on stdout. The method first tests whether VM <vmid>
resides on the local node. If so, it stops immediately. After that, the
method scans all volume IDs for snapshots and removes all replication
snapshots with timestamps different from <last_sync>. It also removes
any unused volumes.
Returns a hash with boolean markers for all volumes with existing
replication snapshots.
Signed-off-by: Dietmar Maurer <dietmar at proxmox.com>
---
PVE/CLI/pvesr.pm | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
PVE/Replication.pm | 30 +++++++++++++++++
2 files changed, 125 insertions(+)
diff --git a/PVE/CLI/pvesr.pm b/PVE/CLI/pvesr.pm
index 8a5075ad..98d54b09 100644
--- a/PVE/CLI/pvesr.pm
+++ b/PVE/CLI/pvesr.pm
@@ -12,6 +12,7 @@ use PVE::Tools qw(extract_param);
use PVE::SafeSyslog;
use PVE::CLIHandler;
+use PVE::Cluster;
use PVE::Replication;
use PVE::API2::ReplicationConfig;
use PVE::API2::Replication;
@@ -25,6 +26,98 @@ sub setup_environment {
}
__PACKAGE__->register_method ({
+ name => 'prepare_local_job',
+ path => 'prepare_local_job',
+ method => 'POST',
+ description => "Prepare for starting a replication job. This is called on the target node before replication starts. This call is for internal use, and return a JSON object on stdout. The method first test if VM <vmid> reside on the local node. If so, stop immediately. After that the method scans all volume IDs for snapshots, and removes all replications snapshots with timestamps different than <last_sync>. It also removes any unused volumes. Returns a hash with boolean markers for all volumes with existing replication snapshots.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ id => get_standard_option('pve-replication-id'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_vmid }),
+ 'extra-args' => get_standard_option('extra-args', {
+ description => "The list of volume IDs to consider." }),
+ last_sync => {
+ description => "Time (UNIX epoch) of last successful sync. If not specified, all replication snapshots get removed.",
+ type => 'integer',
+ minimum => 0,
+ optional => 1,
+ },
+ },
+ },
+ returns => { type => 'null' },
+ code => sub {
+ my ($param) = @_;
+
+ my $jobid = $param->{id};
+ my $vmid = $param->{vmid};
+ my $last_sync = $param->{last_sync} // 0;
+
+ my $local_node = PVE::INotify::nodename();
+
+ my $vms = PVE::Cluster::get_vmlist();
+ die "guest '$vmid' is on local node\n"
+ if $vms->{ids}->{$vmid} && $vms->{ids}->{$vmid}->{node} eq $local_node;
+
+ my $storecfg = PVE::Storage::config();
+
+ my $dl = PVE::Storage::vdisk_list($storecfg, undef, $vmid);
+
+ my $volids = [];
+
+ die "no volumes specified\n" if !scalar(@{$param->{'extra-args'}});
+
+ foreach my $volid (@{$param->{'extra-args'}}) {
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $scfg = PVE::Storage::storage_check_enabled($storecfg, $storeid, $local_node);
+ die "storage '$storeid' is a shared storage\n" if $scfg->{shared};
+
+ my ($vtype, undef, $ownervm) = PVE::Storage::parse_volname($storecfg, $volid);
+ die "volume '$volid' has wrong vtype ($vtype != 'images')\n"
+ if $vtype ne 'images';
+ die "volume '$volid' has wrong owner\n"
+ if !$ownervm || $vmid != $ownervm;
+
+ my $found = 0;
+ foreach my $info (@{$dl->{$storeid}}) {
+ if ($info->{volid} eq $volid) {
+ $found = 1;
+ last;
+ }
+ }
+
+ push @$volids, $volid if $found;
+ }
+
+ $volids = [ sort @$volids ];
+
+ my $logfunc = sub {
+ my ($start_time, $msg) = @_;
+ print STDERR "$msg\n";
+ };
+
+ # remove stale volumes
+ foreach my $storeid (keys %$dl) {
+ my $scfg = PVE::Storage::storage_check_enabled($storecfg, $storeid, $local_node, 1);
+ next if !$scfg || $scfg->{shared};
+ foreach my $info (@{$dl->{$storeid}}) {
+ my $volid = $info->{volid};
+ next if grep { $_ eq $volid } @$volids;
+ $logfunc->(undef, "$jobid: delete stale volume '$volid'");
+ PVE::Storage::vdisk_free($storecfg, $volid);
+ }
+ }
+
+ my $last_snapshots = PVE::Replication::prepare(
+ $storecfg, $volids, $jobid, $last_sync, undef, $logfunc);
+
+ print to_json($last_snapshots) . "\n";
+
+ return undef;
+ }});
+
+__PACKAGE__->register_method ({
name => 'run',
path => 'run',
method => 'POST',
@@ -173,6 +266,8 @@ our $cmddef = {
enable => [ __PACKAGE__, 'enable', ['id'], {}],
disable => [ __PACKAGE__, 'disable', ['id'], {}],
+ 'prepare-local-job' => [ __PACKAGE__, 'prepare_local_job', ['id', 'vmid', 'extra-args'], {} ],
+
run => [ __PACKAGE__ , 'run'],
};
diff --git a/PVE/Replication.pm b/PVE/Replication.pm
index 1646dff9..895939f6 100644
--- a/PVE/Replication.pm
+++ b/PVE/Replication.pm
@@ -156,6 +156,36 @@ my $get_next_job = sub {
return $jobcfg;
};
+sub replication_snapshot_name {
+ my ($jobid, $last_sync) = @_;
+
+ my $prefix = "replicate_${jobid}_";
+ my $snapname = "${prefix}${last_sync}_snap";
+
+ wantarray ? ($prefix, $snapname) : $snapname;
+}
+
+sub prepare {
+ my ($storecfg, $volids, $jobid, $last_sync, $start_time, $logfunc) = @_;
+
+ my ($prefix, $snapname) = replication_snapshot_name($jobid, $last_sync);
+
+ my $last_snapshots = {};
+ foreach my $volid (@$volids) {
+ my $list = PVE::Storage::volume_snapshot_list($storecfg, $volid, $prefix);
+ my $found = 0;
+ foreach my $snap (@$list) {
+ if ($snap eq $snapname) {
+ $last_snapshots->{$volid} = 1;
+ } else {
+ $logfunc->($start_time, "$jobid: delete stale snapshot '$snap' on $volid");
+ PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap);
+ }
+ }
+ }
+
+ return $last_snapshots;
+}
sub replicate {
my ($jobcfg, $start_time, $logfunc) = @_;
--
2.11.0
More information about the pve-devel
mailing list