[pve-devel] [RFC v3 guest-common 7/7] fix #3111: config: snapshot delete: check if replication still needs it
Fabian Ebner
f.ebner at proxmox.com
Thu Aug 12 13:01:11 CEST 2021
and abort if it does, unless --force is specified.
After rollback, the rollback snapshot might still be needed as the
base for incremental replication, because rollback removes the
replication snapshots (which would otherwise block it).
It's not enough to limit the check to the most recent snapshot,
because new snapshots might have been created between rollback and
removal.
It's not enough to limit the check to snapshots without a parent (i.e.
for ZFS, the oldest ones), because some volumes might have been added
only after that, meaning the oldest snapshot is not an incremental
replication base for them.
Signed-off-by: Fabian Ebner <f.ebner at proxmox.com>
---
Sent as RFC, because I feel like this is quite a bit of code just to
prevent a corner case that's now already warned about upon rollback.
Arguably the warning in the UI is not very visible, but improving that
by either using the new task warnings or showing the task viewer upon
rollback is an alternative that might be preferable.
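
For illustration, here is a rough standalone sketch of the semantics
of the new check (names and example data are made up; this is not the
patch itself): a snapshot may only be deleted if, for every replicated
volume it contains, each local replication job still owns at least one
replication snapshot on that volume. Otherwise, the snapshot to be
deleted might be the job's last usable incremental base.

#!/usr/bin/perl

use strict;
use warnings;

# Standalone sketch of the check's semantics; data structures and
# names are made up for illustration.
sub assert_not_needed_by_replication {
    my ($snap_volids, $volume_snapshots, $job_ids, $snapname) = @_;

    for my $volid ($snap_volids->@*) {
        my $snapshots = $volume_snapshots->{$volid} // [];

        for my $jobid ($job_ids->@*) {
            # A job's replication snapshots are identified by their
            # name prefix (assumed scheme, see the note further down).
            my @job_snapshots =
                grep { m/^__replicate_\Q$jobid\E/ } $snapshots->@*;

            next if scalar(@job_snapshots) > 0;

            die "snapshot '$snapname' needed by replication job '$jobid'"
                . " - run replication first\n";
        }
    }
}

# Example: after a rollback to 'rollbacked', job 100-0 still has a
# replication snapshot on disk-0, but not on disk-1, so deleting
# 'rollbacked' would remove the last shared base for disk-1 and the
# check aborts.
my $volume_snapshots = {
    'local-zfs:vm-100-disk-0' => ['rollbacked', '__replicate_100-0_1628766000__'],
    'local-zfs:vm-100-disk-1' => ['rollbacked'],
};

assert_not_needed_by_replication(
    [sort keys $volume_snapshots->%*],
    $volume_snapshots,
    ['100-0'],
    'rollbacked',
);
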
 src/PVE/AbstractConfig.pm    | 41 +++++++++++++++++++++++++++++++++++++++++++++
 src/PVE/Replication.pm       |  6 +++++-
 src/PVE/ReplicationConfig.pm | 14 ++++++++++++++
 3 files changed, 60 insertions(+), 1 deletion(-)
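
For context on the is_replication_snapshot() change below: replication
snapshots are, to my knowledge, named __replicate_<jobid>_<timestamp>__,
so the per-job variant boils down to a prefix match on the job id. A
minimal sketch, assuming that naming scheme:

#!/usr/bin/perl

use strict;
use warnings;

# Assumed naming scheme: __replicate_<jobid>_<timestamp>__
my $snap = '__replicate_100-0_1628766000__';

print "replication snapshot\n" if $snap =~ m/^__replicate_/;
print "belongs to job 100-0\n" if $snap =~ m/^__replicate_\Q100-0\E/;
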
diff --git a/src/PVE/AbstractConfig.pm b/src/PVE/AbstractConfig.pm
index a5a15bf..39f1cc8 100644
--- a/src/PVE/AbstractConfig.pm
+++ b/src/PVE/AbstractConfig.pm
@@ -824,6 +824,44 @@ sub snapshot_create {
     $class->__snapshot_commit($vmid, $snapname);
 }
 
+# Check if the snapshot might still be needed by a replication job.
+my $snapshot_delete_assert_not_needed_by_replication = sub {
+    my ($class, $vmid, $conf, $snap, $snapname) = @_;
+
+    my $repl_conf = PVE::ReplicationConfig->new();
+    return if !$repl_conf->check_for_existing_jobs($vmid, 1);
+
+    my $storecfg = PVE::Storage::config();
+
+    # Current config's volumes are relevant for replication.
+    my $volumes = $class->get_replicatable_volumes($storecfg, $vmid, $conf, 1);
+
+    my $replication_jobs = $repl_conf->list_guests_local_replication_jobs($vmid);
+
+    $class->foreach_volume($snap, sub {
+        my ($vs, $volume) = @_;
+
+        my $volid_key = $class->volid_key();
+        my $volid = $volume->{$volid_key};
+
+        return if !$volumes->{$volid};
+
+        my $snapshots = PVE::Storage::volume_snapshot_list($storecfg, $volid);
+
+        for my $job ($replication_jobs->@*) {
+            my $jobid = $job->{id};
+
+            my @jobs_snapshots = grep {
+                PVE::Replication::is_replication_snapshot($_, $jobid)
+            } $snapshots->@*;
+
+            next if scalar(@jobs_snapshots) > 0;
+
+            die "snapshot '$snapname' needed by replication job '$jobid' - run replication first\n";
+        }
+    });
+};
+
 # Deletes a snapshot.
 # Note: $drivehash is only set when called from snapshot_create.
 sub snapshot_delete {
@@ -838,6 +876,9 @@ sub snapshot_delete {
 
     die "snapshot '$snapname' does not exist\n" if !defined($snap);
 
+    $snapshot_delete_assert_not_needed_by_replication->($class, $vmid, $conf, $snap, $snapname)
+        if !$drivehash && !$force;
+
     $class->set_lock($vmid, 'snapshot-delete')
         if (!$drivehash); # doesn't already have a 'snapshot' lock
 
diff --git a/src/PVE/Replication.pm b/src/PVE/Replication.pm
index 2609ad6..098ac00 100644
--- a/src/PVE/Replication.pm
+++ b/src/PVE/Replication.pm
@@ -470,7 +470,11 @@ sub run_replication {
 }
 
 sub is_replication_snapshot {
-    my ($snapshot_name) = @_;
+    my ($snapshot_name, $jobid) = @_;
+
+    if (defined($jobid)) {
+        return $snapshot_name =~ m/^__replicate_\Q$jobid\E/ ? 1 : 0;
+    }
 
     return $snapshot_name =~ m/^__replicate_/ ? 1 : 0;
 }
diff --git a/src/PVE/ReplicationConfig.pm b/src/PVE/ReplicationConfig.pm
index fd856a0..78f55bb 100644
--- a/src/PVE/ReplicationConfig.pm
+++ b/src/PVE/ReplicationConfig.pm
@@ -228,6 +228,20 @@ sub find_local_replication_job {
     return undef;
 }
 
+sub list_guests_local_replication_jobs {
+    my ($cfg, $vmid) = @_;
+
+    my $jobs = [];
+
+    for my $job (values %{$cfg->{ids}}) {
+        next if $job->{type} ne 'local' || $job->{guest} != $vmid;
+
+        push @{$jobs}, $job;
+    }
+
+    return $jobs;
+}
+
 # makes old_target the new source for all local jobs of this guest
 # makes new_target the target for the single local job with target old_target
 sub switch_replication_job_target_nolock {
--
2.30.2