[pve-devel] [PATCH storage 6/6] api: disks: delete: add flag for cleaning up storage config

Fabian Ebner f.ebner at proxmox.com
Mon Oct 25 15:47:49 CEST 2021


When the new 'cleanup-config' flag is set, update the node restrictions
of matching storages to reflect that the storage is no longer available
on the given node. If a storage was configured for that node only,
remove it from the configuration altogether.
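
For example, with the flag set, deleting a hypothetical directory
storage 'mydir' (restricted to node1 and node2) on node1 turns

    dir: mydir
        path /mnt/pve/mydir
        nodes node1,node2

into the same entry with 'nodes node2'. Had the entry been restricted
to node1 only, it would be removed from the configuration instead.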

Signed-off-by: Fabian Ebner <f.ebner at proxmox.com>
---
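Note (not part of the commit message): once applied, the flag can be
passed like any other boolean API parameter. A hypothetical invocation
via pvesh (node and storage names made up) could look like:

    pvesh delete /nodes/node1/disks/directory/mydir \
        --cleanup-config 1 --cleanup-disks 1
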
 PVE/API2/Disks/Directory.pm | 20 ++++++++++++++++++++
 PVE/API2/Disks/LVM.pm       | 20 ++++++++++++++++++++
 PVE/API2/Disks/LVMThin.pm   | 21 +++++++++++++++++++++
 PVE/API2/Disks/ZFS.pm       | 20 ++++++++++++++++++++
 PVE/API2/Storage/Config.pm  | 27 +++++++++++++++++++++++++++
 5 files changed, 108 insertions(+)

diff --git a/PVE/API2/Disks/Directory.pm b/PVE/API2/Disks/Directory.pm
index c9dcb52..df63ba9 100644
--- a/PVE/API2/Disks/Directory.pm
+++ b/PVE/API2/Disks/Directory.pm
@@ -314,6 +314,13 @@ __PACKAGE__->register_method ({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
+	    'cleanup-config' => {
+		description => "Marks associated storage(s) as unavailable on this node, or ".
+		    "removes them from the configuration entirely if configured for this node only.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	    'cleanup-disks' => {
 		description => "Also wipe disk so it can be repurposed afterwards.",
 		type => 'boolean',
@@ -330,6 +337,7 @@ __PACKAGE__->register_method ({
 	my $user = $rpcenv->get_user();
 
 	my $name = $param->{name};
+	my $node = $param->{node};
 
 	my $worker = sub {
 	    my $path = "/mnt/pve/$name";
@@ -357,10 +365,22 @@ __PACKAGE__->register_method ({
 
 		unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n";
 
+		my $config_err;
+		if ($param->{'cleanup-config'}) {
+		    my $match = sub {
+			my ($scfg) = @_;
+			return $scfg->{type} eq 'dir' && $scfg->{path} eq $path;
+		    };
+		    eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
+		    warn $config_err = $@ if $@;
+		}
+
 		if ($to_wipe) {
 		    PVE::Diskmanage::wipe_blockdev($to_wipe);
 		    PVE::Diskmanage::udevadm_trigger($to_wipe);
 		}
+
+		die "config cleanup failed - $config_err" if $config_err;
 	    });
 	};
 
diff --git a/PVE/API2/Disks/LVM.pm b/PVE/API2/Disks/LVM.pm
index 1af3d43..6e4331a 100644
--- a/PVE/API2/Disks/LVM.pm
+++ b/PVE/API2/Disks/LVM.pm
@@ -198,6 +198,13 @@ __PACKAGE__->register_method ({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
+	    'cleanup-config' => {
+		description => "Marks associated storage(s) as unavailable on this node, or ".
+		    "removes them from the configuration entirely if configured for this node only.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	    'cleanup-disks' => {
 		description => "Also wipe disks so they can be repurposed afterwards.",
 		type => 'boolean',
@@ -214,6 +221,7 @@ __PACKAGE__->register_method ({
 	my $user = $rpcenv->get_user();
 
 	my $name = $param->{name};
+	my $node = $param->{node};
 
 	my $worker = sub {
 	    PVE::Diskmanage::locked_disk_action(sub {
@@ -222,6 +230,16 @@ __PACKAGE__->register_method ({
 
 		PVE::Storage::LVMPlugin::lvm_destroy_volume_group($name);
 
+		my $config_err;
+		if ($param->{'cleanup-config'}) {
+		    my $match = sub {
+			my ($scfg) = @_;
+			return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name;
+		    };
+		    eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
+		    warn $config_err = $@ if $@;
+		}
+
 		if ($param->{'cleanup-disks'}) {
 		    my $wiped = [];
 		    eval {
@@ -235,6 +253,8 @@ __PACKAGE__->register_method ({
 		    PVE::Diskmanage::udevadm_trigger($wiped->@*);
 		    die "cleanup failed - $err" if $err;
 		}
+
+		die "config cleanup failed - $config_err" if $config_err;
 	    });
 	};
 
diff --git a/PVE/API2/Disks/LVMThin.pm b/PVE/API2/Disks/LVMThin.pm
index ea36ce2..a82ab15 100644
--- a/PVE/API2/Disks/LVMThin.pm
+++ b/PVE/API2/Disks/LVMThin.pm
@@ -177,6 +177,13 @@ __PACKAGE__->register_method ({
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
 	    'volume-group' => get_standard_option('pve-storage-id'),
+	    'cleanup-config' => {
+		description => "Marks associated storage(s) as unavailable on this node, or ".
+		    "removes them from the configuration entirely if configured for this node only.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	    'cleanup-disks' => {
 		description => "Also wipe disks so they can be repurposed afterwards.",
 		type => 'boolean',
@@ -194,6 +201,7 @@ __PACKAGE__->register_method ({
 
 	my $vg = $param->{'volume-group'};
 	my $lv = $param->{name};
+	my $node = $param->{node};
 
 	my $worker = sub {
 	    PVE::Diskmanage::locked_disk_action(sub {
@@ -204,6 +212,17 @@ __PACKAGE__->register_method ({
 
 		run_command(['lvremove', '-y', "${vg}/${lv}"]);
 
+		my $config_err;
+		if ($param->{'cleanup-config'}) {
+		    my $match = sub {
+			my ($scfg) = @_;
+			return if $scfg->{type} ne 'lvmthin';
+			return $scfg->{vgname} eq $vg && $scfg->{thinpool} eq $lv;
+		    };
+		    eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
+		    warn $config_err = $@ if $@;
+		}
+
 		if ($param->{'cleanup-disks'}) {
 		    my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
 
@@ -222,6 +241,8 @@ __PACKAGE__->register_method ({
 		    PVE::Diskmanage::udevadm_trigger($wiped->@*);
 		    die "cleanup failed - $err" if $err;
 		}
+
+		die "config cleanup failed - $config_err" if $config_err;
 	    });
 	};
 
diff --git a/PVE/API2/Disks/ZFS.pm b/PVE/API2/Disks/ZFS.pm
index 10b73a5..63bc435 100644
--- a/PVE/API2/Disks/ZFS.pm
+++ b/PVE/API2/Disks/ZFS.pm
@@ -460,6 +460,13 @@ __PACKAGE__->register_method ({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
+	    'cleanup-config' => {
+		description => "Marks associated storage(s) as unavailable on this node, or ".
+		    "removes them from the configuration entirely if configured for this node only.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	    'cleanup-disks' => {
 		description => "Also wipe disks so they can be repurposed afterwards.",
 		type => 'boolean',
@@ -476,6 +483,7 @@ __PACKAGE__->register_method ({
 	my $user = $rpcenv->get_user();
 
 	my $name = $param->{name};
+	my $node = $param->{node};
 
 	my $worker = sub {
 	    PVE::Diskmanage::locked_disk_action(sub {
@@ -516,10 +524,22 @@ __PACKAGE__->register_method ({
 
 		run_command(['zpool', 'destroy', $name]);
 
+		my $config_err;
+		if ($param->{'cleanup-config'}) {
+		    my $match = sub {
+			my ($scfg) = @_;
+			return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq $name;
+		    };
+		    eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); };
+		    warn $config_err = $@ if $@;
+		}
+
 		eval { PVE::Diskmanage::wipe_blockdev($_) for $to_wipe->@*; };
 		my $err = $@;
 		PVE::Diskmanage::udevadm_trigger($to_wipe->@*);
 		die "cleanup failed - $err" if $err;
+
+		die "config cleanup failed - $config_err" if $config_err;
 	    });
 	};
 
diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
index bf38df3..6bd770e 100755
--- a/PVE/API2/Storage/Config.pm
+++ b/PVE/API2/Storage/Config.pm
@@ -38,6 +38,33 @@ my $api_storage_config = sub {
     return $scfg;
 };
 
+# For each storage where $match->($scfg) returns true, update the node restriction to no
+# longer include $node. If no other node remains, remove the storage altogether.
+sub cleanup_storages_for_node {
+    my ($self, $match, $node) = @_;
+
+    my $config = PVE::Storage::config();
+    my $cluster_nodes = PVE::Cluster::get_nodelist();
+
+    for my $storeid (keys $config->{ids}->%*) {
+	my $scfg = PVE::Storage::storage_config($config, $storeid);
+	next if !$match->($scfg);
+
+	my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* };
+	next if !$nodes->{$node}; # not configured on $node, so nothing to do
+	delete $nodes->{$node};
+
+	if (scalar(keys $nodes->%*) > 0) {
+	    $self->update({
+		nodes => join(',', sort keys $nodes->%*),
+		storage => $storeid,
+	    });
+	} else {
+	    $self->delete({storage => $storeid});
+	}
+    }
+}
+
 __PACKAGE__->register_method ({
     name => 'index',
     path => '',
-- 
2.30.2
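
For illustration, a minimal standalone sketch of the node-set logic used
by cleanup_storages_for_node (data and names are made up, no PVE modules
required):

    #!/usr/bin/perl
    use strict;
    use warnings;

    # An unset 'nodes' restriction means the storage is available on all
    # cluster nodes, so materialize the full list before removing $node.
    sub remaining_nodes {
        my ($scfg_nodes, $cluster_nodes, $node) = @_;
        my %nodes = $scfg_nodes
            ? %$scfg_nodes
            : (map { $_ => 1 } @$cluster_nodes);
        return undef if !$nodes{$node}; # not configured here, nothing to do
        delete $nodes{$node};
        return [sort keys %nodes]; # empty result => delete the storage
    }

    my $cluster = ['node1', 'node2', 'node3'];
    # restricted to node1,node2 - removing node1 leaves node2
    print "@{remaining_nodes({ node1 => 1, node2 => 1 }, $cluster, 'node1')}\n";
    # unrestricted - removing node2 leaves node1 and node3
    print "@{remaining_nodes(undef, $cluster, 'node2')}\n";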