[pve-devel] [PATCH manager v2 11/11] pveceph: add 'fs destroy' command
Dominik Csapak
d.csapak at proxmox.com
Mon Oct 25 16:01:38 CEST 2021
with 'remove-storages' and 'remove-pools' as optional parameters
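
Example invocation (sketch, not verified here; the fs name is
positional per the cmddef below, both flags default to off, and any
matching storages must be disabled first):

    pveceph fs destroy <name> --remove-storages --remove-pools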
Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
PVE/CLI/pveceph.pm | 120 ++++++++++++++++++++++++++++++++++++++++++
PVE/Ceph/Tools.pm | 15 ++++++
www/manager6/Utils.js | 1 +
3 files changed, 136 insertions(+)
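
Note: the new destroy_fs() helper in Tools.pm is essentially the
mon_command counterpart of

    ceph fs rm <fs_name> --yes-i-really-mean-it

which, AFAICT, Ceph only accepts once the filesystem is offline (i.e.
its MDS have been stopped), so callers need to take care of that.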
diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm
index b04d1346..995cfcd5 100755
--- a/PVE/CLI/pveceph.pm
+++ b/PVE/CLI/pveceph.pm
@@ -221,6 +221,125 @@ __PACKAGE__->register_method ({
        return undef;
    }});

+my $get_storages = sub {
+    my ($fs, $is_default) = @_;
+
+    my $cfg = PVE::Storage::config();
+
+    my $storages = $cfg->{ids};
+    my $res = {};
+    foreach my $storeid (keys %$storages) {
+        my $curr = $storages->{$storeid};
+        next if $curr->{type} ne 'cephfs';
+        my $cur_fs = $curr->{'fs-name'};
+        $res->{$storeid} = $storages->{$storeid}
+            if (!defined($cur_fs) && $is_default) || (defined($cur_fs) && $fs eq $cur_fs);
+    }
+
+    return $res;
+};
+
+__PACKAGE__->register_method ({
+    name => 'destroyfs',
+    path => 'destroyfs',
+    method => 'DELETE',
+    description => "Destroy a Ceph filesystem",
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            node => get_standard_option('pve-node'),
+            name => {
+                description => "The ceph filesystem name.",
+                type => 'string',
+            },
+            'remove-storages' => {
+                description => "Remove all pveceph-managed storages configured for this fs.",
+                type => 'boolean',
+                optional => 1,
+                default => 0,
+            },
+            'remove-pools' => {
+                description => "Remove data and metadata pools configured for this fs.",
+                type => 'boolean',
+                optional => 1,
+                default => 0,
+            },
+        },
+    },
+    returns => { type => 'string' },
+    code => sub {
+        my ($param) = @_;
+
+        PVE::Ceph::Tools::check_ceph_inited();
+
+        my $rpcenv = PVE::RPCEnvironment::get();
+        my $user = $rpcenv->get_user();
+
+        my $fs_name = $param->{name};
+
+        my $fs;
+        my $fs_list = PVE::Ceph::Tools::ls_fs();
+        for my $entry (@$fs_list) {
+            next if $entry->{name} ne $fs_name;
+            $fs = $entry;
+            last;
+        }
+        die "no such cephfs '$fs_name'\n" if !$fs;
+
+        my $worker = sub {
+            my $rados = PVE::RADOS->new();
+
+            if ($param->{'remove-storages'}) {
+                my $defaultfs;
+                my $fs_dump = $rados->mon_command({ prefix => "fs dump" });
+                for my $cephfs ($fs_dump->{filesystems}->@*) {
+                    next if $cephfs->{id} != $fs_dump->{default_fscid};
+                    $defaultfs = $cephfs->{mdsmap}->{fs_name};
+                }
+                warn "no default fs found, some relevant storages might not be removed\n"
+                    if !defined($defaultfs);
+
+                my $storages = $get_storages->($fs_name, $fs_name eq ($defaultfs // ''));
+                for my $storeid (keys %$storages) {
+                    my $store = $storages->{$storeid};
+                    if (!$store->{disable}) {
+                        die "storage '$storeid' is not disabled, make sure to disable ".
+                            "and unmount the storage first\n";
+                    }
+                }
+
+                my $err;
+                for my $storeid (keys %$storages) {
+                    # skip external clusters, not managed by pveceph
+                    next if $storages->{$storeid}->{monhost};
+                    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
+                    if ($@) {
+                        warn "failed to remove storage '$storeid': $@\n";
+                        $err = 1;
+                    }
+                }
+                die "failed to remove (some) storages - check log and remove manually!\n"
+                    if $err;
+            }
+
+            PVE::Ceph::Tools::destroy_fs($fs_name, $rados);
+
+            if ($param->{'remove-pools'}) {
+                warn "removing metadata pool '$fs->{metadata_pool}'\n";
+                eval { PVE::Ceph::Tools::destroy_pool($fs->{metadata_pool}, $rados) };
+                warn "$@\n" if $@;
+
+                foreach my $pool ($fs->{data_pools}->@*) {
+                    warn "removing data pool '$pool'\n";
+                    eval { PVE::Ceph::Tools::destroy_pool($pool, $rados) };
+                    warn "$@\n" if $@;
+                }
+            }
+
+        };
+        return $rpcenv->fork_worker('cephdestroyfs', $fs_name, $user, $worker);
+    }});
+
our $cmddef = {
    init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
    pool => {
@@ -256,6 +375,7 @@ our $cmddef = {
    destroypool => { alias => 'pool destroy' },
    fs => {
        create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
+        destroy => [ __PACKAGE__, 'destroyfs', ['name'], { node => $nodename }],
    },
    osd => {
        create => [ 'PVE::API2::Ceph::OSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
diff --git a/PVE/Ceph/Tools.pm b/PVE/Ceph/Tools.pm
index 2f818276..36d7788a 100644
--- a/PVE/Ceph/Tools.pm
+++ b/PVE/Ceph/Tools.pm
@@ -340,6 +340,21 @@ sub create_fs {
    });
}

+sub destroy_fs {
+    my ($fs, $rados) = @_;
+
+    if (!defined($rados)) {
+        $rados = PVE::RADOS->new();
+    }
+
+    $rados->mon_command({
+        prefix => "fs rm",
+        fs_name => $fs,
+        'yes_i_really_mean_it' => JSON::true,
+        format => 'plain',
+    });
+}
+
sub setup_pve_symlinks {
    # fail if we find a real file instead of a link
    if (-f $ceph_cfgpath) {
diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index 274d4db2..38615c30 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -1831,6 +1831,7 @@ Ext.define('PVE.Utils', {
    cephdestroymon: ['Ceph Monitor', gettext('Destroy')],
    cephdestroyosd: ['Ceph OSD', gettext('Destroy')],
    cephdestroypool: ['Ceph Pool', gettext('Destroy')],
+    cephdestroyfs: ['CephFS', gettext('Destroy')],
    cephfscreate: ['CephFS', gettext('Create')],
    cephsetpool: ['Ceph Pool', gettext('Edit')],
    cephsetflags: ['', gettext('Change global Ceph flags')],
--
2.30.2