[pve-devel] [PATCH manager v4 12/19] ceph/destroypool: move image check into worker
Fabian Grünbichler
f.gruenbichler at proxmox.com
Tue Sep 5 14:59:41 CEST 2017
vdisk_list can potentially take a very long time, and we don't
want the API request to time out.
Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
---
new in v3
PVE/API2/Ceph.pm | 32 +++++++++++++++++---------------
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index c28c4f7d..9fef0487 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -1843,23 +1843,25 @@ __PACKAGE__->register_method ({
if $param->{remove_storages};
my $pool = $param->{name};
- my $storages = $get_storages->($pool);
-
- # if not forced, destroy ceph pool only when no
- # vm disks are on it anymore
- if (!$param->{force}) {
- my $storagecfg = PVE::Storage::config();
- foreach my $storageid (keys %$storages) {
- my $storage = $storages->{$storageid};
-
- # check if any vm disks are on the pool
- my $res = PVE::Storage::vdisk_list($storagecfg, $storageid);
- die "ceph pool '$pool' still in use by storage '$storageid'\n"
- if @{$res->{$storageid}} != 0;
- }
- }
my $worker = sub {
+ my $storages = $get_storages->($pool);
+
+ # if not forced, destroy ceph pool only when no
+ # vm disks are on it anymore
+ if (!$param->{force}) {
+ my $storagecfg = PVE::Storage::config();
+ foreach my $storeid (keys %$storages) {
+ my $storage = $storages->{$storeid};
+
+ # check if any vm disks are on the pool
+ print "checking storage '$storeid' for RBD images..\n";
+ my $res = PVE::Storage::vdisk_list($storagecfg, $storeid);
+ die "ceph pool '$pool' still in use by storage '$storeid'\n"
+ if @{$res->{$storeid}} != 0;
+ }
+ }
+
my $rados = PVE::RADOS->new();
# fixme: '--yes-i-really-really-mean-it'
$rados->mon_command({
--
2.11.0
More information about the pve-devel
mailing list