[pve-devel] [PATCH manager v3 13/20] ceph/destroypool: move image check into worker

Fabian Grünbichler f.gruenbichler at proxmox.com
Thu Aug 31 11:38:16 CEST 2017


vdisk_list can potentially take very long, and we don't want
the API request to time out.

Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
---
new in v3

 PVE/API2/Ceph.pm | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index e4443bf7..c966cfdc 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -1844,23 +1844,25 @@ __PACKAGE__->register_method ({
 	    if $param->{remove_storages};
 
 	my $pool = $param->{name};
-	my $storages = $get_storages->($pool);
-
-	# if not forced, destroy ceph pool only when no
-	# vm disks are on it anymore
-	if (!$param->{force}) {
-	    my $storagecfg = PVE::Storage::config();
-	    foreach my $storageid (keys %$storages) {
-		my $storage = $storages->{$storageid};
-
-		# check if any vm disks are on the pool
-		my $res = PVE::Storage::vdisk_list($storagecfg, $storageid);
-		die "ceph pool '$pool' still in use by storage '$storageid'\n"
-		    if @{$res->{$storageid}} != 0;
-	    }
-	}
 
 	my $worker = sub {
+	    my $storages = $get_storages->($pool);
+
+	    # if not forced, destroy ceph pool only when no
+	    # vm disks are on it anymore
+	    if (!$param->{force}) {
+		my $storagecfg = PVE::Storage::config();
+		foreach my $storeid (keys %$storages) {
+		    my $storage = $storages->{$storeid};
+
+		    # check if any vm disks are on the pool
+		    print "checking storage '$storeid' for RBD images..\n";
+		    my $res = PVE::Storage::vdisk_list($storagecfg, $storeid);
+		    die "ceph pool '$pool' still in use by storage '$storeid'\n"
+			if @{$res->{$storeid}} != 0;
+		}
+	    }
+
 	    my $rados = PVE::RADOS->new();
 	    # fixme: '--yes-i-really-really-mean-it'
 	    $rados->mon_command({
-- 
2.11.0




More information about the pve-devel mailing list