[pve-devel] [PATCH manager v4 1/2] Fix #2051: preserve DB/WAL disk on destroy
Alwin Antreich
a.antreich at proxmox.com
Thu Feb 7 16:29:41 CET 2019
'pveceph osd destroy <num> --cleanup'
When executing the command above, all disks associated with the OSD are
currently wiped completely with dd (including separate disks holding the DB/WAL).
This patch extends 'wipe_disks' so that it can wipe an individual partition
instead of the whole disk.
Signed-off-by: Alwin Antreich <a.antreich at proxmox.com>
---
V3 -> V4:
- incorporate suggestion by Thomas
- and add second patch for cleanup
https://pve.proxmox.com/pipermail/pve-devel/2019-February/035573.html
PVE/API2/Ceph/OSD.pm | 15 ++++++++-------
PVE/Ceph/Tools.pm | 19 +++++++++++++++----
2 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/PVE/API2/Ceph/OSD.pm b/PVE/API2/Ceph/OSD.pm
index b4dc277e..c3858831 100644
--- a/PVE/API2/Ceph/OSD.pm
+++ b/PVE/API2/Ceph/OSD.pm
@@ -17,6 +17,7 @@ use PVE::RADOS;
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::Tools qw(run_command file_set_contents);
+use PVE::ProcFSTools;
use base qw(PVE::RESTHandler);
@@ -394,7 +395,6 @@ __PACKAGE__->register_method ({
# try to unmount from standard mount point
my $mountpoint = "/var/lib/ceph/osd/ceph-$osdid";
- my $disks_to_wipe = {};
my $remove_partition = sub {
my ($part) = @_;
@@ -402,11 +402,10 @@ __PACKAGE__->register_method ({
my $partnum = PVE::Diskmanage::get_partnum($part);
my $devpath = PVE::Diskmanage::get_blockdev($part);
+ PVE::Ceph::Tools::wipe_disks($part);
print "remove partition $part (disk '${devpath}', partnum $partnum)\n";
eval { run_command(['/sbin/sgdisk', '-d', $partnum, "${devpath}"]); };
warn $@ if $@;
-
- $disks_to_wipe->{$devpath} = 1;
};
my $partitions_to_remove = [];
@@ -418,7 +417,9 @@ __PACKAGE__->register_method ({
next if !($dev && $path && $fstype);
next if $dev !~ m|^/dev/|;
if ($path eq $mountpoint) {
- my $data_part = abs_path($dev);
+ my ($data_part) = abs_path($dev) =~ m|^(/.+)|
+ or die "invalid path: $path \n"; # untaint $part
+
push @$partitions_to_remove, $data_part;
last;
}
@@ -427,7 +428,9 @@ __PACKAGE__->register_method ({
}
foreach my $path (qw(journal block block.db block.wal)) {
- my $part = abs_path("$mountpoint/$path");
+ my ($part) = abs_path("$mountpoint/$path") =~ m|^(/.+)|
+ or die "invalid path: $path \n"; # untaint $part
+
if ($part) {
push @$partitions_to_remove, $part;
}
@@ -443,8 +446,6 @@ __PACKAGE__->register_method ({
foreach my $part (@$partitions_to_remove) {
$remove_partition->($part);
}
-
- PVE::Ceph::Tools::wipe_disks(keys %$disks_to_wipe);
}
};
diff --git a/PVE/Ceph/Tools.pm b/PVE/Ceph/Tools.pm
index 0ada98cf..9b02d7c8 100644
--- a/PVE/Ceph/Tools.pm
+++ b/PVE/Ceph/Tools.pm
@@ -4,6 +4,7 @@ use strict;
use warnings;
use File::Path;
+use File::Basename;
use IO::File;
use PVE::Tools qw(run_command dir_glob_foreach);
@@ -232,11 +233,21 @@ sub systemd_managed {
sub wipe_disks {
my (@devs) = @_;
- my @wipe_cmd = qw(/bin/dd if=/dev/zero bs=1M count=200 conv=fdatasync);
+ my @wipe_cmd = qw(/bin/dd if=/dev/zero bs=1M conv=fdatasync);
+
foreach my $devpath (@devs) {
- print "wipe disk: $devpath\n";
- eval { run_command([@wipe_cmd, "of=${devpath}"]) };
- warn $@ if $@;
+ my $devname = basename($devpath);
+ my $dev_size = PVE::Tools::file_get_contents("/sys/class/block/$devname/size");
+
+ ($dev_size) = $dev_size =~ m|(\d+)|; # untaint $dev_size
+ die "Couldn't get the size of the device $devname\n" if (!defined($dev_size));
+
+ my $size = ($dev_size * 512 / 1024 / 1024);
+ my $count = ($size < 200) ? $size : 200;
+
+ print "wipe disk/partition: $devpath\n";
+ eval { run_command([@wipe_cmd, "count=$count", "of=${devpath}"]) };
+ warn $@ if $@;
}
};
--
2.11.0
More information about the pve-devel
mailing list