[pve-devel] [PATCH manager 3/7] ceph: move service_cmd and MDS related code to Services.pm
Dominik Csapak
d.csapak at proxmox.com
Wed Dec 19 11:24:43 CET 2018
Also adapts the call sites to use the moved subs.
Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
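Call sites switch from PVE::Ceph::Tools to the new PVE::Ceph::Services
namespace; a minimal sketch of the pattern (the monitor id '0' is just
an example):

    use PVE::Ceph::Services;

    # before: PVE::Ceph::Tools::ceph_service_cmd('restart', 'mon.0');
    PVE::Ceph::Services::ceph_service_cmd('restart', 'mon.0');
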
PVE/API2/Ceph.pm | 24 ++++---
PVE/API2/Ceph/FS.pm | 5 +-
PVE/API2/Ceph/MDS.pm | 7 +-
PVE/Ceph/Makefile | 1 +
PVE/Ceph/Services.pm | 197 +++++++++++++++++++++++++++++++++++++++++++++++++++
PVE/Ceph/Tools.pm | 177 ---------------------------------------------
6 files changed, 218 insertions(+), 193 deletions(-)
create mode 100644 PVE/Ceph/Services.pm
diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 2ee08a81..af6af312 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -7,6 +7,7 @@ use Cwd qw(abs_path);
use IO::File;
use PVE::Ceph::Tools;
+use PVE::Ceph::Services;
use PVE::CephConfig;
use PVE::Cluster qw(cfs_read_file cfs_write_file);
use PVE::Diskmanage;
@@ -376,8 +377,8 @@ __PACKAGE__->register_method ({
print "destroy OSD $osdsection\n";
eval {
- PVE::Ceph::Tools::ceph_service_cmd('stop', $osdsection);
- PVE::Ceph::Tools::ceph_service_cmd('disable', $osdsection);
+ PVE::Ceph::Services::ceph_service_cmd('stop', $osdsection);
+ PVE::Ceph::Services::ceph_service_cmd('disable', $osdsection);
};
warn $@ if $@;
@@ -538,6 +539,7 @@ use Net::IP;
use UUID;
use PVE::Ceph::Tools;
+use PVE::Ceph::Services;
use PVE::Cluster qw(cfs_read_file cfs_write_file);
use PVE::JSONSchema qw(get_standard_option);
use PVE::Network;
@@ -994,9 +996,9 @@ my $create_mgr = sub {
run_command(["chown", 'ceph:ceph', '-R', $mgrdir]);
print "enabling service 'ceph-mgr\@$id.service'\n";
- PVE::Ceph::Tools::ceph_service_cmd('enable', $mgrname);
+ PVE::Ceph::Services::ceph_service_cmd('enable', $mgrname);
print "starting service 'ceph-mgr\@$id.service'\n";
- PVE::Ceph::Tools::ceph_service_cmd('start', $mgrname);
+ PVE::Ceph::Services::ceph_service_cmd('start', $mgrname);
};
my $destroy_mgr = sub {
@@ -1010,9 +1012,9 @@ my $destroy_mgr = sub {
if ! -d $mgrdir;
print "disabling service 'ceph-mgr\@$mgrid.service'\n";
- PVE::Ceph::Tools::ceph_service_cmd('disable', $mgrname);
+ PVE::Ceph::Services::ceph_service_cmd('disable', $mgrname);
print "stopping service 'ceph-mgr\@$mgrid.service'\n";
- PVE::Ceph::Tools::ceph_service_cmd('stop', $mgrname);
+ PVE::Ceph::Services::ceph_service_cmd('stop', $mgrname);
print "removing manager directory '$mgrdir'\n";
File::Path::remove_tree($mgrdir);
@@ -1168,7 +1170,7 @@ __PACKAGE__->register_method ({
} elsif ($create_keys_pid == 0) {
exit PVE::Tools::run_command(['ceph-create-keys', '-i', $monid]);
} else {
- PVE::Ceph::Tools::ceph_service_cmd('start', $monsection);
+ PVE::Ceph::Services::ceph_service_cmd('start', $monsection);
if ($systemd_managed) {
#to ensure we have the correct startup order.
@@ -1252,7 +1254,7 @@ __PACKAGE__->register_method ({
$rados->mon_command({ prefix => "mon remove", name => $monid, format => 'plain' });
- eval { PVE::Ceph::Tools::ceph_service_cmd('stop', $monsection); };
+ eval { PVE::Ceph::Services::ceph_service_cmd('stop', $monsection); };
warn $@ if $@;
delete $cfg->{$monsection};
@@ -1402,7 +1404,7 @@ __PACKAGE__->register_method ({
push @$cmd, $param->{service};
}
- PVE::Ceph::Tools::ceph_service_cmd(@$cmd);
+ PVE::Ceph::Services::ceph_service_cmd(@$cmd);
};
return $rpcenv->fork_worker('srvstop', $param->{service} || 'ceph',
@@ -1453,7 +1455,7 @@ __PACKAGE__->register_method ({
push @$cmd, $param->{service};
}
- PVE::Ceph::Tools::ceph_service_cmd(@$cmd);
+ PVE::Ceph::Services::ceph_service_cmd(@$cmd);
};
return $rpcenv->fork_worker('srvstart', $param->{service} || 'ceph',
@@ -1504,7 +1506,7 @@ __PACKAGE__->register_method ({
push @$cmd, $param->{service};
}
- PVE::Ceph::Tools::ceph_service_cmd(@$cmd);
+ PVE::Ceph::Services::ceph_service_cmd(@$cmd);
};
return $rpcenv->fork_worker('srvrestart', $param->{service} || 'ceph',
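For reference, ceph_service_cmd translates the service argument into a
systemd unit, as implemented in Services.pm below; a standalone sketch
of that mapping (the example specs are illustrative only):

    for my $spec ('mon.foo', 'osd', 'unknown') {
        my $unit = 'ceph.target';   # fallback for anything unrecognized
        if ($spec =~ m/^(mon|osd|mds|mgr|radosgw)(\.([A-Za-z0-9\-]{1,32}))?$/) {
            $unit = defined($3) ? "ceph-$1\@$3" : "ceph-$1.target";
        }
        print "$spec -> $unit\n";   # mon.foo -> ceph-mon@foo, osd -> ceph-osd.target
    }
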
diff --git a/PVE/API2/Ceph/FS.pm b/PVE/API2/Ceph/FS.pm
index 1f0e7c9b..3ede2128 100644
--- a/PVE/API2/Ceph/FS.pm
+++ b/PVE/API2/Ceph/FS.pm
@@ -4,6 +4,7 @@ use strict;
use warnings;
use PVE::Ceph::Tools;
+use PVE::Ceph::Services;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RADOS;
use PVE::RESTHandler;
@@ -138,7 +139,7 @@ __PACKAGE__->register_method ({
die "ceph pools '$pool_data' and/or '$pool_metadata' already exist\n"
if $existing_pools->{$pool_data} || $existing_pools->{$pool_metadata};
- my $running_mds = PVE::Ceph::Tools::get_cluster_mds_state($rados);
+ my $running_mds = PVE::Ceph::Services::get_cluster_mds_state($rados);
die "no running Metadata Server (MDS) found!\n" if !scalar(keys %$running_mds);
PVE::Storage::assert_sid_unused($fs_name) if $param->{add_storage};
@@ -193,7 +194,7 @@ __PACKAGE__->register_method ({
print "Adding '$fs_name' to storage configuration...\n";
my $waittime = 0;
- while (!PVE::Ceph::Tools::is_any_mds_active($rados)) {
+ while (!PVE::Ceph::Services::is_any_mds_active($rados)) {
if ($waittime >= 10) {
die "Need MDS to add storage, but none got active!\n";
}
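The hunk above is part of a polling loop that waits for an MDS to become
active before the storage entry is added; the full pattern looks roughly
like this (the one-second sleep is an assumption, it is not visible in
the hunk):

    my $rados = PVE::RADOS->new();
    my $waittime = 0;
    while (!PVE::Ceph::Services::is_any_mds_active($rados)) {
        die "Need MDS to add storage, but none got active!\n" if $waittime >= 10;
        sleep(1);     # assumed poll interval
        $waittime++;
    }
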
diff --git a/PVE/API2/Ceph/MDS.pm b/PVE/API2/Ceph/MDS.pm
index 502ecf5a..19489690 100644
--- a/PVE/API2/Ceph/MDS.pm
+++ b/PVE/API2/Ceph/MDS.pm
@@ -4,6 +4,7 @@ use strict;
use warnings;
use PVE::Ceph::Tools;
+use PVE::Ceph::Services;
use PVE::Cluster qw(cfs_read_file cfs_write_file);
use PVE::INotify;
use PVE::JSONSchema qw(get_standard_option);
@@ -89,7 +90,7 @@ __PACKAGE__->register_method ({
}
}
- my $mds_state = PVE::Ceph::Tools::get_cluster_mds_state();
+ my $mds_state = PVE::Ceph::Services::get_cluster_mds_state();
foreach my $name (keys %$mds_state) {
my $d = $mds_state->{$name};
# just overwrite, this always provides more info
@@ -174,7 +175,7 @@ __PACKAGE__->register_method ({
cfs_write_file('ceph.conf', $cfg);
- eval { PVE::Ceph::Tools::create_mds($mds_id, $rados) };
+ eval { PVE::Ceph::Services::create_mds($mds_id, $rados) };
if (my $err = $@) {
# we abort early if the section is defined, so we know that we
# wrote it at this point. Do not auto remove the service, could
@@ -236,7 +237,7 @@ __PACKAGE__->register_method ({
cfs_write_file('ceph.conf', $cfg);
}
- PVE::Ceph::Tools::destroy_mds($mds_id, $rados);
+ PVE::Ceph::Services::destroy_mds($mds_id, $rados);
};
return $rpcenv->fork_worker('cephdestroymds', "mds.$mds_id", $authuser, $worker);
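The moved MDS helpers keep their signatures, only the namespace changes;
a minimal usage sketch (the id 'a' and the explicit RADOS handle are
examples only, both subs create a connection themselves if none is
passed):

    my $rados = PVE::RADOS->new();
    PVE::Ceph::Services::create_mds('a', $rados);   # dies for numeric-only ids
    # ...
    PVE::Ceph::Services::destroy_mds('a', $rados);
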
diff --git a/PVE/Ceph/Makefile b/PVE/Ceph/Makefile
index c2b7819a..8169f235 100644
--- a/PVE/Ceph/Makefile
+++ b/PVE/Ceph/Makefile
@@ -1,6 +1,7 @@
include ../../defines.mk
PERLSOURCE = \
+ Services.pm\
Tools.pm
all:
diff --git a/PVE/Ceph/Services.pm b/PVE/Ceph/Services.pm
new file mode 100644
index 00000000..e7dcb926
--- /dev/null
+++ b/PVE/Ceph/Services.pm
@@ -0,0 +1,197 @@
+package PVE::Ceph::Services;
+
+use strict;
+use warnings;
+
+use PVE::Ceph::Tools;
+use PVE::Tools qw(run_command);
+use PVE::RADOS;
+
+use File::Path;
+
+sub ceph_service_cmd {
+ my ($action, $service) = @_;
+
+ my $pve_ceph_cfgpath = PVE::Ceph::Tools::get_config('pve_ceph_cfgpath');
+ if (PVE::Ceph::Tools::systemd_managed()) {
+
+ if ($service && $service =~ m/^(mon|osd|mds|mgr|radosgw)(\.([A-Za-z0-9\-]{1,32}))?$/) {
+ $service = defined($3) ? "ceph-$1\@$3" : "ceph-$1.target";
+ } else {
+ $service = "ceph.target";
+ }
+
+ PVE::Tools::run_command(['/bin/systemctl', $action, $service]);
+
+ } else {
+ # ceph daemons do not call 'setsid', so we do that ourselves
+ # (fork_worker sends KILL to the whole process group)
+ PVE::Tools::run_command(['setsid', 'service', 'ceph', '-c', $pve_ceph_cfgpath, $action, $service]);
+ }
+}
+
+# MDS
+
+sub list_local_mds_ids {
+ my $mds_list = [];
+ my $ceph_mds_data_dir = PVE::Ceph::Tools::get_config('ceph_mds_data_dir');
+ my $ccname = PVE::Ceph::Tools::get_config('ccname');
+
+ PVE::Tools::dir_glob_foreach($ceph_mds_data_dir, qr/$ccname-(\S+)/, sub {
+ my (undef, $mds_id) = @_;
+ push @$mds_list, $mds_id;
+ });
+
+ return $mds_list;
+}
+
+sub get_cluster_mds_state {
+ my ($rados) = @_;
+
+ my $mds_state = {};
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $add_state = sub {
+ my ($mds) = @_;
+
+ my $state = {};
+ $state->{addr} = $mds->{addr};
+ $state->{rank} = $mds->{rank};
+ $state->{standby_replay} = $mds->{standby_replay} ? 1 : 0;
+ $state->{state} = $mds->{state};
+
+ $mds_state->{$mds->{name}} = $state;
+ };
+
+ my $mds_dump = $rados->mon_command({ prefix => 'mds stat' });
+ my $fsmap = $mds_dump->{fsmap};
+
+
+ foreach my $mds (@{$fsmap->{standbys}}) {
+ $add_state->($mds);
+ }
+
+ my $fs_info = $fsmap->{filesystems}->[0];
+ my $active_mds = $fs_info->{mdsmap}->{info};
+
+ # normally there's only one active MDS, but we can have multiple active for
+ # different ranks (e.g., different ceph path hierarchies). So just add all.
+ foreach my $mds (values %$active_mds) {
+ $add_state->($mds);
+ }
+
+ return $mds_state;
+}
+
+sub is_any_mds_active {
+ my ($rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $mds_dump = $rados->mon_command({ prefix => 'mds stat' });
+ my $fs = $mds_dump->{fsmap}->{filesystems};
+
+ if (!($fs && scalar(@$fs) > 0)) {
+ return undef;
+ }
+ my $active_mds = $fs->[0]->{mdsmap}->{info};
+
+ for my $mds (values %$active_mds) {
+ return 1 if $mds->{state} eq 'up:active';
+ }
+
+ return 0;
+}
+
+sub create_mds {
+ my ($id, $rados) = @_;
+
+ # `ceph fs status` fails with numeric-only IDs.
+ die "ID: $id, numeric-only IDs are not supported\n"
+ if $id =~ /^\d+$/;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $ccname = PVE::Ceph::Tools::get_config('ccname');
+ my $service_dir = "/var/lib/ceph/mds/$ccname-$id";
+ my $service_keyring = "$service_dir/keyring";
+ my $service_name = "mds.$id";
+
+ die "ceph MDS directory '$service_dir' already exists\n"
+ if -d $service_dir;
+
+ print "creating MDS directory '$service_dir'\n";
+ eval { File::Path::mkpath($service_dir) };
+ my $err = $@;
+ die "creation MDS directory '$service_dir' failed\n" if $err;
+
+ # http://docs.ceph.com/docs/luminous/install/manual-deployment/#adding-mds
+ my $priv = [
+ mon => 'allow profile mds',
+ osd => 'allow rwx',
+ mds => 'allow *',
+ ];
+
+ print "creating keys for '$service_name'\n";
+ my $output = $rados->mon_command({
+ prefix => 'auth get-or-create',
+ entity => $service_name,
+ caps => $priv,
+ format => 'plain',
+ });
+
+ PVE::Tools::file_set_contents($service_keyring, $output);
+
+ print "setting ceph as owner for service directory\n";
+ run_command(["chown", 'ceph:ceph', '-R', $service_dir]);
+
+ print "enabling service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('enable', $service_name);
+ print "starting service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('start', $service_name);
+
+ return undef;
+};
+
+sub destroy_mds {
+ my ($id, $rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $ccname = PVE::Ceph::Tools::get_config('ccname');
+
+ my $service_name = "mds.$id";
+ my $service_dir = "/var/lib/ceph/mds/$ccname-$id";
+
+ print "disabling service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('disable', $service_name);
+ print "stopping service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('stop', $service_name);
+
+ if (-d $service_dir) {
+ print "removing ceph-mds directory '$service_dir'\n";
+ File::Path::remove_tree($service_dir);
+ } else {
+ warn "cannot cleanup MDS $id directory, '$service_dir' not found\n"
+ }
+
+ print "removing ceph auth for '$service_name'\n";
+ $rados->mon_command({
+ prefix => 'auth del',
+ entity => $service_name,
+ format => 'plain'
+ });
+
+ return undef;
+};
+
+1;
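get_cluster_mds_state returns a hash keyed by MDS name, with the fields
set by $add_state above; a sketch of consuming the result (the output
format is illustrative only):

    my $state = PVE::Ceph::Services::get_cluster_mds_state();
    for my $name (sort keys %$state) {
        my $d = $state->{$name};
        printf "%s: rank %s, state %s%s\n", $name,
            $d->{rank} // '-', $d->{state},
            $d->{standby_replay} ? ' (standby-replay)' : '';
    }
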
diff --git a/PVE/Ceph/Tools.pm b/PVE/Ceph/Tools.pm
index 55e86f7f..6c7e7c1f 100644
--- a/PVE/Ceph/Tools.pm
+++ b/PVE/Ceph/Tools.pm
@@ -216,26 +216,6 @@ sub setup_pve_symlinks {
}
}
-sub ceph_service_cmd {
- my ($action, $service) = @_;
-
- if (systemd_managed()) {
-
- if ($service && $service =~ m/^(mon|osd|mds|mgr|radosgw)(\.([A-Za-z0-9\-]{1,32}))?$/) {
- $service = defined($3) ? "ceph-$1\@$3" : "ceph-$1.target";
- } else {
- $service = "ceph.target";
- }
-
- PVE::Tools::run_command(['/bin/systemctl', $action, $service]);
-
- } else {
- # ceph daemons does not call 'setsid', so we do that ourself
- # (fork_worker send KILL to whole process group)
- PVE::Tools::run_command(['setsid', 'service', 'ceph', '-c', $pve_ceph_cfgpath, $action, $service]);
- }
-}
-
# Ceph versions greater Hammer use 'ceph' as user and group instead
# of 'root', and use systemd.
sub systemd_managed {
@@ -247,163 +227,6 @@ sub systemd_managed {
}
}
-sub list_local_mds_ids {
- my $mds_list = [];
-
- PVE::Tools::dir_glob_foreach($ceph_mds_data_dir, qr/$ccname-(\S+)/, sub {
- my (undef, $mds_id) = @_;
- push @$mds_list, $mds_id;
- });
-
- return $mds_list;
-}
-
-sub get_cluster_mds_state {
- my ($rados) = @_;
-
- my $mds_state = {};
-
- if (!defined($rados)) {
- $rados = PVE::RADOS->new();
- }
-
- my $add_state = sub {
- my ($mds) = @_;
-
- my $state = {};
- $state->{addr} = $mds->{addr};
- $state->{rank} = $mds->{rank};
- $state->{standby_replay} = $mds->{standby_replay} ? 1 : 0;
- $state->{state} = $mds->{state};
-
- $mds_state->{$mds->{name}} = $state;
- };
-
- my $mds_dump = $rados->mon_command({ prefix => 'mds stat' });
- my $fsmap = $mds_dump->{fsmap};
-
-
- foreach my $mds (@{$fsmap->{standbys}}) {
- $add_state->($mds);
- }
-
- my $fs_info = $fsmap->{filesystems}->[0];
- my $active_mds = $fs_info->{mdsmap}->{info};
-
- # normally there's only one active MDS, but we can have multiple active for
- # different ranks (e.g., different cephs path hierarchy). So just add all.
- foreach my $mds (values %$active_mds) {
- $add_state->($mds);
- }
-
- return $mds_state;
-}
-
-sub is_any_mds_active {
- my ($rados) = @_;
-
- if (!defined($rados)) {
- $rados = PVE::RADOS->new();
- }
-
- my $mds_dump = $rados->mon_command({ prefix => 'mds stat' });
- my $fs = $mds_dump->{fsmap}->{filesystems};
-
- if (!($fs && scalar(@$fs) > 0)) {
- return undef;
- }
- my $active_mds = $fs->[0]->{mdsmap}->{info};
-
- for my $mds (values %$active_mds) {
- return 1 if $mds->{state} eq 'up:active';
- }
-
- return 0;
-}
-
-sub create_mds {
- my ($id, $rados) = @_;
-
- # `ceph fs status` fails with numeric only ID.
- die "ID: $id, numeric only IDs are not supported\n"
- if $id =~ /^\d+$/;
-
- if (!defined($rados)) {
- $rados = PVE::RADOS->new();
- }
-
- my $service_dir = "/var/lib/ceph/mds/$ccname-$id";
- my $service_keyring = "$service_dir/keyring";
- my $service_name = "mds.$id";
-
- die "ceph MDS directory '$service_dir' already exists\n"
- if -d $service_dir;
-
- print "creating MDS directory '$service_dir'\n";
- eval { File::Path::mkpath($service_dir) };
- my $err = $@;
- die "creation MDS directory '$service_dir' failed\n" if $err;
-
- # http://docs.ceph.com/docs/luminous/install/manual-deployment/#adding-mds
- my $priv = [
- mon => 'allow profile mds',
- osd => 'allow rwx',
- mds => 'allow *',
- ];
-
- print "creating keys for '$service_name'\n";
- my $output = $rados->mon_command({
- prefix => 'auth get-or-create',
- entity => $service_name,
- caps => $priv,
- format => 'plain',
- });
-
- PVE::Tools::file_set_contents($service_keyring, $output);
-
- print "setting ceph as owner for service directory\n";
- run_command(["chown", 'ceph:ceph', '-R', $service_dir]);
-
- print "enabling service 'ceph-mds\@$id.service'\n";
- ceph_service_cmd('enable', $service_name);
- print "starting service 'ceph-mds\@$id.service'\n";
- ceph_service_cmd('start', $service_name);
-
- return undef;
-};
-
-sub destroy_mds {
- my ($id, $rados) = @_;
-
- if (!defined($rados)) {
- $rados = PVE::RADOS->new();
- }
-
- my $service_name = "mds.$id";
- my $service_dir = "/var/lib/ceph/mds/$ccname-$id";
-
- print "disabling service 'ceph-mds\@$id.service'\n";
- ceph_service_cmd('disable', $service_name);
- print "stopping service 'ceph-mds\@$id.service'\n";
- ceph_service_cmd('stop', $service_name);
-
- if (-d $service_dir) {
- print "removing ceph-mds directory '$service_dir'\n";
- File::Path::remove_tree($service_dir);
- } else {
- warn "cannot cleanup MDS $id directory, '$service_dir' not found\n"
- }
-
- print "removing ceph auth for '$service_name'\n";
- $rados->mon_command({
- prefix => 'auth del',
- entity => $service_name,
- format => 'plain'
- });
-
- return undef;
-};
-
# wipe the first 200 MB to clear off leftovers from previous use, otherwise a
# create OSD fails.
sub wipe_disks {
--
2.11.0