[pve-devel] [PATCH ha-manager 4/5] allow use of external fencing devices
Thomas Lamprecht
t.lamprecht at proxmox.com
Mon Apr 11 17:06:02 CEST 2016
A node can now be fenced with the use of external hardware fence
devices.
These devices can be configured in /etc/pve/ha/fence.cfg.
Additionally, the fencing option in the datacenter configuration file
must be set to either 'hardware' or 'both', else the configured
devices will *not* be used.
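For example, a minimal sketch of the relevant datacenter configuration
(the 'fencing' option and its values are assumed as described above):

  # /etc/pve/datacenter.cfg
  fencing: both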
If 'hardware' is selected as the mode, a valid device config *must*
be provided; the fencing will not be marked as successful even if the
CRM could theoretically acquire the lock of the failed node!
This is done because some setups may require a HW fence agent to cut
the node off, and a watchdog which merely resets the node may be
dangerous there.
We always *must* acquire the lock before we may mark the failed node
as fenced, place it into the 'unknown' state and recover its
services.
The CRM bails out in case of a lost manager lock event, where
$manager->cleanup() gets called.
There we kill all remaining open fence processes, if any,
and reset the fence status.
The current master's manager class processes the running fencing
jobs, i.e. it picks up finished fence workers and evaluates their
results.
Regression tests with faked virtual HW fence devices are now also
possible.
The current virtual devices always succeed; this will be changed in a
future patch to allow testing of more (dangerous) corner cases.
Devices can be configured in the testdir/fence.cfg file and follow
exactly the same format as the real ones (see man dlm.conf).
Signed-off-by: Thomas Lamprecht <t.lamprecht at proxmox.com>
---
src/PVE/HA/Manager.pm | 9 +++++-
src/PVE/HA/NodeStatus.pm | 39 +++++++++++++++++++++++-
src/test/test-hw-fence1/README | 1 +
src/test/test-hw-fence1/cmdlist | 4 +++
src/test/test-hw-fence1/fence.cfg | 6 ++++
src/test/test-hw-fence1/hardware_status | 5 ++++
src/test/test-hw-fence1/log.expect | 53 +++++++++++++++++++++++++++++++++
src/test/test-hw-fence1/manager_status | 1 +
src/test/test-hw-fence1/service_config | 5 ++++
9 files changed, 121 insertions(+), 2 deletions(-)
create mode 100644 src/test/test-hw-fence1/README
create mode 100644 src/test/test-hw-fence1/cmdlist
create mode 100644 src/test/test-hw-fence1/fence.cfg
create mode 100644 src/test/test-hw-fence1/hardware_status
create mode 100644 src/test/test-hw-fence1/log.expect
create mode 100644 src/test/test-hw-fence1/manager_status
create mode 100644 src/test/test-hw-fence1/service_config
diff --git a/src/PVE/HA/Manager.pm b/src/PVE/HA/Manager.pm
index 9b4e6f2..ce42bc9 100644
--- a/src/PVE/HA/Manager.pm
+++ b/src/PVE/HA/Manager.pm
@@ -8,6 +8,7 @@ use Data::Dumper;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
+use PVE::HA::Fence;
my $fence_delay = 60;
@@ -38,7 +39,13 @@ sub new {
sub cleanup {
my ($self) = @_;
- # todo: ?
+ my $haenv = $self->{haenv};
+
+ # reset pending fence jobs and node states
+ if (PVE::HA::Fence::has_fencing_job($haenv->nodename())) {
+ $haenv->log('notice', "bailing out from running fence jobs");
+ PVE::HA::Fence::kill_and_cleanup_jobs($haenv);
+ }
}
sub flush_master_status {
diff --git a/src/PVE/HA/NodeStatus.pm b/src/PVE/HA/NodeStatus.pm
index eb174cb..6b29f9c 100644
--- a/src/PVE/HA/NodeStatus.pm
+++ b/src/PVE/HA/NodeStatus.pm
@@ -2,6 +2,7 @@ package PVE::HA::NodeStatus;
use strict;
use warnings;
+use PVE::HA::Fence;
use Data::Dumper;
@@ -177,11 +178,47 @@ sub fence_node {
&$set_node_state($self, $node, 'fence');
}
- my $success = $haenv->get_ha_agent_lock($node);
+ my ($success, $hw_fence_success) = (0, 0);
+
+ my $fencing_mode = $haenv->fencing_mode();
+
+ if ($fencing_mode eq 'hardware' || $fencing_mode eq 'both') {
+
+ $hw_fence_success = PVE::HA::Fence::is_node_fenced($node);
+
+ # bad fence.cfg or no devices and only hardware fencing configured
+ if ($hw_fence_success < 0 && $fencing_mode eq 'hardware') {
+ $haenv->log('err', "Fencing of node '$node' failed and needs " .
+ "manual intervention!");
+ return 0;
+ }
+
+ if ($hw_fence_success > 0) {
+ # we fenced the node, now we're allowed to "steal" its lock
+ $haenv->log('notice', "fencing of node '$node' succeeded, " .
+ "trying to get its agent lock");
+ $haenv->release_ha_agent_lock($node);
+
+ } else {
+
+ # start and process fencing
+ if (PVE::HA::Fence::run_fence_jobs($haenv, $node)) {
+ $haenv->log('notice', "Started fencing off node '$node'");
+ }
+
+ }
+ }
+
+ # we *always* need the failed node's lock; it ensures that we are allowed to
+ # recover its services and prevents races, e.g. if it's restarting.
+ if ($hw_fence_success || $fencing_mode ne 'hardware' ) {
+ $success = $haenv->get_ha_agent_lock($node);
+ }
if ($success) {
$haenv->log("info", "fencing: acknowleged - got agent lock for node '$node'");
&$set_node_state($self, $node, 'unknown');
+ PVE::HA::Fence::kill_and_cleanup_jobs($haenv) if ($fencing_mode ne 'watchdog');
}
return $success;
diff --git a/src/test/test-hw-fence1/README b/src/test/test-hw-fence1/README
new file mode 100644
index 0000000..d0dea4b
--- /dev/null
+++ b/src/test/test-hw-fence1/README
@@ -0,0 +1 @@
+Test failover after single node network failure with HW fence devices.
diff --git a/src/test/test-hw-fence1/cmdlist b/src/test/test-hw-fence1/cmdlist
new file mode 100644
index 0000000..eee0e40
--- /dev/null
+++ b/src/test/test-hw-fence1/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on"],
+ [ "network node3 off" ]
+]
diff --git a/src/test/test-hw-fence1/fence.cfg b/src/test/test-hw-fence1/fence.cfg
new file mode 100644
index 0000000..0bbe096
--- /dev/null
+++ b/src/test/test-hw-fence1/fence.cfg
@@ -0,0 +1,6 @@
+# see man dlm.conf
+device virt fence_virt ip="127.0.0.1"
+connect virt node=node1 plug=100
+connect virt node=node2 plug=101
+connect virt node=node3 plug=102
+
diff --git a/src/test/test-hw-fence1/hardware_status b/src/test/test-hw-fence1/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-hw-fence1/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-hw-fence1/log.expect b/src/test/test-hw-fence1/log.expect
new file mode 100644
index 0000000..15555d7
--- /dev/null
+++ b/src/test/test-hw-fence1/log.expect
@@ -0,0 +1,53 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node1'
+info 20 node1/crm: adding new service 'vm:102' on node 'node2'
+info 20 node1/crm: adding new service 'vm:103' on node 'node3'
+info 20 node1/crm: service 'vm:102': state changed from 'started' to 'request_stop'
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:101
+info 21 node1/lrm: service status vm:101 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service vm:103
+info 25 node3/lrm: service status vm:103 started
+info 40 node1/crm: service 'vm:102': state changed from 'request_stop' to 'stopped'
+info 120 cmdlist: execute network node3 off
+info 120 node1/crm: node 'node3': state changed from 'online' => 'unknown'
+info 124 node3/crm: status change slave => wait_for_quorum
+info 125 node3/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'vm:103': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node3': state changed from 'unknown' => 'fence'
+noti 160 node1/crm: Start fencing node 'node3'
+noti 160 node1/crm: [fence 'node3'] execute cmd: fence_virt --ip=127.0.0.1 --plug=102
+info 160 fence_virt: execute power node3 off
+info 160 node3/crm: killed by poweroff
+info 160 node3/lrm: killed by poweroff
+noti 160 node1/crm: Started fencing off node 'node3'
+noti 180 node1/crm: fencing of node 'node3' succeeded, trying to get its agent lock
+info 180 node1/crm: got lock 'ha_agent_node3_lock'
+info 180 node1/crm: fencing: acknowleged - got agent lock for node 'node3'
+info 180 node1/crm: node 'node3': state changed from 'fence' => 'unknown'
+info 180 node1/crm: recover service 'vm:103' from fenced node 'node3' to node 'node2'
+info 180 node1/crm: service 'vm:103': state changed from 'fence' to 'started' (node = node2)
+info 183 node2/lrm: starting service vm:103
+info 183 node2/lrm: service status vm:103 started
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-hw-fence1/manager_status b/src/test/test-hw-fence1/manager_status
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/test/test-hw-fence1/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-hw-fence1/service_config b/src/test/test-hw-fence1/service_config
new file mode 100644
index 0000000..70f11d6
--- /dev/null
+++ b/src/test/test-hw-fence1/service_config
@@ -0,0 +1,5 @@
+{
+ "vm:101": { "node": "node1", "state": "enabled" },
+ "vm:102": { "node": "node2" },
+ "vm:103": { "node": "node3", "state": "enabled" }
+}
--
2.1.4