[pve-devel] [PATCH ha-manager v3 09/13] test: ha tester: add test cases for negative resource affinity rules
Daniel Kral
d.kral at proxmox.com
Fri Jul 4 20:20:52 CEST 2025
Add test cases for strict negative resource affinity rules, i.e. where
resources must be kept on separate nodes. These verify the behavior of
the resources in strict negative resource affinity rules in case of a
failover of the node of one or more of these resources in the following
scenarios:
1. 2 resources in neg. affinity and a 3 node cluster; 1 node failing
2. 3 resources in neg. affinity and a 5 node cluster; 1 node failing
3. 3 resources in neg. affinity and a 5 node cluster; 2 nodes failing
4. 2 resources in neg. affinity and a 3 node cluster; 1 node failing,
but the recovery node cannot start the resource
5. Pair of 2 neg. resource affinity rules (with one common resource in
both) in a 3 node cluster; 1 node failing
6. 2 resources in neg. affinity and a 3 node cluster; 1 node failing,
but both resources cannot start on the recovery node
7. 2 resources in neg. affinity and a 3 node cluster; 1 resource
manually migrated to another free node; other resources in neg.
affinity with migrated resource cannot be migrated to that resource's
source node during migration
8. 3 resources in neg. affinity and a 3 node cluster; a manual migration
of 1 resource to another resource's current node fails
The word "strict" describes the current policy of resource affinity
rules and is added in anticipation of a "non-strict" variant in the
future.
Signed-off-by: Daniel Kral <d.kral at proxmox.com>
---
.../README | 13 +++
.../cmdlist | 4 +
.../hardware_status | 5 +
.../log.expect | 60 ++++++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 6 +
.../README | 15 +++
.../cmdlist | 4 +
.../hardware_status | 7 ++
.../log.expect | 90 ++++++++++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 10 ++
.../README | 16 +++
.../cmdlist | 4 +
.../hardware_status | 7 ++
.../log.expect | 110 ++++++++++++++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 10 ++
.../README | 18 +++
.../cmdlist | 4 +
.../hardware_status | 5 +
.../log.expect | 69 +++++++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 6 +
.../README | 11 ++
.../cmdlist | 4 +
.../hardware_status | 5 +
.../log.expect | 56 +++++++++
.../manager_status | 1 +
.../rules_config | 7 ++
.../service_config | 5 +
.../README | 18 +++
.../cmdlist | 4 +
.../hardware_status | 5 +
.../log.expect | 69 +++++++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 6 +
.../README | 15 +++
.../cmdlist | 5 +
.../hardware_status | 5 +
.../log.expect | 52 +++++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 4 +
.../README | 12 ++
.../cmdlist | 4 +
.../hardware_status | 5 +
.../log.expect | 38 ++++++
.../manager_status | 1 +
.../rules_config | 3 +
.../service_config | 5 +
56 files changed, 827 insertions(+)
create mode 100644 src/test/test-resource-affinity-strict-negative1/README
create mode 100644 src/test/test-resource-affinity-strict-negative1/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative1/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative1/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative1/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative1/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative1/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative2/README
create mode 100644 src/test/test-resource-affinity-strict-negative2/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative2/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative2/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative2/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative2/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative2/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative3/README
create mode 100644 src/test/test-resource-affinity-strict-negative3/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative3/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative3/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative3/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative3/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative3/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative4/README
create mode 100644 src/test/test-resource-affinity-strict-negative4/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative4/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative4/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative4/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative4/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative4/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative5/README
create mode 100644 src/test/test-resource-affinity-strict-negative5/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative5/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative5/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative5/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative5/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative5/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative6/README
create mode 100644 src/test/test-resource-affinity-strict-negative6/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative6/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative6/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative6/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative6/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative6/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative7/README
create mode 100644 src/test/test-resource-affinity-strict-negative7/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative7/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative7/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative7/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative7/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative7/service_config
create mode 100644 src/test/test-resource-affinity-strict-negative8/README
create mode 100644 src/test/test-resource-affinity-strict-negative8/cmdlist
create mode 100644 src/test/test-resource-affinity-strict-negative8/hardware_status
create mode 100644 src/test/test-resource-affinity-strict-negative8/log.expect
create mode 100644 src/test/test-resource-affinity-strict-negative8/manager_status
create mode 100644 src/test/test-resource-affinity-strict-negative8/rules_config
create mode 100644 src/test/test-resource-affinity-strict-negative8/service_config
diff --git a/src/test/test-resource-affinity-strict-negative1/README b/src/test/test-resource-affinity-strict-negative1/README
new file mode 100644
index 0000000..0f01197
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/README
@@ -0,0 +1,13 @@
+Test whether a strict negative resource affinity rule among two resources makes
+one of the resources migrate to a different recovery node than the other in case
+of a failover of their previously assigned node.
+
+The test scenario is:
+- vm:101 and vm:102 must be kept separate
+- vm:101 and vm:102 are currently running on node2 and node3 respectively
+- node1 has a higher resource count than node2 to test the resource affinity rule
+ is applied even though the scheduler would prefer the less utilized node
+
+The expected outcome is:
+- As node3 fails, vm:102 is migrated to node1; even though the utilization of
+ node1 is high already, the resources must be kept separate
diff --git a/src/test/test-resource-affinity-strict-negative1/cmdlist b/src/test/test-resource-affinity-strict-negative1/cmdlist
new file mode 100644
index 0000000..c0a4daa
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on" ],
+ [ "network node3 off" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative1/hardware_status b/src/test/test-resource-affinity-strict-negative1/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative1/log.expect b/src/test/test-resource-affinity-strict-negative1/log.expect
new file mode 100644
index 0000000..475db39
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/log.expect
@@ -0,0 +1,60 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node2'
+info 20 node1/crm: adding new service 'vm:102' on node 'node3'
+info 20 node1/crm: adding new service 'vm:103' on node 'node1'
+info 20 node1/crm: adding new service 'vm:104' on node 'node1'
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node3)
+info 20 node1/crm: service 'vm:103': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:104': state changed from 'request_start' to 'started' (node = node1)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:103
+info 21 node1/lrm: service status vm:103 started
+info 21 node1/lrm: starting service vm:104
+info 21 node1/lrm: service status vm:104 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:101
+info 23 node2/lrm: service status vm:101 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service vm:102
+info 25 node3/lrm: service status vm:102 started
+info 120 cmdlist: execute network node3 off
+info 120 node1/crm: node 'node3': state changed from 'online' => 'unknown'
+info 124 node3/crm: status change slave => wait_for_quorum
+info 125 node3/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'vm:102': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node3': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node3'
+info 166 watchdog: execute power node3 off
+info 165 node3/crm: killed by poweroff
+info 166 node3/lrm: killed by poweroff
+info 166 hardware: server 'node3' stopped by poweroff (watchdog)
+info 240 node1/crm: got lock 'ha_agent_node3_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: node 'node3': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: service 'vm:102': state changed from 'fence' to 'recovery'
+info 240 node1/crm: recover service 'vm:102' from fenced node 'node3' to node 'node1'
+info 240 node1/crm: service 'vm:102': state changed from 'recovery' to 'started' (node = node1)
+info 241 node1/lrm: starting service vm:102
+info 241 node1/lrm: service status vm:102 started
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative1/manager_status b/src/test/test-resource-affinity-strict-negative1/manager_status
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-strict-negative1/rules_config b/src/test/test-resource-affinity-strict-negative1/rules_config
new file mode 100644
index 0000000..2074776
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources vm:101,vm:102
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative1/service_config b/src/test/test-resource-affinity-strict-negative1/service_config
new file mode 100644
index 0000000..6582e8c
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative1/service_config
@@ -0,0 +1,6 @@
+{
+ "vm:101": { "node": "node2", "state": "started" },
+ "vm:102": { "node": "node3", "state": "started" },
+ "vm:103": { "node": "node1", "state": "started" },
+ "vm:104": { "node": "node1", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative2/README b/src/test/test-resource-affinity-strict-negative2/README
new file mode 100644
index 0000000..613be64
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/README
@@ -0,0 +1,15 @@
+Test whether a strict negative resource affinity rule among three resources makes
+one of the resources migrate to a different node than the other resources in case
+of a failover of the resource's previously assigned node.
+
+The test scenario is:
+- vm:101, vm:102, and vm:103 must be kept separate
+- vm:101, vm:102, and vm:103 are on node3, node4, and node5 respectively
+- node1 and node2 each have a higher resource count than node3, node4, and
+  node5 to test that the rule is applied even though the scheduler would
+  prefer the less utilized nodes node3 and node4
+
+The expected outcome is:
+- As node5 fails, vm:103 is migrated to node2; even though the utilization of
+ node2 is high already, the resources must be kept separate; node2 is chosen
+ since node1 has one more resource running on it
diff --git a/src/test/test-resource-affinity-strict-negative2/cmdlist b/src/test/test-resource-affinity-strict-negative2/cmdlist
new file mode 100644
index 0000000..89d09c9
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on", "power node4 on", "power node5 on" ],
+ [ "network node5 off" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative2/hardware_status b/src/test/test-resource-affinity-strict-negative2/hardware_status
new file mode 100644
index 0000000..7b8e961
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/hardware_status
@@ -0,0 +1,7 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" },
+ "node4": { "power": "off", "network": "off" },
+ "node5": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative2/log.expect b/src/test/test-resource-affinity-strict-negative2/log.expect
new file mode 100644
index 0000000..858d3c9
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/log.expect
@@ -0,0 +1,90 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node4 on
+info 20 node4/crm: status change startup => wait_for_quorum
+info 20 node4/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node5 on
+info 20 node5/crm: status change startup => wait_for_quorum
+info 20 node5/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node4': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node5': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node3'
+info 20 node1/crm: adding new service 'vm:102' on node 'node4'
+info 20 node1/crm: adding new service 'vm:103' on node 'node5'
+info 20 node1/crm: adding new service 'vm:104' on node 'node1'
+info 20 node1/crm: adding new service 'vm:105' on node 'node1'
+info 20 node1/crm: adding new service 'vm:106' on node 'node1'
+info 20 node1/crm: adding new service 'vm:107' on node 'node2'
+info 20 node1/crm: adding new service 'vm:108' on node 'node2'
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node3)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node4)
+info 20 node1/crm: service 'vm:103': state changed from 'request_start' to 'started' (node = node5)
+info 20 node1/crm: service 'vm:104': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:105': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:106': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:107': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'vm:108': state changed from 'request_start' to 'started' (node = node2)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:104
+info 21 node1/lrm: service status vm:104 started
+info 21 node1/lrm: starting service vm:105
+info 21 node1/lrm: service status vm:105 started
+info 21 node1/lrm: starting service vm:106
+info 21 node1/lrm: service status vm:106 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:107
+info 23 node2/lrm: service status vm:107 started
+info 23 node2/lrm: starting service vm:108
+info 23 node2/lrm: service status vm:108 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service vm:101
+info 25 node3/lrm: service status vm:101 started
+info 26 node4/crm: status change wait_for_quorum => slave
+info 27 node4/lrm: got lock 'ha_agent_node4_lock'
+info 27 node4/lrm: status change wait_for_agent_lock => active
+info 27 node4/lrm: starting service vm:102
+info 27 node4/lrm: service status vm:102 started
+info 28 node5/crm: status change wait_for_quorum => slave
+info 29 node5/lrm: got lock 'ha_agent_node5_lock'
+info 29 node5/lrm: status change wait_for_agent_lock => active
+info 29 node5/lrm: starting service vm:103
+info 29 node5/lrm: service status vm:103 started
+info 120 cmdlist: execute network node5 off
+info 120 node1/crm: node 'node5': state changed from 'online' => 'unknown'
+info 128 node5/crm: status change slave => wait_for_quorum
+info 129 node5/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'vm:103': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node5': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node5'
+info 170 watchdog: execute power node5 off
+info 169 node5/crm: killed by poweroff
+info 170 node5/lrm: killed by poweroff
+info 170 hardware: server 'node5' stopped by poweroff (watchdog)
+info 240 node1/crm: got lock 'ha_agent_node5_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node5'
+info 240 node1/crm: node 'node5': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node5'
+info 240 node1/crm: service 'vm:103': state changed from 'fence' to 'recovery'
+info 240 node1/crm: recover service 'vm:103' from fenced node 'node5' to node 'node2'
+info 240 node1/crm: service 'vm:103': state changed from 'recovery' to 'started' (node = node2)
+info 243 node2/lrm: starting service vm:103
+info 243 node2/lrm: service status vm:103 started
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative2/manager_status b/src/test/test-resource-affinity-strict-negative2/manager_status
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/manager_status
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/src/test/test-resource-affinity-strict-negative2/rules_config b/src/test/test-resource-affinity-strict-negative2/rules_config
new file mode 100644
index 0000000..44e6a02
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources vm:101,vm:102,vm:103
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative2/service_config b/src/test/test-resource-affinity-strict-negative2/service_config
new file mode 100644
index 0000000..2c27816
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative2/service_config
@@ -0,0 +1,10 @@
+{
+ "vm:101": { "node": "node3", "state": "started" },
+ "vm:102": { "node": "node4", "state": "started" },
+ "vm:103": { "node": "node5", "state": "started" },
+ "vm:104": { "node": "node1", "state": "started" },
+ "vm:105": { "node": "node1", "state": "started" },
+ "vm:106": { "node": "node1", "state": "started" },
+ "vm:107": { "node": "node2", "state": "started" },
+ "vm:108": { "node": "node2", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative3/README b/src/test/test-resource-affinity-strict-negative3/README
new file mode 100644
index 0000000..a26301a
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/README
@@ -0,0 +1,16 @@
+Test whether a strict negative resource affinity rule among three resources makes
+two of the resources migrate to two different recovery nodes than the node of
+the third resource in case of a failover of their two previously assigned nodes.
+
+The test scenario is:
+- vm:101, vm:102, and vm:103 must be kept separate
+- vm:101, vm:102, and vm:103 are respectively on node3, node4, and node5
+- node1 and node2 both have higher resource counts than node3, node4, and node5
+  to test that the resource affinity rule is enforced even though the
+  utilization would prefer the less utilized node3
+
+The expected outcome is:
+- As node4 and node5 fails, vm:102 and vm:103 are migrated to node2 and node1
+ respectively; even though the utilization of node1 and node2 are high
+ already, the resources must be kept separate; node2 is chosen first since
+ node1 has one more resource running on it
diff --git a/src/test/test-resource-affinity-strict-negative3/cmdlist b/src/test/test-resource-affinity-strict-negative3/cmdlist
new file mode 100644
index 0000000..1934596
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on", "power node4 on", "power node5 on" ],
+ [ "network node4 off", "network node5 off" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative3/hardware_status b/src/test/test-resource-affinity-strict-negative3/hardware_status
new file mode 100644
index 0000000..7b8e961
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/hardware_status
@@ -0,0 +1,7 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" },
+ "node4": { "power": "off", "network": "off" },
+ "node5": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative3/log.expect b/src/test/test-resource-affinity-strict-negative3/log.expect
new file mode 100644
index 0000000..4acdcec
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/log.expect
@@ -0,0 +1,110 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node4 on
+info 20 node4/crm: status change startup => wait_for_quorum
+info 20 node4/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node5 on
+info 20 node5/crm: status change startup => wait_for_quorum
+info 20 node5/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node4': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node5': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node3'
+info 20 node1/crm: adding new service 'vm:102' on node 'node4'
+info 20 node1/crm: adding new service 'vm:103' on node 'node5'
+info 20 node1/crm: adding new service 'vm:104' on node 'node1'
+info 20 node1/crm: adding new service 'vm:105' on node 'node1'
+info 20 node1/crm: adding new service 'vm:106' on node 'node1'
+info 20 node1/crm: adding new service 'vm:107' on node 'node2'
+info 20 node1/crm: adding new service 'vm:108' on node 'node2'
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node3)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node4)
+info 20 node1/crm: service 'vm:103': state changed from 'request_start' to 'started' (node = node5)
+info 20 node1/crm: service 'vm:104': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:105': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:106': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:107': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'vm:108': state changed from 'request_start' to 'started' (node = node2)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:104
+info 21 node1/lrm: service status vm:104 started
+info 21 node1/lrm: starting service vm:105
+info 21 node1/lrm: service status vm:105 started
+info 21 node1/lrm: starting service vm:106
+info 21 node1/lrm: service status vm:106 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:107
+info 23 node2/lrm: service status vm:107 started
+info 23 node2/lrm: starting service vm:108
+info 23 node2/lrm: service status vm:108 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service vm:101
+info 25 node3/lrm: service status vm:101 started
+info 26 node4/crm: status change wait_for_quorum => slave
+info 27 node4/lrm: got lock 'ha_agent_node4_lock'
+info 27 node4/lrm: status change wait_for_agent_lock => active
+info 27 node4/lrm: starting service vm:102
+info 27 node4/lrm: service status vm:102 started
+info 28 node5/crm: status change wait_for_quorum => slave
+info 29 node5/lrm: got lock 'ha_agent_node5_lock'
+info 29 node5/lrm: status change wait_for_agent_lock => active
+info 29 node5/lrm: starting service vm:103
+info 29 node5/lrm: service status vm:103 started
+info 120 cmdlist: execute network node4 off
+info 120 cmdlist: execute network node5 off
+info 120 node1/crm: node 'node4': state changed from 'online' => 'unknown'
+info 120 node1/crm: node 'node5': state changed from 'online' => 'unknown'
+info 126 node4/crm: status change slave => wait_for_quorum
+info 127 node4/lrm: status change active => lost_agent_lock
+info 128 node5/crm: status change slave => wait_for_quorum
+info 129 node5/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'vm:102': state changed from 'started' to 'fence'
+info 160 node1/crm: service 'vm:103': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node4': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node4'
+info 160 node1/crm: node 'node5': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node5'
+info 168 watchdog: execute power node4 off
+info 167 node4/crm: killed by poweroff
+info 168 node4/lrm: killed by poweroff
+info 168 hardware: server 'node4' stopped by poweroff (watchdog)
+info 170 watchdog: execute power node5 off
+info 169 node5/crm: killed by poweroff
+info 170 node5/lrm: killed by poweroff
+info 170 hardware: server 'node5' stopped by poweroff (watchdog)
+info 240 node1/crm: got lock 'ha_agent_node4_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node4'
+info 240 node1/crm: node 'node4': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node4'
+info 240 node1/crm: service 'vm:102': state changed from 'fence' to 'recovery'
+info 240 node1/crm: got lock 'ha_agent_node5_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node5'
+info 240 node1/crm: node 'node5': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node5'
+info 240 node1/crm: service 'vm:103': state changed from 'fence' to 'recovery'
+info 240 node1/crm: recover service 'vm:102' from fenced node 'node4' to node 'node2'
+info 240 node1/crm: service 'vm:102': state changed from 'recovery' to 'started' (node = node2)
+info 240 node1/crm: recover service 'vm:103' from fenced node 'node5' to node 'node1'
+info 240 node1/crm: service 'vm:103': state changed from 'recovery' to 'started' (node = node1)
+info 241 node1/lrm: starting service vm:103
+info 241 node1/lrm: service status vm:103 started
+info 243 node2/lrm: starting service vm:102
+info 243 node2/lrm: service status vm:102 started
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative3/manager_status b/src/test/test-resource-affinity-strict-negative3/manager_status
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/manager_status
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/src/test/test-resource-affinity-strict-negative3/rules_config b/src/test/test-resource-affinity-strict-negative3/rules_config
new file mode 100644
index 0000000..44e6a02
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources vm:101,vm:102,vm:103
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative3/service_config b/src/test/test-resource-affinity-strict-negative3/service_config
new file mode 100644
index 0000000..2c27816
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative3/service_config
@@ -0,0 +1,10 @@
+{
+ "vm:101": { "node": "node3", "state": "started" },
+ "vm:102": { "node": "node4", "state": "started" },
+ "vm:103": { "node": "node5", "state": "started" },
+ "vm:104": { "node": "node1", "state": "started" },
+ "vm:105": { "node": "node1", "state": "started" },
+ "vm:106": { "node": "node1", "state": "started" },
+ "vm:107": { "node": "node2", "state": "started" },
+ "vm:108": { "node": "node2", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative4/README b/src/test/test-resource-affinity-strict-negative4/README
new file mode 100644
index 0000000..16895a4
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/README
@@ -0,0 +1,18 @@
+Test whether a strict negative resource affinity rule among two resources makes
+one of the resources migrate to a different recovery node than the other resource
+in case of a failover of the resource's previously assigned node. As the resource
+fails to start on the recovery node (e.g. insufficient resources), the failing
+resource is kept on the recovery node.
+
+The test scenario is:
+- vm:101 and fa:120001 must be kept separate
+- vm:101 and fa:120001 are on node2 and node3 respectively
+- fa:120001 will fail to start on node1
+- node1 has a higher resource count than node2 to test the resource affinity rule
+ is applied even though the scheduler would prefer the less utilized node
+
+The expected outcome is:
+- As node3 fails, fa:120001 is migrated to node1
+- fa:120001 will stay on the node (potentially in recovery), since it cannot be
+ started on node1, but cannot be relocated to another one either due to the
+ strict resource affinity rule
diff --git a/src/test/test-resource-affinity-strict-negative4/cmdlist b/src/test/test-resource-affinity-strict-negative4/cmdlist
new file mode 100644
index 0000000..c0a4daa
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on" ],
+ [ "network node3 off" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative4/hardware_status b/src/test/test-resource-affinity-strict-negative4/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative4/log.expect b/src/test/test-resource-affinity-strict-negative4/log.expect
new file mode 100644
index 0000000..f772ea8
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/log.expect
@@ -0,0 +1,69 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'fa:120001' on node 'node3'
+info 20 node1/crm: adding new service 'vm:101' on node 'node2'
+info 20 node1/crm: adding new service 'vm:103' on node 'node1'
+info 20 node1/crm: adding new service 'vm:104' on node 'node1'
+info 20 node1/crm: service 'fa:120001': state changed from 'request_start' to 'started' (node = node3)
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'vm:103': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:104': state changed from 'request_start' to 'started' (node = node1)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:103
+info 21 node1/lrm: service status vm:103 started
+info 21 node1/lrm: starting service vm:104
+info 21 node1/lrm: service status vm:104 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:101
+info 23 node2/lrm: service status vm:101 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service fa:120001
+info 25 node3/lrm: service status fa:120001 started
+info 120 cmdlist: execute network node3 off
+info 120 node1/crm: node 'node3': state changed from 'online' => 'unknown'
+info 124 node3/crm: status change slave => wait_for_quorum
+info 125 node3/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'fa:120001': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node3': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node3'
+info 166 watchdog: execute power node3 off
+info 165 node3/crm: killed by poweroff
+info 166 node3/lrm: killed by poweroff
+info 166 hardware: server 'node3' stopped by poweroff (watchdog)
+info 240 node1/crm: got lock 'ha_agent_node3_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: node 'node3': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: service 'fa:120001': state changed from 'fence' to 'recovery'
+info 240 node1/crm: recover service 'fa:120001' from fenced node 'node3' to node 'node1'
+info 240 node1/crm: service 'fa:120001': state changed from 'recovery' to 'started' (node = node1)
+info 241 node1/lrm: starting service fa:120001
+warn 241 node1/lrm: unable to start service fa:120001
+warn 241 node1/lrm: restart policy: retry number 1 for service 'fa:120001'
+info 261 node1/lrm: starting service fa:120001
+warn 261 node1/lrm: unable to start service fa:120001
+err 261 node1/lrm: unable to start service fa:120001 on local node after 1 retries
+warn 280 node1/crm: starting service fa:120001 on node 'node1' failed, relocating service.
+warn 280 node1/crm: Start Error Recovery: Tried all available nodes for service 'fa:120001', retry start on current node. Tried nodes: node1
+info 281 node1/lrm: starting service fa:120001
+info 281 node1/lrm: service status fa:120001 started
+info 300 node1/crm: relocation policy successful for 'fa:120001' on node 'node1', failed nodes: node1
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative4/manager_status b/src/test/test-resource-affinity-strict-negative4/manager_status
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-strict-negative4/rules_config b/src/test/test-resource-affinity-strict-negative4/rules_config
new file mode 100644
index 0000000..227ec31
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources vm:101,fa:120001
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative4/service_config b/src/test/test-resource-affinity-strict-negative4/service_config
new file mode 100644
index 0000000..f53c2bc
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative4/service_config
@@ -0,0 +1,6 @@
+{
+ "vm:101": { "node": "node2", "state": "started" },
+ "fa:120001": { "node": "node3", "state": "started" },
+ "vm:103": { "node": "node1", "state": "started" },
+ "vm:104": { "node": "node1", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative5/README b/src/test/test-resource-affinity-strict-negative5/README
new file mode 100644
index 0000000..35276fb
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/README
@@ -0,0 +1,11 @@
+Test whether two pair-wise strict negative resource affinity rules, i.e. where
+one resource is in two separate negative resource affinity rules with two other
+resources, make one of the outer resources migrate to the same node as the other
+outer resource in case of a failover of their previously assigned node.
+
+The test scenario is:
+- vm:101 and vm:102, and vm:101 and vm:103 must each be kept separate
+- vm:101, vm:102, and vm:103 are respectively on node1, node2, and node3
+
+The expected outcome is:
+- As node3 fails, vm:103 is migrated to node2 - the same as vm:102
diff --git a/src/test/test-resource-affinity-strict-negative5/cmdlist b/src/test/test-resource-affinity-strict-negative5/cmdlist
new file mode 100644
index 0000000..c0a4daa
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on" ],
+ [ "network node3 off" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative5/hardware_status b/src/test/test-resource-affinity-strict-negative5/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative5/log.expect b/src/test/test-resource-affinity-strict-negative5/log.expect
new file mode 100644
index 0000000..16156ad
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/log.expect
@@ -0,0 +1,56 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node1'
+info 20 node1/crm: adding new service 'vm:102' on node 'node2'
+info 20 node1/crm: adding new service 'vm:103' on node 'node3'
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'vm:103': state changed from 'request_start' to 'started' (node = node3)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:101
+info 21 node1/lrm: service status vm:101 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:102
+info 23 node2/lrm: service status vm:102 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service vm:103
+info 25 node3/lrm: service status vm:103 started
+info 120 cmdlist: execute network node3 off
+info 120 node1/crm: node 'node3': state changed from 'online' => 'unknown'
+info 124 node3/crm: status change slave => wait_for_quorum
+info 125 node3/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'vm:103': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node3': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node3'
+info 166 watchdog: execute power node3 off
+info 165 node3/crm: killed by poweroff
+info 166 node3/lrm: killed by poweroff
+info 166 hardware: server 'node3' stopped by poweroff (watchdog)
+info 240 node1/crm: got lock 'ha_agent_node3_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: node 'node3': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: service 'vm:103': state changed from 'fence' to 'recovery'
+info 240 node1/crm: recover service 'vm:103' from fenced node 'node3' to node 'node2'
+info 240 node1/crm: service 'vm:103': state changed from 'recovery' to 'started' (node = node2)
+info 243 node2/lrm: starting service vm:103
+info 243 node2/lrm: service status vm:103 started
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative5/manager_status b/src/test/test-resource-affinity-strict-negative5/manager_status
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/manager_status
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/src/test/test-resource-affinity-strict-negative5/rules_config b/src/test/test-resource-affinity-strict-negative5/rules_config
new file mode 100644
index 0000000..6a13333
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/rules_config
@@ -0,0 +1,7 @@
+resource-affinity: lonely-must-some-vms-be1
+ resources vm:101,vm:102
+ affinity negative
+
+resource-affinity: lonely-must-some-vms-be2
+ resources vm:101,vm:103
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative5/service_config b/src/test/test-resource-affinity-strict-negative5/service_config
new file mode 100644
index 0000000..4b26f6b
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative5/service_config
@@ -0,0 +1,5 @@
+{
+ "vm:101": { "node": "node1", "state": "started" },
+ "vm:102": { "node": "node2", "state": "started" },
+ "vm:103": { "node": "node3", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative6/README b/src/test/test-resource-affinity-strict-negative6/README
new file mode 100644
index 0000000..2c8e7c1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/README
@@ -0,0 +1,18 @@
+Test whether a strict negative resource affinity rule among two resources makes
+one of the resources migrate to a different recovery node than the other resource
+in case of a failover of the resource's previously assigned node. As the other
+resource fails to start on the recovery node (e.g. insufficient resources), the
+failing resource is kept on the recovery node.
+
+The test scenario is:
+- fa:120001 and fa:220001 must be kept separate
+- fa:120001 and fa:220001 are on node2 and node3 respectively
+- fa:120001 and fa:220001 will fail to start on node1
+- node1 has a higher resource count than node2 to test the resource affinity rule
+ is applied even though the scheduler would prefer the less utilized node
+
+The expected outcome is:
+- As node3 fails, fa:220001 is migrated to node1
+- fa:220001 will stay on the node (potentially in recovery), since it cannot be
+ started on node1, but cannot be relocated to another one either due to the
+ strict resource affinity rule
diff --git a/src/test/test-resource-affinity-strict-negative6/cmdlist b/src/test/test-resource-affinity-strict-negative6/cmdlist
new file mode 100644
index 0000000..c0a4daa
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on" ],
+ [ "network node3 off" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative6/hardware_status b/src/test/test-resource-affinity-strict-negative6/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative6/log.expect b/src/test/test-resource-affinity-strict-negative6/log.expect
new file mode 100644
index 0000000..0d9854a
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/log.expect
@@ -0,0 +1,69 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'fa:120001' on node 'node2'
+info 20 node1/crm: adding new service 'fa:220001' on node 'node3'
+info 20 node1/crm: adding new service 'vm:101' on node 'node1'
+info 20 node1/crm: adding new service 'vm:102' on node 'node1'
+info 20 node1/crm: service 'fa:120001': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'fa:220001': state changed from 'request_start' to 'started' (node = node3)
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node1)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:101
+info 21 node1/lrm: service status vm:101 started
+info 21 node1/lrm: starting service vm:102
+info 21 node1/lrm: service status vm:102 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service fa:120001
+info 23 node2/lrm: service status fa:120001 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service fa:220001
+info 25 node3/lrm: service status fa:220001 started
+info 120 cmdlist: execute network node3 off
+info 120 node1/crm: node 'node3': state changed from 'online' => 'unknown'
+info 124 node3/crm: status change slave => wait_for_quorum
+info 125 node3/lrm: status change active => lost_agent_lock
+info 160 node1/crm: service 'fa:220001': state changed from 'started' to 'fence'
+info 160 node1/crm: node 'node3': state changed from 'unknown' => 'fence'
+emai 160 node1/crm: FENCE: Try to fence node 'node3'
+info 166 watchdog: execute power node3 off
+info 165 node3/crm: killed by poweroff
+info 166 node3/lrm: killed by poweroff
+info 166 hardware: server 'node3' stopped by poweroff (watchdog)
+info 240 node1/crm: got lock 'ha_agent_node3_lock'
+info 240 node1/crm: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: node 'node3': state changed from 'fence' => 'unknown'
+emai 240 node1/crm: SUCCEED: fencing: acknowledged - got agent lock for node 'node3'
+info 240 node1/crm: service 'fa:220001': state changed from 'fence' to 'recovery'
+info 240 node1/crm: recover service 'fa:220001' from fenced node 'node3' to node 'node1'
+info 240 node1/crm: service 'fa:220001': state changed from 'recovery' to 'started' (node = node1)
+info 241 node1/lrm: starting service fa:220001
+warn 241 node1/lrm: unable to start service fa:220001
+warn 241 node1/lrm: restart policy: retry number 1 for service 'fa:220001'
+info 261 node1/lrm: starting service fa:220001
+warn 261 node1/lrm: unable to start service fa:220001
+err 261 node1/lrm: unable to start service fa:220001 on local node after 1 retries
+warn 280 node1/crm: starting service fa:220001 on node 'node1' failed, relocating service.
+warn 280 node1/crm: Start Error Recovery: Tried all available nodes for service 'fa:220001', retry start on current node. Tried nodes: node1
+info 281 node1/lrm: starting service fa:220001
+info 281 node1/lrm: service status fa:220001 started
+info 300 node1/crm: relocation policy successful for 'fa:220001' on node 'node1', failed nodes: node1
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative6/manager_status b/src/test/test-resource-affinity-strict-negative6/manager_status
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/manager_status
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/src/test/test-resource-affinity-strict-negative6/rules_config b/src/test/test-resource-affinity-strict-negative6/rules_config
new file mode 100644
index 0000000..95a24f5
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources fa:120001,fa:220001
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative6/service_config b/src/test/test-resource-affinity-strict-negative6/service_config
new file mode 100644
index 0000000..1f9480c
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative6/service_config
@@ -0,0 +1,6 @@
+{
+ "vm:101": { "node": "node1", "state": "started" },
+ "vm:102": { "node": "node1", "state": "started" },
+ "fa:120001": { "node": "node2", "state": "started" },
+ "fa:220001": { "node": "node3", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative7/README b/src/test/test-resource-affinity-strict-negative7/README
new file mode 100644
index 0000000..818abba
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/README
@@ -0,0 +1,15 @@
+Test whether a strict negative resource affinity rule among two resources makes
+one of the resources, which is manually migrated to another node, be migrated
+there and disallows other resources, which are in negative affinity with the
+migrated resource, from being migrated to the migrated resource's source node.
+
+The test scenario is:
+- vm:101 and vm:102 must be kept separate
+- vm:101 and vm:102 are running on node1 and node2 respectively
+
+The expected outcome is:
+- vm:101 is migrated to node3
+- While vm:101 is migrated, vm:102 cannot be migrated to node1, as vm:101 is
+ still putting load on node1 as its source node
+- After vm:101 is successfully migrated to node3, vm:102 can be migrated to
+ node1
diff --git a/src/test/test-resource-affinity-strict-negative7/cmdlist b/src/test/test-resource-affinity-strict-negative7/cmdlist
new file mode 100644
index 0000000..468ba56
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/cmdlist
@@ -0,0 +1,5 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on"],
+ [ "service vm:101 migrate node3", "service vm:102 migrate node1" ],
+ [ "service vm:102 migrate node1" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative7/hardware_status b/src/test/test-resource-affinity-strict-negative7/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative7/log.expect b/src/test/test-resource-affinity-strict-negative7/log.expect
new file mode 100644
index 0000000..6060f5e
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/log.expect
@@ -0,0 +1,52 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node1'
+info 20 node1/crm: adding new service 'vm:102' on node 'node2'
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node2)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:101
+info 21 node1/lrm: service status vm:101 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:102
+info 23 node2/lrm: service status vm:102 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 120 cmdlist: execute service vm:101 migrate node3
+info 120 cmdlist: execute service vm:102 migrate node1
+info 120 node1/crm: got crm command: migrate vm:101 node3
+err 120 node1/crm: crm command 'migrate vm:102 node1' error - service 'vm:101' on node 'node1' in negative affinity with service 'vm:102'
+info 120 node1/crm: migrate service 'vm:101' to node 'node3'
+info 120 node1/crm: service 'vm:101': state changed from 'started' to 'migrate' (node = node1, target = node3)
+info 121 node1/lrm: service vm:101 - start migrate to node 'node3'
+info 121 node1/lrm: service vm:101 - end migrate to node 'node3'
+info 140 node1/crm: service 'vm:101': state changed from 'migrate' to 'started' (node = node3)
+info 145 node3/lrm: got lock 'ha_agent_node3_lock'
+info 145 node3/lrm: status change wait_for_agent_lock => active
+info 145 node3/lrm: starting service vm:101
+info 145 node3/lrm: service status vm:101 started
+info 220 cmdlist: execute service vm:102 migrate node1
+info 220 node1/crm: got crm command: migrate vm:102 node1
+info 220 node1/crm: migrate service 'vm:102' to node 'node1'
+info 220 node1/crm: service 'vm:102': state changed from 'started' to 'migrate' (node = node2, target = node1)
+info 223 node2/lrm: service vm:102 - start migrate to node 'node1'
+info 223 node2/lrm: service vm:102 - end migrate to node 'node1'
+info 240 node1/crm: service 'vm:102': state changed from 'migrate' to 'started' (node = node1)
+info 241 node1/lrm: starting service vm:102
+info 241 node1/lrm: service status vm:102 started
+info 820 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative7/manager_status b/src/test/test-resource-affinity-strict-negative7/manager_status
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/manager_status
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/src/test/test-resource-affinity-strict-negative7/rules_config b/src/test/test-resource-affinity-strict-negative7/rules_config
new file mode 100644
index 0000000..2074776
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources vm:101,vm:102
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative7/service_config b/src/test/test-resource-affinity-strict-negative7/service_config
new file mode 100644
index 0000000..0336d09
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative7/service_config
@@ -0,0 +1,4 @@
+{
+ "vm:101": { "node": "node1", "state": "started" },
+ "vm:102": { "node": "node2", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative8/README b/src/test/test-resource-affinity-strict-negative8/README
new file mode 100644
index 0000000..f338aad
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/README
@@ -0,0 +1,12 @@
+Test whether a strict negative resource affinity rule among three resources makes
+one of the resources, which is manually migrated to another resource's node,
+where that resource is in negative affinity with the migrated resource, stay on
+its current node instead of being moved to the other resource's node.
+
+The test scenario is:
+- vm:101, vm:102, and vm:103 must be kept separate
+- vm:101, vm:102, and vm:103 are running on node1, node2, and node3 respectively
+
+The expected outcome is:
+- vm:101 cannot be migrated to node3 as it would conflict with the negative
+  resource affinity rule between vm:101, vm:102 and vm:103.
diff --git a/src/test/test-resource-affinity-strict-negative8/cmdlist b/src/test/test-resource-affinity-strict-negative8/cmdlist
new file mode 100644
index 0000000..13cab7b
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/cmdlist
@@ -0,0 +1,4 @@
+[
+ [ "power node1 on", "power node2 on", "power node3 on"],
+ [ "service vm:101 migrate node3" ]
+]
diff --git a/src/test/test-resource-affinity-strict-negative8/hardware_status b/src/test/test-resource-affinity-strict-negative8/hardware_status
new file mode 100644
index 0000000..451beb1
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/hardware_status
@@ -0,0 +1,5 @@
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-strict-negative8/log.expect b/src/test/test-resource-affinity-strict-negative8/log.expect
new file mode 100644
index 0000000..96f55d5
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/log.expect
@@ -0,0 +1,38 @@
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 20 node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info 20 node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info 20 node1/crm: adding new service 'vm:101' on node 'node1'
+info 20 node1/crm: adding new service 'vm:102' on node 'node2'
+info 20 node1/crm: adding new service 'vm:103' on node 'node3'
+info 20 node1/crm: service 'vm:101': state changed from 'request_start' to 'started' (node = node1)
+info 20 node1/crm: service 'vm:102': state changed from 'request_start' to 'started' (node = node2)
+info 20 node1/crm: service 'vm:103': state changed from 'request_start' to 'started' (node = node3)
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:101
+info 21 node1/lrm: service status vm:101 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 23 node2/lrm: got lock 'ha_agent_node2_lock'
+info 23 node2/lrm: status change wait_for_agent_lock => active
+info 23 node2/lrm: starting service vm:102
+info 23 node2/lrm: service status vm:102 started
+info 24 node3/crm: status change wait_for_quorum => slave
+info 25 node3/lrm: got lock 'ha_agent_node3_lock'
+info 25 node3/lrm: status change wait_for_agent_lock => active
+info 25 node3/lrm: starting service vm:103
+info 25 node3/lrm: service status vm:103 started
+info 120 cmdlist: execute service vm:101 migrate node3
+err 120 node1/crm: crm command 'migrate vm:101 node3' error - service 'vm:103' on node 'node3' in negative affinity with service 'vm:101'
+info 720 hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-strict-negative8/manager_status b/src/test/test-resource-affinity-strict-negative8/manager_status
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/manager_status
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/src/test/test-resource-affinity-strict-negative8/rules_config b/src/test/test-resource-affinity-strict-negative8/rules_config
new file mode 100644
index 0000000..44e6a02
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/rules_config
@@ -0,0 +1,3 @@
+resource-affinity: lonely-must-vms-be
+ resources vm:101,vm:102,vm:103
+ affinity negative
diff --git a/src/test/test-resource-affinity-strict-negative8/service_config b/src/test/test-resource-affinity-strict-negative8/service_config
new file mode 100644
index 0000000..4b26f6b
--- /dev/null
+++ b/src/test/test-resource-affinity-strict-negative8/service_config
@@ -0,0 +1,5 @@
+{
+ "vm:101": { "node": "node1", "state": "started" },
+ "vm:102": { "node": "node2", "state": "started" },
+ "vm:103": { "node": "node3", "state": "started" }
+}
--
2.39.5
More information about the pve-devel
mailing list