[pve-devel] [PATCH ha-manager v2 12/12] test: ha tester: add resource affinity test cases mixed with node affinity rules

Daniel Kral d.kral at proxmox.com
Fri Aug 1 18:22:27 CEST 2025


Add test cases for some scenarios, where node and positive/negative
resource affinity rules are applied together.

For the positive resource affinity rules, node affinity rules will
always take precedence, even if all or the majority of resources in the
resource affinity rule are already on another node contradicting the
node affinity rule.

For the negative resource affinity rules, node affinity rules will take
precedence if it is possible to do so. Currently, there are still cases
which need manual intervention. These should be handled automatically in
the future by providing more information to the scheduler.

Signed-off-by: Daniel Kral <d.kral at proxmox.com>
---
 .../README                                    | 14 ++++
 .../cmdlist                                   |  3 +
 .../hardware_status                           |  5 ++
 .../log.expect                                | 41 ++++++++++++
 .../manager_status                            |  1 +
 .../rules_config                              |  7 ++
 .../service_config                            |  5 ++
 .../README                                    | 13 ++++
 .../cmdlist                                   |  3 +
 .../hardware_status                           |  7 ++
 .../log.expect                                | 63 +++++++++++++++++
 .../manager_status                            |  1 +
 .../rules_config                              |  7 ++
 .../service_config                            |  5 ++
 .../README                                    | 17 +++++
 .../cmdlist                                   |  6 ++
 .../hardware_status                           |  6 ++
 .../log.expect                                | 67 +++++++++++++++++++
 .../manager_status                            |  1 +
 .../rules_config                              | 15 +++++
 .../service_config                            |  5 ++
 .../README                                    | 15 +++++
 .../cmdlist                                   |  3 +
 .../hardware_status                           |  5 ++
 .../log.expect                                | 49 ++++++++++++++
 .../manager_status                            |  1 +
 .../rules_config                              |  7 ++
 .../service_config                            |  5 ++
 .../README                                    | 15 +++++
 .../cmdlist                                   |  3 +
 .../hardware_status                           |  5 ++
 .../log.expect                                | 49 ++++++++++++++
 .../manager_status                            |  1 +
 .../rules_config                              |  7 ++
 .../service_config                            |  5 ++
 35 files changed, 462 insertions(+)
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/README
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/cmdlist
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/hardware_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/log.expect
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/manager_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/rules_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative1/service_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/README
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/cmdlist
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/hardware_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/log.expect
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/manager_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/rules_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative2/service_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/README
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/cmdlist
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/hardware_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/log.expect
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/manager_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/rules_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-negative3/service_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/README
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/cmdlist
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/hardware_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/log.expect
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/manager_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/rules_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive1/service_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/README
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/cmdlist
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/hardware_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/log.expect
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/manager_status
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/rules_config
 create mode 100644 src/test/test-resource-affinity-with-node-affinity-strict-positive2/service_config

diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/README b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/README
new file mode 100644
index 00000000..5f68bbb8
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/README
@@ -0,0 +1,14 @@
+Test whether a strict negative resource affinity rule among three resources,
+where the resources are contradicting the negative resource affinity rule and
+one of them should be on a specific node by its node affinity rule, makes the
+resource in the node affinity rule migrate to its preferred node, if possible.
+
+The test scenario is:
+- vm:102 should be on node2
+- vm:101, vm:102, and vm:103 must be kept separate
+- vm:101 is currently on node3
+- vm:102 and vm:103 are currently on node1, which must be separated
+
+The expected outcome is:
+- As vm:102 and vm:103 are still on the same node, make vm:102 migrate to node2
+  to fulfill its node affinity rule
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/cmdlist b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/cmdlist
new file mode 100644
index 00000000..13f90cd7
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/cmdlist
@@ -0,0 +1,3 @@
+[
+    [ "power node1 on", "power node2 on", "power node3 on" ]
+]
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/hardware_status b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/hardware_status
new file mode 100644
index 00000000..451beb13
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/hardware_status
@@ -0,0 +1,5 @@
+{
+  "node1": { "power": "off", "network": "off" },
+  "node2": { "power": "off", "network": "off" },
+  "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/log.expect b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/log.expect
new file mode 100644
index 00000000..216aeb66
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/log.expect
@@ -0,0 +1,41 @@
+info      0     hardware: starting simulation
+info     20      cmdlist: execute power node1 on
+info     20    node1/crm: status change startup => wait_for_quorum
+info     20    node1/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node2 on
+info     20    node2/crm: status change startup => wait_for_quorum
+info     20    node2/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node3 on
+info     20    node3/crm: status change startup => wait_for_quorum
+info     20    node3/lrm: status change startup => wait_for_agent_lock
+info     20    node1/crm: got lock 'ha_manager_lock'
+info     20    node1/crm: status change wait_for_quorum => master
+info     20    node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info     20    node1/crm: adding new service 'vm:101' on node 'node3'
+info     20    node1/crm: adding new service 'vm:102' on node 'node1'
+info     20    node1/crm: adding new service 'vm:103' on node 'node1'
+info     20    node1/crm: service 'vm:101': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: service 'vm:102': state changed from 'request_start' to 'started'  (node = node1)
+info     20    node1/crm: service 'vm:103': state changed from 'request_start' to 'started'  (node = node1)
+info     20    node1/crm: migrate service 'vm:102' to node 'node2' (running)
+info     20    node1/crm: service 'vm:102': state changed from 'started' to 'migrate'  (node = node1, target = node2)
+info     21    node1/lrm: got lock 'ha_agent_node1_lock'
+info     21    node1/lrm: status change wait_for_agent_lock => active
+info     21    node1/lrm: service vm:102 - start migrate to node 'node2'
+info     21    node1/lrm: service vm:102 - end migrate to node 'node2'
+info     21    node1/lrm: starting service vm:103
+info     21    node1/lrm: service status vm:103 started
+info     22    node2/crm: status change wait_for_quorum => slave
+info     23    node2/lrm: got lock 'ha_agent_node2_lock'
+info     23    node2/lrm: status change wait_for_agent_lock => active
+info     24    node3/crm: status change wait_for_quorum => slave
+info     25    node3/lrm: got lock 'ha_agent_node3_lock'
+info     25    node3/lrm: status change wait_for_agent_lock => active
+info     25    node3/lrm: starting service vm:101
+info     25    node3/lrm: service status vm:101 started
+info     40    node1/crm: service 'vm:102': state changed from 'migrate' to 'started'  (node = node2)
+info     43    node2/lrm: starting service vm:102
+info     43    node2/lrm: service status vm:102 started
+info    620     hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/manager_status b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/manager_status
new file mode 100644
index 00000000..0967ef42
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/rules_config b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/rules_config
new file mode 100644
index 00000000..be874144
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/rules_config
@@ -0,0 +1,7 @@
+node-affinity: vm102-must-be-on-node2
+	resources vm:102
+	nodes node2,node3
+
+resource-affinity: lonely-must-vms-be
+	resources vm:101,vm:102,vm:103
+	affinity negative
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative1/service_config b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/service_config
new file mode 100644
index 00000000..b98edc85
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative1/service_config
@@ -0,0 +1,5 @@
+{
+    "vm:101": { "node": "node3", "state": "started" },
+    "vm:102": { "node": "node1", "state": "started" },
+    "vm:103": { "node": "node1", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/README b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/README
new file mode 100644
index 00000000..e2de70fb
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/README
@@ -0,0 +1,13 @@
+Test whether a strict negative resource affinity rule among three resources,
+where all resources are in a node affinity rule restricting them to three
+nodes, makes them migrate to these nodes, as all three resources are still
+on a common node outside of the restricted node set.
+
+The test scenario is:
+- vm:101, vm:102, and vm:103 should be on node2, node3 or node4
+- vm:101, vm:102, and vm:103 must be kept separate
+- vm:101, vm:102, and vm:103 are currently on node1
+
+The expected outcome is:
+- As vm:101, vm:102, and vm:103 are still on the same node and should be on
+  node2, node3 or node4, migrate them to node2, node3, and node4 respectively
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/cmdlist b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/cmdlist
new file mode 100644
index 00000000..8cdc6092
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/cmdlist
@@ -0,0 +1,3 @@
+[
+    [ "power node1 on", "power node2 on", "power node3 on", "power node4 on", "power node5 on" ]
+]
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/hardware_status b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/hardware_status
new file mode 100644
index 00000000..7b8e961e
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/hardware_status
@@ -0,0 +1,7 @@
+{
+  "node1": { "power": "off", "network": "off" },
+  "node2": { "power": "off", "network": "off" },
+  "node3": { "power": "off", "network": "off" },
+  "node4": { "power": "off", "network": "off" },
+  "node5": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/log.expect b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/log.expect
new file mode 100644
index 00000000..3e75c6bf
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/log.expect
@@ -0,0 +1,63 @@
+info      0     hardware: starting simulation
+info     20      cmdlist: execute power node1 on
+info     20    node1/crm: status change startup => wait_for_quorum
+info     20    node1/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node2 on
+info     20    node2/crm: status change startup => wait_for_quorum
+info     20    node2/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node3 on
+info     20    node3/crm: status change startup => wait_for_quorum
+info     20    node3/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node4 on
+info     20    node4/crm: status change startup => wait_for_quorum
+info     20    node4/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node5 on
+info     20    node5/crm: status change startup => wait_for_quorum
+info     20    node5/lrm: status change startup => wait_for_agent_lock
+info     20    node1/crm: got lock 'ha_manager_lock'
+info     20    node1/crm: status change wait_for_quorum => master
+info     20    node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node4': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node5': state changed from 'unknown' => 'online'
+info     20    node1/crm: adding new service 'vm:101' on node 'node1'
+info     20    node1/crm: adding new service 'vm:102' on node 'node1'
+info     20    node1/crm: adding new service 'vm:103' on node 'node1'
+info     20    node1/crm: service 'vm:101': state changed from 'request_start' to 'started'  (node = node1)
+info     20    node1/crm: service 'vm:102': state changed from 'request_start' to 'started'  (node = node1)
+info     20    node1/crm: service 'vm:103': state changed from 'request_start' to 'started'  (node = node1)
+info     20    node1/crm: migrate service 'vm:101' to node 'node2' (running)
+info     20    node1/crm: service 'vm:101': state changed from 'started' to 'migrate'  (node = node1, target = node2)
+info     20    node1/crm: migrate service 'vm:102' to node 'node3' (running)
+info     20    node1/crm: service 'vm:102': state changed from 'started' to 'migrate'  (node = node1, target = node3)
+info     20    node1/crm: migrate service 'vm:103' to node 'node4' (running)
+info     20    node1/crm: service 'vm:103': state changed from 'started' to 'migrate'  (node = node1, target = node4)
+info     21    node1/lrm: got lock 'ha_agent_node1_lock'
+info     21    node1/lrm: status change wait_for_agent_lock => active
+info     21    node1/lrm: service vm:101 - start migrate to node 'node2'
+info     21    node1/lrm: service vm:101 - end migrate to node 'node2'
+info     21    node1/lrm: service vm:102 - start migrate to node 'node3'
+info     21    node1/lrm: service vm:102 - end migrate to node 'node3'
+info     21    node1/lrm: service vm:103 - start migrate to node 'node4'
+info     21    node1/lrm: service vm:103 - end migrate to node 'node4'
+info     22    node2/crm: status change wait_for_quorum => slave
+info     23    node2/lrm: got lock 'ha_agent_node2_lock'
+info     23    node2/lrm: status change wait_for_agent_lock => active
+info     24    node3/crm: status change wait_for_quorum => slave
+info     25    node3/lrm: got lock 'ha_agent_node3_lock'
+info     25    node3/lrm: status change wait_for_agent_lock => active
+info     26    node4/crm: status change wait_for_quorum => slave
+info     27    node4/lrm: got lock 'ha_agent_node4_lock'
+info     27    node4/lrm: status change wait_for_agent_lock => active
+info     28    node5/crm: status change wait_for_quorum => slave
+info     40    node1/crm: service 'vm:101': state changed from 'migrate' to 'started'  (node = node2)
+info     40    node1/crm: service 'vm:102': state changed from 'migrate' to 'started'  (node = node3)
+info     40    node1/crm: service 'vm:103': state changed from 'migrate' to 'started'  (node = node4)
+info     43    node2/lrm: starting service vm:101
+info     43    node2/lrm: service status vm:101 started
+info     45    node3/lrm: starting service vm:102
+info     45    node3/lrm: service status vm:102 started
+info     47    node4/lrm: starting service vm:103
+info     47    node4/lrm: service status vm:103 started
+info    620     hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/manager_status b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/manager_status
new file mode 100644
index 00000000..0967ef42
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/rules_config b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/rules_config
new file mode 100644
index 00000000..b84d7702
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/rules_config
@@ -0,0 +1,7 @@
+node-affinity: vms-must-be-on-subcluster
+	resources vm:101,vm:102,vm:103
+	nodes node2,node3,node4
+
+resource-affinity: lonely-must-vms-be
+	resources vm:101,vm:102,vm:103
+	affinity negative
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative2/service_config b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/service_config
new file mode 100644
index 00000000..57e3579d
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative2/service_config
@@ -0,0 +1,5 @@
+{
+    "vm:101": { "node": "node1", "state": "started" },
+    "vm:102": { "node": "node1", "state": "started" },
+    "vm:103": { "node": "node1", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/README b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/README
new file mode 100644
index 00000000..588f9020
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/README
@@ -0,0 +1,17 @@
+Test whether a strict negative resource affinity rule among three resources,
+where each resource is restricted to a node it is not yet on, allows the
+resources to be exchanged to the nodes described by their node affinity
+rules, if one of the resources is stopped.
+
+The test scenario is:
+- vm:101, vm:102, and vm:103 should be on node2, node3, and node1, respectively
+- vm:101, vm:102, and vm:103 must be kept separate
+- vm:101, vm:102, and vm:103 are currently on node1, node2, node3 respectively
+
+The expected outcome is:
+- the resources can neither be manually migrated nor automatically exchange
+  their nodes to match their node affinity rules, because of the strict
+  condition, that they cannot be on either a node, where a resource with
+  negative affinity is currently on or is migrated to
+- therefore, one of the resources must be stopped manually to allow the
+  rearrangement to fulfill the node affinity rules
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/cmdlist b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/cmdlist
new file mode 100644
index 00000000..2f2c80f5
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/cmdlist
@@ -0,0 +1,6 @@
+[
+    [ "power node1 on", "power node2 on", "power node3 on" ],
+    [ "service vm:103 migrate node1" ],
+    [ "service vm:101 stopped" ],
+    [ "service vm:101 started" ]
+]
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/hardware_status b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/hardware_status
new file mode 100644
index 00000000..4aed08a1
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/hardware_status
@@ -0,0 +1,6 @@
+{
+  "node1": { "power": "off", "network": "off" },
+  "node2": { "power": "off", "network": "off" },
+  "node3": { "power": "off", "network": "off" },
+  "node4": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/log.expect b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/log.expect
new file mode 100644
index 00000000..1ed34c36
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/log.expect
@@ -0,0 +1,67 @@
+info      0     hardware: starting simulation
+info     20      cmdlist: execute power node1 on
+info     20    node1/crm: status change startup => wait_for_quorum
+info     20    node1/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node2 on
+info     20    node2/crm: status change startup => wait_for_quorum
+info     20    node2/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node3 on
+info     20    node3/crm: status change startup => wait_for_quorum
+info     20    node3/lrm: status change startup => wait_for_agent_lock
+info     20    node1/crm: got lock 'ha_manager_lock'
+info     20    node1/crm: status change wait_for_quorum => master
+info     20    node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info     20    node1/crm: adding new service 'vm:101' on node 'node1'
+info     20    node1/crm: adding new service 'vm:102' on node 'node2'
+info     20    node1/crm: adding new service 'vm:103' on node 'node3'
+info     20    node1/crm: service 'vm:101': state changed from 'request_start' to 'started'  (node = node1)
+info     20    node1/crm: service 'vm:102': state changed from 'request_start' to 'started'  (node = node2)
+info     20    node1/crm: service 'vm:103': state changed from 'request_start' to 'started'  (node = node3)
+info     21    node1/lrm: got lock 'ha_agent_node1_lock'
+info     21    node1/lrm: status change wait_for_agent_lock => active
+info     21    node1/lrm: starting service vm:101
+info     21    node1/lrm: service status vm:101 started
+info     22    node2/crm: status change wait_for_quorum => slave
+info     23    node2/lrm: got lock 'ha_agent_node2_lock'
+info     23    node2/lrm: status change wait_for_agent_lock => active
+info     23    node2/lrm: starting service vm:102
+info     23    node2/lrm: service status vm:102 started
+info     24    node3/crm: status change wait_for_quorum => slave
+info     25    node3/lrm: got lock 'ha_agent_node3_lock'
+info     25    node3/lrm: status change wait_for_agent_lock => active
+info     25    node3/lrm: starting service vm:103
+info     25    node3/lrm: service status vm:103 started
+info    120      cmdlist: execute service vm:103 migrate node1
+err     120    node1/crm: crm command 'migrate vm:103 node1' error - service 'vm:101' on node 'node1' in negative affinity with service 'vm:103'
+info    220      cmdlist: execute service vm:101 stopped
+info    220    node1/crm: service 'vm:101': state changed from 'started' to 'request_stop'
+info    221    node1/lrm: stopping service vm:101
+info    221    node1/lrm: service status vm:101 stopped
+info    240    node1/crm: service 'vm:101': state changed from 'request_stop' to 'stopped'
+info    240    node1/crm: migrate service 'vm:103' to node 'node1' (running)
+info    240    node1/crm: service 'vm:103': state changed from 'started' to 'migrate'  (node = node3, target = node1)
+info    245    node3/lrm: service vm:103 - start migrate to node 'node1'
+info    245    node3/lrm: service vm:103 - end migrate to node 'node1'
+info    260    node1/crm: service 'vm:103': state changed from 'migrate' to 'started'  (node = node1)
+info    260    node1/crm: migrate service 'vm:102' to node 'node3' (running)
+info    260    node1/crm: service 'vm:102': state changed from 'started' to 'migrate'  (node = node2, target = node3)
+info    261    node1/lrm: starting service vm:103
+info    261    node1/lrm: service status vm:103 started
+info    263    node2/lrm: service vm:102 - start migrate to node 'node3'
+info    263    node2/lrm: service vm:102 - end migrate to node 'node3'
+info    280    node1/crm: service 'vm:102': state changed from 'migrate' to 'started'  (node = node3)
+info    285    node3/lrm: starting service vm:102
+info    285    node3/lrm: service status vm:102 started
+info    320      cmdlist: execute service vm:101 started
+info    320    node1/crm: service 'vm:101': state changed from 'stopped' to 'request_start'  (node = node1)
+info    320    node1/crm: service 'vm:101': state changed from 'request_start' to 'started'  (node = node1)
+info    320    node1/crm: migrate service 'vm:101' to node 'node2' (running)
+info    320    node1/crm: service 'vm:101': state changed from 'started' to 'migrate'  (node = node1, target = node2)
+info    321    node1/lrm: service vm:101 - start migrate to node 'node2'
+info    321    node1/lrm: service vm:101 - end migrate to node 'node2'
+info    340    node1/crm: service 'vm:101': state changed from 'migrate' to 'started'  (node = node2)
+info    343    node2/lrm: starting service vm:101
+info    343    node2/lrm: service status vm:101 started
+info    920     hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/manager_status b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/manager_status
new file mode 100644
index 00000000..0967ef42
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/rules_config b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/rules_config
new file mode 100644
index 00000000..2362d220
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/rules_config
@@ -0,0 +1,15 @@
+node-affinity: vm101-must-be-on-node2
+	resources vm:101
+	nodes node2
+
+node-affinity: vm102-must-be-on-node3
+	resources vm:102
+	nodes node3
+
+node-affinity: vm103-must-be-on-node1
+	resources vm:103
+	nodes node1
+
+resource-affinity: lonely-must-vms-be
+	resources vm:101,vm:102,vm:103
+	affinity negative
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-negative3/service_config b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/service_config
new file mode 100644
index 00000000..4b26f6b4
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-negative3/service_config
@@ -0,0 +1,5 @@
+{
+    "vm:101": { "node": "node1", "state": "started" },
+    "vm:102": { "node": "node2", "state": "started" },
+    "vm:103": { "node": "node3", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/README b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/README
new file mode 100644
index 00000000..2202f5a3
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/README
@@ -0,0 +1,15 @@
+Test whether a strict positive resource affinity rule among three resources,
+where one of these resources is restricted by a node affinity rule to a node
+other than the one they are currently on, makes all resources migrate to
+that node.
+
+The test scenario is:
+- vm:102 should be kept on node2
+- vm:101, vm:102, and vm:103 must be kept together
+- vm:101, vm:102, and vm:103 are currently running on node3
+
+The expected outcome is:
+- As vm:102 is on node3, which contradicts its node affinity rule, vm:102 is
+  migrated to node2 to fulfill its node affinity rule
+- As vm:102 is in a positive resource affinity rule with vm:101 and vm:103,
+  the node affinity constraint is inferred for them too, so all migrate to node2
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/cmdlist b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/cmdlist
new file mode 100644
index 00000000..13f90cd7
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/cmdlist
@@ -0,0 +1,3 @@
+[
+    [ "power node1 on", "power node2 on", "power node3 on" ]
+]
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/hardware_status b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/hardware_status
new file mode 100644
index 00000000..451beb13
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/hardware_status
@@ -0,0 +1,5 @@
+{
+  "node1": { "power": "off", "network": "off" },
+  "node2": { "power": "off", "network": "off" },
+  "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/log.expect b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/log.expect
new file mode 100644
index 00000000..d84b2228
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/log.expect
@@ -0,0 +1,49 @@
+info      0     hardware: starting simulation
+info     20      cmdlist: execute power node1 on
+info     20    node1/crm: status change startup => wait_for_quorum
+info     20    node1/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node2 on
+info     20    node2/crm: status change startup => wait_for_quorum
+info     20    node2/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node3 on
+info     20    node3/crm: status change startup => wait_for_quorum
+info     20    node3/lrm: status change startup => wait_for_agent_lock
+info     20    node1/crm: got lock 'ha_manager_lock'
+info     20    node1/crm: status change wait_for_quorum => master
+info     20    node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info     20    node1/crm: adding new service 'vm:101' on node 'node3'
+info     20    node1/crm: adding new service 'vm:102' on node 'node3'
+info     20    node1/crm: adding new service 'vm:103' on node 'node3'
+info     20    node1/crm: service 'vm:101': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: service 'vm:102': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: service 'vm:103': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: migrate service 'vm:101' to node 'node2' (running)
+info     20    node1/crm: service 'vm:101': state changed from 'started' to 'migrate'  (node = node3, target = node2)
+info     20    node1/crm: migrate service 'vm:102' to node 'node2' (running)
+info     20    node1/crm: service 'vm:102': state changed from 'started' to 'migrate'  (node = node3, target = node2)
+info     20    node1/crm: migrate service 'vm:103' to node 'node2' (running)
+info     20    node1/crm: service 'vm:103': state changed from 'started' to 'migrate'  (node = node3, target = node2)
+info     22    node2/crm: status change wait_for_quorum => slave
+info     23    node2/lrm: got lock 'ha_agent_node2_lock'
+info     23    node2/lrm: status change wait_for_agent_lock => active
+info     24    node3/crm: status change wait_for_quorum => slave
+info     25    node3/lrm: got lock 'ha_agent_node3_lock'
+info     25    node3/lrm: status change wait_for_agent_lock => active
+info     25    node3/lrm: service vm:101 - start migrate to node 'node2'
+info     25    node3/lrm: service vm:101 - end migrate to node 'node2'
+info     25    node3/lrm: service vm:102 - start migrate to node 'node2'
+info     25    node3/lrm: service vm:102 - end migrate to node 'node2'
+info     25    node3/lrm: service vm:103 - start migrate to node 'node2'
+info     25    node3/lrm: service vm:103 - end migrate to node 'node2'
+info     40    node1/crm: service 'vm:101': state changed from 'migrate' to 'started'  (node = node2)
+info     40    node1/crm: service 'vm:102': state changed from 'migrate' to 'started'  (node = node2)
+info     40    node1/crm: service 'vm:103': state changed from 'migrate' to 'started'  (node = node2)
+info     43    node2/lrm: starting service vm:101
+info     43    node2/lrm: service status vm:101 started
+info     43    node2/lrm: starting service vm:102
+info     43    node2/lrm: service status vm:102 started
+info     43    node2/lrm: starting service vm:103
+info     43    node2/lrm: service status vm:103 started
+info    620     hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/manager_status b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/manager_status
new file mode 100644
index 00000000..0967ef42
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/rules_config b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/rules_config
new file mode 100644
index 00000000..655f5161
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/rules_config
@@ -0,0 +1,7 @@
+node-affinity: vm102-must-be-on-node2
+	resources vm:102
+	nodes node2
+
+resource-affinity: vms-must-stick-together
+	resources vm:101,vm:102,vm:103
+	affinity positive
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive1/service_config b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/service_config
new file mode 100644
index 00000000..299a58c9
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive1/service_config
@@ -0,0 +1,5 @@
+{
+    "vm:101": { "node": "node3", "state": "started" },
+    "vm:102": { "node": "node3", "state": "started" },
+    "vm:103": { "node": "node3", "state": "started" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/README b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/README
new file mode 100644
index 00000000..c5f4b469
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/README
@@ -0,0 +1,15 @@
+Test whether a strict positive resource affinity rule among three resources,
+where one of these resources is restricted to another node than they are
+currently on with a node affinity rule, makes all resources migrate to that
+node.
+
+The test scenario is:
+- vm:102 must be kept on node1 or node2
+- vm:101, vm:102, and vm:103 must be kept together
+- vm:101, vm:102, and vm:103 are currently running on node3
+
+The expected outcome is:
+- As vm:102 is on node3, which contradicts its node affinity rule, vm:102 is
+  migrated to node1 to fulfill its node affinity rule
+- As vm:102 is in a positive resource affinity rule with vm:101 and vm:103,
+  all of them are migrated to node1, as vm:102's node affinity is inferred for them
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/cmdlist b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/cmdlist
new file mode 100644
index 00000000..13f90cd7
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/cmdlist
@@ -0,0 +1,3 @@
+[
+    [ "power node1 on", "power node2 on", "power node3 on" ]
+]
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/hardware_status b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/hardware_status
new file mode 100644
index 00000000..451beb13
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/hardware_status
@@ -0,0 +1,5 @@
+{
+  "node1": { "power": "off", "network": "off" },
+  "node2": { "power": "off", "network": "off" },
+  "node3": { "power": "off", "network": "off" }
+}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/log.expect b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/log.expect
new file mode 100644
index 00000000..22fb5ced
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/log.expect
@@ -0,0 +1,49 @@
+info      0     hardware: starting simulation
+info     20      cmdlist: execute power node1 on
+info     20    node1/crm: status change startup => wait_for_quorum
+info     20    node1/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node2 on
+info     20    node2/crm: status change startup => wait_for_quorum
+info     20    node2/lrm: status change startup => wait_for_agent_lock
+info     20      cmdlist: execute power node3 on
+info     20    node3/crm: status change startup => wait_for_quorum
+info     20    node3/lrm: status change startup => wait_for_agent_lock
+info     20    node1/crm: got lock 'ha_manager_lock'
+info     20    node1/crm: status change wait_for_quorum => master
+info     20    node1/crm: node 'node1': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node2': state changed from 'unknown' => 'online'
+info     20    node1/crm: node 'node3': state changed from 'unknown' => 'online'
+info     20    node1/crm: adding new service 'vm:101' on node 'node3'
+info     20    node1/crm: adding new service 'vm:102' on node 'node3'
+info     20    node1/crm: adding new service 'vm:103' on node 'node3'
+info     20    node1/crm: service 'vm:101': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: service 'vm:102': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: service 'vm:103': state changed from 'request_start' to 'started'  (node = node3)
+info     20    node1/crm: migrate service 'vm:101' to node 'node1' (running)
+info     20    node1/crm: service 'vm:101': state changed from 'started' to 'migrate'  (node = node3, target = node1)
+info     20    node1/crm: migrate service 'vm:102' to node 'node1' (running)
+info     20    node1/crm: service 'vm:102': state changed from 'started' to 'migrate'  (node = node3, target = node1)
+info     20    node1/crm: migrate service 'vm:103' to node 'node1' (running)
+info     20    node1/crm: service 'vm:103': state changed from 'started' to 'migrate'  (node = node3, target = node1)
+info     21    node1/lrm: got lock 'ha_agent_node1_lock'
+info     21    node1/lrm: status change wait_for_agent_lock => active
+info     22    node2/crm: status change wait_for_quorum => slave
+info     24    node3/crm: status change wait_for_quorum => slave
+info     25    node3/lrm: got lock 'ha_agent_node3_lock'
+info     25    node3/lrm: status change wait_for_agent_lock => active
+info     25    node3/lrm: service vm:101 - start migrate to node 'node1'
+info     25    node3/lrm: service vm:101 - end migrate to node 'node1'
+info     25    node3/lrm: service vm:102 - start migrate to node 'node1'
+info     25    node3/lrm: service vm:102 - end migrate to node 'node1'
+info     25    node3/lrm: service vm:103 - start migrate to node 'node1'
+info     25    node3/lrm: service vm:103 - end migrate to node 'node1'
+info     40    node1/crm: service 'vm:101': state changed from 'migrate' to 'started'  (node = node1)
+info     40    node1/crm: service 'vm:102': state changed from 'migrate' to 'started'  (node = node1)
+info     40    node1/crm: service 'vm:103': state changed from 'migrate' to 'started'  (node = node1)
+info     41    node1/lrm: starting service vm:101
+info     41    node1/lrm: service status vm:101 started
+info     41    node1/lrm: starting service vm:102
+info     41    node1/lrm: service status vm:102 started
+info     41    node1/lrm: starting service vm:103
+info     41    node1/lrm: service status vm:103 started
+info    620     hardware: exit simulation - done
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/manager_status b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/manager_status
new file mode 100644
index 00000000..0967ef42
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/manager_status
@@ -0,0 +1 @@
+{}
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/rules_config b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/rules_config
new file mode 100644
index 00000000..6db94930
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/rules_config
@@ -0,0 +1,7 @@
+node-affinity: vm102-must-be-on-node1-or-node2
+	resources vm:102
+	nodes node1,node2
+
+resource-affinity: vms-must-stick-together
+	resources vm:101,vm:102,vm:103
+	affinity positive
diff --git a/src/test/test-resource-affinity-with-node-affinity-strict-positive2/service_config b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/service_config
new file mode 100644
index 00000000..299a58c9
--- /dev/null
+++ b/src/test/test-resource-affinity-with-node-affinity-strict-positive2/service_config
@@ -0,0 +1,5 @@
+{
+    "vm:101": { "node": "node3", "state": "started" },
+    "vm:102": { "node": "node3", "state": "started" },
+    "vm:103": { "node": "node3", "state": "started" }
+}
-- 
2.47.2





More information about the pve-devel mailing list