diff --git a/SOURCES/001-new-behavior.patch b/SOURCES/001-new-behavior.patch new file mode 100644 index 00000000..c09405b8 --- /dev/null +++ b/SOURCES/001-new-behavior.patch @@ -0,0 +1,99 @@ +From d0278eca6f2f8d4e707f73d12b4f8161f07e42fe Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 2 Nov 2017 18:26:03 -0500 +Subject: [PATCH 1/2] Feature: tools: enable new crm_resource + --cleanup/--refresh behavior + +It was temporarily disabled by commit 3576364. +--- + tools/crm_resource.c | 6 +----- + 1 file changed, 1 insertion(+), 5 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 92255df..356bb05 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -212,14 +212,11 @@ static struct crm_option long_options[] = { + }, + { + "cleanup", no_argument, NULL, 'C', +-#if 0 +- // new behavior disabled until 2.0.0 + "\t\tDelete failed operations from a resource's history allowing its current state to be rechecked.\n" + "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" + }, + { + "refresh", no_argument, NULL, 'R', +-#endif + "\t\tDelete resource's history (including failures) so its current state is rechecked.\n" + "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" + "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be cleaned" +@@ -384,7 +381,6 @@ static struct crm_option long_options[] = { + {"un-migrate", no_argument, NULL, 'U', NULL, pcmk_option_hidden}, + {"un-move", no_argument, NULL, 'U', NULL, pcmk_option_hidden}, + +- {"refresh", 0, 0, 'R', NULL, pcmk_option_hidden}, // remove this line for 2.0.0 + {"reprobe", no_argument, NULL, 'P', NULL, pcmk_option_hidden}, + + {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph}, +@@ -645,7 +641,7 @@ main(int argc, char **argv) + if (cib_file == NULL) { + require_crmd = TRUE; + } +- just_errors = FALSE; // disable until 2.0.0 ++ just_errors = TRUE; + rsc_cmd = 'C'; + find_flags = pe_find_renamed|pe_find_anon; + break; +-- +1.8.3.1 + + +From b48ceeb041cee65a9b93b9b76235e475fa1a128f Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 16 Oct 2017 09:45:18 -0500 +Subject: [PATCH 2/2] Feature: crmd: default record-pending to TRUE + +--- + crmd/lrm.c | 15 ++++++--------- + 1 file changed, 6 insertions(+), 9 deletions(-) + +diff --git a/crmd/lrm.c b/crmd/lrm.c +index eb4e16e..36dc076 100644 +--- a/crmd/lrm.c ++++ b/crmd/lrm.c +@@ -2061,25 +2061,22 @@ stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) + static void + record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op) + { ++ const char *record_pending = NULL; ++ + CRM_CHECK(node_name != NULL, return); + CRM_CHECK(rsc != NULL, return); + CRM_CHECK(op != NULL, return); + +- if (op->op_type == NULL ++ if ((op->op_type == NULL) || (op->params == NULL) + || safe_str_eq(op->op_type, CRMD_ACTION_CANCEL) + || safe_str_eq(op->op_type, CRMD_ACTION_DELETE)) { + return; + } + +- if (op->params == NULL) { ++ // defaults to true ++ record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING); ++ if (record_pending && !crm_is_true(record_pending)) { + return; +- +- } else { +- const char *record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING); +- +- if (record_pending == NULL || crm_is_true(record_pending) == FALSE) { +- return; +- } + } + + op->call_id = -1; +-- +1.8.3.1 + diff --git a/SOURCES/002-fixes.patch b/SOURCES/002-fixes.patch new file mode 100644 index 
00000000..02d563df --- /dev/null +++ b/SOURCES/002-fixes.patch @@ -0,0 +1,7624 @@ +From 87856f05a85e2d20b7265b78373657e97dbf18e4 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 29 Nov 2017 17:21:29 -0600 +Subject: [PATCH 01/16] Fix: attrd: ensure node name is broadcast at start-up + (CLBZ#5330) + +This fixes a regression introduced in 1.1.18. + +Since c9d1c3cd, the crmd no longer explicitly clears the terminate and shutdown +node attributes at first join. An unwanted side effect of this was that the +attrd writer no longer reliably learned a joining node's name. If a node is +known only by its ID, the writer cannot write its attributes to the CIB. + +The worst outcome is that the joining node would be unable to shut down, +since the shutdown attribute would never trigger the policy engine. The window +was limited because the writer would learn the node's name if a new attrd +election was required or a node attribute was set locally on the joining node. + +The fix is to set a new private attribute, #attrd-protocol, at attrd start-up, +with the supported attrd protocol version. This has the additional benefit of +allowing any node to determine the minimum supported protocol version across +all active cluster nodes. +--- + attrd/commands.c | 30 ++++++++++++++++++++++++------ + attrd/internal.h | 1 + + attrd/main.c | 7 +++++++ + include/crm/crm.h | 1 + + 4 files changed, 33 insertions(+), 6 deletions(-) + +diff --git a/attrd/commands.c b/attrd/commands.c +index 967703f..0a20b26 100644 +--- a/attrd/commands.c ++++ b/attrd/commands.c +@@ -35,8 +35,9 @@ + * heartbeat, CMAN, or corosync-plugin stacks) is unversioned. + * + * With atomic attrd, each attrd will send ATTRD_PROTOCOL_VERSION with every +- * peer request and reply. Currently, there is no way to know the minimum +- * version supported by all peers, which limits its usefulness. ++ * peer request and reply. As of Pacemaker 2.0.0, at start-up each attrd will ++ * also set a private attribute for itself with its version, so any attrd can ++ * determine the minimum version supported by all peers. + * + * Protocol Pacemaker Significant changes + * -------- --------- ------------------- +@@ -289,11 +290,10 @@ void + attrd_client_clear_failure(xmlNode *xml) + { + #if 0 +- /* @TODO This would be most efficient, but there is currently no way to +- * verify that all peers support the op. If that ever changes, we could +- * enable this code. ++ /* @TODO Track the minimum supported protocol version across all nodes, ++ * then enable this more-efficient code. + */ +- if (all_peers_support_clear_failure) { ++ if (compare_version("2", minimum_protocol_version) <= 0) { + /* Propagate to all peers (including ourselves). + * This ends up at attrd_peer_message(). + */ +@@ -523,6 +523,24 @@ attrd_peer_clear_failure(crm_node_t *peer, xmlNode *xml) + regfree(&regex); + } + ++/*! 
++ \internal ++ \brief Broadcast private attribute for local node with protocol version ++*/ ++void ++attrd_broadcast_protocol() ++{ ++ xmlNode *attrd_op = create_xml_node(NULL, __FUNCTION__); ++ ++ crm_xml_add(attrd_op, F_TYPE, T_ATTRD); ++ crm_xml_add(attrd_op, F_ORIG, crm_system_name); ++ crm_xml_add(attrd_op, F_ATTRD_TASK, ATTRD_OP_UPDATE); ++ crm_xml_add(attrd_op, F_ATTRD_ATTRIBUTE, CRM_ATTR_PROTOCOL); ++ crm_xml_add(attrd_op, F_ATTRD_VALUE, ATTRD_PROTOCOL_VERSION); ++ crm_xml_add_int(attrd_op, F_ATTRD_IS_PRIVATE, 1); ++ attrd_client_update(attrd_op); ++} ++ + void + attrd_peer_message(crm_node_t *peer, xmlNode *xml) + { +diff --git a/attrd/internal.h b/attrd/internal.h +index 99fc3fd..23bcbda 100644 +--- a/attrd/internal.h ++++ b/attrd/internal.h +@@ -53,6 +53,7 @@ election_t *writer; + crm_ipcs_send_ack((client), (id), (flags), "ack", __FUNCTION__, __LINE__) + + void write_attributes(bool all); ++void attrd_broadcast_protocol(void); + void attrd_peer_message(crm_node_t *client, xmlNode *msg); + void attrd_client_peer_remove(const char *client_name, xmlNode *xml); + void attrd_client_clear_failure(xmlNode *xml); +diff --git a/attrd/main.c b/attrd/main.c +index 2670dc5..7721439 100644 +--- a/attrd/main.c ++++ b/attrd/main.c +@@ -220,6 +220,13 @@ attrd_cib_connect(int max_retry) + // Always read the CIB at start-up + mainloop_set_trigger(attrd_config_read); + ++ /* Set a private attribute for ourselves with the protocol version we ++ * support. This lets all nodes determine the minimum supported version ++ * across all nodes. It also ensures that the writer learns our node name, ++ * so it can send our attributes to the CIB. ++ */ ++ attrd_broadcast_protocol(); ++ + return pcmk_ok; + + cleanup: +diff --git a/include/crm/crm.h b/include/crm/crm.h +index 05ec555..6e2bcfa 100644 +--- a/include/crm/crm.h ++++ b/include/crm/crm.h +@@ -106,6 +106,7 @@ extern char *crm_system_name; + # define CRM_ATTR_DIGESTS_ALL "#digests-all" + # define CRM_ATTR_DIGESTS_SECURE "#digests-secure" + # define CRM_ATTR_RA_VERSION "#ra-version" ++# define CRM_ATTR_PROTOCOL "#attrd-protocol" + + /* Valid operations */ + # define CRM_OP_NOOP "noop" +-- +1.8.3.1 + + +From a87421042f5030e6dd7823cd80d7632b91296519 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 1 Dec 2017 11:02:54 -0600 +Subject: [PATCH 02/16] Refactor: pengine: functionize checking whether node + was unfenced + +Reduces code duplication and enhances readability. +--- + pengine/native.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +diff --git a/pengine/native.c b/pengine/native.c +index e72dec4..c998e4b 100644 +--- a/pengine/native.c ++++ b/pengine/native.c +@@ -429,6 +429,14 @@ rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const c + return work; + } + ++static inline bool ++node_has_been_unfenced(node_t *node) ++{ ++ const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED); ++ ++ return unfenced && strcmp("0", unfenced); ++} ++ + node_t * + native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) + { +@@ -2524,10 +2532,9 @@ StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * d + + if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { + action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set); +- const char *unfenced = pe_node_attribute_raw(current, CRM_ATTR_UNFENCED); + + order_actions(stop, unfence, pe_order_implies_first); +- if (unfenced == NULL || safe_str_eq("0", unfenced)) { ++ if (!node_has_been_unfenced(current)) { 
+ pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname); + } + } +@@ -2547,11 +2554,9 @@ StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * + + if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { + action_t *unfence = pe_fence_op(next, "on", TRUE, NULL, data_set); +- const char *unfenced = pe_node_attribute_raw(next, CRM_ATTR_UNFENCED); + + order_actions(unfence, start, pe_order_implies_then); +- +- if (unfenced == NULL || safe_str_eq("0", unfenced)) { ++ if (!node_has_been_unfenced(next)) { + char *reason = crm_strdup_printf("Required by %s", rsc->id); + trigger_unfencing(NULL, next, reason, NULL, data_set); + free(reason); +-- +1.8.3.1 + + +From b6b3fb9e8c6c6b34fb39c9d7f0b89ef41e9486fa Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 1 Dec 2017 11:45:31 -0600 +Subject: [PATCH 03/16] Refactor: pengine: functionize checking for unfence + device + +Reduces code duplication and enhances readability. This also comments out some +dead code from when probe_complete was still used. +--- + pengine/native.c | 24 ++++++++++++++---------- + 1 file changed, 14 insertions(+), 10 deletions(-) + +diff --git a/pengine/native.c b/pengine/native.c +index c998e4b..e57fbc7 100644 +--- a/pengine/native.c ++++ b/pengine/native.c +@@ -437,6 +437,13 @@ node_has_been_unfenced(node_t *node) + return unfenced && strcmp("0", unfenced); + } + ++static inline bool ++is_unfence_device(resource_t *rsc, pe_working_set_t *data_set) ++{ ++ return is_set(rsc->flags, pe_rsc_fence_device) ++ && is_set(data_set->flags, pe_flag_enable_unfencing); ++} ++ + node_t * + native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) + { +@@ -3015,12 +3022,8 @@ native_create_probe(resource_t * rsc, node_t * node, action_t * complete, + crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role), + is_set(probe->flags, pe_action_runnable), rsc->running_on); + +- if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) { ++ if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) { + top = rsc; +- +- } else if (pe_rsc_is_clone(top) == FALSE) { +- top = rsc; +- + } else { + crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id); + } +@@ -3041,17 +3044,18 @@ native_create_probe(resource_t * rsc, node_t * node, action_t * complete, + top, reload_key(rsc), NULL, + pe_order_optional, data_set); + +- if(is_set(rsc->flags, pe_rsc_fence_device) && is_set(data_set->flags, pe_flag_enable_unfencing)) { ++#if 0 ++ // complete is always null currently ++ if (!is_unfence_device(rsc, data_set)) { + /* Normally rsc.start depends on probe complete which depends +- * on rsc.probe. But this can't be the case in this scenario as +- * it would create graph loops. ++ * on rsc.probe. But this can't be the case for fence devices ++ * with unfencing, as it would create graph loops. 
+ * + * So instead we explicitly order 'rsc.probe then rsc.start' + */ +- +- } else { + order_actions(probe, complete, pe_order_implies_then); + } ++#endif + return TRUE; + } + +-- +1.8.3.1 + + +From 63431baae2e544dc3b21d51b035942dfeeca5561 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 1 Dec 2017 12:06:16 -0600 +Subject: [PATCH 04/16] Fix: pengine: unfence before probing or starting fence + devices + +Regression since 7f8ba307 +--- + pengine/native.c | 62 ++++++++++++++++++++++++++++++++------------------------ + 1 file changed, 35 insertions(+), 27 deletions(-) + +diff --git a/pengine/native.c b/pengine/native.c +index e57fbc7..0013e33 100644 +--- a/pengine/native.c ++++ b/pengine/native.c +@@ -2550,6 +2550,39 @@ StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * d + return TRUE; + } + ++static void ++order_after_unfencing(resource_t *rsc, pe_node_t *node, action_t *action, ++ enum pe_ordering order, pe_working_set_t *data_set) ++{ ++ /* When unfencing is in use, we order unfence actions before any probe or ++ * start of resources that require unfencing, and also of fence devices. ++ * ++ * This might seem to violate the principle that fence devices require ++ * only quorum. However, fence agents that unfence often don't have enough ++ * information to even probe or start unless the node is first unfenced. ++ */ ++ if (is_unfence_device(rsc, data_set) ++ || is_set(rsc->flags, pe_rsc_needs_unfencing)) { ++ ++ /* Start with an optional ordering. Requiring unfencing would result in ++ * the node being unfenced, and all its resources being stopped, ++ * whenever a new resource is added -- which would be highly suboptimal. ++ */ ++ action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); ++ ++ order_actions(unfence, action, order); ++ ++ if (!node_has_been_unfenced(node)) { ++ // But unfencing is required if it has never been done ++ char *reason = crm_strdup_printf("required by %s %s", ++ rsc->id, action->task); ++ ++ trigger_unfencing(NULL, node, reason, NULL, data_set); ++ free(reason); ++ } ++ } ++} ++ + gboolean + StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set) + { +@@ -2559,16 +2592,7 @@ StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * + pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0); + start = start_action(rsc, next, TRUE); + +- if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { +- action_t *unfence = pe_fence_op(next, "on", TRUE, NULL, data_set); +- +- order_actions(unfence, start, pe_order_implies_then); +- if (!node_has_been_unfenced(next)) { +- char *reason = crm_strdup_printf("Required by %s", rsc->id); +- trigger_unfencing(NULL, next, reason, NULL, data_set); +- free(reason); +- } +- } ++ order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set); + + if (is_set(start->flags, pe_action_runnable) && optional == FALSE) { + update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); +@@ -2989,23 +3013,7 @@ native_create_probe(resource_t * rsc, node_t * node, action_t * complete, + probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); + update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); + +- /* If enabled, require unfencing before probing any fence devices +- * but ensure it happens after any resources that require +- * unfencing have been probed. 
+- * +- * Doing it the other way (requiring unfencing after probing +- * resources that need it) would result in the node being +- * unfenced, and all its resources being stopped, whenever a new +- * resource is added. Which would be highly suboptimal. +- * +- * So essentially, at the point the fencing device(s) have been +- * probed, we know the state of all resources that require +- * unfencing and that unfencing occurred. +- */ +- if(is_set(rsc->flags, pe_rsc_needs_unfencing)) { +- action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set); +- order_actions(unfence, probe, pe_order_optional); +- } ++ order_after_unfencing(rsc, node, probe, pe_order_optional, data_set); + + /* + * We need to know if it's running_on (not just known_on) this node +-- +1.8.3.1 + + +From 9d3840f374122f6258ddfe44bf85ff43d394d209 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 1 Dec 2017 12:24:55 -0600 +Subject: [PATCH 05/16] Test: PE: update regression tests for unfencing change + +--- + pengine/test10/start-then-stop-with-unfence.dot | 3 +++ + pengine/test10/start-then-stop-with-unfence.exp | 15 +++++++++++++-- + pengine/test10/start-then-stop-with-unfence.summary | 10 +++++----- + pengine/test10/unfence-definition.dot | 2 ++ + pengine/test10/unfence-definition.exp | 9 ++++++++- + pengine/test10/unfence-definition.summary | 4 ++-- + pengine/test10/unfence-parameters.dot | 2 ++ + pengine/test10/unfence-parameters.exp | 9 ++++++++- + pengine/test10/unfence-parameters.summary | 4 ++-- + pengine/test10/unfence-startup.dot | 1 + + pengine/test10/unfence-startup.exp | 6 +++++- + pengine/test10/unfence-startup.summary | 4 ++-- + 12 files changed, 53 insertions(+), 16 deletions(-) + +diff --git a/pengine/test10/start-then-stop-with-unfence.dot b/pengine/test10/start-then-stop-with-unfence.dot +index 6e9569b..b324339 100644 +--- a/pengine/test10/start-then-stop-with-unfence.dot ++++ b/pengine/test10/start-then-stop-with-unfence.dot +@@ -23,5 +23,8 @@ digraph "g" { + "mpath-node2_monitor_0 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] + "stonith 'on' rhel7-node1.example.com" -> "ip1_start_0 rhel7-node1.example.com" [ style = bold] + "stonith 'on' rhel7-node1.example.com" -> "jrummy_start_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" -> "mpath-node1_monitor_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" -> "mpath-node1_start_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" -> "mpath-node2_monitor_0 rhel7-node1.example.com" [ style = bold] + "stonith 'on' rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] + } +diff --git a/pengine/test10/start-then-stop-with-unfence.exp b/pengine/test10/start-then-stop-with-unfence.exp +index 75cb356..715ba40 100644 +--- a/pengine/test10/start-then-stop-with-unfence.exp ++++ b/pengine/test10/start-then-stop-with-unfence.exp +@@ -6,7 +6,11 @@ + + + +- ++ ++ ++ ++ ++ + + + +@@ -30,6 +34,9 @@ + + + ++ ++ ++ + + + +@@ -41,7 +48,11 @@ + + + +- ++ ++ ++ ++ ++ + + + +diff --git a/pengine/test10/start-then-stop-with-unfence.summary b/pengine/test10/start-then-stop-with-unfence.summary +index 2e02a21..b2114d7 100644 +--- a/pengine/test10/start-then-stop-with-unfence.summary ++++ b/pengine/test10/start-then-stop-with-unfence.summary +@@ -11,23 +11,23 @@ Online: [ rhel7-node1.example.com rhel7-node2.example.com ] + Stopped: [ rhel7-node1.example.com ] + + Transition Summary: +- * Fence (on) rhel7-node1.example.com 'Required by 
ip1' ++ * Fence (on) rhel7-node1.example.com 'required by mpath-node2 monitor' + * Start mpath-node1 (rhel7-node1.example.com) + * Move ip1 ( rhel7-node2.example.com -> rhel7-node1.example.com ) + * Start jrummy:1 (rhel7-node1.example.com) + + Executing cluster transition: +- * Resource action: mpath-node2 monitor on rhel7-node1.example.com +- * Resource action: mpath-node1 monitor on rhel7-node1.example.com + * Pseudo action: jrummy-clone_start_0 + * Fencing rhel7-node1.example.com (on) +- * Resource action: mpath-node1 start on rhel7-node1.example.com ++ * Resource action: mpath-node2 monitor on rhel7-node1.example.com ++ * Resource action: mpath-node1 monitor on rhel7-node1.example.com + * Resource action: jrummy start on rhel7-node1.example.com + * Pseudo action: jrummy-clone_running_0 +- * Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com ++ * Resource action: mpath-node1 start on rhel7-node1.example.com + * Resource action: ip1 stop on rhel7-node2.example.com + * Resource action: jrummy monitor=10000 on rhel7-node1.example.com + * Pseudo action: all_stopped ++ * Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com + * Resource action: ip1 start on rhel7-node1.example.com + * Resource action: ip1 monitor=10000 on rhel7-node1.example.com + +diff --git a/pengine/test10/unfence-definition.dot b/pengine/test10/unfence-definition.dot +index 3bc29d3..c42391a 100644 +--- a/pengine/test10/unfence-definition.dot ++++ b/pengine/test10/unfence-definition.dot +@@ -66,11 +66,13 @@ digraph "g" { + "fencing_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] + "stonith 'on' virt-1" -> "clvmd_start_0 virt-1" [ style = bold] + "stonith 'on' virt-1" -> "dlm_start_0 virt-1" [ style = bold] ++"stonith 'on' virt-1" -> "fencing_start_0 virt-1" [ style = bold] + "stonith 'on' virt-1" [ style=bold color="green" fontcolor="black"] + "stonith 'on' virt-3" -> "clvmd:2_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "dlm:2_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "dlm:2_start_0 virt-3" [ style = bold] ++"stonith 'on' virt-3" -> "fencing_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" [ style=bold color="green" fontcolor="black"] + "stonith 'reboot' virt-4" -> "stonith_complete" [ style = bold] + "stonith 'reboot' virt-4" [ style=bold color="green" fontcolor="black"] +diff --git a/pengine/test10/unfence-definition.exp b/pengine/test10/unfence-definition.exp +index b1e241a..25c5674 100644 +--- a/pengine/test10/unfence-definition.exp ++++ b/pengine/test10/unfence-definition.exp +@@ -11,6 +11,9 @@ + + + ++ ++ ++ + + + +@@ -28,7 +31,11 @@ + + + +- ++ ++ ++ ++ ++ + + + +diff --git a/pengine/test10/unfence-definition.summary b/pengine/test10/unfence-definition.summary +index 4ca9344..2051c51 100644 +--- a/pengine/test10/unfence-definition.summary ++++ b/pengine/test10/unfence-definition.summary +@@ -13,7 +13,7 @@ Online: [ virt-1 virt-2 virt-3 ] + + Transition Summary: + * Fence (reboot) virt-4 'node is unclean' +- * Fence (on) virt-3 'Required by dlm:2' ++ * Fence (on) virt-3 'required by fencing monitor' + * Fence (on) virt-1 'Device definition changed' + * Restart fencing ( virt-1 ) + * Restart dlm:0 ( virt-1 ) due to required stonith +@@ -23,13 +23,13 @@ Transition Summary: + * Start clvmd:2 (virt-3) + + Executing cluster transition: +- * Resource action: fencing monitor on virt-3 + * Resource action: fencing stop on virt-1 + * Resource action: clvmd monitor on 
virt-2 + * Pseudo action: clvmd-clone_stop_0 + * Fencing virt-4 (reboot) + * Pseudo action: stonith_complete + * Fencing virt-3 (on) ++ * Resource action: fencing monitor on virt-3 + * Resource action: fencing delete on virt-1 + * Resource action: dlm monitor on virt-3 + * Resource action: clvmd stop on virt-1 +diff --git a/pengine/test10/unfence-parameters.dot b/pengine/test10/unfence-parameters.dot +index ce006c4..3c27b22 100644 +--- a/pengine/test10/unfence-parameters.dot ++++ b/pengine/test10/unfence-parameters.dot +@@ -63,11 +63,13 @@ digraph "g" { + "fencing_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] + "stonith 'on' virt-1" -> "clvmd_start_0 virt-1" [ style = bold] + "stonith 'on' virt-1" -> "dlm_start_0 virt-1" [ style = bold] ++"stonith 'on' virt-1" -> "fencing_start_0 virt-1" [ style = bold] + "stonith 'on' virt-1" [ style=bold color="green" fontcolor="black"] + "stonith 'on' virt-3" -> "clvmd:2_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "dlm:2_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "dlm:2_start_0 virt-3" [ style = bold] ++"stonith 'on' virt-3" -> "fencing_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" [ style=bold color="green" fontcolor="black"] + "stonith 'reboot' virt-4" -> "stonith_complete" [ style = bold] + "stonith 'reboot' virt-4" [ style=bold color="green" fontcolor="black"] +diff --git a/pengine/test10/unfence-parameters.exp b/pengine/test10/unfence-parameters.exp +index b8053c7..3b73fc7 100644 +--- a/pengine/test10/unfence-parameters.exp ++++ b/pengine/test10/unfence-parameters.exp +@@ -15,7 +15,11 @@ + + + +- ++ ++ ++ ++ ++ + + + +@@ -29,6 +33,9 @@ + + + ++ ++ ++ + + + +diff --git a/pengine/test10/unfence-parameters.summary b/pengine/test10/unfence-parameters.summary +index 5b582d9..2cc9e27 100644 +--- a/pengine/test10/unfence-parameters.summary ++++ b/pengine/test10/unfence-parameters.summary +@@ -13,7 +13,7 @@ Online: [ virt-1 virt-2 virt-3 ] + + Transition Summary: + * Fence (reboot) virt-4 'node is unclean' +- * Fence (on) virt-3 'Required by dlm:2' ++ * Fence (on) virt-3 'required by fencing monitor' + * Fence (on) virt-1 'Device parameters changed (reload)' + * Restart fencing ( virt-1 ) due to resource definition change + * Restart dlm:0 ( virt-1 ) due to required stonith +@@ -24,12 +24,12 @@ Transition Summary: + + Executing cluster transition: + * Resource action: fencing stop on virt-1 +- * Resource action: fencing monitor on virt-3 + * Resource action: clvmd monitor on virt-2 + * Pseudo action: clvmd-clone_stop_0 + * Fencing virt-4 (reboot) + * Pseudo action: stonith_complete + * Fencing virt-3 (on) ++ * Resource action: fencing monitor on virt-3 + * Resource action: dlm monitor on virt-3 + * Resource action: clvmd stop on virt-1 + * Resource action: clvmd monitor on virt-3 +diff --git a/pengine/test10/unfence-startup.dot b/pengine/test10/unfence-startup.dot +index d496956..642f795 100644 +--- a/pengine/test10/unfence-startup.dot ++++ b/pengine/test10/unfence-startup.dot +@@ -29,6 +29,7 @@ digraph "g" { + "stonith 'on' virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "dlm:2_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "dlm:2_start_0 virt-3" [ style = bold] ++"stonith 'on' virt-3" -> "fencing_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" [ style=bold color="green" fontcolor="black"] + "stonith 'reboot' virt-4" -> "stonith_complete" [ style = bold] + "stonith 'reboot' 
virt-4" [ style=bold color="green" fontcolor="black"] +diff --git a/pengine/test10/unfence-startup.exp b/pengine/test10/unfence-startup.exp +index 70c1686..bfd24c8 100644 +--- a/pengine/test10/unfence-startup.exp ++++ b/pengine/test10/unfence-startup.exp +@@ -6,7 +6,11 @@ + + + +- ++ ++ ++ ++ ++ + + + +diff --git a/pengine/test10/unfence-startup.summary b/pengine/test10/unfence-startup.summary +index 276358c..4601f31 100644 +--- a/pengine/test10/unfence-startup.summary ++++ b/pengine/test10/unfence-startup.summary +@@ -13,18 +13,18 @@ Online: [ virt-1 virt-2 virt-3 ] + + Transition Summary: + * Fence (reboot) virt-4 'node is unclean' +- * Fence (on) virt-3 'Required by dlm:2' ++ * Fence (on) virt-3 'required by fencing monitor' + * Start dlm:2 (virt-3) + * Start clvmd:1 (virt-2) + * Start clvmd:2 (virt-3) + + Executing cluster transition: +- * Resource action: fencing monitor on virt-3 + * Resource action: clvmd monitor on virt-2 + * Fencing virt-4 (reboot) + * Pseudo action: stonith_complete + * Fencing virt-3 (on) + * Pseudo action: all_stopped ++ * Resource action: fencing monitor on virt-3 + * Resource action: dlm monitor on virt-3 + * Pseudo action: dlm-clone_start_0 + * Resource action: clvmd monitor on virt-3 +-- +1.8.3.1 + + +From c11d10ef4f04bbdb2e6b7e6251b88e50faccaaca Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 1 Dec 2017 14:36:03 -0600 +Subject: [PATCH 06/16] Test: PE: add regression test for unfencing with only + fence devices + +--- + pengine/regression.sh | 1 + + pengine/test10/unfence-device.dot | 18 ++++++ + pengine/test10/unfence-device.exp | 100 ++++++++++++++++++++++++++++++++++ + pengine/test10/unfence-device.scores | 5 ++ + pengine/test10/unfence-device.summary | 29 ++++++++++ + pengine/test10/unfence-device.xml | 66 ++++++++++++++++++++++ + 6 files changed, 219 insertions(+) + create mode 100644 pengine/test10/unfence-device.dot + create mode 100644 pengine/test10/unfence-device.exp + create mode 100644 pengine/test10/unfence-device.scores + create mode 100644 pengine/test10/unfence-device.summary + create mode 100644 pengine/test10/unfence-device.xml + +diff --git a/pengine/regression.sh b/pengine/regression.sh +index db101e7..47cf0ba 100755 +--- a/pengine/regression.sh ++++ b/pengine/regression.sh +@@ -393,6 +393,7 @@ echo "" + do_test unfence-startup "Clean unfencing" + do_test unfence-definition "Unfencing when the agent changes" + do_test unfence-parameters "Unfencing when the agent parameters changes" ++do_test unfence-device "Unfencing when a cluster has only fence devices" + + echo "" + do_test master-0 "Stopped -> Slave" +diff --git a/pengine/test10/unfence-device.dot b/pengine/test10/unfence-device.dot +new file mode 100644 +index 0000000..e383fd2 +--- /dev/null ++++ b/pengine/test10/unfence-device.dot +@@ -0,0 +1,18 @@ ++digraph "g" { ++"fence_scsi_monitor_0 virt-008" -> "fence_scsi_start_0 virt-008" [ style = bold] ++"fence_scsi_monitor_0 virt-008" [ style=bold color="green" fontcolor="black"] ++"fence_scsi_monitor_0 virt-009" -> "fence_scsi_start_0 virt-008" [ style = bold] ++"fence_scsi_monitor_0 virt-009" [ style=bold color="green" fontcolor="black"] ++"fence_scsi_monitor_0 virt-013" -> "fence_scsi_start_0 virt-008" [ style = bold] ++"fence_scsi_monitor_0 virt-013" [ style=bold color="green" fontcolor="black"] ++"fence_scsi_monitor_60000 virt-008" [ style=bold color="green" fontcolor="black"] ++"fence_scsi_start_0 virt-008" -> "fence_scsi_monitor_60000 virt-008" [ style = bold] ++"fence_scsi_start_0 virt-008" [ style=bold color="green" 
fontcolor="black"] ++"stonith 'on' virt-008" -> "fence_scsi_monitor_0 virt-008" [ style = bold] ++"stonith 'on' virt-008" -> "fence_scsi_start_0 virt-008" [ style = bold] ++"stonith 'on' virt-008" [ style=bold color="green" fontcolor="black"] ++"stonith 'on' virt-009" -> "fence_scsi_monitor_0 virt-009" [ style = bold] ++"stonith 'on' virt-009" [ style=bold color="green" fontcolor="black"] ++"stonith 'on' virt-013" -> "fence_scsi_monitor_0 virt-013" [ style = bold] ++"stonith 'on' virt-013" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/pengine/test10/unfence-device.exp b/pengine/test10/unfence-device.exp +new file mode 100644 +index 0000000..98cb548 +--- /dev/null ++++ b/pengine/test10/unfence-device.exp +@@ -0,0 +1,100 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/pengine/test10/unfence-device.scores b/pengine/test10/unfence-device.scores +new file mode 100644 +index 0000000..8ea5036 +--- /dev/null ++++ b/pengine/test10/unfence-device.scores +@@ -0,0 +1,5 @@ ++Allocation scores: ++Using the original execution date of: 2017-11-30 10:44:29Z ++native_color: fence_scsi allocation score on virt-008: 0 ++native_color: fence_scsi allocation score on virt-009: 0 ++native_color: fence_scsi allocation score on virt-013: 0 +diff --git a/pengine/test10/unfence-device.summary b/pengine/test10/unfence-device.summary +new file mode 100644 +index 0000000..181724b +--- /dev/null ++++ b/pengine/test10/unfence-device.summary +@@ -0,0 +1,29 @@ ++Using the original execution date of: 2017-11-30 10:44:29Z ++ ++Current cluster status: ++Online: [ virt-008 virt-009 virt-013 ] ++ ++ fence_scsi (stonith:fence_scsi): Stopped ++ ++Transition Summary: ++ * Fence (on) virt-013 'required by fence_scsi monitor' ++ * Fence (on) virt-009 'required by fence_scsi monitor' ++ * Fence (on) virt-008 'required by fence_scsi monitor' ++ * Start fence_scsi ( virt-008 ) ++ ++Executing cluster transition: ++ * Fencing virt-013 (on) ++ * Fencing virt-009 (on) ++ * Fencing virt-008 (on) ++ * Resource action: fence_scsi monitor on virt-013 ++ * Resource action: fence_scsi monitor on virt-009 ++ * Resource action: fence_scsi monitor on virt-008 ++ * Resource action: fence_scsi start on virt-008 ++ * Resource action: fence_scsi monitor=60000 on virt-008 ++Using the original execution date of: 2017-11-30 10:44:29Z ++ ++Revised cluster status: ++Online: [ virt-008 virt-009 virt-013 ] ++ ++ fence_scsi (stonith:fence_scsi): Started virt-008 ++ +diff --git a/pengine/test10/unfence-device.xml b/pengine/test10/unfence-device.xml +new file mode 100644 +index 0000000..e977d9b +--- /dev/null ++++ b/pengine/test10/unfence-device.xml +@@ -0,0 +1,66 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 2948a8e329cda42e5e7e106c0374d49d93b65481 Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Wed, 6 Dec 2017 14:05:05 +1100 +Subject: [PATCH 07/16] Fix: PE: Passing boolean instead of a pointer + +--- + lib/pengine/container.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/pengine/container.c b/lib/pengine/container.c +index 52b60a4..4d2d876 100644 +--- 
a/lib/pengine/container.c ++++ b/lib/pengine/container.c +@@ -557,7 +557,7 @@ create_remote_resource( + * remote should be ordered relative to docker. + */ + xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id, +- XML_BOOLEAN_FALSE, NULL, "60s", NULL, ++ NULL, NULL, "60s", NULL, + NULL, connect_name, + (data->control_port? + data->control_port : port_s)); +-- +1.8.3.1 + + +From f3593e410643dcafa81e28da27c3a623e306fa61 Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Wed, 6 Dec 2017 14:48:57 +1100 +Subject: [PATCH 08/16] Fix: PE: Ordering bundle child stops/demotes after + container fencing causes graph loops + +--- + include/crm/pengine/status.h | 6 ++++++ + lib/pengine/utils.c | 2 +- + pengine/allocate.c | 4 +++- + pengine/native.c | 9 +++++++-- + pengine/test10/bundle-order-fencing.dot | 5 ----- + pengine/test10/bundle-order-fencing.exp | 15 --------------- + 6 files changed, 17 insertions(+), 24 deletions(-) + +diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h +index f2a8910..fca7f12 100644 +--- a/include/crm/pengine/status.h ++++ b/include/crm/pengine/status.h +@@ -517,4 +517,10 @@ pe_rsc_is_anon_clone(resource_t *rsc) + return pe_rsc_is_clone(rsc) && is_not_set(rsc->flags, pe_rsc_unique); + } + ++static inline bool ++pe_rsc_is_bundled(resource_t *rsc) ++{ ++ return uber_parent(rsc)->parent != NULL; ++} ++ + #endif +diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c +index 0ce0e30..a875226 100644 +--- a/lib/pengine/utils.c ++++ b/lib/pengine/utils.c +@@ -1015,7 +1015,7 @@ unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, + value = "nothing (resource)"; + } + +- pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value); ++ pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value); + + value = unpack_operation_on_fail(action); + +diff --git a/pengine/allocate.c b/pengine/allocate.c +index 98464a9..2ae491c 100644 +--- a/pengine/allocate.c ++++ b/pengine/allocate.c +@@ -1470,7 +1470,9 @@ fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set) + + /* Order/imply other actions relative to pseudo-fence as with real fence */ + stonith_constraints(node, stonith_op, data_set); +- order_actions(stonith_op, done, pe_order_implies_then); ++ if(done) { ++ order_actions(stonith_op, done, pe_order_implies_then); ++ } + } + + /* +diff --git a/pengine/native.c b/pengine/native.c +index 0013e33..96c9a26 100644 +--- a/pengine/native.c ++++ b/pengine/native.c +@@ -3164,7 +3164,9 @@ native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_ + */ + flags |= pe_order_preserve; + } +- order_actions(stonith_op, action, flags); ++ if (pe_rsc_is_bundled(rsc) == FALSE) { ++ order_actions(stonith_op, action, flags); ++ } + order_actions(stonith_op, parent_stop, flags); + } + +@@ -3252,7 +3254,10 @@ native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_ + update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__); + update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); + +- if (start == NULL || start->needs > rsc_req_quorum) { ++ if (pe_rsc_is_bundled(rsc)) { ++ /* Do nothing, let the recovery be ordered after the parent's implied stop */ ++ ++ } else if (start == NULL || start->needs > rsc_req_quorum) { + order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); + } + } +diff --git a/pengine/test10/bundle-order-fencing.dot b/pengine/test10/bundle-order-fencing.dot +index a7e5805..64b6326 100644 +--- 
a/pengine/test10/bundle-order-fencing.dot ++++ b/pengine/test10/bundle-order-fencing.dot +@@ -403,19 +403,14 @@ digraph "g" { + "redis_stop_0 redis-bundle-0" -> "redis_start_0 redis-bundle-0" [ style = dashed] + "redis_stop_0 redis-bundle-0" [ style=bold color="green" fontcolor="orange"] + "stonith 'off' galera-bundle-0" -> "galera-bundle-master_stop_0" [ style = bold] +-"stonith 'off' galera-bundle-0" -> "galera_demote_0 galera-bundle-0" [ style = bold] +-"stonith 'off' galera-bundle-0" -> "galera_stop_0 galera-bundle-0" [ style = bold] + "stonith 'off' galera-bundle-0" -> "stonith_complete" [ style = bold] + "stonith 'off' galera-bundle-0" [ style=bold color="green" fontcolor="orange"] + "stonith 'off' rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_stop_0" [ style = bold] + "stonith 'off' rabbitmq-bundle-0" -> "rabbitmq_post_notify_stonith_0" [ style = bold] +-"stonith 'off' rabbitmq-bundle-0" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = bold] + "stonith 'off' rabbitmq-bundle-0" -> "stonith_complete" [ style = bold] + "stonith 'off' rabbitmq-bundle-0" [ style=bold color="green" fontcolor="orange"] + "stonith 'off' redis-bundle-0" -> "redis-bundle-master_stop_0" [ style = bold] +-"stonith 'off' redis-bundle-0" -> "redis_demote_0 redis-bundle-0" [ style = bold] + "stonith 'off' redis-bundle-0" -> "redis_post_notify_stonith_0" [ style = bold] +-"stonith 'off' redis-bundle-0" -> "redis_stop_0 redis-bundle-0" [ style = bold] + "stonith 'off' redis-bundle-0" -> "stonith_complete" [ style = bold] + "stonith 'off' redis-bundle-0" [ style=bold color="green" fontcolor="orange"] + "stonith 'reboot' controller-0" -> "galera-bundle-0_stop_0 controller-0" [ style = bold] +diff --git a/pengine/test10/bundle-order-fencing.exp b/pengine/test10/bundle-order-fencing.exp +index 8e35f32..78ce675 100644 +--- a/pengine/test10/bundle-order-fencing.exp ++++ b/pengine/test10/bundle-order-fencing.exp +@@ -55,9 +55,6 @@ + + + +- +- +- + + + +@@ -440,9 +437,6 @@ + + + +- +- +- + + + +@@ -455,9 +449,6 @@ + + + +- +- +- + + + +@@ -701,9 +692,6 @@ + + + +- +- +- + + + +@@ -716,9 +704,6 @@ + + + +- +- +- + + + +-- +1.8.3.1 + + +From 906cd4a9e6b871eefb6d113354f9045c1826711a Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Wed, 6 Dec 2017 15:04:21 +1100 +Subject: [PATCH 09/16] Fix: PE: Only allowed nodes need to be considered when + ordering resource startup after _all_ recovery + +--- + pengine/native.c | 1 + + pengine/test10/bundle-order-fencing.dot | 2 -- + pengine/test10/bundle-order-fencing.exp | 6 ------ + pengine/test10/bundle-order-fencing.summary | 8 ++++---- + 4 files changed, 5 insertions(+), 12 deletions(-) + +diff --git a/pengine/native.c b/pengine/native.c +index 96c9a26..d4f1ff7 100644 +--- a/pengine/native.c ++++ b/pengine/native.c +@@ -3088,6 +3088,7 @@ native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set + order_actions(stonith_done, action, pe_order_optional); + + } else if (safe_str_eq(action->task, RSC_START) ++ && NULL != pe_hash_table_lookup(rsc->allowed_nodes, target->details->id) + && NULL == pe_hash_table_lookup(rsc->known_on, target->details->id)) { + /* if known == NULL, then we don't know if + * the resource is active on the node +diff --git a/pengine/test10/bundle-order-fencing.dot b/pengine/test10/bundle-order-fencing.dot +index 64b6326..d653250 100644 +--- a/pengine/test10/bundle-order-fencing.dot ++++ b/pengine/test10/bundle-order-fencing.dot +@@ -3,8 +3,6 @@ digraph "g" { + "Cancel redis_monitor_45000 redis-bundle-1" [ style=bold color="green" 
fontcolor="black"] + "Cancel redis_monitor_60000 redis-bundle-1" -> "redis_promote_0 redis-bundle-1" [ style = bold] + "Cancel redis_monitor_60000 redis-bundle-1" [ style=bold color="green" fontcolor="black"] +-"all_stopped" -> "stonith-fence_ipmilan-5254000dcb3f_start_0 controller-2" [ style = bold] +-"all_stopped" -> "stonith-fence_ipmilan-5254003e8e97_start_0 controller-1" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "galera-bundle-0_monitor_0 controller-1" -> "galera-bundle-0_start_0 controller-2" [ style = dashed] + "galera-bundle-0_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"] +diff --git a/pengine/test10/bundle-order-fencing.exp b/pengine/test10/bundle-order-fencing.exp +index 78ce675..708815f 100644 +--- a/pengine/test10/bundle-order-fencing.exp ++++ b/pengine/test10/bundle-order-fencing.exp +@@ -1624,9 +1624,6 @@ + + + +- +- +- + + + +@@ -1661,9 +1658,6 @@ + + + +- +- +- + + + +diff --git a/pengine/test10/bundle-order-fencing.summary b/pengine/test10/bundle-order-fencing.summary +index e78c531..ee2c361 100644 +--- a/pengine/test10/bundle-order-fencing.summary ++++ b/pengine/test10/bundle-order-fencing.summary +@@ -91,6 +91,8 @@ Executing cluster transition: + * Pseudo action: redis-bundle-master_demote_0 + * Pseudo action: redis-bundle-0_stop_0 + * Pseudo action: haproxy-bundle-docker-0_stop_0 ++ * Resource action: stonith-fence_ipmilan-5254003e8e97 start on controller-1 ++ * Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2 + * Pseudo action: stonith-redis-bundle-0-off on redis-bundle-0 + * Pseudo action: stonith-rabbitmq-bundle-0-off on rabbitmq-bundle-0 + * Pseudo action: stonith-galera-bundle-0-off on galera-bundle-0 +@@ -107,6 +109,8 @@ Executing cluster transition: + * Pseudo action: ip-192.168.24.7_stop_0 + * Pseudo action: ip-10.0.0.109_stop_0 + * Pseudo action: ip-172.17.4.11_stop_0 ++ * Resource action: stonith-fence_ipmilan-5254003e8e97 monitor=60000 on controller-1 ++ * Resource action: stonith-fence_ipmilan-5254000dcb3f monitor=60000 on controller-2 + * Pseudo action: galera-bundle_demoted_0 + * Pseudo action: galera-bundle_stop_0 + * Pseudo action: rabbitmq_stop_0 +@@ -172,11 +176,7 @@ Executing cluster transition: + * Pseudo action: rabbitmq-bundle_running_0 + * Pseudo action: all_stopped + * Pseudo action: redis-bundle-master_running_0 +- * Resource action: stonith-fence_ipmilan-5254003e8e97 start on controller-1 +- * Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2 + * Pseudo action: redis-bundle-master_post_notify_running_0 +- * Resource action: stonith-fence_ipmilan-5254003e8e97 monitor=60000 on controller-1 +- * Resource action: stonith-fence_ipmilan-5254000dcb3f monitor=60000 on controller-2 + * Resource action: redis notify on redis-bundle-0 + * Resource action: redis notify on redis-bundle-1 + * Resource action: redis notify on redis-bundle-2 +-- +1.8.3.1 + + +From c6d208dfbda95d8610519de50075087e56a4f8c0 Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Wed, 6 Dec 2017 23:50:12 +1100 +Subject: [PATCH 10/16] Fix: PE: Remote connection resources are safe to to + require only quorum + +--- + lib/pengine/complex.c | 6 ++++ + pengine/test10/bug-rh-1097457.dot | 2 +- + pengine/test10/bug-rh-1097457.exp | 6 ++-- + pengine/test10/bug-rh-1097457.summary | 14 ++++---- + pengine/test10/bundle-order-fencing.dot | 6 ---- + pengine/test10/bundle-order-fencing.exp | 18 ++-------- + pengine/test10/bundle-order-fencing.summary | 8 ++--- + 
pengine/test10/guest-node-host-dies.dot | 6 ++-- + pengine/test10/guest-node-host-dies.exp | 24 +++++--------- + pengine/test10/guest-node-host-dies.summary | 12 +++---- + pengine/test10/remote-fence-unclean.dot | 2 +- + pengine/test10/remote-fence-unclean.exp | 2 +- + pengine/test10/remote-partial-migrate2.dot | 6 +--- + pengine/test10/remote-partial-migrate2.exp | 27 ++++----------- + pengine/test10/remote-partial-migrate2.summary | 38 +++++++++++----------- + pengine/test10/remote-recover-all.dot | 3 +- + pengine/test10/remote-recover-all.exp | 10 ++---- + pengine/test10/remote-recover-all.summary | 8 ++--- + pengine/test10/remote-recover-connection.dot | 6 ---- + pengine/test10/remote-recover-connection.exp | 27 ++------------- + pengine/test10/remote-recover-connection.summary | 24 +++++++------- + pengine/test10/remote-recover-fail.dot | 2 +- + pengine/test10/remote-recover-fail.exp | 2 +- + pengine/test10/remote-recover-no-resources.dot | 3 +- + pengine/test10/remote-recover-no-resources.exp | 10 ++---- + pengine/test10/remote-recover-no-resources.summary | 8 ++--- + pengine/test10/remote-recover-unknown.dot | 3 +- + pengine/test10/remote-recover-unknown.exp | 10 ++---- + pengine/test10/remote-recover-unknown.summary | 8 ++--- + pengine/test10/remote-recovery.dot | 6 ---- + pengine/test10/remote-recovery.exp | 27 ++------------- + pengine/test10/remote-recovery.summary | 24 +++++++------- + pengine/test10/remote-unclean2.dot | 2 +- + pengine/test10/remote-unclean2.exp | 2 +- + pengine/test10/whitebox-fail1.dot | 2 +- + pengine/test10/whitebox-fail1.exp | 6 ++-- + pengine/test10/whitebox-fail1.summary | 8 ++--- + pengine/test10/whitebox-fail2.dot | 2 +- + pengine/test10/whitebox-fail2.exp | 6 ++-- + pengine/test10/whitebox-fail2.summary | 8 ++--- + pengine/test10/whitebox-imply-stop-on-fence.dot | 6 ++-- + pengine/test10/whitebox-imply-stop-on-fence.exp | 24 +++++--------- + .../test10/whitebox-imply-stop-on-fence.summary | 20 ++++++------ + pengine/test10/whitebox-ms-ordering.dot | 4 +-- + pengine/test10/whitebox-ms-ordering.exp | 12 +++---- + pengine/test10/whitebox-ms-ordering.summary | 8 ++--- + pengine/test10/whitebox-unexpectedly-running.dot | 2 ++ + pengine/test10/whitebox-unexpectedly-running.exp | 6 ++++ + 48 files changed, 182 insertions(+), 294 deletions(-) + +diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c +index 3e0abed..d58d6be 100644 +--- a/lib/pengine/complex.c ++++ b/lib/pengine/complex.c +@@ -784,6 +784,12 @@ common_unpack(xmlNode * xml_obj, resource_t ** rsc, + if(is_set((*rsc)->flags, pe_rsc_fence_device)) { + value = "quorum"; + ++ } else if (safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS), "ocf") ++ && safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_PROVIDER), "pacemaker") ++ && safe_str_eq(crm_element_value((*rsc)->xml, XML_ATTR_TYPE), "remote") ++ ) { ++ value = "quorum"; ++ + } else if (is_set(data_set->flags, pe_flag_enable_unfencing)) { + value = "unfencing"; + +diff --git a/pengine/test10/bug-rh-1097457.dot b/pengine/test10/bug-rh-1097457.dot +index 5984811..94ffe13 100644 +--- a/pengine/test10/bug-rh-1097457.dot ++++ b/pengine/test10/bug-rh-1097457.dot +@@ -80,6 +80,7 @@ digraph "g" { + "VM2_stop_0 lama3" -> "all_stopped" [ style = bold] + "VM2_stop_0 lama3" -> "stonith 'reboot' lamaVM2" [ style = bold] + "VM2_stop_0 lama3" [ style=bold color="green" fontcolor="black"] ++"all_stopped" -> "lamaVM2_start_0 lama3" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "lamaVM2-G4_running_0" [ 
style=bold color="green" fontcolor="orange"] + "lamaVM2-G4_start_0" -> "FAKE4-IP_start_0 lamaVM2" [ style = bold] +@@ -121,6 +122,5 @@ digraph "g" { + "stonith_complete" -> "FSlun3_start_0 lama2" [ style = bold] + "stonith_complete" -> "VM2_start_0 lama3" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "lamaVM2_start_0 lama3" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/bug-rh-1097457.exp b/pengine/test10/bug-rh-1097457.exp +index 4eedd91..f1451b5 100644 +--- a/pengine/test10/bug-rh-1097457.exp ++++ b/pengine/test10/bug-rh-1097457.exp +@@ -599,13 +599,13 @@ + + + +- ++ + + +- ++ + + +- ++ + + + +diff --git a/pengine/test10/bug-rh-1097457.summary b/pengine/test10/bug-rh-1097457.summary +index e23c6ad..0e7d2e0 100644 +--- a/pengine/test10/bug-rh-1097457.summary ++++ b/pengine/test10/bug-rh-1097457.summary +@@ -70,26 +70,26 @@ Executing cluster transition: + * Pseudo action: lamaVM2-G4_stop_0 + * Pseudo action: FAKE4-IP_stop_0 + * Pseudo action: FAKE6-clone_stop_0 +- * Resource action: lamaVM2 start on lama3 +- * Resource action: lamaVM2 monitor=30000 on lama3 +- * Resource action: FSlun3 monitor=10000 on lamaVM2 + * Pseudo action: FAKE4_stop_0 + * Pseudo action: FAKE6_stop_0 + * Pseudo action: FAKE6-clone_stopped_0 + * Pseudo action: FAKE6-clone_start_0 + * Pseudo action: lamaVM2-G4_stopped_0 +- * Resource action: FAKE6 start on lamaVM2 +- * Resource action: FAKE6 monitor=30000 on lamaVM2 +- * Pseudo action: FAKE6-clone_running_0 + * Pseudo action: FSlun3_stop_0 + * Pseudo action: all_stopped + * Resource action: FSlun3 start on lama2 + * Pseudo action: lamaVM2-G4_start_0 ++ * Resource action: lamaVM2 start on lama3 ++ * Resource action: lamaVM2 monitor=30000 on lama3 ++ * Resource action: FSlun3 monitor=10000 on lama2 ++ * Resource action: FSlun3 monitor=10000 on lamaVM2 + * Resource action: FAKE4 start on lamaVM2 + * Resource action: FAKE4 monitor=30000 on lamaVM2 + * Resource action: FAKE4-IP start on lamaVM2 + * Resource action: FAKE4-IP monitor=30000 on lamaVM2 +- * Resource action: FSlun3 monitor=10000 on lama2 ++ * Resource action: FAKE6 start on lamaVM2 ++ * Resource action: FAKE6 monitor=30000 on lamaVM2 ++ * Pseudo action: FAKE6-clone_running_0 + * Pseudo action: lamaVM2-G4_running_0 + + Revised cluster status: +diff --git a/pengine/test10/bundle-order-fencing.dot b/pengine/test10/bundle-order-fencing.dot +index d653250..980bab4 100644 +--- a/pengine/test10/bundle-order-fencing.dot ++++ b/pengine/test10/bundle-order-fencing.dot +@@ -411,15 +411,12 @@ digraph "g" { + "stonith 'off' redis-bundle-0" -> "redis_post_notify_stonith_0" [ style = bold] + "stonith 'off' redis-bundle-0" -> "stonith_complete" [ style = bold] + "stonith 'off' redis-bundle-0" [ style=bold color="green" fontcolor="orange"] +-"stonith 'reboot' controller-0" -> "galera-bundle-0_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "galera-bundle-docker-0_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "haproxy-bundle-docker-0_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "ip-10.0.0.109_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "ip-172.17.4.11_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "ip-192.168.24.7_stop_0 controller-0" [ style = bold] +-"stonith 'reboot' controller-0" -> "rabbitmq-bundle-0_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" 
-> "rabbitmq-bundle-docker-0_stop_0 controller-0" [ style = bold] +-"stonith 'reboot' controller-0" -> "redis-bundle-0_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "redis-bundle-docker-0_stop_0 controller-0" [ style = bold] + "stonith 'reboot' controller-0" -> "stonith 'off' galera-bundle-0" [ style = bold] + "stonith 'reboot' controller-0" -> "stonith 'off' rabbitmq-bundle-0" [ style = bold] +@@ -439,14 +436,11 @@ digraph "g" { + "stonith-fence_ipmilan-5254003e8e97_stop_0 controller-0" -> "stonith-fence_ipmilan-5254003e8e97_start_0 controller-1" [ style = bold] + "stonith-fence_ipmilan-5254003e8e97_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "galera-bundle-0_start_0 controller-2" [ style = dashed] + "stonith_complete" -> "galera_start_0 galera-bundle-0" [ style = dashed] + "stonith_complete" -> "ip-10.0.0.109_start_0 controller-1" [ style = bold] + "stonith_complete" -> "ip-172.17.4.11_start_0 controller-1" [ style = bold] + "stonith_complete" -> "ip-192.168.24.7_start_0 controller-2" [ style = bold] +-"stonith_complete" -> "rabbitmq-bundle-0_start_0 controller-1" [ style = dashed] + "stonith_complete" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed] +-"stonith_complete" -> "redis-bundle-0_start_0 controller-1" [ style = dashed] + "stonith_complete" -> "redis_promote_0 redis-bundle-1" [ style = bold] + "stonith_complete" -> "redis_start_0 redis-bundle-0" [ style = dashed] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] +diff --git a/pengine/test10/bundle-order-fencing.exp b/pengine/test10/bundle-order-fencing.exp +index 708815f..dc4c5c9 100644 +--- a/pengine/test10/bundle-order-fencing.exp ++++ b/pengine/test10/bundle-order-fencing.exp +@@ -379,11 +379,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -565,11 +561,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -1413,11 +1405,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/bundle-order-fencing.summary b/pengine/test10/bundle-order-fencing.summary +index ee2c361..0457f83 100644 +--- a/pengine/test10/bundle-order-fencing.summary ++++ b/pengine/test10/bundle-order-fencing.summary +@@ -56,10 +56,12 @@ Transition Summary: + + Executing cluster transition: + * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 ++ * Pseudo action: rabbitmq-bundle-0_stop_0 + * Resource action: rabbitmq-bundle-0 monitor on controller-2 + * Resource action: rabbitmq-bundle-0 monitor on controller-1 + * Resource action: rabbitmq-bundle-1 monitor on controller-2 + * Resource action: rabbitmq-bundle-2 monitor on controller-1 ++ * Pseudo action: galera-bundle-0_stop_0 + * Resource action: galera-bundle-0 monitor on controller-2 + * Resource action: galera-bundle-0 monitor on controller-1 + * Resource action: galera-bundle-1 monitor on controller-2 +@@ -67,6 +69,7 @@ Executing cluster transition: + * Resource action: redis cancel=45000 on redis-bundle-1 + * Resource action: redis cancel=60000 on redis-bundle-1 + * Pseudo action: redis-bundle-master_pre_notify_demote_0 ++ * Pseudo action: redis-bundle-0_stop_0 + * Resource action: redis-bundle-0 monitor on controller-2 + * Resource action: redis-bundle-0 monitor on controller-1 + * Resource action: redis-bundle-1 monitor on controller-2 +@@ -82,14 +85,12 @@ Executing cluster transition: + * Resource action: rabbitmq notify on rabbitmq-bundle-1 + * Resource action: rabbitmq notify on rabbitmq-bundle-2 + * Pseudo action: 
rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 +- * Pseudo action: rabbitmq-bundle-0_stop_0 ++ * Pseudo action: rabbitmq-bundle-docker-0_stop_0 + * Pseudo action: galera-bundle-master_demote_0 +- * Pseudo action: galera-bundle-0_stop_0 + * Resource action: redis notify on redis-bundle-1 + * Resource action: redis notify on redis-bundle-2 + * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0 + * Pseudo action: redis-bundle-master_demote_0 +- * Pseudo action: redis-bundle-0_stop_0 + * Pseudo action: haproxy-bundle-docker-0_stop_0 + * Resource action: stonith-fence_ipmilan-5254003e8e97 start on controller-1 + * Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2 +@@ -100,7 +101,6 @@ Executing cluster transition: + * Pseudo action: haproxy-bundle_stopped_0 + * Pseudo action: rabbitmq_post_notify_stop_0 + * Pseudo action: rabbitmq-bundle-clone_stop_0 +- * Pseudo action: rabbitmq-bundle-docker-0_stop_0 + * Pseudo action: galera_demote_0 + * Pseudo action: galera-bundle-master_demoted_0 + * Pseudo action: redis_post_notify_stop_0 +diff --git a/pengine/test10/guest-node-host-dies.dot b/pengine/test10/guest-node-host-dies.dot +index a85250d..c50e071 100644 +--- a/pengine/test10/guest-node-host-dies.dot ++++ b/pengine/test10/guest-node-host-dies.dot +@@ -6,6 +6,8 @@ digraph "g" { + "Fencing_stop_0 rhel7-4" -> "all_stopped" [ style = bold] + "Fencing_stop_0 rhel7-4" [ style=bold color="green" fontcolor="black"] + "all_stopped" -> "Fencing_start_0 rhel7-4" [ style = bold] ++"all_stopped" -> "lxc1_start_0 rhel7-2" [ style = bold] ++"all_stopped" -> "lxc2_start_0 rhel7-3" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "container1_start_0 rhel7-2" -> "lxc-ms_promote_0 lxc1" [ style = bold] + "container1_start_0 rhel7-2" -> "lxc-ms_start_0 lxc1" [ style = bold] +@@ -115,8 +117,6 @@ digraph "g" { + "stonith 'reboot' lxc2" [ style=bold color="green" fontcolor="orange"] + "stonith 'reboot' rhel7-1" -> "container1_stop_0 rhel7-1" [ style = bold] + "stonith 'reboot' rhel7-1" -> "container2_stop_0 rhel7-1" [ style = bold] +-"stonith 'reboot' rhel7-1" -> "lxc1_stop_0 rhel7-1" [ style = bold] +-"stonith 'reboot' rhel7-1" -> "lxc2_stop_0 rhel7-1" [ style = bold] + "stonith 'reboot' rhel7-1" -> "rsc_rhel7-1_stop_0 rhel7-1" [ style = bold] + "stonith 'reboot' rhel7-1" -> "stonith_complete" [ style = bold] + "stonith 'reboot' rhel7-1" [ style=bold color="green" fontcolor="black"] +@@ -126,8 +126,6 @@ digraph "g" { + "stonith_complete" -> "lxc-ms_promote_0 lxc1" [ style = bold] + "stonith_complete" -> "lxc-ms_start_0 lxc1" [ style = bold] + "stonith_complete" -> "lxc-ms_start_0 lxc2" [ style = bold] +-"stonith_complete" -> "lxc1_start_0 rhel7-2" [ style = bold] +-"stonith_complete" -> "lxc2_start_0 rhel7-3" [ style = bold] + "stonith_complete" -> "rsc_rhel7-1_start_0 rhel7-5" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/guest-node-host-dies.exp b/pengine/test10/guest-node-host-dies.exp +index 8dbadde..b5a34ea 100644 +--- a/pengine/test10/guest-node-host-dies.exp ++++ b/pengine/test10/guest-node-host-dies.exp +@@ -432,6 +432,9 @@ + + + ++ ++ ++ + + + +@@ -446,9 +449,6 @@ + + + +- +- +- + + + +@@ -457,11 +457,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -512,6 +508,9 @@ + + + ++ ++ ++ + + + +@@ -526,9 +525,6 @@ + + + +- +- +- + + + +@@ -537,11 +533,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/guest-node-host-dies.summary 
b/pengine/test10/guest-node-host-dies.summary +index 4feee88..9813d2b 100644 +--- a/pengine/test10/guest-node-host-dies.summary ++++ b/pengine/test10/guest-node-host-dies.summary +@@ -26,16 +26,16 @@ Transition Summary: + Executing cluster transition: + * Resource action: Fencing stop on rhel7-4 + * Pseudo action: lxc-ms-master_demote_0 ++ * Pseudo action: lxc1_stop_0 + * Resource action: lxc1 monitor on rhel7-5 + * Resource action: lxc1 monitor on rhel7-4 + * Resource action: lxc1 monitor on rhel7-3 ++ * Pseudo action: lxc2_stop_0 + * Resource action: lxc2 monitor on rhel7-5 + * Resource action: lxc2 monitor on rhel7-4 + * Resource action: lxc2 monitor on rhel7-2 + * Fencing rhel7-1 (reboot) + * Pseudo action: rsc_rhel7-1_stop_0 +- * Pseudo action: lxc1_stop_0 +- * Pseudo action: lxc2_stop_0 + * Pseudo action: container1_stop_0 + * Pseudo action: container2_stop_0 + * Pseudo action: stonith-lxc2-reboot on lxc2 +@@ -47,21 +47,21 @@ Executing cluster transition: + * Pseudo action: lxc-ms_demote_0 + * Pseudo action: lxc-ms-master_demoted_0 + * Pseudo action: lxc-ms-master_stop_0 +- * Resource action: lxc1 start on rhel7-2 +- * Resource action: lxc2 start on rhel7-3 + * Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5 + * Pseudo action: lxc-ms_stop_0 + * Pseudo action: lxc-ms_stop_0 + * Pseudo action: lxc-ms-master_stopped_0 + * Pseudo action: lxc-ms-master_start_0 +- * Resource action: lxc1 monitor=30000 on rhel7-2 +- * Resource action: lxc2 monitor=30000 on rhel7-3 + * Pseudo action: all_stopped + * Resource action: Fencing start on rhel7-4 + * Resource action: Fencing monitor=120000 on rhel7-4 ++ * Resource action: lxc1 start on rhel7-2 ++ * Resource action: lxc2 start on rhel7-3 + * Resource action: lxc-ms start on lxc1 + * Resource action: lxc-ms start on lxc2 + * Pseudo action: lxc-ms-master_running_0 ++ * Resource action: lxc1 monitor=30000 on rhel7-2 ++ * Resource action: lxc2 monitor=30000 on rhel7-3 + * Resource action: lxc-ms monitor=10000 on lxc2 + * Pseudo action: lxc-ms-master_promote_0 + * Resource action: lxc-ms promote on lxc1 +diff --git a/pengine/test10/remote-fence-unclean.dot b/pengine/test10/remote-fence-unclean.dot +index b2829a7..76a676d 100644 +--- a/pengine/test10/remote-fence-unclean.dot ++++ b/pengine/test10/remote-fence-unclean.dot +@@ -18,6 +18,7 @@ + "FAKE4_stop_0 18node1" -> "FAKE4_start_0 18node2" [ style = bold] + "FAKE4_stop_0 18node1" -> "all_stopped" [ style = bold] + "FAKE4_stop_0 18node1" [ style=bold color="green" fontcolor="black"] ++"all_stopped" -> "remote1_start_0 18node1" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "remote1_monitor_60000 18node1" [ style=bold color="green" fontcolor="black"] + "remote1_start_0 18node1" -> "remote1_monitor_60000 18node1" [ style = bold] +@@ -32,6 +33,5 @@ + "stonith_complete" -> "FAKE3_start_0 18node1" [ style = bold] + "stonith_complete" -> "FAKE4_start_0 18node2" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "remote1_start_0 18node1" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/remote-fence-unclean.exp b/pengine/test10/remote-fence-unclean.exp +index 3a07384..f77d7f6 100644 +--- a/pengine/test10/remote-fence-unclean.exp ++++ b/pengine/test10/remote-fence-unclean.exp +@@ -11,7 +11,7 @@ + + + +- ++ + + + +diff --git a/pengine/test10/remote-partial-migrate2.dot b/pengine/test10/remote-partial-migrate2.dot +index a8bf29b..17c8bf3 100644 +--- 
a/pengine/test10/remote-partial-migrate2.dot ++++ b/pengine/test10/remote-partial-migrate2.dot +@@ -89,6 +89,7 @@ + "FAKE9_stop_0 pcmk2" -> "FAKE9_start_0 pcmk_remote4" [ style = bold] + "FAKE9_stop_0 pcmk2" -> "all_stopped" [ style = bold] + "FAKE9_stop_0 pcmk2" [ style=bold color="green" fontcolor="black"] ++"all_stopped" -> "pcmk_remote5_start_0 pcmk2" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "pcmk_remote2_migrate_from_0 pcmk1" -> "pcmk_remote2_start_0 pcmk1" [ style = bold] + "pcmk_remote2_migrate_from_0 pcmk1" -> "pcmk_remote2_stop_0 pcmk3" [ style = bold] +@@ -150,10 +151,5 @@ + "stonith_complete" -> "FAKE5_start_0 pcmk_remote4" [ style = bold] + "stonith_complete" -> "FAKE9_start_0 pcmk_remote4" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "pcmk_remote2_migrate_from_0 pcmk1" [ style = bold] +-"stonith_complete" -> "pcmk_remote2_start_0 pcmk1" [ style = bold] +-"stonith_complete" -> "pcmk_remote4_start_0 pcmk2" [ style = bold] +-"stonith_complete" -> "pcmk_remote5_migrate_to_0 pcmk1" [ style = bold] +-"stonith_complete" -> "pcmk_remote5_start_0 pcmk2" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/remote-partial-migrate2.exp b/pengine/test10/remote-partial-migrate2.exp +index abf281f..bae190c 100644 +--- a/pengine/test10/remote-partial-migrate2.exp ++++ b/pengine/test10/remote-partial-migrate2.exp +@@ -6,11 +6,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -38,9 +34,6 @@ + + + +- +- +- + + + +@@ -76,11 +69,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -102,11 +91,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -129,13 +114,13 @@ + + + +- ++ + + +- ++ + + +- ++ + + + +diff --git a/pengine/test10/remote-partial-migrate2.summary b/pengine/test10/remote-partial-migrate2.summary +index 2a242bd..6b6428d 100644 +--- a/pengine/test10/remote-partial-migrate2.summary ++++ b/pengine/test10/remote-partial-migrate2.summary +@@ -84,6 +84,10 @@ Transition Summary: + * Move FAKE49 ( pcmk_remote3 -> pcmk_remote4 ) + + Executing cluster transition: ++ * Resource action: pcmk_remote2 migrate_from on pcmk1 ++ * Resource action: pcmk_remote2 stop on pcmk3 ++ * Resource action: pcmk_remote4 start on pcmk2 ++ * Resource action: pcmk_remote5 migrate_to on pcmk1 + * Resource action: FAKE5 stop on pcmk1 + * Resource action: FAKE9 stop on pcmk2 + * Resource action: FAKE12 stop on pcmk1 +@@ -99,11 +103,15 @@ Executing cluster transition: + * Resource action: FAKE48 stop on pcmk1 + * Resource action: FAKE49 stop on pcmk_remote3 + * Fencing pcmk4 (reboot) ++ * Pseudo action: pcmk_remote2_start_0 ++ * Resource action: pcmk_remote4 monitor=60000 on pcmk2 ++ * Resource action: pcmk_remote5 migrate_from on pcmk2 ++ * Resource action: pcmk_remote5 stop on pcmk1 ++ * Resource action: FAKE41 stop on pcmk_remote2 + * Pseudo action: stonith_complete +- * Resource action: pcmk_remote2 migrate_from on pcmk1 +- * Resource action: pcmk_remote2 stop on pcmk3 +- * Resource action: pcmk_remote4 start on pcmk2 +- * Resource action: pcmk_remote5 migrate_to on pcmk1 ++ * Pseudo action: all_stopped ++ * Resource action: pcmk_remote2 monitor=60000 on pcmk1 ++ * Pseudo action: pcmk_remote5_start_0 + * Resource action: FAKE5 start on pcmk_remote4 + * Resource action: FAKE9 start on pcmk_remote4 + * Resource action: FAKE12 start on pcmk2 +@@ -114,12 +122,12 @@ Executing cluster transition: + * Resource action: FAKE30 start on pcmk_remote1 + * Resource action: FAKE33 start on pcmk_remote4 + * Resource 
action: FAKE38 start on pcmk_remote1 ++ * Resource action: FAKE39 start on pcmk_remote2 ++ * Resource action: FAKE41 start on pcmk_remote4 ++ * Resource action: FAKE47 start on pcmk_remote2 + * Resource action: FAKE48 start on pcmk_remote3 + * Resource action: FAKE49 start on pcmk_remote4 +- * Pseudo action: pcmk_remote2_start_0 +- * Resource action: pcmk_remote4 monitor=60000 on pcmk2 +- * Resource action: pcmk_remote5 migrate_from on pcmk2 +- * Resource action: pcmk_remote5 stop on pcmk1 ++ * Resource action: pcmk_remote5 monitor=60000 on pcmk2 + * Resource action: FAKE5 monitor=10000 on pcmk_remote4 + * Resource action: FAKE9 monitor=10000 on pcmk_remote4 + * Resource action: FAKE12 monitor=10000 on pcmk2 +@@ -130,19 +138,11 @@ Executing cluster transition: + * Resource action: FAKE30 monitor=10000 on pcmk_remote1 + * Resource action: FAKE33 monitor=10000 on pcmk_remote4 + * Resource action: FAKE38 monitor=10000 on pcmk_remote1 +- * Resource action: FAKE39 start on pcmk_remote2 +- * Resource action: FAKE41 stop on pcmk_remote2 +- * Resource action: FAKE47 start on pcmk_remote2 +- * Resource action: FAKE48 monitor=10000 on pcmk_remote3 +- * Resource action: FAKE49 monitor=10000 on pcmk_remote4 +- * Pseudo action: all_stopped +- * Resource action: pcmk_remote2 monitor=60000 on pcmk1 +- * Pseudo action: pcmk_remote5_start_0 + * Resource action: FAKE39 monitor=10000 on pcmk_remote2 +- * Resource action: FAKE41 start on pcmk_remote4 +- * Resource action: FAKE47 monitor=10000 on pcmk_remote2 +- * Resource action: pcmk_remote5 monitor=60000 on pcmk2 + * Resource action: FAKE41 monitor=10000 on pcmk_remote4 ++ * Resource action: FAKE47 monitor=10000 on pcmk_remote2 ++ * Resource action: FAKE48 monitor=10000 on pcmk_remote3 ++ * Resource action: FAKE49 monitor=10000 on pcmk_remote4 + + Revised cluster status: + Online: [ pcmk1 pcmk2 pcmk3 ] +diff --git a/pengine/test10/remote-recover-all.dot b/pengine/test10/remote-recover-all.dot +index ad421e6..5b79602 100644 +--- a/pengine/test10/remote-recover-all.dot ++++ b/pengine/test10/remote-recover-all.dot +@@ -1,4 +1,5 @@ + digraph "g" { ++"all_stopped" -> "galera-0_start_0 controller-2" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] +@@ -114,7 +115,6 @@ digraph "g" { + "redis_stop_0 controller-1" -> "all_stopped" [ style = bold] + "redis_stop_0 controller-1" -> "redis-master_stopped_0" [ style = bold] + "redis_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] +-"stonith 'reboot' controller-1" -> "galera-0_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "galera-2_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy-clone_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy_stop_0 controller-1" [ style = bold] +@@ -156,7 +156,6 @@ digraph "g" { + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "galera-0_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] + "stonith_complete" -> 
"ip-172.17.1.17_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] +diff --git a/pengine/test10/remote-recover-all.exp b/pengine/test10/remote-recover-all.exp +index b0af5c4..556ccfd 100644 +--- a/pengine/test10/remote-recover-all.exp ++++ b/pengine/test10/remote-recover-all.exp +@@ -36,10 +36,10 @@ + + + +- ++ + + +- ++ + + + +@@ -49,11 +49,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/remote-recover-all.summary b/pengine/test10/remote-recover-all.summary +index 6c9f058..ba074e5 100644 +--- a/pengine/test10/remote-recover-all.summary ++++ b/pengine/test10/remote-recover-all.summary +@@ -56,13 +56,13 @@ Transition Summary: + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) + + Executing cluster transition: ++ * Pseudo action: galera-0_stop_0 + * Pseudo action: galera-master_demote_0 + * Pseudo action: redis-master_pre_notify_stop_0 + * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 + * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 + * Fencing controller-1 (reboot) +- * Pseudo action: galera-0_stop_0 + * Pseudo action: redis_post_notify_stop_0 + * Resource action: redis notify on controller-0 + * Resource action: redis notify on controller-2 +@@ -79,17 +79,14 @@ Executing cluster transition: + * Pseudo action: haproxy-clone_stopped_0 + * Fencing messaging-1 (reboot) + * Pseudo action: stonith_complete +- * Resource action: galera-0 start on controller-2 + * Pseudo action: rabbitmq_post_notify_stop_0 + * Pseudo action: rabbitmq-clone_stop_0 + * Pseudo action: galera_stop_0 +- * Resource action: galera monitor=10000 on galera-0 + * Pseudo action: galera-master_stopped_0 + * Pseudo action: redis-master_post_notify_stopped_0 + * Pseudo action: ip-172.17.1.14_stop_0 + * Pseudo action: ip-172.17.1.17_stop_0 + * Pseudo action: ip-172.17.4.11_stop_0 +- * Resource action: galera-0 monitor=20000 on controller-2 + * Pseudo action: galera-2_stop_0 + * Resource action: rabbitmq notify on messaging-2 + * Resource action: rabbitmq notify on messaging-0 +@@ -108,11 +105,14 @@ Executing cluster transition: + * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 + * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 + * Pseudo action: all_stopped ++ * Resource action: galera-0 start on controller-2 ++ * Resource action: galera monitor=10000 on galera-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 ++ * Resource action: galera-0 monitor=20000 on controller-2 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 + Using the original execution date of: 2017-05-03 13:33:24Z + +diff --git a/pengine/test10/remote-recover-connection.dot b/pengine/test10/remote-recover-connection.dot +index d6fdefe..6cd342f 100644 +--- a/pengine/test10/remote-recover-connection.dot ++++ b/pengine/test10/remote-recover-connection.dot +@@ -89,14 +89,11 @@ digraph "g" { + "redis_stop_0 controller-1" -> "all_stopped" [ style = bold] + "redis_stop_0 controller-1" -> "redis-master_stopped_0" [ style = bold] + 
"redis_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] +-"stonith 'reboot' controller-1" -> "galera-0_stop_0 controller-1" [ style = bold] +-"stonith 'reboot' controller-1" -> "galera-2_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy-clone_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "ip-172.17.1.14_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "ip-172.17.1.17_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "ip-172.17.4.11_stop_0 controller-1" [ style = bold] +-"stonith 'reboot' controller-1" -> "messaging-1_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "redis-master_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "redis_post_notify_stonith_0" [ style = bold] + "stonith 'reboot' controller-1" -> "redis_stop_0 controller-1" [ style = bold] +@@ -121,11 +118,8 @@ digraph "g" { + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "galera-0_start_0 controller-2" [ style = bold] +-"stonith_complete" -> "galera-2_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.17_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] +-"stonith_complete" -> "messaging-1_start_0 controller-2" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/remote-recover-connection.exp b/pengine/test10/remote-recover-connection.exp +index cf74efb..40338b4 100644 +--- a/pengine/test10/remote-recover-connection.exp ++++ b/pengine/test10/remote-recover-connection.exp +@@ -23,9 +23,6 @@ + + + +- +- +- + + + +@@ -34,11 +31,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -64,9 +57,6 @@ + + + +- +- +- + + + +@@ -75,11 +65,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -105,9 +91,6 @@ + + + +- +- +- + + + +@@ -116,11 +99,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/remote-recover-connection.summary b/pengine/test10/remote-recover-connection.summary +index b0433fe..8246cd9 100644 +--- a/pengine/test10/remote-recover-connection.summary ++++ b/pengine/test10/remote-recover-connection.summary +@@ -52,6 +52,9 @@ Transition Summary: + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) + + Executing cluster transition: ++ * Pseudo action: messaging-1_stop_0 ++ * Pseudo action: galera-0_stop_0 ++ * Pseudo action: galera-2_stop_0 + * Pseudo action: redis-master_pre_notify_stop_0 + * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 +@@ -61,9 +64,12 @@ Executing cluster transition: + * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 + * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 + * Fencing controller-1 (reboot) +- * Pseudo action: messaging-1_stop_0 +- * Pseudo action: galera-0_stop_0 +- * Pseudo action: galera-2_stop_0 ++ * Resource action: messaging-1 start on controller-2 ++ * Resource action: galera-0 start on controller-2 ++ * Resource 
action: galera-2 start on controller-2 ++ * Resource action: rabbitmq monitor=10000 on messaging-1 ++ * Resource action: galera monitor=10000 on galera-2 ++ * Resource action: galera monitor=10000 on galera-0 + * Pseudo action: redis_post_notify_stop_0 + * Resource action: redis notify on controller-0 + * Resource action: redis notify on controller-2 +@@ -72,20 +78,14 @@ Executing cluster transition: + * Pseudo action: haproxy-clone_stop_0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 + * Pseudo action: stonith_complete +- * Resource action: messaging-1 start on controller-2 +- * Resource action: galera-0 start on controller-2 +- * Resource action: galera-2 start on controller-2 +- * Resource action: rabbitmq monitor=10000 on messaging-1 +- * Resource action: galera monitor=10000 on galera-2 +- * Resource action: galera monitor=10000 on galera-0 ++ * Resource action: messaging-1 monitor=20000 on controller-2 ++ * Resource action: galera-0 monitor=20000 on controller-2 ++ * Resource action: galera-2 monitor=20000 on controller-2 + * Pseudo action: redis_stop_0 + * Pseudo action: redis-master_stopped_0 + * Pseudo action: haproxy_stop_0 + * Pseudo action: haproxy-clone_stopped_0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 +- * Resource action: messaging-1 monitor=20000 on controller-2 +- * Resource action: galera-0 monitor=20000 on controller-2 +- * Resource action: galera-2 monitor=20000 on controller-2 + * Pseudo action: redis-master_post_notify_stopped_0 + * Pseudo action: ip-172.17.1.14_stop_0 + * Pseudo action: ip-172.17.1.17_stop_0 +diff --git a/pengine/test10/remote-recover-fail.dot b/pengine/test10/remote-recover-fail.dot +index 7b6edaa..3375687 100644 +--- a/pengine/test10/remote-recover-fail.dot ++++ b/pengine/test10/remote-recover-fail.dot +@@ -18,6 +18,7 @@ + "FAKE6_stop_0 rhel7-auto4" -> "all_stopped" [ style = bold] + "FAKE6_stop_0 rhel7-auto4" -> "rhel7-auto4_stop_0 rhel7-auto2" [ style = bold] + "FAKE6_stop_0 rhel7-auto4" [ style=bold color="green" fontcolor="orange"] ++"all_stopped" -> "rhel7-auto4_start_0 rhel7-auto2" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "rhel7-auto4_monitor_60000 rhel7-auto2" [ style=bold color="green" fontcolor="black"] + "rhel7-auto4_start_0 rhel7-auto2" -> "rhel7-auto4_monitor_60000 rhel7-auto2" [ style = bold] +@@ -33,6 +34,5 @@ + "stonith_complete" -> "FAKE2_start_0 rhel7-auto3" [ style = bold] + "stonith_complete" -> "FAKE6_start_0 rhel7-auto2" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "rhel7-auto4_start_0 rhel7-auto2" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/remote-recover-fail.exp b/pengine/test10/remote-recover-fail.exp +index bd014ae..f908566 100644 +--- a/pengine/test10/remote-recover-fail.exp ++++ b/pengine/test10/remote-recover-fail.exp +@@ -24,7 +24,7 @@ + + + +- ++ + + + +diff --git a/pengine/test10/remote-recover-no-resources.dot b/pengine/test10/remote-recover-no-resources.dot +index 1e16221..8c2f783 100644 +--- a/pengine/test10/remote-recover-no-resources.dot ++++ b/pengine/test10/remote-recover-no-resources.dot +@@ -1,4 +1,5 @@ + digraph "g" { ++"all_stopped" -> "galera-0_start_0 controller-2" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] + 
"all_stopped" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] +@@ -98,7 +99,6 @@ digraph "g" { + "redis_stop_0 controller-1" -> "all_stopped" [ style = bold] + "redis_stop_0 controller-1" -> "redis-master_stopped_0" [ style = bold] + "redis_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] +-"stonith 'reboot' controller-1" -> "galera-0_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "galera-2_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy-clone_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy_stop_0 controller-1" [ style = bold] +@@ -135,7 +135,6 @@ digraph "g" { + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "galera-0_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.17_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] +diff --git a/pengine/test10/remote-recover-no-resources.exp b/pengine/test10/remote-recover-no-resources.exp +index 987acfd..0a57e27 100644 +--- a/pengine/test10/remote-recover-no-resources.exp ++++ b/pengine/test10/remote-recover-no-resources.exp +@@ -36,10 +36,10 @@ + + + +- ++ + + +- ++ + + + +@@ -49,11 +49,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/remote-recover-no-resources.summary b/pengine/test10/remote-recover-no-resources.summary +index b682e5f..bed02d0 100644 +--- a/pengine/test10/remote-recover-no-resources.summary ++++ b/pengine/test10/remote-recover-no-resources.summary +@@ -54,12 +54,12 @@ Transition Summary: + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) + + Executing cluster transition: ++ * Pseudo action: galera-0_stop_0 + * Pseudo action: redis-master_pre_notify_stop_0 + * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 + * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 + * Fencing controller-1 (reboot) +- * Pseudo action: galera-0_stop_0 + * Pseudo action: galera-2_stop_0 + * Pseudo action: redis_post_notify_stop_0 + * Resource action: redis notify on controller-0 +@@ -69,15 +69,12 @@ Executing cluster transition: + * Pseudo action: haproxy-clone_stop_0 + * Fencing messaging-1 (reboot) + * Pseudo action: stonith_complete +- * Resource action: galera-0 start on controller-2 + * Pseudo action: rabbitmq_post_notify_stop_0 + * Pseudo action: rabbitmq-clone_stop_0 +- * Resource action: galera monitor=10000 on galera-0 + * Pseudo action: redis_stop_0 + * Pseudo action: redis-master_stopped_0 + * Pseudo action: haproxy_stop_0 + * Pseudo action: haproxy-clone_stopped_0 +- * Resource action: galera-0 monitor=20000 on controller-2 + * Resource action: rabbitmq notify on messaging-2 + * Resource action: rabbitmq notify on messaging-0 + * Pseudo action: rabbitmq_notified_0 +@@ -99,11 +96,14 @@ Executing cluster transition: + * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 + * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 + * Pseudo action: all_stopped ++ * Resource action: galera-0 start on controller-2 ++ * 
Resource action: galera monitor=10000 on galera-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 ++ * Resource action: galera-0 monitor=20000 on controller-2 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 + Using the original execution date of: 2017-05-03 13:33:24Z + +diff --git a/pengine/test10/remote-recover-unknown.dot b/pengine/test10/remote-recover-unknown.dot +index a8b4e18..8ce59b4 100644 +--- a/pengine/test10/remote-recover-unknown.dot ++++ b/pengine/test10/remote-recover-unknown.dot +@@ -1,4 +1,5 @@ + digraph "g" { ++"all_stopped" -> "galera-0_start_0 controller-2" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-5254005bdbb5_start_0 controller-2" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-525400b4f6bd_start_0 controller-0" [ style = bold] + "all_stopped" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] +@@ -99,7 +100,6 @@ digraph "g" { + "redis_stop_0 controller-1" -> "all_stopped" [ style = bold] + "redis_stop_0 controller-1" -> "redis-master_stopped_0" [ style = bold] + "redis_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] +-"stonith 'reboot' controller-1" -> "galera-0_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "galera-2_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy-clone_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy_stop_0 controller-1" [ style = bold] +@@ -138,7 +138,6 @@ digraph "g" { + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "galera-0_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.17_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] +diff --git a/pengine/test10/remote-recover-unknown.exp b/pengine/test10/remote-recover-unknown.exp +index b8d51be..0d7b318 100644 +--- a/pengine/test10/remote-recover-unknown.exp ++++ b/pengine/test10/remote-recover-unknown.exp +@@ -36,10 +36,10 @@ + + + +- ++ + + +- ++ + + + +@@ -49,11 +49,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/remote-recover-unknown.summary b/pengine/test10/remote-recover-unknown.summary +index 09f10d8..d47f174 100644 +--- a/pengine/test10/remote-recover-unknown.summary ++++ b/pengine/test10/remote-recover-unknown.summary +@@ -55,12 +55,12 @@ Transition Summary: + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) + + Executing cluster transition: ++ * Pseudo action: galera-0_stop_0 + * Pseudo action: redis-master_pre_notify_stop_0 + * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0 + * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 + * Fencing controller-1 (reboot) +- * 
Pseudo action: galera-0_stop_0 + * Pseudo action: galera-2_stop_0 + * Pseudo action: redis_post_notify_stop_0 + * Resource action: redis notify on controller-0 +@@ -71,15 +71,12 @@ Executing cluster transition: + * Fencing galera-2 (reboot) + * Fencing messaging-1 (reboot) + * Pseudo action: stonith_complete +- * Resource action: galera-0 start on controller-2 + * Pseudo action: rabbitmq_post_notify_stop_0 + * Pseudo action: rabbitmq-clone_stop_0 +- * Resource action: galera monitor=10000 on galera-0 + * Pseudo action: redis_stop_0 + * Pseudo action: redis-master_stopped_0 + * Pseudo action: haproxy_stop_0 + * Pseudo action: haproxy-clone_stopped_0 +- * Resource action: galera-0 monitor=20000 on controller-2 + * Resource action: rabbitmq notify on messaging-2 + * Resource action: rabbitmq notify on messaging-0 + * Pseudo action: rabbitmq_notified_0 +@@ -101,11 +98,14 @@ Executing cluster transition: + * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 + * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 + * Pseudo action: all_stopped ++ * Resource action: galera-0 start on controller-2 ++ * Resource action: galera monitor=10000 on galera-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0 + * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 ++ * Resource action: galera-0 monitor=20000 on controller-2 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 + Using the original execution date of: 2017-05-03 13:33:24Z + +diff --git a/pengine/test10/remote-recovery.dot b/pengine/test10/remote-recovery.dot +index d6fdefe..6cd342f 100644 +--- a/pengine/test10/remote-recovery.dot ++++ b/pengine/test10/remote-recovery.dot +@@ -89,14 +89,11 @@ digraph "g" { + "redis_stop_0 controller-1" -> "all_stopped" [ style = bold] + "redis_stop_0 controller-1" -> "redis-master_stopped_0" [ style = bold] + "redis_stop_0 controller-1" [ style=bold color="green" fontcolor="orange"] +-"stonith 'reboot' controller-1" -> "galera-0_stop_0 controller-1" [ style = bold] +-"stonith 'reboot' controller-1" -> "galera-2_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy-clone_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "haproxy_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "ip-172.17.1.14_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "ip-172.17.1.17_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "ip-172.17.4.11_stop_0 controller-1" [ style = bold] +-"stonith 'reboot' controller-1" -> "messaging-1_stop_0 controller-1" [ style = bold] + "stonith 'reboot' controller-1" -> "redis-master_stop_0" [ style = bold] + "stonith 'reboot' controller-1" -> "redis_post_notify_stonith_0" [ style = bold] + "stonith 'reboot' controller-1" -> "redis_stop_0 controller-1" [ style = bold] +@@ -121,11 +118,8 @@ digraph "g" { + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" -> "stonith-fence_ipmilan-525400bbf613_start_0 controller-0" [ style = bold] + "stonith-fence_ipmilan-525400bbf613_stop_0 controller-0" [ style=bold color="green" fontcolor="black"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "galera-0_start_0 
controller-2" [ style = bold] +-"stonith_complete" -> "galera-2_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.14_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.1.17_start_0 controller-2" [ style = bold] + "stonith_complete" -> "ip-172.17.4.11_start_0 controller-2" [ style = bold] +-"stonith_complete" -> "messaging-1_start_0 controller-2" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/remote-recovery.exp b/pengine/test10/remote-recovery.exp +index cf74efb..40338b4 100644 +--- a/pengine/test10/remote-recovery.exp ++++ b/pengine/test10/remote-recovery.exp +@@ -23,9 +23,6 @@ + + + +- +- +- + + + +@@ -34,11 +31,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -64,9 +57,6 @@ + + + +- +- +- + + + +@@ -75,11 +65,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -105,9 +91,6 @@ + + + +- +- +- + + + +@@ -116,11 +99,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/remote-recovery.summary b/pengine/test10/remote-recovery.summary +index b0433fe..8246cd9 100644 +--- a/pengine/test10/remote-recovery.summary ++++ b/pengine/test10/remote-recovery.summary +@@ -52,6 +52,9 @@ Transition Summary: + * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) + + Executing cluster transition: ++ * Pseudo action: messaging-1_stop_0 ++ * Pseudo action: galera-0_stop_0 ++ * Pseudo action: galera-2_stop_0 + * Pseudo action: redis-master_pre_notify_stop_0 + * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0 + * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0 +@@ -61,9 +64,12 @@ Executing cluster transition: + * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0 + * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 + * Fencing controller-1 (reboot) +- * Pseudo action: messaging-1_stop_0 +- * Pseudo action: galera-0_stop_0 +- * Pseudo action: galera-2_stop_0 ++ * Resource action: messaging-1 start on controller-2 ++ * Resource action: galera-0 start on controller-2 ++ * Resource action: galera-2 start on controller-2 ++ * Resource action: rabbitmq monitor=10000 on messaging-1 ++ * Resource action: galera monitor=10000 on galera-2 ++ * Resource action: galera monitor=10000 on galera-0 + * Pseudo action: redis_post_notify_stop_0 + * Resource action: redis notify on controller-0 + * Resource action: redis notify on controller-2 +@@ -72,20 +78,14 @@ Executing cluster transition: + * Pseudo action: haproxy-clone_stop_0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 + * Pseudo action: stonith_complete +- * Resource action: messaging-1 start on controller-2 +- * Resource action: galera-0 start on controller-2 +- * Resource action: galera-2 start on controller-2 +- * Resource action: rabbitmq monitor=10000 on messaging-1 +- * Resource action: galera monitor=10000 on galera-2 +- * Resource action: galera monitor=10000 on galera-0 ++ * Resource action: messaging-1 monitor=20000 on controller-2 ++ * Resource action: galera-0 monitor=20000 on controller-2 ++ * Resource action: galera-2 monitor=20000 on controller-2 + * Pseudo action: redis_stop_0 + * Pseudo action: redis-master_stopped_0 + * Pseudo action: haproxy_stop_0 + * Pseudo action: haproxy-clone_stopped_0 + * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 +- * Resource action: messaging-1 monitor=20000 on controller-2 +- * Resource action: galera-0 monitor=20000 on controller-2 +- * 
Resource action: galera-2 monitor=20000 on controller-2 + * Pseudo action: redis-master_post_notify_stopped_0 + * Pseudo action: ip-172.17.1.14_stop_0 + * Pseudo action: ip-172.17.1.17_stop_0 +diff --git a/pengine/test10/remote-unclean2.dot b/pengine/test10/remote-unclean2.dot +index 3f8981b..2311a72 100644 +--- a/pengine/test10/remote-unclean2.dot ++++ b/pengine/test10/remote-unclean2.dot +@@ -1,4 +1,5 @@ + digraph "g" { ++"all_stopped" -> "rhel7-auto4_start_0 rhel7-auto1" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "rhel7-auto4_monitor_60000 rhel7-auto1" [ style=bold color="green" fontcolor="black"] + "rhel7-auto4_start_0 rhel7-auto1" -> "rhel7-auto4_monitor_60000 rhel7-auto1" [ style = bold] +@@ -9,6 +10,5 @@ + "stonith 'reboot' rhel7-auto4" -> "stonith_complete" [ style = bold] + "stonith 'reboot' rhel7-auto4" [ style=bold color="green" fontcolor="black"] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "rhel7-auto4_start_0 rhel7-auto1" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/remote-unclean2.exp b/pengine/test10/remote-unclean2.exp +index ca0b3ba..2c73d82 100644 +--- a/pengine/test10/remote-unclean2.exp ++++ b/pengine/test10/remote-unclean2.exp +@@ -11,7 +11,7 @@ + + + +- ++ + + + +diff --git a/pengine/test10/whitebox-fail1.dot b/pengine/test10/whitebox-fail1.dot +index bfff4bf..9b755f9 100644 +--- a/pengine/test10/whitebox-fail1.dot ++++ b/pengine/test10/whitebox-fail1.dot +@@ -26,6 +26,7 @@ digraph "g" { + "M_stop_0 lxc1" -> "M_start_0 lxc1" [ style = bold] + "M_stop_0 lxc1" -> "all_stopped" [ style = bold] + "M_stop_0 lxc1" [ style=bold color="green" fontcolor="orange"] ++"all_stopped" -> "lxc1_start_0 18node2" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "container1_start_0 18node2" -> "B_start_0 lxc1" [ style = bold] + "container1_start_0 18node2" -> "M_start_0 lxc1" [ style = bold] +@@ -55,6 +56,5 @@ digraph "g" { + "stonith_complete" -> "M_start_0 lxc1" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] + "stonith_complete" -> "container1_start_0 18node2" [ style = bold] +-"stonith_complete" -> "lxc1_start_0 18node2" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/whitebox-fail1.exp b/pengine/test10/whitebox-fail1.exp +index 901a1e3..1532c6e 100644 +--- a/pengine/test10/whitebox-fail1.exp ++++ b/pengine/test10/whitebox-fail1.exp +@@ -227,13 +227,13 @@ + + + +- ++ + + +- ++ + + +- ++ + + + +diff --git a/pengine/test10/whitebox-fail1.summary b/pengine/test10/whitebox-fail1.summary +index d1f3480..a5b85dd 100644 +--- a/pengine/test10/whitebox-fail1.summary ++++ b/pengine/test10/whitebox-fail1.summary +@@ -31,17 +31,17 @@ Executing cluster transition: + * Resource action: container1 start on 18node2 + * Pseudo action: M-clone_stop_0 + * Pseudo action: B_stop_0 +- * Resource action: lxc1 start on 18node2 +- * Resource action: lxc1 monitor=30000 on 18node2 + * Pseudo action: M_stop_0 + * Pseudo action: M-clone_stopped_0 + * Pseudo action: M-clone_start_0 +- * Resource action: B start on lxc1 + * Pseudo action: all_stopped ++ * Resource action: lxc1 start on 18node2 ++ * Resource action: lxc1 monitor=30000 on 18node2 + * Resource action: M start on lxc1 + * Pseudo action: M-clone_running_0 +- * Resource action: B monitor=10000 on lxc1 ++ * Resource action: B start on lxc1 + * Resource action: M monitor=10000 on lxc1 ++ * 
Resource action: B monitor=10000 on lxc1 + + Revised cluster status: + Online: [ 18node1 18node2 18node3 ] +diff --git a/pengine/test10/whitebox-fail2.dot b/pengine/test10/whitebox-fail2.dot +index bfff4bf..9b755f9 100644 +--- a/pengine/test10/whitebox-fail2.dot ++++ b/pengine/test10/whitebox-fail2.dot +@@ -26,6 +26,7 @@ digraph "g" { + "M_stop_0 lxc1" -> "M_start_0 lxc1" [ style = bold] + "M_stop_0 lxc1" -> "all_stopped" [ style = bold] + "M_stop_0 lxc1" [ style=bold color="green" fontcolor="orange"] ++"all_stopped" -> "lxc1_start_0 18node2" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "container1_start_0 18node2" -> "B_start_0 lxc1" [ style = bold] + "container1_start_0 18node2" -> "M_start_0 lxc1" [ style = bold] +@@ -55,6 +56,5 @@ digraph "g" { + "stonith_complete" -> "M_start_0 lxc1" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] + "stonith_complete" -> "container1_start_0 18node2" [ style = bold] +-"stonith_complete" -> "lxc1_start_0 18node2" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/whitebox-fail2.exp b/pengine/test10/whitebox-fail2.exp +index 901a1e3..1532c6e 100644 +--- a/pengine/test10/whitebox-fail2.exp ++++ b/pengine/test10/whitebox-fail2.exp +@@ -227,13 +227,13 @@ + + + +- ++ + + +- ++ + + +- ++ + + + +diff --git a/pengine/test10/whitebox-fail2.summary b/pengine/test10/whitebox-fail2.summary +index ebf6c51..afee261 100644 +--- a/pengine/test10/whitebox-fail2.summary ++++ b/pengine/test10/whitebox-fail2.summary +@@ -31,17 +31,17 @@ Executing cluster transition: + * Resource action: container1 start on 18node2 + * Pseudo action: M-clone_stop_0 + * Pseudo action: B_stop_0 +- * Resource action: lxc1 start on 18node2 +- * Resource action: lxc1 monitor=30000 on 18node2 + * Pseudo action: M_stop_0 + * Pseudo action: M-clone_stopped_0 + * Pseudo action: M-clone_start_0 +- * Resource action: B start on lxc1 + * Pseudo action: all_stopped ++ * Resource action: lxc1 start on 18node2 ++ * Resource action: lxc1 monitor=30000 on 18node2 + * Resource action: M start on lxc1 + * Pseudo action: M-clone_running_0 +- * Resource action: B monitor=10000 on lxc1 ++ * Resource action: B start on lxc1 + * Resource action: M monitor=10000 on lxc1 ++ * Resource action: B monitor=10000 on lxc1 + + Revised cluster status: + Online: [ 18node1 18node2 18node3 ] +diff --git a/pengine/test10/whitebox-imply-stop-on-fence.dot b/pengine/test10/whitebox-imply-stop-on-fence.dot +index 7b536ea..1ef3cba 100644 +--- a/pengine/test10/whitebox-imply-stop-on-fence.dot ++++ b/pengine/test10/whitebox-imply-stop-on-fence.dot +@@ -18,6 +18,8 @@ + "R-lxc-02_kiff-01_stop_0 kiff-01" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold] + "R-lxc-02_kiff-01_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"] + "all_stopped" -> "fence-kiff-02_start_0 kiff-02" [ style = bold] ++"all_stopped" -> "lxc-01_kiff-01_start_0 kiff-02" [ style = bold] ++"all_stopped" -> "lxc-02_kiff-01_start_0 kiff-02" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "clvmd-clone_stop_0" -> "clvmd-clone_stopped_0" [ style = bold] + "clvmd-clone_stop_0" -> "clvmd_stop_0 kiff-01" [ style = bold] +@@ -78,8 +80,6 @@ + "stonith 'reboot' kiff-01" -> "clvmd_stop_0 kiff-01" [ style = bold] + "stonith 'reboot' kiff-01" -> "dlm-clone_stop_0" [ style = bold] + "stonith 'reboot' kiff-01" -> "dlm_stop_0 kiff-01" [ style = bold] +-"stonith 'reboot' kiff-01" -> "lxc-01_kiff-01_stop_0 kiff-01" [ style 
= bold] +-"stonith 'reboot' kiff-01" -> "lxc-02_kiff-01_stop_0 kiff-01" [ style = bold] + "stonith 'reboot' kiff-01" -> "shared0-clone_stop_0" [ style = bold] + "stonith 'reboot' kiff-01" -> "shared0_stop_0 kiff-01" [ style = bold] + "stonith 'reboot' kiff-01" -> "stonith 'reboot' lxc-01_kiff-01" [ style = bold] +@@ -94,8 +94,6 @@ + "stonith_complete" -> "R-lxc-01_kiff-01_start_0 kiff-02" [ style = bold] + "stonith_complete" -> "R-lxc-02_kiff-01_start_0 kiff-02" [ style = bold] + "stonith_complete" -> "all_stopped" [ style = bold] +-"stonith_complete" -> "lxc-01_kiff-01_start_0 kiff-02" [ style = bold] +-"stonith_complete" -> "lxc-02_kiff-01_start_0 kiff-02" [ style = bold] + "stonith_complete" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + "vm-fs_monitor_0 lxc-01_kiff-02" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold] +diff --git a/pengine/test10/whitebox-imply-stop-on-fence.exp b/pengine/test10/whitebox-imply-stop-on-fence.exp +index f80dde1..c73d1d2 100644 +--- a/pengine/test10/whitebox-imply-stop-on-fence.exp ++++ b/pengine/test10/whitebox-imply-stop-on-fence.exp +@@ -421,13 +421,13 @@ + + + +- ++ + + +- ++ + + +- ++ + + + +@@ -437,11 +437,7 @@ + + + +- +- +- +- +- ++ + + + +@@ -465,13 +461,13 @@ + + + +- ++ + + +- ++ + + +- ++ + + + +@@ -481,11 +477,7 @@ + + + +- +- +- +- +- ++ + + + +diff --git a/pengine/test10/whitebox-imply-stop-on-fence.summary b/pengine/test10/whitebox-imply-stop-on-fence.summary +index d272b25..5ce580e 100644 +--- a/pengine/test10/whitebox-imply-stop-on-fence.summary ++++ b/pengine/test10/whitebox-imply-stop-on-fence.summary +@@ -48,29 +48,23 @@ Executing cluster transition: + * Resource action: shared0 monitor on lxc-01_kiff-02 + * Resource action: vm-fs monitor on lxc-02_kiff-02 + * Resource action: vm-fs monitor on lxc-01_kiff-02 +- * Fencing kiff-01 (reboot) + * Pseudo action: lxc-01_kiff-01_stop_0 + * Pseudo action: lxc-02_kiff-01_stop_0 ++ * Fencing kiff-01 (reboot) ++ * Pseudo action: R-lxc-01_kiff-01_stop_0 ++ * Pseudo action: R-lxc-02_kiff-01_stop_0 + * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01 + * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01 + * Pseudo action: stonith_complete +- * Pseudo action: R-lxc-01_kiff-01_stop_0 +- * Pseudo action: R-lxc-02_kiff-01_stop_0 +- * Pseudo action: vm-fs_stop_0 + * Pseudo action: shared0-clone_stop_0 + * Resource action: R-lxc-01_kiff-01 start on kiff-02 + * Resource action: R-lxc-02_kiff-01 start on kiff-02 +- * Resource action: lxc-01_kiff-01 start on kiff-02 +- * Resource action: lxc-02_kiff-01 start on kiff-02 ++ * Pseudo action: vm-fs_stop_0 + * Pseudo action: shared0_stop_0 + * Pseudo action: shared0-clone_stopped_0 + * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02 + * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02 +- * Resource action: vm-fs start on lxc-01_kiff-01 +- * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02 +- * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02 + * Pseudo action: clvmd-clone_stop_0 +- * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01 + * Pseudo action: clvmd_stop_0 + * Pseudo action: clvmd-clone_stopped_0 + * Pseudo action: dlm-clone_stop_0 +@@ -78,7 +72,13 @@ Executing cluster transition: + * Pseudo action: dlm-clone_stopped_0 + * Pseudo action: all_stopped + * Resource action: fence-kiff-02 start on kiff-02 ++ * Resource action: lxc-01_kiff-01 start on kiff-02 ++ * Resource action: lxc-02_kiff-01 start on kiff-02 + * 
Resource action: fence-kiff-02 monitor=60000 on kiff-02 ++ * Resource action: vm-fs start on lxc-01_kiff-01 ++ * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02 ++ * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02 ++ * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01 + + Revised cluster status: + Online: [ kiff-02 ] +diff --git a/pengine/test10/whitebox-ms-ordering.dot b/pengine/test10/whitebox-ms-ordering.dot +index 1f4d95b..d5112b9 100644 +--- a/pengine/test10/whitebox-ms-ordering.dot ++++ b/pengine/test10/whitebox-ms-ordering.dot +@@ -1,4 +1,6 @@ + digraph "g" { ++"all_stopped" -> "lxc1_start_0 18node1" [ style = bold] ++"all_stopped" -> "lxc2_start_0 18node1" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "container1_monitor_0 18node1" -> "container1_start_0 18node1" [ style = bold] + "container1_monitor_0 18node1" [ style=bold color="green" fontcolor="black"] +@@ -106,7 +108,5 @@ + "stonith_complete" -> "lxc-ms_promote_0 lxc1" [ style = bold] + "stonith_complete" -> "lxc-ms_start_0 lxc1" [ style = bold] + "stonith_complete" -> "lxc-ms_start_0 lxc2" [ style = bold] +-"stonith_complete" -> "lxc1_start_0 18node1" [ style = bold] +-"stonith_complete" -> "lxc2_start_0 18node1" [ style = bold] + "stonith_complete" [ style=bold color="green" fontcolor="orange"] + } +diff --git a/pengine/test10/whitebox-ms-ordering.exp b/pengine/test10/whitebox-ms-ordering.exp +index c8fee5e..d5608e4 100644 +--- a/pengine/test10/whitebox-ms-ordering.exp ++++ b/pengine/test10/whitebox-ms-ordering.exp +@@ -419,6 +419,9 @@ + + + ++ ++ ++ + + + +@@ -430,9 +433,6 @@ + + + +- +- +- + + + +@@ -484,6 +484,9 @@ + + + ++ ++ ++ + + + +@@ -495,9 +498,6 @@ + + + +- +- +- + + + +diff --git a/pengine/test10/whitebox-ms-ordering.summary b/pengine/test10/whitebox-ms-ordering.summary +index d8ff62c..46fe9d1 100644 +--- a/pengine/test10/whitebox-ms-ordering.summary ++++ b/pengine/test10/whitebox-ms-ordering.summary +@@ -43,18 +43,18 @@ Executing cluster transition: + * Pseudo action: lxc-ms_demote_0 + * Pseudo action: lxc-ms-master_demoted_0 + * Pseudo action: lxc-ms-master_stop_0 +- * Resource action: lxc1 start on 18node1 +- * Resource action: lxc2 start on 18node1 + * Pseudo action: lxc-ms_stop_0 + * Pseudo action: lxc-ms_stop_0 + * Pseudo action: lxc-ms-master_stopped_0 + * Pseudo action: lxc-ms-master_start_0 +- * Resource action: lxc1 monitor=30000 on 18node1 +- * Resource action: lxc2 monitor=30000 on 18node1 + * Pseudo action: all_stopped ++ * Resource action: lxc1 start on 18node1 ++ * Resource action: lxc2 start on 18node1 + * Resource action: lxc-ms start on lxc1 + * Resource action: lxc-ms start on lxc2 + * Pseudo action: lxc-ms-master_running_0 ++ * Resource action: lxc1 monitor=30000 on 18node1 ++ * Resource action: lxc2 monitor=30000 on 18node1 + * Resource action: lxc-ms monitor=10000 on lxc2 + * Pseudo action: lxc-ms-master_promote_0 + * Resource action: lxc-ms promote on lxc1 +diff --git a/pengine/test10/whitebox-unexpectedly-running.dot b/pengine/test10/whitebox-unexpectedly-running.dot +index fa1171e..f16e705 100644 +--- a/pengine/test10/whitebox-unexpectedly-running.dot ++++ b/pengine/test10/whitebox-unexpectedly-running.dot +@@ -8,6 +8,8 @@ + "FAKE-crashed_stop_0 18builder" -> "stonith 'reboot' remote2" [ style = bold] + "FAKE-crashed_stop_0 18builder" [ style=bold color="green" fontcolor="black"] + "FAKE_monitor_60000 18builder" [ style=bold color="green" fontcolor="black"] ++"all_stopped" -> "remote1_start_0 18builder" [ style = bold] 
++"all_stopped" -> "remote2_start_0 18builder" [ style = bold] + "all_stopped" [ style=bold color="green" fontcolor="orange"] + "remote1_monitor_0 18builder" -> "remote1_start_0 18builder" [ style = bold] + "remote1_monitor_0 18builder" [ style=bold color="green" fontcolor="black"] +diff --git a/pengine/test10/whitebox-unexpectedly-running.exp b/pengine/test10/whitebox-unexpectedly-running.exp +index c4e13b9..46376a5 100644 +--- a/pengine/test10/whitebox-unexpectedly-running.exp ++++ b/pengine/test10/whitebox-unexpectedly-running.exp +@@ -65,6 +65,9 @@ + + + ++ ++ ++ + + + +@@ -100,6 +103,9 @@ + + + ++ ++ ++ + + + +-- +1.8.3.1 + + +From b04b392925daa70af17f2abdef9a6198127c5608 Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Fri, 8 Dec 2017 13:53:36 +1100 +Subject: [PATCH 11/16] Fix: PE: Ensure stop operations occur after stopped + remote connections have been brought up + +--- + pengine/allocate.c | 55 +++++++++++++++++++++++++++++++++++++++--------------- + 1 file changed, 40 insertions(+), 15 deletions(-) + +diff --git a/pengine/allocate.c b/pengine/allocate.c +index 2ae491c..1c95e97 100644 +--- a/pengine/allocate.c ++++ b/pengine/allocate.c +@@ -48,6 +48,25 @@ enum remote_connection_state { + remote_state_stopped = 4 + }; + ++static const char * ++state2text(enum remote_connection_state state) ++{ ++ switch (state) { ++ case remote_state_unknown: ++ return "unknown"; ++ case remote_state_alive: ++ return "alive"; ++ case remote_state_resting: ++ return "resting"; ++ case remote_state_failed: ++ return "failed"; ++ case remote_state_stopped: ++ return "stopped"; ++ } ++ ++ return "impossible"; ++} ++ + resource_alloc_functions_t resource_class_alloc_functions[] = { + { + native_merge_weights, +@@ -2011,10 +2030,10 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set) + cluster_node = remote_rsc->running_on->data; + } + +- crm_trace("Order %s action %s relative to %s%s (state %d)", ++ crm_trace("Order %s action %s relative to %s%s (state: %s)", + action->task, action->uuid, + is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", +- remote_rsc->id, state); ++ remote_rsc->id, state2text(state)); + + if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) + || safe_str_eq(action->task, CRMD_ACTION_MIGRATE)) { +@@ -2042,23 +2061,29 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set) + /* Handle special case with remote node where stop actions need to be + * ordered after the connection resource starts somewhere else. + */ +- if(state == remote_state_resting) { +- /* Wait for the connection resource to be up and assume everything is as we left it */ +- order_start_then_action(remote_rsc, action, pe_order_none, +- data_set); ++ if(state == remote_state_alive) { ++ order_action_then_stop(action, remote_rsc, ++ pe_order_implies_first, data_set); + +- } else { +- if(state == remote_state_failed) { +- /* We would only be here if the resource is +- * running on the remote node. Since we have no +- * way to stop it, it is necessary to fence the +- * node. +- */ +- pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable"); +- } ++ } else if(state == remote_state_failed) { ++ /* We would only be here if the resource is ++ * running on the remote node. Since we have no ++ * way to stop it, it is necessary to fence the ++ * node. 
++ */ ++ pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable"); ++ order_action_then_stop(action, remote_rsc, ++ pe_order_implies_first, data_set); + ++ } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) { ++ /* If its not coming back up, better do what we need first */ + order_action_then_stop(action, remote_rsc, + pe_order_implies_first, data_set); ++ ++ } else { ++ /* Wait for the connection resource to be up and assume everything is as we left it */ ++ order_start_then_action(remote_rsc, action, pe_order_none, data_set); ++ + } + break; + +-- +1.8.3.1 + + +From 502770c763807bc7ac19bf2b2fd50d4a0e195df0 Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Fri, 8 Dec 2017 13:56:03 +1100 +Subject: [PATCH 12/16] Test: PE: Ensure stop operations occur after stopped + remote connections have been brought up + +--- + pengine/regression.sh | 1 + + pengine/test10/bundle-order-stop-on-remote.dot | 307 ++++ + pengine/test10/bundle-order-stop-on-remote.exp | 1607 ++++++++++++++++++++ + pengine/test10/bundle-order-stop-on-remote.scores | 934 ++++++++++++ + pengine/test10/bundle-order-stop-on-remote.summary | 224 +++ + pengine/test10/bundle-order-stop-on-remote.xml | 1165 ++++++++++++++ + 6 files changed, 4238 insertions(+) + create mode 100644 pengine/test10/bundle-order-stop-on-remote.dot + create mode 100644 pengine/test10/bundle-order-stop-on-remote.exp + create mode 100644 pengine/test10/bundle-order-stop-on-remote.scores + create mode 100644 pengine/test10/bundle-order-stop-on-remote.summary + create mode 100644 pengine/test10/bundle-order-stop-on-remote.xml + +diff --git a/pengine/regression.sh b/pengine/regression.sh +index 47cf0ba..cf1824a 100755 +--- a/pengine/regression.sh ++++ b/pengine/regression.sh +@@ -819,6 +819,7 @@ do_test bundle-order-partial-start "Bundle startup ordering when some dependanci + do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependancies and the container are already running" + do_test bundle-order-stop "Bundle stop ordering" + do_test bundle-order-partial-stop "Bundle startup ordering when some dependancies are already stopped" ++do_test bundle-order-stop-on-remote "Stop nested resource after bringing up the connection" + + do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted" + do_test bundle-order-startup-clone-2 "Bundle startup with clones" +diff --git a/pengine/test10/bundle-order-stop-on-remote.dot b/pengine/test10/bundle-order-stop-on-remote.dot +new file mode 100644 +index 0000000..f0b6336 +--- /dev/null ++++ b/pengine/test10/bundle-order-stop-on-remote.dot +@@ -0,0 +1,307 @@ ++digraph "g" { ++"all_stopped" [ style=bold color="green" fontcolor="orange"] ++"database-0_monitor_20000 controller-0" [ style=bold color="green" fontcolor="black"] ++"database-0_start_0 controller-0" -> "database-0_monitor_20000 controller-0" [ style = bold] ++"database-0_start_0 controller-0" -> "galera-bundle-docker-0_monitor_60000 database-0" [ style = bold] ++"database-0_start_0 controller-0" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] ++"database-0_start_0 controller-0" -> "galera-bundle-docker-0_stop_0 database-0" [ style = bold] ++"database-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] ++"database-2_monitor_20000 controller-1" [ style=bold color="green" fontcolor="black"] ++"database-2_start_0 controller-1" -> "database-2_monitor_20000 controller-1" [ style = bold] ++"database-2_start_0 controller-1" -> 
"galera-bundle-docker-2_monitor_60000 database-2" [ style = bold] ++"database-2_start_0 controller-1" -> "galera-bundle-docker-2_start_0 database-2" [ style = bold] ++"database-2_start_0 controller-1" -> "galera-bundle-docker-2_stop_0 database-2" [ style = bold] ++"database-2_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-0_monitor_0 controller-1" -> "galera-bundle-0_start_0 controller-0" [ style = bold] ++"galera-bundle-0_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-0_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-0_start_0 controller-0" -> "galera-bundle-0_monitor_60000 controller-0" [ style = bold] ++"galera-bundle-0_start_0 controller-0" -> "galera_monitor_10000 galera-bundle-0" [ style = bold] ++"galera-bundle-0_start_0 controller-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] ++"galera-bundle-0_start_0 controller-0" -> "galera_start_0 galera-bundle-0" [ style = bold] ++"galera-bundle-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-2_monitor_0 controller-1" -> "galera-bundle-2_start_0 controller-1" [ style = bold] ++"galera-bundle-2_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-2_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-2_start_0 controller-1" -> "galera-bundle-2_monitor_60000 controller-1" [ style = bold] ++"galera-bundle-2_start_0 controller-1" -> "galera_monitor_10000 galera-bundle-2" [ style = bold] ++"galera-bundle-2_start_0 controller-1" -> "galera_promote_0 galera-bundle-2" [ style = bold] ++"galera-bundle-2_start_0 controller-1" -> "galera_start_0 galera-bundle-2" [ style = bold] ++"galera-bundle-2_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-0_monitor_60000 database-0" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-0_start_0 database-0" -> "galera-bundle-0_monitor_0 controller-1" [ style = bold] ++"galera-bundle-docker-0_start_0 database-0" -> "galera-bundle-0_start_0 controller-0" [ style = bold] ++"galera-bundle-docker-0_start_0 database-0" -> "galera-bundle-docker-0_monitor_60000 database-0" [ style = bold] ++"galera-bundle-docker-0_start_0 database-0" -> "galera-bundle_running_0" [ style = bold] ++"galera-bundle-docker-0_start_0 database-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] ++"galera-bundle-docker-0_start_0 database-0" -> "galera_start_0 galera-bundle-0" [ style = bold] ++"galera-bundle-docker-0_start_0 database-0" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-0_stop_0 database-0" -> "all_stopped" [ style = bold] ++"galera-bundle-docker-0_stop_0 database-0" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] ++"galera-bundle-docker-0_stop_0 database-0" -> "galera-bundle_stopped_0" [ style = bold] ++"galera-bundle-docker-0_stop_0 database-0" -> "stonith 'reboot' galera-bundle-0" [ style = bold] ++"galera-bundle-docker-0_stop_0 database-0" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-2_monitor_60000 database-2" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-2_start_0 database-2" -> "galera-bundle-2_monitor_0 controller-1" [ style = bold] ++"galera-bundle-docker-2_start_0 database-2" -> "galera-bundle-2_start_0 controller-1" [ style = bold] ++"galera-bundle-docker-2_start_0 database-2" -> "galera-bundle-docker-2_monitor_60000 database-2" [ style 
= bold] ++"galera-bundle-docker-2_start_0 database-2" -> "galera-bundle_running_0" [ style = bold] ++"galera-bundle-docker-2_start_0 database-2" -> "galera_promote_0 galera-bundle-2" [ style = bold] ++"galera-bundle-docker-2_start_0 database-2" -> "galera_start_0 galera-bundle-2" [ style = bold] ++"galera-bundle-docker-2_start_0 database-2" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-docker-2_stop_0 database-2" -> "all_stopped" [ style = bold] ++"galera-bundle-docker-2_stop_0 database-2" -> "galera-bundle-docker-2_start_0 database-2" [ style = bold] ++"galera-bundle-docker-2_stop_0 database-2" -> "galera-bundle_stopped_0" [ style = bold] ++"galera-bundle-docker-2_stop_0 database-2" -> "stonith 'reboot' galera-bundle-2" [ style = bold] ++"galera-bundle-docker-2_stop_0 database-2" [ style=bold color="green" fontcolor="black"] ++"galera-bundle-master_demote_0" -> "galera-bundle-master_demoted_0" [ style = bold] ++"galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-0" [ style = bold] ++"galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-2" [ style = bold] ++"galera-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_demoted_0" -> "galera-bundle-master_promote_0" [ style = bold] ++"galera-bundle-master_demoted_0" -> "galera-bundle-master_start_0" [ style = bold] ++"galera-bundle-master_demoted_0" -> "galera-bundle-master_stop_0" [ style = bold] ++"galera-bundle-master_demoted_0" -> "galera-bundle_demoted_0" [ style = bold] ++"galera-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_promote_0" -> "galera_promote_0 galera-bundle-0" [ style = bold] ++"galera-bundle-master_promote_0" -> "galera_promote_0 galera-bundle-2" [ style = bold] ++"galera-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_promoted_0" -> "galera-bundle_promoted_0" [ style = bold] ++"galera-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_running_0" -> "galera-bundle-master_promote_0" [ style = bold] ++"galera-bundle-master_running_0" -> "galera-bundle_running_0" [ style = bold] ++"galera-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_start_0" -> "galera-bundle-master_running_0" [ style = bold] ++"galera-bundle-master_start_0" -> "galera_start_0 galera-bundle-0" [ style = bold] ++"galera-bundle-master_start_0" -> "galera_start_0 galera-bundle-2" [ style = bold] ++"galera-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_stop_0" -> "galera-bundle-master_stopped_0" [ style = bold] ++"galera-bundle-master_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold] ++"galera-bundle-master_stop_0" -> "galera_stop_0 galera-bundle-2" [ style = bold] ++"galera-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle-master_stopped_0" -> "galera-bundle-master_promote_0" [ style = bold] ++"galera-bundle-master_stopped_0" -> "galera-bundle-master_start_0" [ style = bold] ++"galera-bundle-master_stopped_0" -> "galera-bundle_stopped_0" [ style = bold] ++"galera-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_demote_0" -> "galera-bundle-master_demote_0" [ style = bold] ++"galera-bundle_demote_0" -> "galera-bundle_demoted_0" [ style = bold] ++"galera-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_demoted_0" -> 
"galera-bundle_promote_0" [ style = bold] ++"galera-bundle_demoted_0" -> "galera-bundle_start_0" [ style = bold] ++"galera-bundle_demoted_0" -> "galera-bundle_stop_0" [ style = bold] ++"galera-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_promote_0" -> "galera-bundle-master_promote_0" [ style = bold] ++"galera-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_running_0" -> "galera-bundle_promote_0" [ style = bold] ++"galera-bundle_running_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_start_0" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] ++"galera-bundle_start_0" -> "galera-bundle-docker-2_start_0 database-2" [ style = bold] ++"galera-bundle_start_0" -> "galera-bundle-master_start_0" [ style = bold] ++"galera-bundle_start_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_stop_0" -> "galera-bundle-docker-0_stop_0 database-0" [ style = bold] ++"galera-bundle_stop_0" -> "galera-bundle-docker-2_stop_0 database-2" [ style = bold] ++"galera-bundle_stop_0" -> "galera-bundle-master_stop_0" [ style = bold] ++"galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold] ++"galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-2" [ style = bold] ++"galera-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] ++"galera-bundle_stopped_0" -> "galera-bundle_promote_0" [ style = bold] ++"galera-bundle_stopped_0" -> "galera-bundle_start_0" [ style = bold] ++"galera-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] ++"galera_demote_0 galera-bundle-0" -> "galera-bundle-master_demoted_0" [ style = bold] ++"galera_demote_0 galera-bundle-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] ++"galera_demote_0 galera-bundle-0" -> "galera_stop_0 galera-bundle-0" [ style = bold] ++"galera_demote_0 galera-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"galera_demote_0 galera-bundle-2" -> "galera-bundle-master_demoted_0" [ style = bold] ++"galera_demote_0 galera-bundle-2" -> "galera_promote_0 galera-bundle-2" [ style = bold] ++"galera_demote_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-2" [ style = bold] ++"galera_demote_0 galera-bundle-2" [ style=bold color="green" fontcolor="orange"] ++"galera_monitor_10000 galera-bundle-0" [ style=bold color="green" fontcolor="black"] ++"galera_monitor_10000 galera-bundle-2" [ style=bold color="green" fontcolor="black"] ++"galera_promote_0 galera-bundle-0" -> "galera-bundle-master_promoted_0" [ style = bold] ++"galera_promote_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = bold] ++"galera_promote_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] ++"galera_promote_0 galera-bundle-2" -> "galera-bundle-master_promoted_0" [ style = bold] ++"galera_promote_0 galera-bundle-2" -> "galera_monitor_10000 galera-bundle-2" [ style = bold] ++"galera_promote_0 galera-bundle-2" [ style=bold color="green" fontcolor="black"] ++"galera_start_0 galera-bundle-0" -> "galera-bundle-master_running_0" [ style = bold] ++"galera_start_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = bold] ++"galera_start_0 galera-bundle-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] ++"galera_start_0 galera-bundle-0" -> "galera_start_0 galera-bundle-2" [ style = bold] ++"galera_start_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] ++"galera_start_0 galera-bundle-2" -> 
"galera-bundle-master_running_0" [ style = bold] ++"galera_start_0 galera-bundle-2" -> "galera_monitor_10000 galera-bundle-2" [ style = bold] ++"galera_start_0 galera-bundle-2" -> "galera_promote_0 galera-bundle-2" [ style = bold] ++"galera_start_0 galera-bundle-2" [ style=bold color="green" fontcolor="black"] ++"galera_stop_0 galera-bundle-0" -> "all_stopped" [ style = bold] ++"galera_stop_0 galera-bundle-0" -> "galera-bundle-master_stopped_0" [ style = bold] ++"galera_stop_0 galera-bundle-0" -> "galera_start_0 galera-bundle-0" [ style = bold] ++"galera_stop_0 galera-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"galera_stop_0 galera-bundle-2" -> "all_stopped" [ style = bold] ++"galera_stop_0 galera-bundle-2" -> "galera-bundle-master_stopped_0" [ style = bold] ++"galera_stop_0 galera-bundle-2" -> "galera_start_0 galera-bundle-2" [ style = bold] ++"galera_stop_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-0" [ style = bold] ++"galera_stop_0 galera-bundle-2" [ style=bold color="green" fontcolor="orange"] ++"haproxy-bundle-docker-1_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"haproxy-bundle-docker-1_start_0 controller-1" -> "haproxy-bundle-docker-1_monitor_60000 controller-1" [ style = bold] ++"haproxy-bundle-docker-1_start_0 controller-1" -> "haproxy-bundle_running_0" [ style = bold] ++"haproxy-bundle-docker-1_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"haproxy-bundle_running_0" [ style=bold color="green" fontcolor="orange"] ++"haproxy-bundle_start_0" -> "haproxy-bundle-docker-1_start_0 controller-1" [ style = bold] ++"haproxy-bundle_start_0" [ style=bold color="green" fontcolor="orange"] ++"ip-10.0.0.104_monitor_10000 controller-1" [ style=bold color="green" fontcolor="black"] ++"ip-10.0.0.104_start_0 controller-1" -> "haproxy-bundle_start_0" [ style = bold] ++"ip-10.0.0.104_start_0 controller-1" -> "ip-10.0.0.104_monitor_10000 controller-1" [ style = bold] ++"ip-10.0.0.104_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"ip-172.17.1.11_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"] ++"ip-172.17.1.11_start_0 controller-0" -> "haproxy-bundle_start_0" [ style = bold] ++"ip-172.17.1.11_start_0 controller-0" -> "ip-172.17.1.11_monitor_10000 controller-0" [ style = bold] ++"ip-172.17.1.11_start_0 controller-0" [ style=bold color="green" fontcolor="black"] ++"ip-172.17.3.13_monitor_10000 controller-1" [ style=bold color="green" fontcolor="black"] ++"ip-172.17.3.13_start_0 controller-1" -> "haproxy-bundle_start_0" [ style = bold] ++"ip-172.17.3.13_start_0 controller-1" -> "ip-172.17.3.13_monitor_10000 controller-1" [ style = bold] ++"ip-172.17.3.13_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"ip-192.168.24.11_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"] ++"ip-192.168.24.11_start_0 controller-0" -> "haproxy-bundle_start_0" [ style = bold] ++"ip-192.168.24.11_start_0 controller-0" -> "ip-192.168.24.11_monitor_10000 controller-0" [ style = bold] ++"ip-192.168.24.11_start_0 controller-0" [ style=bold color="green" fontcolor="black"] ++"openstack-cinder-volume_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] ++"openstack-cinder-volume_start_0 controller-0" -> "openstack-cinder-volume_monitor_60000 controller-0" [ style = bold] ++"openstack-cinder-volume_start_0 controller-0" [ style=bold color="green" fontcolor="black"] ++"redis-bundle-1_monitor_0 controller-1" -> "redis-bundle-1_start_0 
controller-1" [ style = bold] ++"redis-bundle-1_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"redis-bundle-1_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"redis-bundle-1_start_0 controller-1" -> "redis-bundle-1_monitor_60000 controller-1" [ style = bold] ++"redis-bundle-1_start_0 controller-1" -> "redis:1_monitor_45000 redis-bundle-1" [ style = bold] ++"redis-bundle-1_start_0 controller-1" -> "redis:1_monitor_60000 redis-bundle-1" [ style = bold] ++"redis-bundle-1_start_0 controller-1" -> "redis:1_start_0 redis-bundle-1" [ style = bold] ++"redis-bundle-1_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"redis-bundle-docker-1_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"redis-bundle-docker-1_start_0 controller-1" -> "redis-bundle-1_monitor_0 controller-1" [ style = bold] ++"redis-bundle-docker-1_start_0 controller-1" -> "redis-bundle-1_start_0 controller-1" [ style = bold] ++"redis-bundle-docker-1_start_0 controller-1" -> "redis-bundle-docker-1_monitor_60000 controller-1" [ style = bold] ++"redis-bundle-docker-1_start_0 controller-1" -> "redis-bundle_running_0" [ style = bold] ++"redis-bundle-docker-1_start_0 controller-1" -> "redis:1_start_0 redis-bundle-1" [ style = bold] ++"redis-bundle-docker-1_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis-bundle_promoted_0" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis:1_monitor_45000 redis-bundle-1" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis:1_monitor_60000 redis-bundle-1" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis_monitor_20000 redis-bundle-0" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_confirmed-post_notify_running_0" -> "redis-bundle-master_pre_notify_promote_0" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_running_0" -> "redis-bundle_running_0" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_running_0" -> "redis:1_monitor_45000 redis-bundle-1" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_running_0" -> "redis:1_monitor_60000 redis-bundle-1" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_running_0" -> "redis_monitor_20000 redis-bundle-0" [ style = bold] ++"redis-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_confirmed-pre_notify_promote_0" -> "redis-bundle-master_post_notify_promoted_0" [ style = bold] ++"redis-bundle-master_confirmed-pre_notify_promote_0" -> "redis-bundle-master_promote_0" [ style = bold] ++"redis-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_confirmed-pre_notify_start_0" -> "redis-bundle-master_post_notify_running_0" [ style = bold] ++"redis-bundle-master_confirmed-pre_notify_start_0" -> "redis-bundle-master_start_0" [ style = bold] ++"redis-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_post_notify_promoted_0" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] ++"redis-bundle-master_post_notify_promoted_0" -> "redis:1_post_notify_promote_0 redis-bundle-1" [ style = bold] ++"redis-bundle-master_post_notify_promoted_0" -> 
"redis_post_notify_promoted_0 redis-bundle-0" [ style = bold] ++"redis-bundle-master_post_notify_promoted_0" -> "redis_post_notify_promoted_0 redis-bundle-2" [ style = bold] ++"redis-bundle-master_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_post_notify_running_0" -> "redis-bundle-master_confirmed-post_notify_running_0" [ style = bold] ++"redis-bundle-master_post_notify_running_0" -> "redis:1_post_notify_start_0 redis-bundle-1" [ style = bold] ++"redis-bundle-master_post_notify_running_0" -> "redis_post_notify_running_0 redis-bundle-0" [ style = bold] ++"redis-bundle-master_post_notify_running_0" -> "redis_post_notify_running_0 redis-bundle-2" [ style = bold] ++"redis-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_pre_notify_promote_0" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] ++"redis-bundle-master_pre_notify_promote_0" -> "redis:1_pre_notify_promote_0 redis-bundle-1" [ style = bold] ++"redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-0" [ style = bold] ++"redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-2" [ style = bold] ++"redis-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_pre_notify_start_0" -> "redis-bundle-master_confirmed-pre_notify_start_0" [ style = bold] ++"redis-bundle-master_pre_notify_start_0" -> "redis_pre_notify_start_0 redis-bundle-0" [ style = bold] ++"redis-bundle-master_pre_notify_start_0" -> "redis_pre_notify_start_0 redis-bundle-2" [ style = bold] ++"redis-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_promote_0" -> "redis_promote_0 redis-bundle-0" [ style = bold] ++"redis-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_promoted_0" -> "redis-bundle-master_post_notify_promoted_0" [ style = bold] ++"redis-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_running_0" -> "redis-bundle-master_post_notify_running_0" [ style = bold] ++"redis-bundle-master_running_0" -> "redis-bundle-master_promote_0" [ style = bold] ++"redis-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle-master_start_0" -> "redis-bundle-master_running_0" [ style = bold] ++"redis-bundle-master_start_0" -> "redis:1_start_0 redis-bundle-1" [ style = bold] ++"redis-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle_promote_0" -> "redis-bundle-master_promote_0" [ style = bold] ++"redis-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle_running_0" -> "redis-bundle_promote_0" [ style = bold] ++"redis-bundle_running_0" [ style=bold color="green" fontcolor="orange"] ++"redis-bundle_start_0" -> "redis-bundle-docker-1_start_0 controller-1" [ style = bold] ++"redis-bundle_start_0" -> "redis-bundle-master_start_0" [ style = bold] ++"redis-bundle_start_0" [ style=bold color="green" fontcolor="orange"] ++"redis:1_monitor_45000 redis-bundle-1" [ style=bold color="green" fontcolor="black"] ++"redis:1_monitor_60000 redis-bundle-1" [ style=bold color="green" fontcolor="black"] ++"redis:1_post_notify_promote_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] ++"redis:1_post_notify_promote_0 
redis-bundle-1" [ style=bold color="green" fontcolor="black"] ++"redis:1_post_notify_start_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_running_0" [ style = bold] ++"redis:1_post_notify_start_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"] ++"redis:1_pre_notify_promote_0 redis-bundle-1" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] ++"redis:1_pre_notify_promote_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"] ++"redis:1_start_0 redis-bundle-1" -> "redis-bundle-master_running_0" [ style = bold] ++"redis:1_start_0 redis-bundle-1" -> "redis:1_monitor_45000 redis-bundle-1" [ style = bold] ++"redis:1_start_0 redis-bundle-1" -> "redis:1_monitor_60000 redis-bundle-1" [ style = bold] ++"redis:1_start_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"] ++"redis_monitor_20000 redis-bundle-0" [ style=bold color="green" fontcolor="black"] ++"redis_post_notify_promoted_0 redis-bundle-0" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] ++"redis_post_notify_promoted_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"] ++"redis_post_notify_promoted_0 redis-bundle-2" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] ++"redis_post_notify_promoted_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"] ++"redis_post_notify_running_0 redis-bundle-0" -> "redis-bundle-master_confirmed-post_notify_running_0" [ style = bold] ++"redis_post_notify_running_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"] ++"redis_post_notify_running_0 redis-bundle-2" -> "redis-bundle-master_confirmed-post_notify_running_0" [ style = bold] ++"redis_post_notify_running_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"] ++"redis_pre_notify_promote_0 redis-bundle-0" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] ++"redis_pre_notify_promote_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"] ++"redis_pre_notify_promote_0 redis-bundle-2" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] ++"redis_pre_notify_promote_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"] ++"redis_pre_notify_start_0 redis-bundle-0" -> "redis-bundle-master_confirmed-pre_notify_start_0" [ style = bold] ++"redis_pre_notify_start_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"] ++"redis_pre_notify_start_0 redis-bundle-2" -> "redis-bundle-master_confirmed-pre_notify_start_0" [ style = bold] ++"redis_pre_notify_start_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"] ++"redis_promote_0 redis-bundle-0" -> "redis-bundle-master_promoted_0" [ style = bold] ++"redis_promote_0 redis-bundle-0" -> "redis_monitor_20000 redis-bundle-0" [ style = bold] ++"redis_promote_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"] ++"stonith 'reboot' galera-bundle-0" -> "galera-bundle-master_stop_0" [ style = bold] ++"stonith 'reboot' galera-bundle-0" -> "stonith_complete" [ style = bold] ++"stonith 'reboot' galera-bundle-0" [ style=bold color="green" fontcolor="orange"] ++"stonith 'reboot' galera-bundle-2" -> "galera-bundle-master_stop_0" [ style = bold] ++"stonith 'reboot' galera-bundle-2" -> "stonith_complete" [ style = bold] ++"stonith 'reboot' galera-bundle-2" [ style=bold color="green" fontcolor="orange"] ++"stonith-fence_ipmilan-525400498d34_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-525400498d34_start_0 controller-1" -> 
"stonith-fence_ipmilan-525400498d34_monitor_60000 controller-1" [ style = bold] ++"stonith-fence_ipmilan-525400498d34_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-525400542c06_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-525400542c06_start_0 controller-0" -> "stonith-fence_ipmilan-525400542c06_monitor_60000 controller-0" [ style = bold] ++"stonith-fence_ipmilan-525400542c06_start_0 controller-0" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-5254005ea387_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-5254005ea387_start_0 controller-1" -> "stonith-fence_ipmilan-5254005ea387_monitor_60000 controller-1" [ style = bold] ++"stonith-fence_ipmilan-5254005ea387_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-525400c709f7_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"] ++"stonith-fence_ipmilan-525400c709f7_start_0 controller-1" -> "stonith-fence_ipmilan-525400c709f7_monitor_60000 controller-1" [ style = bold] ++"stonith-fence_ipmilan-525400c709f7_start_0 controller-1" [ style=bold color="green" fontcolor="black"] ++"stonith_complete" -> "all_stopped" [ style = bold] ++"stonith_complete" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] ++"stonith_complete" -> "galera-bundle-docker-2_start_0 database-2" [ style = bold] ++"stonith_complete" -> "galera_promote_0 galera-bundle-0" [ style = bold] ++"stonith_complete" -> "galera_promote_0 galera-bundle-2" [ style = bold] ++"stonith_complete" -> "galera_start_0 galera-bundle-0" [ style = bold] ++"stonith_complete" -> "galera_start_0 galera-bundle-2" [ style = bold] ++"stonith_complete" -> "haproxy-bundle-docker-1_start_0 controller-1" [ style = bold] ++"stonith_complete" -> "ip-10.0.0.104_start_0 controller-1" [ style = bold] ++"stonith_complete" -> "ip-172.17.1.11_start_0 controller-0" [ style = bold] ++"stonith_complete" -> "ip-172.17.3.13_start_0 controller-1" [ style = bold] ++"stonith_complete" -> "ip-192.168.24.11_start_0 controller-0" [ style = bold] ++"stonith_complete" -> "openstack-cinder-volume_start_0 controller-0" [ style = bold] ++"stonith_complete" -> "redis-bundle-docker-1_start_0 controller-1" [ style = bold] ++"stonith_complete" -> "redis:1_start_0 redis-bundle-1" [ style = bold] ++"stonith_complete" -> "redis_promote_0 redis-bundle-0" [ style = bold] ++"stonith_complete" [ style=bold color="green" fontcolor="orange"] ++} +diff --git a/pengine/test10/bundle-order-stop-on-remote.exp b/pengine/test10/bundle-order-stop-on-remote.exp +new file mode 100644 +index 0000000..db5386b +--- /dev/null ++++ b/pengine/test10/bundle-order-stop-on-remote.exp +@@ -0,0 +1,1607 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ 
++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ 
++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/pengine/test10/bundle-order-stop-on-remote.scores b/pengine/test10/bundle-order-stop-on-remote.scores +new file mode 100644 +index 0000000..e26f511 +--- /dev/null ++++ b/pengine/test10/bundle-order-stop-on-remote.scores +@@ -0,0 +1,934 @@ ++Allocation scores: ++clone_color: galera-bundle-master allocation score on controller-0: -INFINITY ++clone_color: galera-bundle-master allocation score on controller-1: -INFINITY ++clone_color: galera-bundle-master allocation score on controller-2: -INFINITY ++clone_color: galera-bundle-master allocation score on database-0: -INFINITY ++clone_color: galera-bundle-master allocation score on database-1: -INFINITY ++clone_color: galera-bundle-master allocation score on database-2: -INFINITY ++clone_color: galera-bundle-master allocation score on galera-bundle-0: 0 ++clone_color: galera-bundle-master allocation score on galera-bundle-1: 0 ++clone_color: galera-bundle-master allocation score on galera-bundle-2: 0 ++clone_color: galera-bundle-master allocation score on messaging-0: -INFINITY ++clone_color: galera-bundle-master allocation score on messaging-1: -INFINITY ++clone_color: galera-bundle-master allocation score on messaging-2: -INFINITY ++clone_color: galera:0 allocation score on galera-bundle-0: INFINITY ++clone_color: galera:1 allocation score on galera-bundle-1: INFINITY ++clone_color: galera:2 allocation score on galera-bundle-2: INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on controller-0: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on database-0: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on database-1: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on database-2: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on messaging-0: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on messaging-1: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on messaging-2: -INFINITY ++clone_color: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: 0 ++clone_color: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: 0 ++clone_color: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: 0 ++clone_color: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY ++clone_color: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY ++clone_color: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY ++clone_color: redis-bundle-master allocation score on controller-0: -INFINITY ++clone_color: redis-bundle-master allocation score on controller-1: -INFINITY ++clone_color: redis-bundle-master allocation score on controller-2: -INFINITY ++clone_color: redis-bundle-master allocation score on database-0: -INFINITY ++clone_color: redis-bundle-master allocation score on database-1: -INFINITY ++clone_color: 
redis-bundle-master allocation score on database-2: -INFINITY ++clone_color: redis-bundle-master allocation score on messaging-0: -INFINITY ++clone_color: redis-bundle-master allocation score on messaging-1: -INFINITY ++clone_color: redis-bundle-master allocation score on messaging-2: -INFINITY ++clone_color: redis-bundle-master allocation score on redis-bundle-0: 0 ++clone_color: redis-bundle-master allocation score on redis-bundle-1: 0 ++clone_color: redis-bundle-master allocation score on redis-bundle-2: 0 ++clone_color: redis:0 allocation score on redis-bundle-0: INFINITY ++clone_color: redis:1 allocation score on redis-bundle-1: INFINITY ++clone_color: redis:2 allocation score on redis-bundle-2: INFINITY ++container_color: galera-bundle allocation score on controller-0: -INFINITY ++container_color: galera-bundle allocation score on controller-1: -INFINITY ++container_color: galera-bundle allocation score on controller-2: -INFINITY ++container_color: galera-bundle allocation score on database-0: 0 ++container_color: galera-bundle allocation score on database-1: 0 ++container_color: galera-bundle allocation score on database-2: 0 ++container_color: galera-bundle allocation score on messaging-0: -INFINITY ++container_color: galera-bundle allocation score on messaging-1: -INFINITY ++container_color: galera-bundle allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-0 allocation score on controller-0: 0 ++container_color: galera-bundle-0 allocation score on controller-1: 0 ++container_color: galera-bundle-0 allocation score on controller-2: 0 ++container_color: galera-bundle-0 allocation score on database-0: -INFINITY ++container_color: galera-bundle-0 allocation score on database-1: -INFINITY ++container_color: galera-bundle-0 allocation score on database-2: -INFINITY ++container_color: galera-bundle-0 allocation score on messaging-0: -INFINITY ++container_color: galera-bundle-0 allocation score on messaging-1: -INFINITY ++container_color: galera-bundle-0 allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-1 allocation score on controller-0: 0 ++container_color: galera-bundle-1 allocation score on controller-1: 0 ++container_color: galera-bundle-1 allocation score on controller-2: INFINITY ++container_color: galera-bundle-1 allocation score on database-0: -INFINITY ++container_color: galera-bundle-1 allocation score on database-1: -INFINITY ++container_color: galera-bundle-1 allocation score on database-2: -INFINITY ++container_color: galera-bundle-1 allocation score on messaging-0: -INFINITY ++container_color: galera-bundle-1 allocation score on messaging-1: -INFINITY ++container_color: galera-bundle-1 allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-2 allocation score on controller-0: 0 ++container_color: galera-bundle-2 allocation score on controller-1: 0 ++container_color: galera-bundle-2 allocation score on controller-2: 0 ++container_color: galera-bundle-2 allocation score on database-0: -INFINITY ++container_color: galera-bundle-2 allocation score on database-1: -INFINITY ++container_color: galera-bundle-2 allocation score on database-2: -INFINITY ++container_color: galera-bundle-2 allocation score on messaging-0: -INFINITY ++container_color: galera-bundle-2 allocation score on messaging-1: -INFINITY ++container_color: galera-bundle-2 allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-docker-0 allocation score on controller-0: -INFINITY ++container_color: galera-bundle-docker-0 
allocation score on controller-1: -INFINITY ++container_color: galera-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: galera-bundle-docker-0 allocation score on database-0: INFINITY ++container_color: galera-bundle-docker-0 allocation score on database-1: 0 ++container_color: galera-bundle-docker-0 allocation score on database-2: 0 ++container_color: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: galera-bundle-docker-1 allocation score on controller-1: -INFINITY ++container_color: galera-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: galera-bundle-docker-1 allocation score on database-0: 0 ++container_color: galera-bundle-docker-1 allocation score on database-1: INFINITY ++container_color: galera-bundle-docker-1 allocation score on database-2: 0 ++container_color: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY ++container_color: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-docker-2 allocation score on controller-0: -INFINITY ++container_color: galera-bundle-docker-2 allocation score on controller-1: -INFINITY ++container_color: galera-bundle-docker-2 allocation score on controller-2: -INFINITY ++container_color: galera-bundle-docker-2 allocation score on database-0: 0 ++container_color: galera-bundle-docker-2 allocation score on database-1: 0 ++container_color: galera-bundle-docker-2 allocation score on database-2: INFINITY ++container_color: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY ++container_color: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY ++container_color: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY ++container_color: galera-bundle-master allocation score on controller-0: 0 ++container_color: galera-bundle-master allocation score on controller-1: 0 ++container_color: galera-bundle-master allocation score on controller-2: 0 ++container_color: galera-bundle-master allocation score on database-0: 0 ++container_color: galera-bundle-master allocation score on database-1: 0 ++container_color: galera-bundle-master allocation score on database-2: 0 ++container_color: galera-bundle-master allocation score on galera-bundle-0: -INFINITY ++container_color: galera-bundle-master allocation score on galera-bundle-1: -INFINITY ++container_color: galera-bundle-master allocation score on galera-bundle-2: -INFINITY ++container_color: galera-bundle-master allocation score on messaging-0: 0 ++container_color: galera-bundle-master allocation score on messaging-1: 0 ++container_color: galera-bundle-master allocation score on messaging-2: 0 ++container_color: galera:0 allocation score on galera-bundle-0: INFINITY ++container_color: galera:1 allocation score on galera-bundle-1: INFINITY ++container_color: galera:2 allocation score on galera-bundle-2: INFINITY ++container_color: haproxy-bundle allocation score on controller-0: 0 ++container_color: haproxy-bundle allocation score on controller-0: 0 ++container_color: haproxy-bundle allocation score on controller-0: 0 ++container_color: haproxy-bundle allocation score on controller-0: 0 
++container_color: haproxy-bundle allocation score on controller-0: 0 ++container_color: haproxy-bundle allocation score on controller-0: 0 ++container_color: haproxy-bundle allocation score on controller-0: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-1: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on controller-2: 0 ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-0: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-1: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on database-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: 
-INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on controller-2: 0 ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY 
++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-0: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score 
on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-1: 0 ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on controller-2: 0 ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY ++container_color: haproxy-bundle-docker-1 
allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: rabbitmq-bundle allocation score on controller-0: -INFINITY
++container_color: rabbitmq-bundle allocation score on controller-1: -INFINITY
++container_color: rabbitmq-bundle allocation score on controller-2: -INFINITY
++container_color: rabbitmq-bundle allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle allocation score on messaging-0: 0
++container_color: rabbitmq-bundle allocation score on messaging-1: 0
++container_color: rabbitmq-bundle allocation score on messaging-2: 0
++container_color: rabbitmq-bundle-0 allocation score on controller-0: 0
++container_color: rabbitmq-bundle-0 allocation score on controller-1: 0
++container_color: rabbitmq-bundle-0 allocation score on controller-2: INFINITY
++container_color: rabbitmq-bundle-0 allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle-0 allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle-0 allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY
++container_color: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY
++container_color: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY
++container_color: rabbitmq-bundle-1 allocation score on controller-0: 0
++container_color: rabbitmq-bundle-1 allocation score on controller-1: 0
++container_color: rabbitmq-bundle-1 allocation score on controller-2: INFINITY
++container_color: rabbitmq-bundle-1 allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle-1 allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle-1 allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY
++container_color: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY
++container_color: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY
++container_color: rabbitmq-bundle-2 allocation score on controller-0: 0
++container_color: rabbitmq-bundle-2 allocation score on controller-1: 0
++container_color: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
++container_color: rabbitmq-bundle-2 allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle-2 allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle-2 allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY
++container_color: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY
++container_color: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY
++container_color: rabbitmq-bundle-clone allocation score on controller-0: 0
++container_color: rabbitmq-bundle-clone allocation score on controller-1: 0
++container_color: rabbitmq-bundle-clone allocation score on controller-2: 0
++container_color: rabbitmq-bundle-clone allocation score on database-0: 0
++container_color: rabbitmq-bundle-clone allocation score on database-1: 0
++container_color: rabbitmq-bundle-clone allocation score on database-2: 0
++container_color: rabbitmq-bundle-clone allocation score on messaging-0: 0
++container_color: rabbitmq-bundle-clone allocation score on messaging-1: 0
++container_color: rabbitmq-bundle-clone allocation score on messaging-2: 0
++container_color: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
++container_color: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
++container_color: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY
++container_color: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0
++container_color: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0
++container_color: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0
++container_color: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY
++container_color: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0
++container_color: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY
++container_color: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY
++container_color: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY
++container_color: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0
++container_color: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0
++container_color: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY
++container_color: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
++container_color: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
++container_color: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
++container_color: redis-bundle allocation score on controller-0: 0
++container_color: redis-bundle allocation score on controller-1: 0
++container_color: redis-bundle allocation score on controller-2: 0
++container_color: redis-bundle allocation score on database-0: -INFINITY
++container_color: redis-bundle allocation score on database-1: -INFINITY
++container_color: redis-bundle allocation score on database-2: -INFINITY
++container_color: redis-bundle allocation score on messaging-0: -INFINITY
++container_color: redis-bundle allocation score on messaging-1: -INFINITY
++container_color: redis-bundle allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-0 allocation score on controller-0: INFINITY
++container_color: redis-bundle-0 allocation score on controller-1: 0
++container_color: redis-bundle-0 allocation score on controller-2: 0
++container_color: redis-bundle-0 allocation score on database-0: -INFINITY
++container_color: redis-bundle-0 allocation score on database-1: -INFINITY
++container_color: redis-bundle-0 allocation score on database-2: -INFINITY
++container_color: redis-bundle-0 allocation score on messaging-0: -INFINITY
++container_color: redis-bundle-0 allocation score on messaging-1: -INFINITY
++container_color: redis-bundle-0 allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-1 allocation score on controller-0: 0
++container_color: redis-bundle-1 allocation score on controller-1: 0
++container_color: redis-bundle-1 allocation score on controller-2: 0
++container_color: redis-bundle-1 allocation score on database-0: -INFINITY
++container_color: redis-bundle-1 allocation score on database-1: -INFINITY
++container_color: redis-bundle-1 allocation score on database-2: -INFINITY
++container_color: redis-bundle-1 allocation score on messaging-0: -INFINITY
++container_color: redis-bundle-1 allocation score on messaging-1: -INFINITY
++container_color: redis-bundle-1 allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-2 allocation score on controller-0: 0
++container_color: redis-bundle-2 allocation score on controller-1: 0
++container_color: redis-bundle-2 allocation score on controller-2: INFINITY
++container_color: redis-bundle-2 allocation score on database-0: -INFINITY
++container_color: redis-bundle-2 allocation score on database-1: -INFINITY
++container_color: redis-bundle-2 allocation score on database-2: -INFINITY
++container_color: redis-bundle-2 allocation score on messaging-0: -INFINITY
++container_color: redis-bundle-2 allocation score on messaging-1: -INFINITY
++container_color: redis-bundle-2 allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-docker-0 allocation score on controller-0: INFINITY
++container_color: redis-bundle-docker-0 allocation score on controller-1: 0
++container_color: redis-bundle-docker-0 allocation score on controller-2: 0
++container_color: redis-bundle-docker-0 allocation score on database-0: -INFINITY
++container_color: redis-bundle-docker-0 allocation score on database-1: -INFINITY
++container_color: redis-bundle-docker-0 allocation score on database-2: -INFINITY
++container_color: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY
++container_color: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY
++container_color: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-docker-1 allocation score on controller-0: 0
++container_color: redis-bundle-docker-1 allocation score on controller-1: 0
++container_color: redis-bundle-docker-1 allocation score on controller-2: 0
++container_color: redis-bundle-docker-1 allocation score on database-0: -INFINITY
++container_color: redis-bundle-docker-1 allocation score on database-1: -INFINITY
++container_color: redis-bundle-docker-1 allocation score on database-2: -INFINITY
++container_color: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY
++container_color: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY
++container_color: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-docker-2 allocation score on controller-0: 0
++container_color: redis-bundle-docker-2 allocation score on controller-1: 0
++container_color: redis-bundle-docker-2 allocation score on controller-2: INFINITY
++container_color: redis-bundle-docker-2 allocation score on database-0: -INFINITY
++container_color: redis-bundle-docker-2 allocation score on database-1: -INFINITY
++container_color: redis-bundle-docker-2 allocation score on database-2: -INFINITY
++container_color: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY
++container_color: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY
++container_color: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY
++container_color: redis-bundle-master allocation score on controller-0: 0
++container_color: redis-bundle-master allocation score on controller-1: 0
++container_color: redis-bundle-master allocation score on controller-2: 0
++container_color: redis-bundle-master allocation score on database-0: 0
++container_color: redis-bundle-master allocation score on database-1: 0
++container_color: redis-bundle-master allocation score on database-2: 0
++container_color: redis-bundle-master allocation score on messaging-0: 0
++container_color: redis-bundle-master allocation score on messaging-1: 0
++container_color: redis-bundle-master allocation score on messaging-2: 0
++container_color: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
++container_color: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
++container_color: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
++container_color: redis:0 allocation score on redis-bundle-0: INFINITY
++container_color: redis:1 allocation score on redis-bundle-1: 500
++container_color: redis:2 allocation score on redis-bundle-2: INFINITY
++galera:0 promotion score on galera-bundle-0: 100
++galera:1 promotion score on galera-bundle-1: 100
++galera:2 promotion score on galera-bundle-2: 100
++native_color: database-0 allocation score on controller-0: 0
++native_color: database-0 allocation score on controller-1: 0
++native_color: database-0 allocation score on controller-2: 0
++native_color: database-0 allocation score on database-0: -INFINITY
++native_color: database-0 allocation score on database-1: -INFINITY
++native_color: database-0 allocation score on database-2: -INFINITY
++native_color: database-0 allocation score on messaging-0: -INFINITY
++native_color: database-0 allocation score on messaging-1: -INFINITY
++native_color: database-0 allocation score on messaging-2: -INFINITY
++native_color: database-1 allocation score on controller-0: 0
++native_color: database-1 allocation score on controller-1: 0
++native_color: database-1 allocation score on controller-2: INFINITY
++native_color: database-1 allocation score on database-0: -INFINITY
++native_color: database-1 allocation score on database-1: -INFINITY
++native_color: database-1 allocation score on database-2: -INFINITY
++native_color: database-1 allocation score on messaging-0: -INFINITY
++native_color: database-1 allocation score on messaging-1: -INFINITY
++native_color: database-1 allocation score on messaging-2: -INFINITY
++native_color: database-2 allocation score on controller-0: 0
++native_color: database-2 allocation score on controller-1: 0
++native_color: database-2 allocation score on controller-2: 0
++native_color: database-2 allocation score on database-0: -INFINITY
++native_color: database-2 allocation score on database-1: -INFINITY
++native_color: database-2 allocation score on database-2: -INFINITY
++native_color: database-2 allocation score on messaging-0: -INFINITY
++native_color: database-2 allocation score on messaging-1: -INFINITY
++native_color: database-2 allocation score on messaging-2: -INFINITY
++native_color: galera-bundle-0 allocation score on controller-0: 0
++native_color: galera-bundle-0 allocation score on controller-1: -INFINITY
++native_color: galera-bundle-0 allocation score on controller-2: -INFINITY
++native_color: galera-bundle-0 allocation score on database-0: -INFINITY
++native_color: galera-bundle-0 allocation score on database-1: -INFINITY
++native_color: galera-bundle-0 allocation score on database-2: -INFINITY
++native_color: galera-bundle-0 allocation score on messaging-0: -INFINITY
++native_color: galera-bundle-0 allocation score on messaging-1: -INFINITY
++native_color: galera-bundle-0 allocation score on messaging-2: -INFINITY
++native_color: galera-bundle-1 allocation score on controller-0: -INFINITY
++native_color: galera-bundle-1 allocation score on controller-1: -INFINITY
++native_color: galera-bundle-1 allocation score on controller-2: INFINITY
++native_color: galera-bundle-1 allocation score on database-0: -INFINITY
++native_color: galera-bundle-1 allocation score on database-1: -INFINITY
++native_color: galera-bundle-1 allocation score on database-2: -INFINITY
++native_color: galera-bundle-1 allocation score on messaging-0: -INFINITY
++native_color: galera-bundle-1 allocation score on messaging-1: -INFINITY
++native_color: galera-bundle-1 allocation score on messaging-2: -INFINITY
++native_color: galera-bundle-2 allocation score on controller-0: -INFINITY
++native_color: galera-bundle-2 allocation score on controller-1: 0
++native_color: galera-bundle-2 allocation score on controller-2: -INFINITY
++native_color: galera-bundle-2 allocation score on database-0: -INFINITY
++native_color: galera-bundle-2 allocation score on database-1: -INFINITY
++native_color: galera-bundle-2 allocation score on database-2: -INFINITY
++native_color: galera-bundle-2 allocation score on messaging-0: -INFINITY
++native_color: galera-bundle-2 allocation score on messaging-1: -INFINITY
++native_color: galera-bundle-2 allocation score on messaging-2: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on controller-0: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on controller-1: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on controller-2: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on database-0: INFINITY
++native_color: galera-bundle-docker-0 allocation score on database-1: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on database-2: -10000
++native_color: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY
++native_color: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY
++native_color: galera-bundle-docker-1 allocation score on controller-0: -INFINITY
++native_color: galera-bundle-docker-1 allocation score on controller-1: -INFINITY
++native_color: galera-bundle-docker-1 allocation score on controller-2: -INFINITY
++native_color: galera-bundle-docker-1 allocation score on database-0: -10000
++native_color: galera-bundle-docker-1 allocation score on database-1: INFINITY
++native_color: galera-bundle-docker-1 allocation score on database-2: -10000
++native_color: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY
++native_color: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY
++native_color: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on controller-0: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on controller-1: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on controller-2: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on database-0: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on database-1: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on database-2: INFINITY
++native_color: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY
++native_color: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY
++native_color: galera:0 allocation score on galera-bundle-0: INFINITY
++native_color: galera:1 allocation score on galera-bundle-1: INFINITY
++native_color: galera:2 allocation score on galera-bundle-2: INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on controller-1: 0
++native_color: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
++native_color: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on controller-1: 0
++native_color: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
++native_color: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on controller-0: 0
++native_color: haproxy-bundle-docker-2 allocation score on controller-1: 0
++native_color: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
++native_color: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
++native_color: ip-10.0.0.104 allocation score on controller-0: 0
++native_color: ip-10.0.0.104 allocation score on controller-1: 0
++native_color: ip-10.0.0.104 allocation score on controller-2: 0
++native_color: ip-10.0.0.104 allocation score on database-0: -INFINITY
++native_color: ip-10.0.0.104 allocation score on database-1: -INFINITY
++native_color: ip-10.0.0.104 allocation score on database-2: -INFINITY
++native_color: ip-10.0.0.104 allocation score on messaging-0: -INFINITY
++native_color: ip-10.0.0.104 allocation score on messaging-1: -INFINITY
++native_color: ip-10.0.0.104 allocation score on messaging-2: -INFINITY
++native_color: ip-172.17.1.11 allocation score on controller-0: 0
++native_color: ip-172.17.1.11 allocation score on controller-1: 0
++native_color: ip-172.17.1.11 allocation score on controller-2: 0
++native_color: ip-172.17.1.11 allocation score on database-0: -INFINITY
++native_color: ip-172.17.1.11 allocation score on database-1: -INFINITY
++native_color: ip-172.17.1.11 allocation score on database-2: -INFINITY
++native_color: ip-172.17.1.11 allocation score on messaging-0: -INFINITY
++native_color: ip-172.17.1.11 allocation score on messaging-1: -INFINITY
++native_color: ip-172.17.1.11 allocation score on messaging-2: -INFINITY
++native_color: ip-172.17.1.19 allocation score on controller-0: 0
++native_color: ip-172.17.1.19 allocation score on controller-1: 0
++native_color: ip-172.17.1.19 allocation score on controller-2: INFINITY
++native_color: ip-172.17.1.19 allocation score on database-0: -INFINITY
++native_color: ip-172.17.1.19 allocation score on database-1: -INFINITY
++native_color: ip-172.17.1.19 allocation score on database-2: -INFINITY
++native_color: ip-172.17.1.19 allocation score on messaging-0: -INFINITY
++native_color: ip-172.17.1.19 allocation score on messaging-1: -INFINITY
++native_color: ip-172.17.1.19 allocation score on messaging-2: -INFINITY
++native_color: ip-172.17.3.13 allocation score on controller-0: 0
++native_color: ip-172.17.3.13 allocation score on controller-1: 0
++native_color: ip-172.17.3.13 allocation score on controller-2: 0
++native_color: ip-172.17.3.13 allocation score on database-0: -INFINITY
++native_color: ip-172.17.3.13 allocation score on database-1: -INFINITY
++native_color: ip-172.17.3.13 allocation score on database-2: -INFINITY
++native_color: ip-172.17.3.13 allocation score on messaging-0: -INFINITY
++native_color: ip-172.17.3.13 allocation score on messaging-1: -INFINITY
++native_color: ip-172.17.3.13 allocation score on messaging-2: -INFINITY
++native_color: ip-172.17.4.19 allocation score on controller-0: 0
++native_color: ip-172.17.4.19 allocation score on controller-1: 0
++native_color: ip-172.17.4.19 allocation score on controller-2: INFINITY
++native_color: ip-172.17.4.19 allocation score on database-0: -INFINITY
++native_color: ip-172.17.4.19 allocation score on database-1: -INFINITY
++native_color: ip-172.17.4.19 allocation score on database-2: -INFINITY
++native_color: ip-172.17.4.19 allocation score on messaging-0: -INFINITY
++native_color: ip-172.17.4.19 allocation score on messaging-1: -INFINITY
++native_color: ip-172.17.4.19 allocation score on messaging-2: -INFINITY
++native_color: ip-192.168.24.11 allocation score on controller-0: 0
++native_color: ip-192.168.24.11 allocation score on controller-1: 0
++native_color: ip-192.168.24.11 allocation score on controller-2: 0
++native_color: ip-192.168.24.11 allocation score on database-0: -INFINITY
++native_color: ip-192.168.24.11 allocation score on database-1: -INFINITY
++native_color: ip-192.168.24.11 allocation score on database-2: -INFINITY
++native_color: ip-192.168.24.11 allocation score on messaging-0: -INFINITY
++native_color: ip-192.168.24.11 allocation score on messaging-1: -INFINITY
++native_color: ip-192.168.24.11 allocation score on messaging-2: -INFINITY
++native_color: messaging-0 allocation score on controller-0: 0
++native_color: messaging-0 allocation score on controller-1: 0
++native_color: messaging-0 allocation score on controller-2: INFINITY
++native_color: messaging-0 allocation score on database-0: -INFINITY
++native_color: messaging-0 allocation score on database-1: -INFINITY
++native_color: messaging-0 allocation score on database-2: -INFINITY
++native_color: messaging-0 allocation score on messaging-0: -INFINITY
++native_color: messaging-0 allocation score on messaging-1: -INFINITY
++native_color: messaging-0 allocation score on messaging-2: -INFINITY
++native_color: messaging-1 allocation score on controller-0: 0
++native_color: messaging-1 allocation score on controller-1: 0
++native_color: messaging-1 allocation score on controller-2: INFINITY
++native_color: messaging-1 allocation score on database-0: -INFINITY
++native_color: messaging-1 allocation score on database-1: -INFINITY
++native_color: messaging-1 allocation score on database-2: -INFINITY
++native_color: messaging-1 allocation score on messaging-0: -INFINITY
++native_color: messaging-1 allocation score on messaging-1: -INFINITY
++native_color: messaging-1 allocation score on messaging-2: -INFINITY
++native_color: messaging-2 allocation score on controller-0: 0
++native_color: messaging-2 allocation score on controller-1: 0
++native_color: messaging-2 allocation score on controller-2: INFINITY
++native_color: messaging-2 allocation score on database-0: -INFINITY
++native_color: messaging-2 allocation score on database-1: -INFINITY
++native_color: messaging-2 allocation score on database-2: -INFINITY
++native_color: messaging-2 allocation score on messaging-0: -INFINITY
++native_color: messaging-2 allocation score on messaging-1: -INFINITY
++native_color: messaging-2 allocation score on messaging-2: -INFINITY
++native_color: openstack-cinder-volume allocation score on controller-0: 0
++native_color: openstack-cinder-volume allocation score on controller-1: 0
++native_color: openstack-cinder-volume allocation score on controller-2: 0
++native_color: openstack-cinder-volume allocation score on database-0: -INFINITY
++native_color: openstack-cinder-volume allocation score on database-1: -INFINITY
++native_color: openstack-cinder-volume allocation score on database-2: -INFINITY
++native_color: openstack-cinder-volume allocation score on messaging-0: -INFINITY
++native_color: openstack-cinder-volume allocation score on messaging-1: -INFINITY
++native_color: openstack-cinder-volume allocation score on messaging-2: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on controller-0: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on controller-1: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on controller-2: INFINITY
++native_color: rabbitmq-bundle-0 allocation score on database-0: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on database-1: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on database-2: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY
++native_color: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on controller-0: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on controller-1: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on controller-2: INFINITY
++native_color: rabbitmq-bundle-1 allocation score on database-0: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on database-1: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on database-2: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY
++native_color: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on controller-0: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on controller-1: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
++native_color: rabbitmq-bundle-2 allocation score on database-0: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on database-1: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on database-2: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY
++native_color: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY
++native_color: rabbitmq-bundle-docker-0 allocation score on messaging-1: -10000
++native_color: rabbitmq-bundle-docker-0 allocation score on messaging-2: -10000
++native_color: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on messaging-0: -INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY
++native_color: rabbitmq-bundle-docker-1 allocation score on messaging-2: -10000
++native_color: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on messaging-0: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on messaging-1: -INFINITY
++native_color: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY
++native_color: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
++native_color: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
++native_color: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
++native_color: redis-bundle-0 allocation score on controller-0: INFINITY
++native_color: redis-bundle-0 allocation score on controller-1: 0
++native_color: redis-bundle-0 allocation score on controller-2: 0
++native_color: redis-bundle-0 allocation score on database-0: -INFINITY
++native_color: redis-bundle-0 allocation score on database-1: -INFINITY
++native_color: redis-bundle-0 allocation score on database-2: -INFINITY
++native_color: redis-bundle-0 allocation score on messaging-0: -INFINITY
++native_color: redis-bundle-0 allocation score on messaging-1: -INFINITY
++native_color: redis-bundle-0 allocation score on messaging-2: -INFINITY
++native_color: redis-bundle-1 allocation score on controller-0: 0
++native_color: redis-bundle-1 allocation score on controller-1: 10000
++native_color: redis-bundle-1 allocation score on controller-2: 0
++native_color: redis-bundle-1 allocation score on database-0: -INFINITY
++native_color: redis-bundle-1 allocation score on database-1: -INFINITY
++native_color: redis-bundle-1 allocation score on database-2: -INFINITY
++native_color: redis-bundle-1 allocation score on messaging-0: -INFINITY
++native_color: redis-bundle-1 allocation score on messaging-1: -INFINITY
++native_color: redis-bundle-1 allocation score on messaging-2: -INFINITY
++native_color: redis-bundle-2 allocation score on controller-0: 0
++native_color: redis-bundle-2 allocation score on controller-1: 0
++native_color: redis-bundle-2 allocation score on controller-2: INFINITY
++native_color: redis-bundle-2 allocation score on database-0: -INFINITY
++native_color: redis-bundle-2 allocation score on database-1: -INFINITY
++native_color: redis-bundle-2 allocation score on database-2: -INFINITY
++native_color: redis-bundle-2 allocation score on messaging-0: -INFINITY
++native_color: redis-bundle-2 allocation score on messaging-1: -INFINITY
++native_color: redis-bundle-2 allocation score on messaging-2: -INFINITY
++native_color: redis-bundle-docker-0 allocation score on controller-0: INFINITY
++native_color: redis-bundle-docker-0 allocation score on controller-1: 0
++native_color: redis-bundle-docker-0 allocation score on controller-2: 0
++native_color: redis-bundle-docker-0 allocation score on database-0: -INFINITY
++native_color: redis-bundle-docker-0 allocation score on database-1: -INFINITY
++native_color: redis-bundle-docker-0 allocation score on database-2: -INFINITY
++native_color: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY
++native_color: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY
++native_color: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on controller-0: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on controller-1: 0
++native_color: redis-bundle-docker-1 allocation score on controller-2: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on database-0: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on database-1: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on database-2: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY
++native_color: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on controller-0: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on controller-1: 0
++native_color: redis-bundle-docker-2 allocation score on controller-2: INFINITY
++native_color: redis-bundle-docker-2 allocation score on database-0: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on database-1: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on database-2: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY
++native_color: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY
++native_color: redis:0 allocation score on redis-bundle-0: INFINITY
++native_color: redis:1 allocation score on redis-bundle-1: INFINITY
++native_color: redis:2 allocation score on redis-bundle-2: INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on controller-2: INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400244e09 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on controller-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on controller-2: 0
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400498d34 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on controller-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400542c06 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on controller-2: 0
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-5254005ea387 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on controller-2: INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400a25787 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on controller-0: INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on controller-2: 0
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400a7f9e0 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on controller-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on controller-2: INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400aac413 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on controller-2: 0
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400c709f7 allocation score on messaging-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on controller-0: 0
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on controller-1: 0
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on controller-2: INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on database-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on database-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on database-2: -INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on messaging-0: -INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on messaging-1: -INFINITY
++native_color: stonith-fence_ipmilan-525400cdec10 allocation score on messaging-2: -INFINITY
++redis:0 promotion score on redis-bundle-0: 1
++redis:1 promotion score on redis-bundle-1: -1
++redis:2 promotion score on redis-bundle-2: 1
+diff --git a/pengine/test10/bundle-order-stop-on-remote.summary b/pengine/test10/bundle-order-stop-on-remote.summary
+new file mode 100644
+index 0000000..8a04599
+--- /dev/null
++++ b/pengine/test10/bundle-order-stop-on-remote.summary
+@@ -0,0 +1,224 @@
++
++Current cluster status:
++RemoteNode database-0: UNCLEAN (offline)
++RemoteNode database-2: UNCLEAN (offline)
++Online: [ controller-0 controller-1 controller-2 ]
++RemoteOnline: [ database-1 messaging-0 messaging-1 messaging-2 ]
++Containers: [ galera-bundle-1:galera-bundle-docker-1 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-2:redis-bundle-docker-2 ]
++
++ database-0 (ocf::pacemaker:remote): Stopped
++ database-1 (ocf::pacemaker:remote): Started controller-2
++ database-2 (ocf::pacemaker:remote): Stopped
++ messaging-0 (ocf::pacemaker:remote): Started controller-2
++ messaging-1 (ocf::pacemaker:remote): Started controller-2
++ messaging-2 (ocf::pacemaker:remote): Started controller-2
++ Docker container set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]
++ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0
++ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started messaging-1
++ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2
++ Docker container set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]
++ galera-bundle-0 (ocf::heartbeat:galera): FAILED Master database-0 (UNCLEAN)
++ galera-bundle-1 (ocf::heartbeat:galera): Master database-1
++ galera-bundle-2 (ocf::heartbeat:galera): FAILED Master database-2 (UNCLEAN)
++ Docker container set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]
++ redis-bundle-0 (ocf::heartbeat:redis): Slave controller-0
++ redis-bundle-1 (ocf::heartbeat:redis): Stopped
++ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-2
++ ip-192.168.24.11 (ocf::heartbeat:IPaddr2): Stopped
++ ip-10.0.0.104 (ocf::heartbeat:IPaddr2): Stopped
++ ip-172.17.1.19 (ocf::heartbeat:IPaddr2): Started controller-2
++ ip-172.17.1.11 (ocf::heartbeat:IPaddr2): Stopped
++ ip-172.17.3.13 (ocf::heartbeat:IPaddr2): Stopped
++ ip-172.17.4.19 (ocf::heartbeat:IPaddr2): Started controller-2
++ Docker container set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]
++ haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started controller-0
++ haproxy-bundle-docker-1 (ocf::heartbeat:docker): Stopped
++ haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started controller-2
++ openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped
++ stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Stopped
++ stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0
++ stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Stopped
++ stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Stopped
++ stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Stopped
++
++Transition Summary:
++ * Fence (reboot) galera-bundle-2 (resource: galera-bundle-docker-2) 'guest is unclean'
++ * Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean'
++ * Start database-0 ( controller-0 )
++ * Start database-2 ( controller-1 )
++ * Recover galera-bundle-docker-0 ( database-0 )
++ * Start galera-bundle-0 ( controller-0 )
++ * Recover galera:0 ( Master galera-bundle-0 )
++ * Recover galera-bundle-docker-2 ( database-2 )
++ * Start galera-bundle-2 ( controller-1 )
++ * Recover galera:2 ( Master galera-bundle-2 )
++ * Promote redis:0 ( Slave -> Master redis-bundle-0 )
++ * Start redis-bundle-docker-1 ( controller-1 )
++ * Start redis-bundle-1 ( controller-1 )
++ * Start redis:1 ( redis-bundle-1 )
++ * Start ip-192.168.24.11 ( controller-0 )
++ * Start ip-10.0.0.104 ( controller-1 )
++ * Start ip-172.17.1.11 ( controller-0 )
++ * Start ip-172.17.3.13 ( controller-1 )
++ * Start haproxy-bundle-docker-1 ( controller-1 )
++ * Start openstack-cinder-volume ( controller-0 )
++ * Start stonith-fence_ipmilan-525400c709f7 ( controller-1 )
++ * Start stonith-fence_ipmilan-5254005ea387 ( controller-1 )
++ * Start stonith-fence_ipmilan-525400542c06 ( controller-0 )
++ * Start stonith-fence_ipmilan-525400498d34 ( controller-1 )
++
++Executing cluster transition:
++ * Resource action: database-0 start on controller-0
++ * Resource action: database-2 start on controller-1
++ * Pseudo action: redis-bundle-master_pre_notify_start_0
++ * Resource action: stonith-fence_ipmilan-525400c709f7 start on controller-1
++ * Resource action: stonith-fence_ipmilan-5254005ea387 start on controller-1
++ * Resource action: stonith-fence_ipmilan-525400542c06 start on controller-0
++ * Resource action: stonith-fence_ipmilan-525400498d34 start on controller-1
++ * Pseudo action: redis-bundle_start_0
++ * Pseudo action: galera-bundle_demote_0
++ * Resource action: database-0 monitor=20000 on controller-0
++ * Resource action: database-2 monitor=20000 on controller-1
++ * Pseudo action: galera-bundle-master_demote_0
++ * Resource action: redis notify on redis-bundle-0
++ * Resource action: redis notify on redis-bundle-2
++ * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
++ * Pseudo action: redis-bundle-master_start_0
++ * Resource action: stonith-fence_ipmilan-525400c709f7 monitor=60000 on controller-1
++ * Resource action: stonith-fence_ipmilan-5254005ea387 monitor=60000 on controller-1
++ * Resource action: stonith-fence_ipmilan-525400542c06 monitor=60000 on controller-0
++ * Resource action: stonith-fence_ipmilan-525400498d34 monitor=60000 on controller-1
++ * Pseudo action: galera_demote_0
++ * Pseudo action: galera_demote_0
++ * Pseudo action: galera-bundle-master_demoted_0
++ * Pseudo action: galera-bundle_demoted_0
++ * Pseudo action: galera-bundle_stop_0
++ * Resource action: galera-bundle-docker-0 stop on database-0
++ * Resource action: galera-bundle-docker-2 stop on database-2
++ * Pseudo action: stonith-galera-bundle-2-reboot on galera-bundle-2
++ * Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0
++ * Pseudo action: stonith_complete
++ * Pseudo action: galera-bundle-master_stop_0
++ * Resource action: redis-bundle-docker-1 start on controller-1
++ * Resource action: redis-bundle-1 monitor on controller-1
++ * Resource action: ip-192.168.24.11 start on controller-0
++ * Resource action: ip-10.0.0.104 start on controller-1
++ * Resource action: ip-172.17.1.11 start on controller-0
++ * Resource action: ip-172.17.3.13 start on controller-1
++ * Resource action: openstack-cinder-volume start on controller-0
++ * Pseudo action: haproxy-bundle_start_0
++ * Pseudo action: galera_stop_0
++ * Resource action: redis-bundle-docker-1 monitor=60000 on controller-1
++ * Resource action: redis-bundle-1 start on controller-1
++ * Resource action: ip-192.168.24.11 monitor=10000 on controller-0
++ * Resource action: ip-10.0.0.104 monitor=10000 on controller-1
++ * Resource action: ip-172.17.1.11 monitor=10000 on controller-0
++ * Resource action: ip-172.17.3.13 monitor=10000 on controller-1
++ * Resource action: haproxy-bundle-docker-1 start on controller-1
++ * Resource action: openstack-cinder-volume monitor=60000 on controller-0
++ * Pseudo action: haproxy-bundle_running_0
++ * Pseudo action: galera_stop_0
++ * Pseudo action: galera-bundle-master_stopped_0
++ * Resource action: redis start on redis-bundle-1
++ * Pseudo action: redis-bundle-master_running_0
++ * Resource action: redis-bundle-1 monitor=60000 on controller-1
++ * Resource action: haproxy-bundle-docker-1 monitor=60000 on controller-1
++ * Pseudo action: galera-bundle_stopped_0
++ * Pseudo action: galera-bundle_start_0
++ * Pseudo action: all_stopped
++ * Pseudo action: galera-bundle-master_start_0
++ * Resource action: galera-bundle-docker-0 start on database-0
++ * Resource action: galera-bundle-0 monitor on controller-1
++ * Resource action: galera-bundle-docker-2 start on database-2
++ * Resource action: galera-bundle-2 monitor on controller-1
++ * Pseudo action: redis-bundle-master_post_notify_running_0
++ * Resource action: galera-bundle-docker-0 monitor=60000 on database-0
++ * Resource action: galera-bundle-0 start on controller-0
++ * Resource action: galera-bundle-docker-2 monitor=60000 on database-2
++ * Resource action: galera-bundle-2 start on controller-1
++ * Resource action: redis notify on redis-bundle-0
++ * Resource action: redis notify on redis-bundle-1
++ * Resource action: redis notify on redis-bundle-2
++ * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
++ * Pseudo action: redis-bundle_running_0
++ * Resource action: galera start on galera-bundle-0
++ * Resource action: galera start on galera-bundle-2
++ * Pseudo action: galera-bundle-master_running_0
++ * Resource action: galera-bundle-0 monitor=60000 on controller-0
++ * Resource action: galera-bundle-2 monitor=60000 on controller-1
++ * Pseudo action: redis-bundle-master_pre_notify_promote_0
++ * Pseudo action: redis-bundle_promote_0
++ * Pseudo action: galera-bundle_running_0
++ * Resource action: redis notify on redis-bundle-0
++ * Resource action: redis notify on redis-bundle-1
++ * Resource action: redis notify on redis-bundle-2
++ * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
++ * Pseudo action: redis-bundle-master_promote_0
++ * Pseudo action: galera-bundle_promote_0
++ * Pseudo action: galera-bundle-master_promote_0
++ * Resource action: redis promote on redis-bundle-0
++ * Pseudo action: redis-bundle-master_promoted_0
++ * Resource action: galera promote on galera-bundle-0
++ * Resource action: galera promote on galera-bundle-2
++ * Pseudo action: galera-bundle-master_promoted_0
++ * Pseudo action: redis-bundle-master_post_notify_promoted_0
++ * Pseudo action: galera-bundle_promoted_0
++ * Resource action: galera monitor=10000 on galera-bundle-0
++ * Resource action: galera monitor=10000 on galera-bundle-2
++ * Resource action: redis notify on redis-bundle-0
++ * Resource action: redis notify on redis-bundle-1
++ * Resource action: redis notify on redis-bundle-2
++ * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
++ * Pseudo action: redis-bundle_promoted_0
++ * Resource action: redis monitor=20000 on redis-bundle-0
++ * Resource action: redis monitor=60000 on redis-bundle-1
++ * Resource action: redis monitor=45000 on redis-bundle-1
++
++Revised cluster status:
++Online: [ controller-0 controller-1 controller-2 ]
++RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
++Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ]
++
++ database-0 (ocf::pacemaker:remote): Started controller-0
++ database-1 (ocf::pacemaker:remote): Started controller-2
++ database-2 (ocf::pacemaker:remote): Started controller-1
++ messaging-0 (ocf::pacemaker:remote): Started controller-2
++ messaging-1 (ocf::pacemaker:remote): Started controller-2
++ messaging-2 (ocf::pacemaker:remote): Started controller-2
++ Docker container set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]
++ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0
++ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started messaging-1
++ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2
++ Docker container set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]
++ galera-bundle-0 (ocf::heartbeat:galera): Master database-0
++ galera-bundle-1 (ocf::heartbeat:galera): Master database-1
++ galera-bundle-2 (ocf::heartbeat:galera): Master database-2
++ Docker container set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]
++ redis-bundle-0 (ocf::heartbeat:redis): Master controller-0
++ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-1
++ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-2
++ ip-192.168.24.11 (ocf::heartbeat:IPaddr2): Started controller-0
++ ip-10.0.0.104 (ocf::heartbeat:IPaddr2): Started controller-1
++ ip-172.17.1.19 (ocf::heartbeat:IPaddr2): Started controller-2
++ ip-172.17.1.11 (ocf::heartbeat:IPaddr2): Started controller-0
++ ip-172.17.3.13 (ocf::heartbeat:IPaddr2): Started controller-1
++ ip-172.17.4.19 (ocf::heartbeat:IPaddr2): Started controller-2
++ Docker container set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]
++ haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started controller-0
++ haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started controller-1
++ haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started controller-2
++ openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
++ stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Started controller-1
++ stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0
++ stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Started controller-1
++ stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Started controller-0
++ stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2
++ stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Started controller-1
++
+diff --git a/pengine/test10/bundle-order-stop-on-remote.xml b/pengine/test10/bundle-order-stop-on-remote.xml
+new file mode 100644
+index 0000000..d3b87c8
+--- /dev/null
++++ b/pengine/test10/bundle-order-stop-on-remote.xml
+@@ -0,0 +1,1165 @@
++ [The 1,165 added lines of test CIB XML were stripped during extraction; only empty "++" line prefixes survived, so the file body is not recoverable here.]
++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + + +From 47a5f6f5cd1fba2c6bac140329e563abd34b2ef4 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 8 Dec 2017 16:54:07 -0600 +Subject: [PATCH 13/16] Low: PE: correct mispelled constant + +original intention of 2b1aae07 +--- + pengine/allocate.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pengine/allocate.c b/pengine/allocate.c +index 1c95e97..481a0ec 100644 +--- a/pengine/allocate.c ++++ b/pengine/allocate.c +@@ -1870,7 +1870,7 @@ apply_container_ordering(action_t *action, pe_working_set_t *data_set) + container->id); + + if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) +- || safe_str_eq(action->task, CRMD_ACTION_MIGRATE)) { ++ || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { + /* Migration ops map to "no_action", but we need to apply the same + * ordering as for stop or demote (see get_router_node()). + */ +@@ -2036,7 +2036,7 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set) + remote_rsc->id, state2text(state)); + + if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) +- || safe_str_eq(action->task, CRMD_ACTION_MIGRATE)) { ++ || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { + /* Migration ops map to "no_action", but we need to apply the same + * ordering as for stop or demote (see get_router_node()). + */ +-- +1.8.3.1 + + +From 55c9b5ef9c6f531ea808926abaaea5c7c8890dad Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 8 Dec 2017 17:31:23 -0600 +Subject: [PATCH 14/16] Doc: PE: update remote stop ordering comments for + recent changes + +--- + pengine/allocate.c | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +diff --git a/pengine/allocate.c b/pengine/allocate.c +index 481a0ec..7ae4e02 100644 +--- a/pengine/allocate.c ++++ b/pengine/allocate.c +@@ -2058,9 +2058,6 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set) + break; + + case stop_rsc: +- /* Handle special case with remote node where stop actions need to be +- * ordered after the connection resource starts somewhere else. +- */ + if(state == remote_state_alive) { + order_action_then_stop(action, remote_rsc, + pe_order_implies_first, data_set); +@@ -2076,14 +2073,18 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set) + pe_order_implies_first, data_set); + + } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) { +- /* If its not coming back up, better do what we need first */ ++ /* State must be remote_state_unknown or remote_state_stopped. ++ * Since the connection is not coming back up in this ++ * transition, stop this resource first. ++ */ + order_action_then_stop(action, remote_rsc, + pe_order_implies_first, data_set); + + } else { +- /* Wait for the connection resource to be up and assume everything is as we left it */ ++ /* The connection is going to be started somewhere else, so ++ * stop this resource after that completes. 
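++ * (The pe_order_none passed below asks for only the basic ordering, without the pe_order_implies_first flag the other branches use.)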
++ */ + order_start_then_action(remote_rsc, action, pe_order_none, data_set); +- + } + break; + +-- +1.8.3.1 + + +From 39441fa1dfe625cf00af463269052d4c2dafaa16 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 8 Dec 2017 17:16:55 -0600 +Subject: [PATCH 15/16] Low: libpe_status: limit resource type check to + primitives + +--- + lib/pengine/complex.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c +index d58d6be..86f290c 100644 +--- a/lib/pengine/complex.c ++++ b/lib/pengine/complex.c +@@ -784,7 +784,9 @@ common_unpack(xmlNode * xml_obj, resource_t ** rsc, + if(is_set((*rsc)->flags, pe_rsc_fence_device)) { + value = "quorum"; + +- } else if (safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS), "ocf") ++ } else if (((*rsc)->variant == pe_native) ++ && safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS), ++ PCMK_RESOURCE_CLASS_OCF) + && safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_PROVIDER), "pacemaker") + && safe_str_eq(crm_element_value((*rsc)->xml, XML_ATTR_TYPE), "remote") + ) { +-- +1.8.3.1 + + +From 68438917c3b1ed305af6da2acd23454cd777e1d1 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 8 Dec 2017 18:00:12 -0600 +Subject: [PATCH 16/16] Fix: lrmd: always use most recent remote proxy + +Any working proxy is sufficient, but the newest connection is the most likely +to be working. We want to avoid using an old proxy that has failed but whose +TCP connection has not yet timed out. +--- + lrmd/ipc_proxy.c | 41 +++++++++++++---------------------------- + 1 file changed, 13 insertions(+), 28 deletions(-) + +diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c +index 5d6ab34..4d1ee01 100644 +--- a/lrmd/ipc_proxy.c ++++ b/lrmd/ipc_proxy.c +@@ -42,7 +42,7 @@ static qb_ipcs_service_t *crmd_ipcs = NULL; + static qb_ipcs_service_t *stonith_ipcs = NULL; + + /* ipc providers == crmd clients connecting from cluster nodes */ +-static GHashTable *ipc_providers = NULL; ++static GList *ipc_providers = NULL; + /* ipc clients == things like cibadmin, crm_resource, connecting locally */ + static GHashTable *ipc_clients = NULL; + +@@ -52,24 +52,14 @@ static GHashTable *ipc_clients = NULL; + * + * \return Pointer to a provider if one exists, NULL otherwise + * +- * \note Grab the first provider available; any provider will work, and usually +- * there will be only one. These are client connections originating from a +- * cluster node's crmd. ++ * \note Grab the first provider, which is the most recent connection. That way, ++ * if we haven't yet timed out an old, failed connection, we don't try to ++ * use it. + */ + crm_client_t * + ipc_proxy_get_provider() + { +- if (ipc_providers) { +- GHashTableIter iter; +- gpointer key = NULL; +- gpointer value = NULL; +- +- g_hash_table_iter_init(&iter, ipc_providers); +- if (g_hash_table_iter_next(&iter, &key, &value)) { +- return (crm_client_t*)value; +- } +- } +- return NULL; ++ return ipc_providers? 
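++ /* head of the list is the most recently prepended, i.e. newest, provider */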
(crm_client_t*) (ipc_providers->data) : NULL; + } + + static int32_t +@@ -378,10 +368,8 @@ static struct qb_ipcs_service_handlers cib_proxy_callbacks_rw = { + void + ipc_proxy_add_provider(crm_client_t *ipc_proxy) + { +- if (ipc_providers == NULL) { +- return; +- } +- g_hash_table_insert(ipc_providers, ipc_proxy->id, ipc_proxy); ++ // Prepending ensures the most recent connection is always first ++ ipc_providers = g_list_prepend(ipc_providers, ipc_proxy); + } + + void +@@ -393,11 +381,7 @@ ipc_proxy_remove_provider(crm_client_t *ipc_proxy) + GList *remove_these = NULL; + GListPtr gIter = NULL; + +- if (ipc_providers == NULL) { +- return; +- } +- +- g_hash_table_remove(ipc_providers, ipc_proxy->id); ++ ipc_providers = g_list_remove(ipc_providers, ipc_proxy); + + g_hash_table_iter_init(&iter, ipc_clients); + while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & ipc_client)) { +@@ -413,6 +397,8 @@ ipc_proxy_remove_provider(crm_client_t *ipc_proxy) + + for (gIter = remove_these; gIter != NULL; gIter = gIter->next) { + ipc_client = gIter->data; ++ ++ // Disconnection callback will free the client here + qb_ipcs_disconnect(ipc_client->ipcs); + } + +@@ -424,7 +410,6 @@ void + ipc_proxy_init(void) + { + ipc_clients = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); +- ipc_providers = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); + + cib_ipc_servers_init(&cib_ro, + &cib_rw, +@@ -446,10 +431,12 @@ void + ipc_proxy_cleanup(void) + { + if (ipc_providers) { +- g_hash_table_destroy(ipc_providers); ++ g_list_free(ipc_providers); ++ ipc_providers = NULL; + } + if (ipc_clients) { + g_hash_table_destroy(ipc_clients); ++ ipc_clients = NULL; + } + cib_ipc_servers_destroy(cib_ro, cib_rw, cib_shm); + qb_ipcs_destroy(attrd_ipcs); +@@ -458,6 +445,4 @@ ipc_proxy_cleanup(void) + cib_ro = NULL; + cib_rw = NULL; + cib_shm = NULL; +- ipc_providers = NULL; +- ipc_clients = NULL; + } +-- +1.8.3.1 + diff --git a/SOURCES/003-cleanup.patch b/SOURCES/003-cleanup.patch new file mode 100644 index 00000000..6ce9476c --- /dev/null +++ b/SOURCES/003-cleanup.patch @@ -0,0 +1,157 @@ +From c2d5c19a863f407a034a63f2877eb5faf7036d59 Mon Sep 17 00:00:00 2001 +From: "Gao,Yan" +Date: Fri, 8 Dec 2017 14:47:40 +0100 +Subject: [PATCH 1/2] Refactor: tools: crm_resource - Functionize cleaning up + resource failures + +--- + tools/crm_resource.c | 26 ++------------------------ + tools/crm_resource.h | 3 +++ + tools/crm_resource_runtime.c | 36 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 41 insertions(+), 24 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index f93f688..4ddcef4 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -1094,31 +1094,9 @@ main(int argc, char **argv) + + } else if (rsc_cmd == 'C' && just_errors) { + crmd_replies_needed = 0; +- for (xmlNode *xml_op = __xml_first_child(data_set.failed); xml_op != NULL; +- xml_op = __xml_next(xml_op)) { +- +- const char *node = crm_element_value(xml_op, XML_ATTR_UNAME); +- const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); +- const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); +- const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); +- +- if(resource_name == NULL) { +- continue; +- } else if(host_uname && safe_str_neq(host_uname, node)) { +- continue; +- } else if(rsc_id && safe_str_neq(rsc_id, resource_name)) { +- continue; +- } else if(operation && safe_str_neq(operation, task)) { +- continue; +- } else if(interval && 
safe_str_neq(interval, task_interval)) { +- continue; +- } + +- crm_debug("Erasing %s failure for %s (%s detected) on %s", +- task, rsc->id, resource_name, node); +- rc = cli_resource_delete(crmd_channel, node, rsc, task, +- task_interval, &data_set); +- } ++ rc = cli_resource_delete_failures(crmd_channel, host_uname, rsc, operation, ++ interval, &data_set); + + if(rsc && (rc == pcmk_ok) && (BE_QUIET == FALSE)) { + /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */ +diff --git a/tools/crm_resource.h b/tools/crm_resource.h +index 0b8dd2a..e28c9ef 100644 +--- a/tools/crm_resource.h ++++ b/tools/crm_resource.h +@@ -76,6 +76,9 @@ int cli_resource_search(resource_t *rsc, const char *requested_name, + int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, + resource_t *rsc, const char *operation, + const char *interval, pe_working_set_t *data_set); ++int cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname, ++ resource_t *rsc, const char *operation, ++ const char *interval, pe_working_set_t *data_set); + int cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib); + int cli_resource_move(resource_t *rsc, const char *rsc_id, + const char *host_name, cib_t *cib, +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index ffe4e5d..d250a07 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -655,6 +655,42 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, + return rc; + } + ++int ++cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname, ++ resource_t *rsc, const char *operation, ++ const char *interval, pe_working_set_t *data_set) ++{ ++ int rc = pcmk_ok; ++ ++ for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; ++ xml_op = __xml_next(xml_op)) { ++ ++ const char *node = crm_element_value(xml_op, XML_ATTR_UNAME); ++ const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); ++ const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); ++ const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); ++ ++ if(resource_name == NULL) { ++ continue; ++ } else if(host_uname && safe_str_neq(host_uname, node)) { ++ continue; ++ } else if(rsc->id && safe_str_neq(rsc->id, resource_name)) { ++ continue; ++ } else if(operation && safe_str_neq(operation, task)) { ++ continue; ++ } else if(interval && safe_str_neq(interval, task_interval)) { ++ continue; ++ } ++ ++ crm_debug("Erasing %s failure for %s (%s detected) on %s", ++ task, rsc->id, resource_name, node); ++ rc = cli_resource_delete(crmd_channel, node, rsc, task, ++ task_interval, data_set); ++ } ++ ++ return rc; ++} ++ + void + cli_resource_check(cib_t * cib_conn, resource_t *rsc) + { +-- +1.8.3.1 + + +From 170ec0afcddb01fcfb8c2e8c86bc0e53594a42f9 Mon Sep 17 00:00:00 2001 +From: "Gao,Yan" +Date: Fri, 8 Dec 2017 16:22:54 +0100 +Subject: [PATCH 2/2] Fix: tools: crm_resource --cleanup for non-primitive + resources + +--- + tools/crm_resource_runtime.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index d250a07..1048636 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -662,6 +662,24 @@ cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname, + { + int rc = pcmk_ok; + ++ if (rsc == NULL) { ++ return -ENXIO; ++ ++ } else if (rsc->children) { ++ GListPtr lpc = NULL; ++ ++ for (lpc 
= rsc->children; lpc != NULL; lpc = lpc->next) { ++ resource_t *child = (resource_t *) lpc->data; ++ ++ rc = cli_resource_delete_failures(crmd_channel, host_uname, child, operation, ++ interval, data_set); ++ if(rc != pcmk_ok) { ++ return rc; ++ } ++ } ++ return pcmk_ok; ++ } ++ + for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; + xml_op = __xml_next(xml_op)) { + +-- +1.8.3.1 + diff --git a/SOURCES/004-cleanup.patch b/SOURCES/004-cleanup.patch new file mode 100644 index 00000000..3ec7535a --- /dev/null +++ b/SOURCES/004-cleanup.patch @@ -0,0 +1,419 @@ +From 7a813755269f00d7b815e819636841af991762c0 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 11 Dec 2017 12:23:06 -0600 +Subject: [PATCH] Fix: tools: crm_resource --cleanup + +The new "failures only" mode of crm_resource --cleanup had multiple issues, +including not working without --resource specified, comparing a +user-provided interval string against a milliseconds interval, and +considering no interval specified as all intervals rather than 0 +but only when clearing LRM history entries. +--- + tools/crm_resource.c | 35 +++--- + tools/crm_resource.h | 9 +- + tools/crm_resource_runtime.c | 258 ++++++++++++++++++++++++++++++------------- + 3 files changed, 202 insertions(+), 100 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 4ddcef4..5152004 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -1092,14 +1092,20 @@ main(int argc, char **argv) + rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id, + prop_name, cib_conn, &data_set); + +- } else if (rsc_cmd == 'C' && just_errors) { ++ } else if ((rsc_cmd == 'C') && rsc) { ++ if (do_force == FALSE) { ++ rsc = uber_parent(rsc); ++ } + crmd_replies_needed = 0; + +- rc = cli_resource_delete_failures(crmd_channel, host_uname, rsc, operation, +- interval, &data_set); ++ crm_debug("%s of %s (%s requested) on %s", ++ (just_errors? "Clearing failures" : "Re-checking the state"), ++ rsc->id, rsc_id, (host_uname? 
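++ /* host_uname is NULL when no --node was given, meaning all hosts */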
host_uname : "all hosts")); ++ rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation, ++ interval, just_errors, &data_set); + +- if(rsc && (rc == pcmk_ok) && (BE_QUIET == FALSE)) { +- /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */ ++ if ((rc == pcmk_ok) && !BE_QUIET) { ++ // Show any reasons why resource might stay stopped + cli_resource_check(cib_conn, rsc); + } + +@@ -1107,22 +1113,9 @@ main(int argc, char **argv) + start_mainloop(); + } + +- } else if ((rsc_cmd == 'C') && rsc) { +- if(do_force == FALSE) { +- rsc = uber_parent(rsc); +- } +- +- crm_debug("Re-checking the state of %s (%s requested) on %s", +- rsc->id, rsc_id, host_uname); +- crmd_replies_needed = 0; +- rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation, +- interval, &data_set); +- +- if(rc == pcmk_ok && BE_QUIET == FALSE) { +- /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */ +- cli_resource_check(cib_conn, rsc); +- } +- ++ } else if (rsc_cmd == 'C' && just_errors) { ++ rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval, ++ &data_set); + if (rc == pcmk_ok) { + start_mainloop(); + } +diff --git a/tools/crm_resource.h b/tools/crm_resource.h +index e28c9ef..0ac51f2 100644 +--- a/tools/crm_resource.h ++++ b/tools/crm_resource.h +@@ -75,10 +75,11 @@ int cli_resource_search(resource_t *rsc, const char *requested_name, + pe_working_set_t *data_set); + int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, + resource_t *rsc, const char *operation, +- const char *interval, pe_working_set_t *data_set); +-int cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname, +- resource_t *rsc, const char *operation, +- const char *interval, pe_working_set_t *data_set); ++ const char *interval, bool just_failures, ++ pe_working_set_t *data_set); ++int cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name, ++ const char *operation, const char *interval, ++ pe_working_set_t *data_set); + int cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib); + int cli_resource_move(resource_t *rsc, const char *rsc_id, + const char *host_name, cib_t *cib, +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index 1048636..bdebb0b 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -532,15 +532,129 @@ rsc_fail_name(resource_t *rsc) + return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name); + } + ++static int ++clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname, ++ const char *rsc_id, pe_working_set_t *data_set) ++{ ++ int rc = pcmk_ok; ++ ++ /* Erase the resource's entire LRM history in the CIB, even if we're only ++ * clearing a single operation's fail count. If we erased only entries for a ++ * single operation, we might wind up with a wrong idea of the current ++ * resource state, and we might not re-probe the resource. 
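++ * Erasing the whole history instead leaves the resource with no recorded state at all, so a fresh probe gets scheduled.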
++ */ ++ rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id, ++ TRUE, data_set); ++ if (rc != pcmk_ok) { ++ return rc; ++ } ++ crmd_replies_needed++; ++ ++ crm_trace("Processing %d mainloop inputs", crmd_replies_needed); ++ while (g_main_context_iteration(NULL, FALSE)) { ++ crm_trace("Processed mainloop input, %d still remaining", ++ crmd_replies_needed); ++ } ++ ++ if (crmd_replies_needed < 0) { ++ crmd_replies_needed = 0; ++ } ++ return rc; ++} ++ ++static int ++clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name, ++ const char *rsc_id, const char *operation, ++ const char *interval, pe_working_set_t *data_set) ++{ ++ int rc = pcmk_ok; ++ const char *failed_value = NULL; ++ const char *interval_ms_str = NULL; ++ GHashTable *rscs = NULL; ++ GHashTableIter iter; ++ ++ /* Create a hash table to use as a set of resources to clean. This lets us ++ * clean each resource only once (per node) regardless of how many failed ++ * operations it has. ++ */ ++ rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); ++ ++ // Normalize interval to milliseconds for comparison to history entry ++ if (operation) { ++ interval_ms_str = crm_strdup_printf("%llu", crm_get_interval(interval)); ++ } ++ ++ for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; ++ xml_op = __xml_next(xml_op)) { ++ ++ // No resource specified means all resources match ++ failed_value = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); ++ if (rsc_id == NULL) { ++ rsc_id = failed_value; ++ } else if (safe_str_neq(rsc_id, failed_value)) { ++ continue; ++ } ++ ++ // Host name should always have been provided by this point ++ failed_value = crm_element_value(xml_op, XML_ATTR_UNAME); ++ if (safe_str_neq(node_name, failed_value)) { ++ continue; ++ } ++ ++ // No operation specified means all operations match ++ if (operation) { ++ failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK); ++ if (safe_str_neq(operation, failed_value)) { ++ continue; ++ } ++ ++ // Interval (if operation was specified) defaults to 0 (not all) ++ failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); ++ if (safe_str_neq(interval_ms_str, failed_value)) { ++ continue; ++ } ++ } ++ ++ g_hash_table_add(rscs, (gpointer) rsc_id); ++ } ++ ++ g_hash_table_iter_init(&iter, rscs); ++ while (g_hash_table_iter_next(&iter, (gpointer *) &rsc_id, NULL)) { ++ crm_debug("Erasing failures of %s on %s", rsc_id, node_name); ++ rc = clear_rsc_history(crmd_channel, node_name, rsc_id, data_set); ++ if (rc != pcmk_ok) { ++ return rc; ++ } ++ } ++ g_hash_table_destroy(rscs); ++ return rc; ++} ++ ++static int ++clear_rsc_fail_attrs(resource_t *rsc, const char *operation, ++ const char *interval, node_t *node) ++{ ++ int rc = pcmk_ok; ++ int attr_options = attrd_opt_none; ++ char *rsc_name = rsc_fail_name(rsc); ++ ++ if (is_remote_node(node)) { ++ attr_options |= attrd_opt_remote; ++ } ++ rc = attrd_clear_delegate(NULL, node->details->uname, rsc_name, operation, ++ interval, NULL, attr_options); ++ free(rsc_name); ++ return rc; ++} ++ + int + cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, + resource_t *rsc, const char *operation, +- const char *interval, pe_working_set_t *data_set) ++ const char *interval, bool just_failures, ++ pe_working_set_t *data_set) + { + int rc = pcmk_ok; + node_t *node = NULL; +- char *rsc_name = NULL; +- int attr_options = attrd_opt_none; + + if (rsc == NULL) { + return -ENXIO; +@@ -552,8 +666,8 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char 
*host_uname, + resource_t *child = (resource_t *) lpc->data; + + rc = cli_resource_delete(crmd_channel, host_uname, child, operation, +- interval, data_set); +- if(rc != pcmk_ok) { ++ interval, just_failures, data_set); ++ if (rc != pcmk_ok) { + return rc; + } + } +@@ -585,8 +699,13 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, + node = (node_t *) lpc->data; + + if (node->details->online) { +- cli_resource_delete(crmd_channel, node->details->uname, rsc, +- operation, interval, data_set); ++ rc = cli_resource_delete(crmd_channel, node->details->uname, ++ rsc, operation, interval, ++ just_failures, data_set); ++ } ++ if (rc != pcmk_ok) { ++ g_list_free(nodes); ++ return rc; + } + } + +@@ -611,102 +730,91 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, + if (crmd_channel == NULL) { + printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n", + rsc->id, host_uname); +- return rc; +- } ++ return pcmk_ok; ++ } + +- /* Erase the resource's entire LRM history in the CIB, even if we're only +- * clearing a single operation's fail count. If we erased only entries for a +- * single operation, we might wind up with a wrong idea of the current +- * resource state, and we might not re-probe the resource. +- */ +- rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id, +- TRUE, data_set); ++ rc = clear_rsc_fail_attrs(rsc, operation, interval, node); + if (rc != pcmk_ok) { +- printf("Unable to clean up %s history on %s: %s\n", +- rsc->id, host_uname, pcmk_strerror(rc)); ++ printf("Unable to clean up %s failures on %s: %s\n", ++ rsc->id, host_uname, pcmk_strerror(rc)); + return rc; + } +- crmd_replies_needed++; + +- crm_trace("Processing %d mainloop inputs", crmd_replies_needed); +- while(g_main_context_iteration(NULL, FALSE)) { +- crm_trace("Processed mainloop input, %d still remaining", +- crmd_replies_needed); +- } +- +- if(crmd_replies_needed < 0) { +- crmd_replies_needed = 0; +- } +- +- rsc_name = rsc_fail_name(rsc); +- if (is_remote_node(node)) { +- attr_options |= attrd_opt_remote; ++ if (just_failures) { ++ rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation, ++ interval, data_set); ++ } else { ++ rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set); + } +- rc = attrd_clear_delegate(NULL, host_uname, rsc_name, operation, interval, +- NULL, attr_options); + if (rc != pcmk_ok) { +- printf("Cleaned %s history on %s, but unable to clear failures: %s\n", ++ printf("Cleaned %s failures on %s, but unable to clean history: %s\n", + rsc->id, host_uname, pcmk_strerror(rc)); + } else { + printf("Cleaned up %s on %s\n", rsc->id, host_uname); + } +- free(rsc_name); +- + return rc; + } + + int +-cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname, +- resource_t *rsc, const char *operation, +- const char *interval, pe_working_set_t *data_set) ++cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name, ++ const char *operation, const char *interval, ++ pe_working_set_t *data_set) + { ++ int attr_options = attrd_opt_none; + int rc = pcmk_ok; ++ const char *display_name = node_name? 
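++ /* a NULL node_name means failures are cleaned on every node */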
node_name : "all nodes"; + +- if (rsc == NULL) { +- return -ENXIO; +- +- } else if (rsc->children) { +- GListPtr lpc = NULL; ++ if (crmd_channel == NULL) { ++ printf("Dry run: skipping clean-up of %s due to CIB_file\n", ++ display_name); ++ return pcmk_ok; ++ } ++ crmd_replies_needed = 0; + +- for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { +- resource_t *child = (resource_t *) lpc->data; ++ if (node_name) { ++ node_t *node = pe_find_node(data_set->nodes, node_name); + +- rc = cli_resource_delete_failures(crmd_channel, host_uname, child, operation, +- interval, data_set); +- if(rc != pcmk_ok) { +- return rc; +- } ++ if (node == NULL) { ++ CMD_ERR("Unknown node: %s", node_name); ++ return -ENXIO; ++ } ++ if (is_remote_node(node)) { ++ attr_options |= attrd_opt_remote; + } +- return pcmk_ok; + } + +- for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; +- xml_op = __xml_next(xml_op)) { +- +- const char *node = crm_element_value(xml_op, XML_ATTR_UNAME); +- const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); +- const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); +- const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); ++ rc = attrd_clear_delegate(NULL, node_name, NULL, operation, interval, ++ NULL, attr_options); ++ if (rc != pcmk_ok) { ++ printf("Unable to clean up all failures on %s: %s\n", ++ display_name, pcmk_strerror(rc)); ++ return rc; ++ } + +- if(resource_name == NULL) { +- continue; +- } else if(host_uname && safe_str_neq(host_uname, node)) { +- continue; +- } else if(rsc->id && safe_str_neq(rsc->id, resource_name)) { +- continue; +- } else if(operation && safe_str_neq(operation, task)) { +- continue; +- } else if(interval && safe_str_neq(interval, task_interval)) { +- continue; ++ if (node_name) { ++ rc = clear_rsc_failures(crmd_channel, node_name, NULL, ++ operation, interval, data_set); ++ if (rc != pcmk_ok) { ++ printf("Cleaned all resource failures on %s, but unable to clean history: %s\n", ++ node_name, pcmk_strerror(rc)); ++ return rc; + } ++ } else { ++ for (GList *iter = data_set->nodes; iter; iter = iter->next) { ++ pe_node_t *node = (pe_node_t *) iter->data; + +- crm_debug("Erasing %s failure for %s (%s detected) on %s", +- task, rsc->id, resource_name, node); +- rc = cli_resource_delete(crmd_channel, node, rsc, task, +- task_interval, data_set); ++ rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL, ++ operation, interval, data_set); ++ if (rc != pcmk_ok) { ++ printf("Cleaned all resource failures on all nodes, but unable to clean history on %s: %s\n", ++ node->details->uname, pcmk_strerror(rc)); ++ return rc; ++ } ++ } + } + +- return rc; ++ printf("Cleaned up all resources on %s\n", display_name); ++ return pcmk_ok; + } + + void +-- +1.8.3.1 + diff --git a/SOURCES/005-cleanup.patch b/SOURCES/005-cleanup.patch new file mode 100644 index 00000000..8e568b1a --- /dev/null +++ b/SOURCES/005-cleanup.patch @@ -0,0 +1,62 @@ +From a2305469012b5fe3713427412c12459085ed61a1 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 12 Dec 2017 10:02:22 -0600 +Subject: [PATCH] Fix: tools: crm_resource --cleanup with no resource specified + +7a813755 failed to completely fix --cleanup without --resource +--- + tools/crm_resource_runtime.c | 20 ++++++++++++-------- + 1 file changed, 12 insertions(+), 8 deletions(-) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index bdebb0b..79f8b98 100644 +--- a/tools/crm_resource_runtime.c ++++ 
b/tools/crm_resource_runtime.c +@@ -569,6 +569,7 @@ clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name, + { + int rc = pcmk_ok; + const char *failed_value = NULL; ++ const char *failed_id = NULL; + const char *interval_ms_str = NULL; + GHashTable *rscs = NULL; + GHashTableIter iter; +@@ -587,11 +588,14 @@ clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name, + for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; + xml_op = __xml_next(xml_op)) { + ++ failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); ++ if (failed_id == NULL) { ++ // Malformed history entry, should never happen ++ continue; ++ } ++ + // No resource specified means all resources match +- failed_value = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); +- if (rsc_id == NULL) { +- rsc_id = failed_value; +- } else if (safe_str_neq(rsc_id, failed_value)) { ++ if (rsc_id && safe_str_neq(rsc_id, failed_id)) { + continue; + } + +@@ -615,13 +619,13 @@ clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name, + } + } + +- g_hash_table_add(rscs, (gpointer) rsc_id); ++ g_hash_table_add(rscs, (gpointer) failed_id); + } + + g_hash_table_iter_init(&iter, rscs); +- while (g_hash_table_iter_next(&iter, (gpointer *) &rsc_id, NULL)) { +- crm_debug("Erasing failures of %s on %s", rsc_id, node_name); +- rc = clear_rsc_history(crmd_channel, node_name, rsc_id, data_set); ++ while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { ++ crm_debug("Erasing failures of %s on %s", failed_id, node_name); ++ rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set); + if (rc != pcmk_ok) { + return rc; + } +-- +1.8.3.1 + diff --git a/SOURCES/006-leaks.patch b/SOURCES/006-leaks.patch new file mode 100644 index 00000000..ab9fb370 --- /dev/null +++ b/SOURCES/006-leaks.patch @@ -0,0 +1,296 @@ +From 5042a3b19a2f2bfa3d09b4d1029f53e6b674918b Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 14 Dec 2017 09:16:47 -0600 +Subject: [PATCH 1/5] Test: CTS: remove dead code + +makes static analysis happy +--- + cts/CTSaudits.py | 1 - + cts/environment.py | 1 - + cts/remote.py | 5 +---- + cts/watcher.py | 6 +++--- + 4 files changed, 4 insertions(+), 9 deletions(-) + +diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py +index aff897f..d9fbeb9 100755 +--- a/cts/CTSaudits.py ++++ b/cts/CTSaudits.py +@@ -190,7 +190,6 @@ class DiskAudit(ClusterAudit): + + if answer and answer == "n": + raise ValueError("Disk full on %s" % (node)) +- ret = 0 + + elif remaining_mb < 100 or used_percent > 90: + self.CM.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node)) +diff --git a/cts/environment.py b/cts/environment.py +index 75a18c8..6c4831c 100644 +--- a/cts/environment.py ++++ b/cts/environment.py +@@ -182,7 +182,6 @@ class Environment: + + else: + raise ValueError("Unknown stack: "+name) +- sys.exit(1) + + def get_stack_short(self): + # Create the Cluster Manager object +diff --git a/cts/remote.py b/cts/remote.py +index 8c36918..7cef40e 100644 +--- a/cts/remote.py ++++ b/cts/remote.py +@@ -220,10 +220,7 @@ class RemoteExec: + + if not silent: + for err in errors: +- if stdout == 3: +- result.append("error: "+err) +- else: +- self.debug("cmd: stderr: %s" % err) ++ self.debug("cmd: stderr: %s" % err) + + if stdout == 0: + if not silent and result: +diff --git a/cts/watcher.py b/cts/watcher.py +index de032f7..42685ad 100644 +--- a/cts/watcher.py ++++ b/cts/watcher.py +@@ -337,19 +337,19 @@ class LogWatcher(RemoteExec): + self.kind = kind + else: + raise +- self.kind = 
self.Env["LogWatcher"] ++ #self.kind = self.Env["LogWatcher"] + + if log: + self.filename = log + else: + raise +- self.filename = self.Env["LogFileName"] ++ #self.filename = self.Env["LogFileName"] + + if hosts: + self.hosts = hosts + else: + raise +- self.hosts = self.Env["nodes"] ++ #self.hosts = self.Env["nodes"] + + if trace_lw: + self.debug_level = 3 +-- +1.8.3.1 + + +From 570929eba229558b1a6900ffc54e4d5ee4150f74 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 14 Dec 2017 09:23:03 -0600 +Subject: [PATCH 2/5] Refactor: pengine: validate more function arguments + +not an issue with current code, but makes static analysis happy +--- + pengine/clone.c | 3 ++- + pengine/utilization.c | 1 + + 2 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/pengine/clone.c b/pengine/clone.c +index 99bac7e..e81dbc8 100644 +--- a/pengine/clone.c ++++ b/pengine/clone.c +@@ -955,6 +955,7 @@ is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e + node_t *node = NULL; + enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); + ++ CRM_CHECK(child_rsc && local_node, return FALSE); + if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { + /* We only want instances that haven't failed */ + node = child_rsc->fns->location(child_rsc, NULL, current); +@@ -965,7 +966,7 @@ is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e + return FALSE; + } + +- if (node && local_node && node->details == local_node->details) { ++ if (node && (node->details == local_node->details)) { + return TRUE; + + } else if (node) { +diff --git a/pengine/utilization.c b/pengine/utilization.c +index f42c85d..05f8d78 100644 +--- a/pengine/utilization.c ++++ b/pengine/utilization.c +@@ -341,6 +341,7 @@ process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_ + { + int alloc_details = scores_log_level + 1; + ++ CRM_CHECK(rsc && prefer && data_set, return); + if (safe_str_neq(data_set->placement_strategy, "default")) { + GHashTableIter iter; + GListPtr colocated_rscs = NULL; +-- +1.8.3.1 + + +From db2fdc9a452fef11d397e25202fde8ba1bad4cd3 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 14 Dec 2017 10:36:20 -0600 +Subject: [PATCH 3/5] Low: libcrmservice: avoid memory leak on DBus error + +--- + lib/services/dbus.c | 47 +++++++++++++++++++++++++++++++++++++---------- + 1 file changed, 37 insertions(+), 10 deletions(-) + +diff --git a/lib/services/dbus.c b/lib/services/dbus.c +index fb3e867..58df927 100644 +--- a/lib/services/dbus.c ++++ b/lib/services/dbus.c +@@ -23,6 +23,15 @@ struct db_getall_data { + void (*callback)(const char *name, const char *value, void *userdata); + }; + ++static void ++free_db_getall_data(struct db_getall_data *data) ++{ ++ free(data->target); ++ free(data->object); ++ free(data->name); ++ free(data); ++} ++ + DBusConnection * + pcmk_dbus_connect(void) + { +@@ -196,6 +205,20 @@ pcmk_dbus_send_recv(DBusMessage *msg, DBusConnection *connection, + return reply; + } + ++/*! ++ * \internal ++ * \brief Send a DBus message with a callback for the reply ++ * ++ * \param[in] msg DBus message to send ++ * \param[in,out] connection DBus connection to send on ++ * \param[in] done Function to call when pending call completes ++ * \param[in] user_data Data to pass to done callback ++ * ++ * \return Handle for reply on success, NULL on error ++ * \note The caller can assume that the done callback is called always and ++ * only when the return value is non-NULL. 
(This allows the caller to ++ * know where it should free dynamically allocated user_data.) ++ */ + DBusPendingCall * + pcmk_dbus_send(DBusMessage *msg, DBusConnection *connection, + void(*done)(DBusPendingCall *pending, void *user_data), +@@ -359,11 +382,7 @@ pcmk_dbus_lookup_result(DBusMessage *reply, struct db_getall_data *data) + } + + cleanup: +- free(data->target); +- free(data->object); +- free(data->name); +- free(data); +- ++ free_db_getall_data(data); + return output; + } + +@@ -424,11 +443,19 @@ pcmk_dbus_get_property(DBusConnection *connection, const char *target, + query_data->name = strdup(name); + } + +- if(query_data->callback) { +- DBusPendingCall* _pending; +- _pending = pcmk_dbus_send(msg, connection, pcmk_dbus_lookup_cb, query_data, timeout); +- if (pending != NULL) { +- *pending = _pending; ++ if (query_data->callback) { ++ DBusPendingCall *local_pending; ++ ++ local_pending = pcmk_dbus_send(msg, connection, pcmk_dbus_lookup_cb, ++ query_data, timeout); ++ if (local_pending == NULL) { ++ // pcmk_dbus_lookup_cb() was not called in this case ++ free_db_getall_data(query_data); ++ query_data = NULL; ++ } ++ ++ if (pending) { ++ *pending = local_pending; + } + + } else { +-- +1.8.3.1 + + +From 4a774710ec7269ec3a1427ae09fc6ca435c66e92 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 14 Dec 2017 12:44:04 -0600 +Subject: [PATCH 4/5] Build: systemd unit files: restore DBus dependency + +06e2e26 removed the unit files' DBus dependency on the advice of a +systemd developer, but it is necessary +--- + lrmd/pacemaker_remote.service.in | 3 +++ + mcp/pacemaker.service.in | 4 ++++ + 2 files changed, 7 insertions(+) + +diff --git a/lrmd/pacemaker_remote.service.in b/lrmd/pacemaker_remote.service.in +index d5717f6..1c596e1 100644 +--- a/lrmd/pacemaker_remote.service.in ++++ b/lrmd/pacemaker_remote.service.in +@@ -2,8 +2,11 @@ + Description=Pacemaker Remote Service + Documentation=man:pacemaker_remoted http://clusterlabs.org/doc/en-US/Pacemaker/1.1-pcs/html/Pacemaker_Remote/index.html + ++# See main pacemaker unit file for descriptions of why these are needed + After=network.target + After=time-sync.target ++After=dbus.service ++Wants=dbus.service + After=resource-agents-deps.target + Wants=resource-agents-deps.target + After=syslog.service +diff --git a/mcp/pacemaker.service.in b/mcp/pacemaker.service.in +index 516de0f..e532ea2 100644 +--- a/mcp/pacemaker.service.in ++++ b/mcp/pacemaker.service.in +@@ -14,6 +14,10 @@ After=network.target + # and failure timestamps, so wait until it's done. + After=time-sync.target + ++# Managing systemd resources requires DBus. ++After=dbus.service ++Wants=dbus.service ++ + # Some OCF resources may have dependencies that aren't managed by the cluster; + # these must be started before Pacemaker and stopped after it. 
The + # resource-agents package provides this target, which lets system adminstrators +-- +1.8.3.1 + + +From 69de188a7263ba66afa0e8a3a46a64f07a7facca Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 14 Dec 2017 16:05:12 -0600 +Subject: [PATCH 5/5] Low: attrd: avoid small memory leak at start-up + +introduced by 3518544 +--- + attrd/commands.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/attrd/commands.c b/attrd/commands.c +index 0a20b26..20bd82f 100644 +--- a/attrd/commands.c ++++ b/attrd/commands.c +@@ -539,6 +539,7 @@ attrd_broadcast_protocol() + crm_xml_add(attrd_op, F_ATTRD_VALUE, ATTRD_PROTOCOL_VERSION); + crm_xml_add_int(attrd_op, F_ATTRD_IS_PRIVATE, 1); + attrd_client_update(attrd_op); ++ free_xml(attrd_op); + } + + void +-- +1.8.3.1 + diff --git a/SOURCES/007-bundles.patch b/SOURCES/007-bundles.patch new file mode 100644 index 00000000..28ea0ef9 --- /dev/null +++ b/SOURCES/007-bundles.patch @@ -0,0 +1,92 @@ +From 2ce5fc46463ff7b9a5a2c68602d8c5b35a7c37d7 Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Tue, 16 Jan 2018 19:05:31 +1100 +Subject: [PATCH 1/2] Bug rhbz#1519812 - Prevent notify actions from causing + --wait to hang + +--- + tools/crm_resource_runtime.c | 21 ++++++++++++++++----- + 1 file changed, 16 insertions(+), 5 deletions(-) + +diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c +index 22bdebf..189d1b3 100644 +--- a/tools/crm_resource_runtime.c ++++ b/tools/crm_resource_runtime.c +@@ -1343,10 +1343,19 @@ done: + return rc; + } + +-#define action_is_pending(action) \ +- ((is_set((action)->flags, pe_action_optional) == FALSE) \ +- && (is_set((action)->flags, pe_action_runnable) == TRUE) \ +- && (is_set((action)->flags, pe_action_pseudo) == FALSE)) ++static inline int action_is_pending(action_t *action) ++{ ++ if(is_set(action->flags, pe_action_optional)) { ++ return FALSE; ++ } else if(is_set(action->flags, pe_action_runnable) == FALSE) { ++ return FALSE; ++ } else if(is_set(action->flags, pe_action_pseudo)) { ++ return FALSE; ++ } else if(safe_str_eq("notify", action->task)) { ++ return FALSE; ++ } ++ return TRUE; ++} + + /*! 
+ * \internal +@@ -1362,7 +1371,9 @@ actions_are_pending(GListPtr actions) + GListPtr action; + + for (action = actions; action != NULL; action = action->next) { +- if (action_is_pending((action_t *) action->data)) { ++ action_t *a = (action_t *)action->data; ++ if (action_is_pending(a)) { ++ crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags); + return TRUE; + } + } +-- +1.8.3.1 + + +From ef15ea4f687e7f9ba1f8a99548ee1e0bf9d4b50a Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Mon, 22 Jan 2018 21:18:46 +1100 +Subject: [PATCH 2/2] Fix: rhbz#1527072 - Correctly observe colocation + constraints with bundles in the Master role + +--- + pengine/container.c | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +diff --git a/pengine/container.c b/pengine/container.c +index f5d916c..15d094d 100644 +--- a/pengine/container.c ++++ b/pengine/container.c +@@ -486,10 +486,18 @@ container_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc, rsc_colocatio + } else { + node_t *chosen = tuple->docker->fns->location(tuple->docker, NULL, FALSE); + +- if (chosen != NULL && is_set_recursive(tuple->docker, pe_rsc_block, TRUE) == FALSE) { +- pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); +- allocated_rhs = g_list_prepend(allocated_rhs, chosen); ++ if (chosen == NULL || is_set_recursive(tuple->docker, pe_rsc_block, TRUE)) { ++ continue; ++ } ++ if(constraint->role_rh >= RSC_ROLE_MASTER && tuple->child == NULL) { ++ continue; + } ++ if(constraint->role_rh >= RSC_ROLE_MASTER && tuple->child->next_role < RSC_ROLE_MASTER) { ++ continue; ++ } ++ ++ pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); ++ allocated_rhs = g_list_prepend(allocated_rhs, chosen); + } + } + +-- +1.8.3.1 + diff --git a/SOURCES/008-quorum.patch b/SOURCES/008-quorum.patch new file mode 100644 index 00000000..0d2deced --- /dev/null +++ b/SOURCES/008-quorum.patch @@ -0,0 +1,145 @@ +From 7c322f4b9a7f36eba1d3ca74d7dd8fe1093ca7bd Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 22 Jan 2018 11:38:22 -0600 +Subject: [PATCH] Low: crmd: quorum gain should always cause new transition + +0b689055 aborted the transition on quorum loss, but quorum can also be acquired +without triggering a new transition, if corosync gives quorum without a node +joining (e.g. forced via corosync-cmapctl, or perhaps via heuristics). + +This aborts the transition when quorum is gained, but only after a 5-second +delay, if the transition has not been aborted in that time. This avoids an +unnecessary abort in the vast majority of cases where an abort is already done, +and it allows some time for all nodes to connect when quorum is gained, rather +than immediately fencing remaining unseen nodes. +--- + crmd/membership.c | 22 +++++++++++++++++----- + crmd/te_utils.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++-- + crmd/tengine.h | 2 ++ + 3 files changed, 65 insertions(+), 7 deletions(-) + +diff --git a/crmd/membership.c b/crmd/membership.c +index c36dbed..4f2fa8a 100644 +--- a/crmd/membership.c ++++ b/crmd/membership.c +@@ -438,12 +438,24 @@ crm_update_quorum(gboolean quorum, gboolean force_update) + fsa_register_cib_callback(call_id, FALSE, NULL, cib_quorum_update_complete); + free_xml(update); + +- /* If a node not running any resources is cleanly shut down and drops us +- * below quorum, we won't necessarily abort the transition, so abort it +- * here to be safe. 
++ /* Quorum changes usually cause a new transition via other activity: ++ * quorum gained via a node joining will abort via the node join, ++ * and quorum lost via a node leaving will usually abort via resource ++ * activity and/or fencing. ++ * ++ * However, it is possible that nothing else causes a transition (e.g. ++ * someone forces quorum via corosync-cmaptcl, or quorum is lost due to ++ * a node in standby shutting down cleanly), so here ensure a new ++ * transition is triggered. + */ +- if (quorum == FALSE) { +- abort_transition(INFINITY, tg_restart, "Quorum loss", NULL); ++ if (quorum) { ++ /* If quorum was gained, abort after a short delay, in case multiple ++ * nodes are joining around the same time, so the one that brings us ++ * to quorum doesn't cause all the remaining ones to be fenced. ++ */ ++ abort_after_delay(INFINITY, tg_restart, "Quorum gained", 5000); ++ } else { ++ abort_transition(INFINITY, tg_restart, "Quorum lost", NULL); + } + } + fsa_has_quorum = quorum; +diff --git a/crmd/te_utils.c b/crmd/te_utils.c +index dab02d3..8d105dc 100644 +--- a/crmd/te_utils.c ++++ b/crmd/te_utils.c +@@ -530,6 +530,46 @@ trigger_graph_processing(const char *fn, int line) + mainloop_set_trigger(transition_trigger); + } + ++static struct abort_timer_s { ++ bool aborted; ++ guint id; ++ int priority; ++ enum transition_action action; ++ const char *text; ++} abort_timer = { 0, }; ++ ++static gboolean ++abort_timer_popped(gpointer data) ++{ ++ if (abort_timer.aborted == FALSE) { ++ abort_transition(abort_timer.priority, abort_timer.action, ++ abort_timer.text, NULL); ++ } ++ abort_timer.id = 0; ++ return FALSE; // do not immediately reschedule timer ++} ++ ++/*! ++ * \internal ++ * \brief Abort transition after delay, if not already aborted in that time ++ * ++ * \param[in] abort_text Must be literal string ++ */ ++void ++abort_after_delay(int abort_priority, enum transition_action abort_action, ++ const char *abort_text, guint delay_ms) ++{ ++ if (abort_timer.id) { ++ // Timer already in progress, stop and reschedule ++ g_source_remove(abort_timer.id); ++ } ++ abort_timer.aborted = FALSE; ++ abort_timer.priority = abort_priority; ++ abort_timer.action = abort_action; ++ abort_timer.text = abort_text; ++ abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, NULL); ++} ++ + void + abort_transition_graph(int abort_priority, enum transition_action abort_action, + const char *abort_text, xmlNode * reason, const char *fn, int line) +@@ -557,6 +597,8 @@ abort_transition_graph(int abort_priority, enum transition_action abort_action, + break; + } + ++ abort_timer.aborted = TRUE; ++ + /* Make sure any queued calculations are discarded ASAP */ + free(fsa_pe_ref); + fsa_pe_ref = NULL; +@@ -660,10 +702,12 @@ abort_transition_graph(int abort_priority, enum transition_action abort_action, + (transition_graph->complete? "true" : "false")); + + } else { ++ const char *id = ID(reason); ++ + do_crm_log(level, "Transition aborted by %s.%s '%s': %s " + CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", +- TYPE(reason), ID(reason), (op? op : "change"), abort_text, +- add[0], add[1], add[2], fn, line, path, ++ TYPE(reason), (id? id : ""), (op? op : "change"), ++ abort_text, add[0], add[1], add[2], fn, line, path, + (transition_graph->complete? 
"true" : "false")); + } + } +diff --git a/crmd/tengine.h b/crmd/tengine.h +index 7205c16..6a75a08 100644 +--- a/crmd/tengine.h ++++ b/crmd/tengine.h +@@ -59,6 +59,8 @@ extern void notify_crmd(crm_graph_t * graph); + # include + + extern void trigger_graph_processing(const char *fn, int line); ++void abort_after_delay(int abort_priority, enum transition_action abort_action, ++ const char *abort_text, guint delay_ms); + extern void abort_transition_graph(int abort_priority, enum transition_action abort_action, + const char *abort_text, xmlNode * reason, const char *fn, + int line); +-- +1.8.3.1 + diff --git a/SOURCES/009-crm_resource.patch b/SOURCES/009-crm_resource.patch new file mode 100644 index 00000000..9a877e5d --- /dev/null +++ b/SOURCES/009-crm_resource.patch @@ -0,0 +1,97 @@ +From 30eb9a980db152f6c803a35d3b261a563ad4ee75 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 24 Jan 2018 10:51:34 -0600 +Subject: [PATCH 1/2] Low: tools: crm_resource --refresh should ignore + --operation and --interval + +It already did when a resource was not specified. +Also update help text to clarify cleanup vs refresh. +--- + tools/crm_resource.c | 26 ++++++++++++++++----------- + 1 file changed, 16 insertions(+), 10 deletions(-) + +diff --git a/tools/crm_resource.c b/tools/crm_resource.c +index 3fbc6e1..d007668 100644 +--- a/tools/crm_resource.c ++++ b/tools/crm_resource.c +@@ -212,14 +212,16 @@ static struct crm_option long_options[] = { + }, + { + "cleanup", no_argument, NULL, 'C', +- "\t\tDelete failed operations from a resource's history allowing its current state to be rechecked.\n" ++ "\t\tIf resource has any past failures, clear its history and fail count.\n" + "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" ++ "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n" ++ "\t\t\t\tto allow current state to be rechecked.\n" + }, + { + "refresh", no_argument, NULL, 'R', + "\t\tDelete resource's history (including failures) so its current state is rechecked.\n" +- "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" +- "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be cleaned" ++ "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n" ++ "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed." 
+ }, + { + "set-parameter", required_argument, NULL, 'p', +@@ -438,7 +440,6 @@ main(int argc, char **argv) + bool require_resource = TRUE; /* whether command requires that resource be specified */ + bool require_dataset = TRUE; /* whether command requires populated dataset instance */ + bool require_crmd = FALSE; /* whether command requires connection to CRMd */ +- bool just_errors = TRUE; /* whether cleanup command deletes all history or just errors */ + + int rc = pcmk_ok; + int is_ocf_rc = 0; +@@ -630,8 +631,7 @@ main(int argc, char **argv) + if (cib_file == NULL) { + require_crmd = TRUE; + } +- just_errors = FALSE; +- rsc_cmd = 'C'; ++ rsc_cmd = 'R'; + find_flags = pe_find_renamed|pe_find_anon; + break; + +@@ -641,7 +641,6 @@ main(int argc, char **argv) + if (cib_file == NULL) { + require_crmd = TRUE; + } +- just_errors = TRUE; + rsc_cmd = 'C'; + find_flags = pe_find_renamed|pe_find_anon; + break; +@@ -1092,7 +1091,14 @@ main(int argc, char **argv) + rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id, + prop_name, cib_conn, &data_set); + +- } else if ((rsc_cmd == 'C') && rsc) { ++ } else if (((rsc_cmd == 'C') || (rsc_cmd == 'R')) && rsc) { ++ bool just_errors = TRUE; ++ ++ if (rsc_cmd == 'R') { ++ just_errors = FALSE; ++ operation = NULL; ++ interval = 0; ++ } + if (do_force == FALSE) { + rsc = uber_parent(rsc); + } +@@ -1113,14 +1119,14 @@ main(int argc, char **argv) + start_mainloop(); + } + +- } else if (rsc_cmd == 'C' && just_errors) { ++ } else if (rsc_cmd == 'C') { + rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval, + &data_set); + if (rc == pcmk_ok) { + start_mainloop(); + } + +- } else if (rsc_cmd == 'C') { ++ } else if (rsc_cmd == 'R') { + #if HAVE_ATOMIC_ATTRD + const char *router_node = host_uname; + xmlNode *msg_data = NULL; +-- +1.8.3.1 diff --git a/SOURCES/010-crm_master.patch b/SOURCES/010-crm_master.patch new file mode 100644 index 00000000..61d50836 --- /dev/null +++ b/SOURCES/010-crm_master.patch @@ -0,0 +1,34 @@ +From 18572d4e1e84c9d1f293b9a3082190133367154e Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 26 Jan 2018 12:31:09 -0600 +Subject: [PATCH] Fix: tools: crm_master should always work on node attribute + +Before ccbdb2a, crm_master would always set --node, thus ensuring crm_attribute +would treat the value as a node attribute. That commit removed that so that +crm_attribute could determine the local node name properly, but that introduced +an issue where the master value would be set as a cluster property instead of a +node attribute if --lifetime (or --node) was not set explicitly. + +This fixes it by setting the default value of --lifetime explicitly. +--- + tools/crm_master | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/tools/crm_master b/tools/crm_master +index 7e31cea..f4a0772 100755 +--- a/tools/crm_master ++++ b/tools/crm_master +@@ -8,6 +8,10 @@ if [ $? != 0 ] ; then echo "crm_master - A convenience wrapper for crm_attribute + # Note the quotes around `$TEMP': they are essential! + eval set -- "$TEMP" + ++# Explicitly set the (usual default) lifetime, so the attribute gets set as a ++# node attribute and not a cluster property. 
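++# (Otherwise crm_attribute, given neither --lifetime nor --node, would store the value as a cluster property instead of a node attribute.)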
++options="--lifetime forever" ++ + while true ; do + case "$1" in + -N|--node|-U|--uname) options="$options $1 $2"; shift; shift;; +-- +1.8.3.1 + diff --git a/SOURCES/lrmd-protocol-version.patch b/SOURCES/lrmd-protocol-version.patch new file mode 100644 index 00000000..2a555e57 --- /dev/null +++ b/SOURCES/lrmd-protocol-version.patch @@ -0,0 +1,28 @@ +From 8c497bc794e1e6a3ed188a548da771d768cef8f1 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 26 Oct 2016 11:18:17 -0500 +Subject: [PATCH] Fix: lrmd: undo unnecessary LRMD protocol version change + +The change breaks rolling upgrades in a cluster with Pacemaker Remote nodes, +and was never necessary. This introduces a divergence from upstream that +will need to be reconciled in the future. +--- + include/crm/lrmd.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h +index 446b39c..a099315 100644 +--- a/include/crm/lrmd.h ++++ b/include/crm/lrmd.h +@@ -38,7 +38,7 @@ typedef struct lrmd_key_value_s { + /* This should be bumped every time there is an incompatible change that + * prevents older clients from connecting to this version of the server. + */ +-#define LRMD_PROTOCOL_VERSION "1.1" ++#define LRMD_PROTOCOL_VERSION "1.0" + + /* This is the version that the client version will actually be compared + * against. This should be identical to LRMD_PROTOCOL_VERSION. However, we +-- +1.8.3.1 + diff --git a/SOURCES/rhbz-url.patch b/SOURCES/rhbz-url.patch new file mode 100644 index 00000000..1c09cd2b --- /dev/null +++ b/SOURCES/rhbz-url.patch @@ -0,0 +1,25 @@ +From 9b74fb4d667cf187c1c80aeb39ff3b3c12846421 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 18 Apr 2017 14:17:38 -0500 +Subject: [PATCH] Low: tools: show Red Hat bugzilla URL when using crm_report + +--- + tools/crm_report.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/crm_report.in b/tools/crm_report.in +index 26050a7..4715155 100755 +--- a/tools/crm_report.in ++++ b/tools/crm_report.in +@@ -222,7 +222,7 @@ EOF + log "Collected results are available in $fname" + log " " + log "Please create a bug entry at" +- log " http://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker" ++ log " https://bugzilla.redhat.com/" + log "Include a description of your problem and attach this tarball" + log " " + log "Thank you for taking time to create this report." +-- +1.8.3.1 + diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec new file mode 100644 index 00000000..8df165d4 --- /dev/null +++ b/SPECS/pacemaker.spec @@ -0,0 +1,2011 @@ +# Globals and defines to control package behavior (configure these as desired) + +## User and group to use for nonprivileged services +%global uname hacluster +%global gname haclient + +## Where to install Pacemaker documentation +%global pcmk_docdir %{_docdir}/%{name} + +## GitHub entity that distributes source (for ease of using a fork) +%global github_owner ClusterLabs + +## Upstream pacemaker version, and its package version (specversion +## can be incremented to build packages reliably considered "newer" +## than previously built packages with the same pcmkversion) +%global pcmkversion 1.1.18 +%global specversion 11 + +## Upstream commit (or git tag, such as "Pacemaker-" plus the +## {pcmkversion} macro for an official release) to use for this package +%global commit 2b07d5c5a908998891c3317faa30328c108d3a91 +## Since git v2.11, the extent of abbreviation is autoscaled by default +## (used to be constant of 7), so we need to convey it for non-tags, too. 
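+## (e.g. a "Pacemaker-1.1.18" tag shortens to "1.1.18", while the bare hash above shortens to its first 7 characters, "2b07d5c")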
+%global commit_abbrev 7 + + +# Define globals for convenient use later + +## Workaround to use parentheses in other globals +%global lparen ( +%global rparen ) + +## Short version of git commit +%define shortcommit %(c=%{commit}; case ${c} in + Pacemaker-*%{rparen} echo ${c:10};; + *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) + +## Whether this is a tagged release +%define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) + +## Whether this is a release candidate (in case of a tagged release) +%define pre_release %([ "%{tag_release}" -eq 0 ] || { + case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; + esac; }; echo $?) + +## Whether this is a development branch +%define post_release %([ %{commit} = Pacemaker-%{shortcommit} ]; echo $?) + +## Turn off auto-compilation of python files outside site-packages directory, +## so that the -libs-devel package is multilib-compliant (no *.py[co] files) +%global __os_install_post %(echo '%{__os_install_post}' | { + sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) + +## Heuristic used to infer bleeding-edge deployments that are +## less likely to have working versions of the documentation tools +%define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) + +## Corosync version +%define cs_version %(pkg-config corosync --modversion 2>/dev/null | awk -F . '{print $1}') + +## Where to install python site libraries (currently, this uses the unversioned +## python_sitearch macro to get the default system python, but at some point, +## we should explicitly choose python2_sitearch or python3_sitearch -- or both) +%define py_site %{?python_sitearch}%{!?python_sitearch:%( + python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} + +## Whether this platform defaults to using CMAN +%define cman_native (0%{?el6} || (0%{?fedora} > 0 && 0%{?fedora} < 17)) + +## Whether this platform defaults to using systemd as an init system +## (needs to be evaluated prior to BuildRequires being enumerated and +## installed as it's intended to conditionally select some of these, and +## for that there are only few indicators with varying reliability: +## - presence of systemd-defined macros (when building in a full-fledged +## environment, which is not the case with ordinary mock-based builds) +## - systemd-aware rpm as manifested with the presence of particular +## macro (rpm itself will trivially always be present when building) +## - existence of /usr/lib/os-release file, which is something heavily +## propagated by systemd project +## - when not good enough, there's always a possibility to check +## particular distro-specific macros (incl. version comparison) +%define systemd_native (%{?_unitdir:1}%{?!_unitdir:0}%{nil \ + } || %{?__transaction_systemd_inhibit:1}%{?!__transaction_systemd_inhibit:0}%{nil \ + } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) + +## Upstream commit to use for nagios-agents-metadata package +%global nagios_hash 105ab8a + + +# Definitions for backward compatibility with older RPM versions + +## Ensure the license macro behaves consistently (older RPM will otherwise +## overwrite it once it encounters "License:"). 
Courtesy Jason Tibbitts: +## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 +%if !%{defined _licensedir} +%define description %{lua: + rpm.define("license %doc") + print("%description") +} +%endif + + +# Define conditionals so that "rpmbuild --with <feature>" and +# "rpmbuild --without <feature>" can enable and disable specific features + +## Add option to enable support for stonith/external fencing agents +%bcond_with stonithd + +## Add option to create binaries suitable for use with profiling tools +%bcond_with profiling + +## Add option to create binaries with coverage analysis +%bcond_with coverage + +## Add option to generate documentation (requires Publican, Asciidoc and Inkscape) +%bcond_with doc + +## Add option to prefix package version with "0." +## (so later "official" packages will be considered updates) +%bcond_with pre_release + +## Add option to ship Upstart job files +%bcond_with upstart_job + +## Add option to enable CMAN support +%bcond_with cman + +## Add option to turn on SNMP / ESMTP support +%bcond_with snmp +%bcond_with esmtp + +## Add option to turn off hardening of libraries and daemon executables +%bcond_without hardening + + +# Keep sane profiling data if requested +%if %{with profiling} + +## Disable -debuginfo package and stripping binaries/libraries +%define debug_package %{nil} + +%endif + + +# Define the release version +# (do not look at externally enforced pre-release flag for tagged releases +# as only -rc tags, captured with the second condition, implies that then) +%if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} +%if 0%{pre_release} +%define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) +%else +%define pcmk_release 0.%{specversion}.%{shortcommit}.git +%endif +%else +%if 0%{tag_release} +%define pcmk_release %{specversion} +%else +# Never use the short commit in a RHEL release number +%define pcmk_release %{specversion} +%endif +%endif + +Name: pacemaker +Summary: Scalable High-Availability cluster resource manager +Version: %{pcmkversion} +Release: %{pcmk_release}%{?dist} +%if %{defined _unitdir} +License: GPLv2+ and LGPLv2+ +%else +# initscript is Revised BSD +License: GPLv2+ and LGPLv2+ and BSD +%endif +Url: http://www.clusterlabs.org +Group: System Environment/Daemons + +# Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: +# https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz +Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz +Source1: nagios-agents-metadata-%{nagios_hash}.tar.gz + +# upstream commits +Patch1: 001-new-behavior.patch +Patch2: 002-fixes.patch +Patch3: 003-cleanup.patch +Patch4: 004-cleanup.patch +Patch5: 005-cleanup.patch +Patch6: 006-leaks.patch +Patch7: 007-bundles.patch +Patch8: 008-quorum.patch +Patch9: 009-crm_resource.patch +Patch10: 010-crm_master.patch + +# patches that aren't from upstream +Patch100: lrmd-protocol-version.patch +Patch101: rhbz-url.patch + +BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) +AutoReqProv: on +Requires: resource-agents +Requires: %{name}-libs = %{version}-%{release} +Requires: %{name}-cluster-libs = %{version}-%{release} +Requires: %{name}-cli = %{version}-%{release} +Obsoletes: rgmanager < 3.2.0 +Provides: rgmanager >= 3.2.0 +Provides: pcmk-cluster-manager + +%if %{defined systemd_requires} +%systemd_requires +%endif + +ExclusiveArch: i686 x86_64 ppc64le s390x + +# 
Pacemaker targets compatibility with python 2.6+ and 3.2+ +Requires: python >= 2.6 +BuildRequires: python-devel >= 2.6 + +# Pacemaker requires a minimum libqb functionality +Requires: libqb > 0.17.0 +BuildRequires: libqb-devel > 0.17.0 + +# Basics required for the build (even if usually satisfied through other BRs) +BuildRequires: coreutils findutils grep sed + +# Required for core functionality +BuildRequires: automake autoconf libtool pkgconfig libtool-ltdl-devel +## version lower bound for: G_GNUC_INTERNAL +BuildRequires: pkgconfig(glib-2.0) >= 2.6 +BuildRequires: libxml2-devel libxslt-devel libuuid-devel +BuildRequires: bzip2-devel pam-devel + +# Required for agent_config.h which specifies the correct scratch directory +BuildRequires: resource-agents + +# RH patches are created by git, so we need git to apply them +BuildRequires: git + +# Enables optional functionality +BuildRequires: ncurses-devel docbook-style-xsl +BuildRequires: bison byacc flex help2man gnutls-devel pkgconfig(dbus-1) + +%if %{systemd_native} +BuildRequires: pkgconfig(systemd) +%endif + +%if %{with cman} && %{cman_native} +BuildRequires: clusterlib-devel +# pacemaker initscript: cman initscript, fence_tool (+ some soft-dependencies) +# "post" scriptlet: ccs_update_schema +Requires: cman +%endif + +Requires: corosync +BuildRequires: corosynclib-devel + +%if %{with stonithd} +BuildRequires: cluster-glue-libs-devel +%endif + +## (note: this has no effect when building through a non-customized mock) +%if !%{bleeding} +%if %{with doc} +BuildRequires: publican inkscape asciidoc +%endif +%endif + +%description +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +It supports clusters of more than 16 nodes, with significant capabilities +for managing resources and dependencies. + +It will run scripts at initialization, when machines go up or down, +when related resources fail, and can be configured to periodically check +resource health. + +Available rpmbuild rebuild options: + --with(out) : cman coverage doc stonithd hardening pre_release profiling + +%package cli +License: GPLv2+ and LGPLv2+ +Summary: Command line tools for controlling Pacemaker clusters +Group: System Environment/Daemons +Requires: %{name}-libs = %{version}-%{release} +Requires: perl-TimeDate + +%description cli +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +The %{name}-cli package contains command line tools that can be used +to query and control the cluster from machines that may, or may not, +be part of the cluster. + +%package -n %{name}-libs +License: GPLv2+ and LGPLv2+ +Summary: Core Pacemaker libraries +Group: System Environment/Daemons + +%description -n %{name}-libs +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +The %{name}-libs package contains shared libraries needed for cluster +nodes and those just running the CLI tools. + +%package -n %{name}-cluster-libs +License: GPLv2+ and LGPLv2+ +Summary: Cluster Libraries used by Pacemaker +Group: System Environment/Daemons +Requires: %{name}-libs = %{version}-%{release} + +%description -n %{name}-cluster-libs +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +The %{name}-cluster-libs package contains cluster-aware shared +libraries needed for nodes that will form part of the cluster.
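+## For illustration only (not consumed anywhere by the build): the bcond toggles declared above are typically exercised like the following when rebuilding the source RPM; the .src.rpm file name here is an assumed example, not a file this package provides: +## rpmbuild --rebuild --with doc --without hardening pacemaker-1.1.18-11.el7.src.rpm +## or, equivalently, via explicit macro definitions: +## rpmbuild --rebuild --define '_with_doc 1' --define '_without_hardening 1' pacemaker-1.1.18-11.el7.src.rpm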
+ +%package remote +%if %{defined _unitdir} +License: GPLv2+ and LGPLv2+ +%else +# initscript is Revised BSD +License: GPLv2+ and LGPLv2+ and BSD +%endif +Summary: Pacemaker remote daemon for non-cluster nodes +Group: System Environment/Daemons +Requires: %{name}-libs = %{version}-%{release} +Requires: %{name}-cli = %{version}-%{release} +Requires: resource-agents +Provides: pcmk-cluster-manager +%if %{defined systemd_requires} +%systemd_requires +%endif + +%description remote +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +The %{name}-remote package contains the Pacemaker Remote daemon +which is capable of extending pacemaker functionality to remote +nodes not running the full corosync/cluster stack. + +%package -n %{name}-libs-devel +License: GPLv2+ and LGPLv2+ +Summary: Pacemaker development package +Group: Development/Libraries +Requires: %{name}-cts = %{version}-%{release} +Requires: %{name}-libs = %{version}-%{release} +Requires: %{name}-cluster-libs = %{version}-%{release} +Requires: libtool-ltdl-devel libqb-devel libuuid-devel +Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel +Requires: corosynclib-devel + +%description -n %{name}-libs-devel +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +The %{name}-libs-devel package contains headers and shared libraries +for developing tools for Pacemaker. + +# NOTE: can be noarch if lrmd_test is moved to another subpackage +%package cts +License: GPLv2+ and LGPLv2+ +Summary: Test framework for cluster-related technologies like Pacemaker +Group: System Environment/Daemons +Requires: python >= 2.6 +Requires: %{name}-libs = %{version}-%{release} + +# systemd python bindings are a separate package in some distros +%if %{defined systemd_requires} + +%if 0%{?fedora} > 22 +Requires: python2-systemd +%else +%if 0%{?fedora} > 20 || 0%{?rhel} > 6 +Requires: systemd-python +%endif +%endif + +%endif + +%description cts +Test framework for cluster-related technologies like Pacemaker. + +%package doc +License: CC-BY-SA +Summary: Documentation for Pacemaker +Group: Documentation + +%description doc +Documentation for Pacemaker. + +Pacemaker is an advanced, scalable High-Availability cluster resource +manager for Corosync, CMAN and/or Linux-HA. + +%package nagios-plugins-metadata +License: GPLv2+ and LGPLv2+ +Summary: Pacemaker Nagios Metadata +Group: System Environment/Daemons +# NOTE below are the plugins this metadata uses. +# These plugin packages are currently not requirements +# for the nagios metadata because rhel does not ship these +# plugins. This metadata provides 3rd party support +# for nagios. Users may install the plugins via 3rd party +# rpm packages, or source. If rhel ships the nagios plugins +# in the future, we should consider enabling the following +# required fields. +#Requires: nagios-plugins-http +#Requires: nagios-plugins-ldap +#Requires: nagios-plugins-mysql +#Requires: nagios-plugins-pgsql +#Requires: nagios-plugins-tcp +Requires: pcmk-cluster-manager + +%description nagios-plugins-metadata +The metadata files required for Pacemaker to execute the nagios-plugin-based +monitor resources. + +%prep +%autosetup -a 1 -n %{name}-%{commit} -S git_am -p 1 + +# Force the local time +# +# 'git' sets the file date to the date of the last commit.
+# This can result in files having been created in the future +# when building on machines in timezones 'behind' the one the +# commit occurred in - which seriously confuses 'make' +find . -exec touch \{\} \; + +%build + +export CPPFLAGS="-DRHEL7_COMPAT" + +# Early versions of autotools (e.g. RHEL <= 5) do not support --docdir +export docdir=%{pcmk_docdir} + +export systemdunitdir=%{?_unitdir}%{?!_unitdir:no} + +%if %{with hardening} +# prefer distro-provided hardening flags in case they are defined +# through _hardening_{c,ld}flags macros, configure script will +# use its own defaults otherwise; if such hardenings are completely +# undesired, rpmbuild using "--without hardening" +# (or "--define '_without_hardening 1'") +export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" +export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" +export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" +export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" +%endif + +./autogen.sh + +%{configure} \ + %{?with_profiling: --with-profiling} \ + %{?with_coverage: --with-coverage} \ + %{!?with_cman: --without-cman} \ + %{!?with_snmp: --without-snmp} \ + %{!?with_esmtp: --without-esmtp} \ + --without-heartbeat \ + %{!?with_doc: --with-brand=} \ + %{!?with_hardening: --disable-hardening} \ + --with-initdir=%{_initrddir} \ + --localstatedir=%{_var} \ + --with-nagios \ + --with-nagios-metadata-dir=%{_datadir}/pacemaker/nagios/plugins-metadata/ \ + --with-nagios-plugin-dir=%{_libdir}/nagios/plugins/ \ + --with-version=%{version}-%{release} + +%if 0%{?suse_version} >= 1200 +# Fedora handles rpath removal automagically +sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool +sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool +%endif + +make %{_smp_mflags} V=1 all + +%check +# Prevent false positives in rpmlint +./BasicSanity.sh -V pengine cli 2>&1 | sed s/[fF]ail/faiil/g + +%install +rm -rf %{buildroot} +make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install + +mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig +install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker +install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon + +%if %{with upstart_job} +mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init +install -m 644 mcp/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf +install -m 644 mcp/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf +install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf +%endif + +mkdir -p %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata +for file in $(find nagios-agents-metadata-%{nagios_hash}/metadata -type f); do + install -m 644 $file %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata +done + +%if %{defined _unitdir} +mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} +%endif + +# Scripts that should be executable +chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py + +# These are not actually scripts +find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x + +# Don't package static libs +find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f +find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f + +# Do not package these either +rm -f %{buildroot}/%{_libdir}/service_crm.so +rm -f %{buildroot}/%{_sbindir}/fence_legacy +rm -f %{buildroot}/%{_mandir}/man8/fence_legacy.* +find %{buildroot} -name '*o2cb*' -type f -print0 | xargs -0 rm 
-f + +# Don't ship init scripts for systemd based platforms +%if %{defined _unitdir} +rm -f %{buildroot}/%{_initrddir}/pacemaker +rm -f %{buildroot}/%{_initrddir}/pacemaker_remote +%endif + +# Don't ship fence_pcmk where it has no use +%if %{without cman} +rm -f %{buildroot}/%{_sbindir}/fence_pcmk +%endif + +%if %{with coverage} +GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov +mkdir -p $GCOV_BASE +find . -name '*.gcno' -type f | while read F ; do + D=`dirname $F` + mkdir -p ${GCOV_BASE}/$D + cp $F ${GCOV_BASE}/$D +done +%endif + +%clean +rm -rf %{buildroot} + +%post +%if %{defined _unitdir} +%systemd_post pacemaker.service +%else +/sbin/chkconfig --add pacemaker || : +%if %{with cman} && %{cman_native} +# make fence_pcmk in cluster.conf valid instantly otherwise tools like ccs may +# choke (until schema gets auto-regenerated on the next start of cluster), +# per the protocol shared with other packages contributing to cluster.rng +/usr/sbin/ccs_update_schema >/dev/null 2>&1 || : +%endif +%endif + +%preun +%if %{defined _unitdir} +%systemd_preun pacemaker.service +%else +/sbin/service pacemaker stop >/dev/null 2>&1 || : +if [ $1 -eq 0 ]; then + # Package removal, not upgrade + /sbin/chkconfig --del pacemaker || : +fi +%endif + +%postun +%if %{defined _unitdir} +%systemd_postun_with_restart pacemaker.service +%endif + +%pre remote +%if %{defined _unitdir} +# Stop the service before anything is touched, and remember to restart +# it as one of the last actions (compared to using systemd_postun_with_restart, +# this avoids suicide when sbd is in use) +systemctl --quiet is-active pacemaker_remote +if [ $? -eq 0 ] ; then + mkdir -p %{_localstatedir}/lib/rpm-state/%{name} + touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote + systemctl stop pacemaker_remote >/dev/null 2>&1 +else + rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote +fi +%endif + +%post remote +%if %{defined _unitdir} +%systemd_post pacemaker_remote.service +%else +/sbin/chkconfig --add pacemaker_remote || : +%endif + +%preun remote +%if %{defined _unitdir} +%systemd_preun pacemaker_remote.service +%else +/sbin/service pacemaker_remote stop >/dev/null 2>&1 || : +if [ $1 -eq 0 ]; then + # Package removal, not upgrade + /sbin/chkconfig --del pacemaker_remote || : +fi +%endif + +%postun remote +%if %{defined _unitdir} +# This next line is a no-op, because we stopped the service earlier, but +# we leave it here because it allows us to revert to the standard behavior +# in the future if desired +%systemd_postun_with_restart pacemaker_remote.service +# Explicitly take care of removing the flag-file(s) upon final removal +if [ $1 -eq 0 ] ; then + rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote +fi +%endif + +%posttrans remote +%if %{defined _unitdir} +if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then + systemctl start pacemaker_remote >/dev/null 2>&1 + rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote +fi +%endif + +%post cli +%if %{defined _unitdir} +%systemd_post crm_mon.service +%endif + +%preun cli +%if %{defined _unitdir} +%systemd_preun crm_mon.service +%endif + +%postun cli +%if %{defined _unitdir} +%systemd_postun_with_restart crm_mon.service +%endif + +%pre -n %{name}-libs + +getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189 +getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname} +exit 0 + +%post -n %{name}-libs -p /sbin/ldconfig + +%postun -n 
%{name}-libs -p /sbin/ldconfig + +%post -n %{name}-cluster-libs -p /sbin/ldconfig + +%postun -n %{name}-cluster-libs -p /sbin/ldconfig + +%files +########################################################### +%defattr(-,root,root) + +%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker +%{_sbindir}/pacemakerd + +%if %{defined _unitdir} +%{_unitdir}/pacemaker.service +%else +%{_initrddir}/pacemaker +%endif + +%exclude %{_libexecdir}/pacemaker/lrmd_test +%exclude %{_sbindir}/pacemaker_remoted +%{_libexecdir}/pacemaker/* + +%{_sbindir}/crm_attribute +%{_sbindir}/crm_master +%{_sbindir}/crm_node +%if %{with cman} +%{_sbindir}/fence_pcmk +%endif +%{_sbindir}/stonith_admin + +%doc %{_mandir}/man7/crmd.* +%doc %{_mandir}/man7/pengine.* +%doc %{_mandir}/man7/stonithd.* +%if %{without cman} || !%{cman_native} +%doc %{_mandir}/man7/ocf_pacemaker_controld.* +%endif +%doc %{_mandir}/man7/ocf_pacemaker_remote.* +%doc %{_mandir}/man8/crm_attribute.* +%doc %{_mandir}/man8/crm_node.* +%doc %{_mandir}/man8/crm_master.* +%if %{with cman} +%doc %{_mandir}/man8/fence_pcmk.* +%endif +%doc %{_mandir}/man8/pacemakerd.* +%doc %{_mandir}/man8/stonith_admin.* + +%doc %{_datadir}/pacemaker/alerts + +%license licenses/GPLv2 +%doc COPYING +%doc ChangeLog + +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine +%if %{without cman} || !%{cman_native} +/usr/lib/ocf/resource.d/pacemaker/controld +%endif +/usr/lib/ocf/resource.d/pacemaker/remote +/usr/lib/ocf/resource.d/.isolation + +%if "%{?cs_version}" != "UNKNOWN" +%if 0%{?cs_version} < 2 +%{_libexecdir}/lcrso/pacemaker.lcrso +%endif +%endif + +%if %{with upstart_job} +%config(noreplace) %{_sysconfdir}/init/pacemaker.conf +%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf +%endif + +%files cli +%defattr(-,root,root) + +%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker +%config(noreplace) %{_sysconfdir}/sysconfig/crm_mon + +%if %{defined _unitdir} +%{_unitdir}/crm_mon.service +%endif + +%if %{with upstart_job} +%config(noreplace) %{_sysconfdir}/init/crm_mon.conf +%endif + +%{_sbindir}/attrd_updater +%{_sbindir}/cibadmin +%{_sbindir}/crm_diff +%{_sbindir}/crm_error +%{_sbindir}/crm_failcount +%{_sbindir}/crm_mon +%{_sbindir}/crm_resource +%{_sbindir}/crm_standby +%{_sbindir}/crm_verify +%{_sbindir}/crmadmin +%{_sbindir}/iso8601 +%{_sbindir}/crm_shadow +%{_sbindir}/crm_simulate +%{_sbindir}/crm_report +%{_sbindir}/crm_ticket +%exclude %{_datadir}/pacemaker/alerts +%exclude %{_datadir}/pacemaker/tests +%exclude %{_datadir}/pacemaker/nagios +%{_datadir}/pacemaker +%{_datadir}/snmp/mibs/PCMK-MIB.txt + +%exclude /usr/lib/ocf/resource.d/pacemaker/controld +%exclude /usr/lib/ocf/resource.d/pacemaker/remote + +%dir /usr/lib/ocf +%dir /usr/lib/ocf/resource.d +/usr/lib/ocf/resource.d/pacemaker + +%doc %{_mandir}/man7/* +%exclude %{_mandir}/man7/crmd.* +%exclude %{_mandir}/man7/pengine.* +%exclude %{_mandir}/man7/stonithd.* +%exclude %{_mandir}/man7/ocf_pacemaker_controld.* +%exclude %{_mandir}/man7/ocf_pacemaker_remote.* +%doc %{_mandir}/man8/* +%exclude %{_mandir}/man8/crm_attribute.* +%exclude %{_mandir}/man8/crm_node.* +%exclude %{_mandir}/man8/crm_master.* +%exclude %{_mandir}/man8/fence_pcmk.* +%exclude %{_mandir}/man8/pacemakerd.* +%exclude %{_mandir}/man8/pacemaker_remoted.* +%exclude %{_mandir}/man8/stonith_admin.* + +%license licenses/GPLv2 +%doc COPYING +%doc ChangeLog + +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker +%dir %attr (750, %{uname}, %{gname}) 
%{_var}/lib/pacemaker/blackbox +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores + +%files -n %{name}-libs +%defattr(-,root,root) + +%{_libdir}/libcib.so.* +%{_libdir}/liblrmd.so.* +%{_libdir}/libcrmservice.so.* +%{_libdir}/libcrmcommon.so.* +%{_libdir}/libpe_status.so.* +%{_libdir}/libpe_rules.so.* +%{_libdir}/libpengine.so.* +%{_libdir}/libstonithd.so.* +%{_libdir}/libtransitioner.so.* +%license licenses/LGPLv2.1 +%doc COPYING +%doc ChangeLog + +%files -n %{name}-cluster-libs +%defattr(-,root,root) +%{_libdir}/libcrmcluster.so.* +%license licenses/LGPLv2.1 +%doc COPYING +%doc ChangeLog + +%files remote +%defattr(-,root,root) + +%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker +%if %{defined _unitdir} +# state directory is shared between the subpackages +# let rpm take care of removing it once it isn't +# referenced anymore and empty +%ghost %dir %{_localstatedir}/lib/rpm-state/%{name} +%{_unitdir}/pacemaker_remote.service +%else +%{_initrddir}/pacemaker_remote +%endif + +%{_sbindir}/pacemaker_remoted +%{_mandir}/man8/pacemaker_remoted.* +%license licenses/GPLv2 +%doc COPYING +%doc ChangeLog + +%files doc +%defattr(-,root,root) +%doc %{pcmk_docdir} +%license licenses/CC-BY-SA-4.0 + +%files cts +%defattr(-,root,root) +%{py_site}/cts +%{_datadir}/pacemaker/tests/cts +%{_libexecdir}/pacemaker/lrmd_test +%license licenses/GPLv2 +%doc COPYING +%doc ChangeLog + +%files -n %{name}-libs-devel +%defattr(-,root,root) +%exclude %{_datadir}/pacemaker/tests/cts +%{_datadir}/pacemaker/tests +%{_includedir}/pacemaker +%{_libdir}/*.so +%if %{with coverage} +%{_var}/lib/pacemaker/gcov +%endif +%{_libdir}/pkgconfig/*.pc +%license licenses/LGPLv2.1 +%doc COPYING +%doc ChangeLog + +%files nagios-plugins-metadata +%defattr(-,root,root) +%dir %{_datadir}/pacemaker/nagios/plugins-metadata +%attr(0644,root,root) %{_datadir}/pacemaker/nagios/plugins-metadata/* + +%changelog +* Fri Jan 26 2018 Ken Gaillot - 1.1.18-11 +- Fix regression in crm_master +- Resolves: rhbz#1539113 + +* Wed Jan 24 2018 Ken Gaillot - 1.1.18-10 +- Always trigger transition when quorum changes +- Match clone names correctly with crm_resource --cleanup +- Fix pcs resource --wait timeout when bundles are used +- Observe colocation constraints correctly with bundles in master role +- Resolves: rhbz#1464068 +- Resolves: rhbz#1508350 +- Resolves: rhbz#1519812 +- Resolves: rhbz#1527072 + +* Mon Dec 18 2017 Ken Gaillot - 1.1.18-9 +- Fix small memory leak introduced by node attribute delay fix +- Resolves: rhbz#1454960 + +* Tue Dec 12 2017 Ken Gaillot - 1.1.18-8 +- Regression fix for "pcs resource cleanup" was incomplete +- Resolves: rhbz#1508350 + +* Mon Dec 11 2017 Ken Gaillot - 1.1.18-7 +- Avoid node attribute write delay when corosync.conf has only IP addresses +- Fix regressions in "pcs resource cleanup" behavior +- Restore ordering of unfencing before fence device starts +- Ensure --wait options work when bundles are in use +- Fix possible invalid transition with bundle ordering constraints +- Resolves: rhbz#1454960 +- Resolves: rhbz#1508350 +- Resolves: rhbz#1517796 +- Resolves: rhbz#1519812 +- Resolves: rhbz#1522822 + +* Wed Nov 15 2017 Ken Gaillot - 1.1.18-6 +- Rebase to upstream 2b07d5c5a908998891c3317faa30328c108d3a91 (1.1.18) +- If on-fail=ignore, migration-threshold should also be ignored +- Resolves: rhbz#1474428 +- Resolves: rhbz#1507344 + +* Fri Nov 3 2017 Ken Gaillot - 1.1.18-5 +- Properly clean up primitive inside bundle +- Scalability improvements +- Resolves: rhbz#1499217 +- Resolves: rhbz#1508373 + +* Fri Nov
3 2017 Ken Gaillot - 1.1.18-4 +- Rebase to upstream 1a4ef7d180e77bcd6423f342d62e05e516c4e852 (1.1.18-rc4) +- Resolves: rhbz#1381754 +- Resolves: rhbz#1474428 +- Resolves: rhbz#1499217 +- Resolves: rhbz#1508373 + +* Tue Oct 24 2017 Ken Gaillot - 1.1.18-3 +- Rebase to upstream 36d2962a8613322fc43d727d95720d61a47d0138 (1.1.18-rc3) +- Resolves: rhbz#1474428 + +* Mon Oct 16 2017 Ken Gaillot - 1.1.18-2 +- Rebase to upstream 5cccc41c95d6288eab27d93901b650b071f976dc (1.1.18-rc2) +- Default record-pending to true +- Resolves: rhbz#1323546 +- Resolves: rhbz#1376556 +- Resolves: rhbz#1382364 +- Resolves: rhbz#1461976 +- Resolves: rhbz#1474428 +- Resolves: rhbz#1500509 +- Resolves: rhbz#1501903 +- Resolves: rhbz#1501924 + +* Mon Oct 9 2017 Ken Gaillot - 1.1.18-1 +- Rebase to upstream 1cb712c5369c98f03d42bcf8648cacd86a5f48f7 (1.1.18-rc1) +- Resolves: rhbz#1298581 +- Resolves: rhbz#1394418 +- Resolves: rhbz#1427648 +- Resolves: rhbz#1454933 +- Resolves: rhbz#1454957 +- Resolves: rhbz#1454960 +- Resolves: rhbz#1462253 +- Resolves: rhbz#1464068 +- Resolves: rhbz#1465519 +- Resolves: rhbz#1470262 +- Resolves: rhbz#1471506 +- Resolves: rhbz#1474428 +- Resolves: rhbz#1474463 +- Resolves: rhbz#1482278 +- Resolves: rhbz#1489728 +- Resolves: rhbz#1489735 + +* Tue Jun 20 2017 Ken Gaillot - 1.1.16-12 +- Avoid unnecessary restarts when recovering remote connections +- Resolves: rhbz#1448773 + +* Fri Jun 9 2017 Ken Gaillot - 1.1.16-11 +- Support bundle meta-attributes +- Resolves: rhbz#1447903 + +* Tue May 23 2017 Ken Gaillot - 1.1.16-10 +- Fix issues when running bundles on Pacemaker Remote nodes +- Reap orphaned processes when running Pacemaker Remote as pid 1 +- Order remote actions after remote connection recovery + (fixes regression in RHEL 7.3) +- Avoid local resource manager daemon (lrmd) crash when an + in-flight systemd operation is cancelled +- Resolves: rhbz#1432722 +- Resolves: rhbz#1441603 +- Resolves: rhbz#1448772 +- Resolves: rhbz#1451170 + +* Tue May 9 2017 Ken Gaillot - 1.1.16-9 +- Allow cleanup of guest nodes when guest is unmanaged +- Allow bundles to run on Pacemaker Remote nodes +- Handle slow IPC clients better +- Update crmd throttle information when CPUs are hot-plugged in +- Order pacemaker systemd unit after resource-agents-deps target +- Resolves: rhbz#1303742 +- Resolves: rhbz#1432722 +- Resolves: rhbz#1435067 +- Resolves: rhbz#1444728 +- Resolves: rhbz#1446669 + +* Tue Apr 18 2017 Ken Gaillot - 1.1.16-8 +- Fix shell script syntax error introduced with URL patch +- Resolves: rhbz#1410886 + +* Tue Apr 18 2017 Ken Gaillot - 1.1.16-7 +- Avoid fencing old DC if it is shutting down while another node is joining +- Improve crmd's handling of repeated fencing failures +- Correct behavior when guest created by bundle has a node attribute +- Show Red Hat bugzilla URL rather than upstream when generating cluster report +- Resolves: rhbz#1430112 +- Resolves: rhbz#1432722 + +* Wed Apr 5 2017 Ken Gaillot - 1.1.16-6 +- Allow container without IP to use underlying hostname +- Resolves: rhbz#1432722 + +* Tue Apr 4 2017 Ken Gaillot - 1.1.16-5 +- Keep man pages compressed +- Bugfixes for container bundles +- Resolves: rhbz#1410886 +- Resolves: rhbz#1432722 + +* Mon Apr 3 2017 Ken Gaillot - 1.1.16-4 +- Add support for container bundles +- Treat systemd reloading state as monitor success +- Resolves: rhbz#1432722 +- Resolves: rhbz#1436696 + +* Mon Mar 20 2017 Ken Gaillot - 1.1.16-3 +- Avoid hang when shutting down unmanaged remote node connections +- Get correct node name when crm_node or crm_attribute is 
run on remote node +- Ignore action when configured as a stonith device parameter +- Include recent upstream bug fixes +- Resolves: rhbz#1388489 +- Resolves: rhbz#1410886 +- Resolves: rhbz#1417936 +- Resolves: rhbz#1421700 + +* Thu Jan 19 2017 Ken Gaillot - 1.1.16-2 +- Avoid grep crashes in crm_report when looking for system logs +- Properly ignore version with crm_diff --no-version +- Process guest node fencing properly +- Ensure filename is valid before using +- Build for ppc64le +- Resolves: rhbz#1288261 +- Resolves: rhbz#1289662 +- Resolves: rhbz#1383462 +- Resolves: rhbz#1405635 +- Resolves: rhbz#1412309 + +* Thu Jan 12 2017 Ken Gaillot - 1.1.16-1 +- Rebase to upstream 94ff4df51a55cc30d01843ea11b3292bac755432 (1.1.16) +- Resolves: rhbz#1374777 +- Resolves: rhbz#1378817 +- Resolves: rhbz#1410886 + +* Wed Oct 26 2016 Ken Gaillot - 1.1.15-12 +- Preserve rolling upgrades involving Pacemaker Remote nodes +- Resolves: rhbz#1388827 + +* Fri Oct 21 2016 Ken Gaillot - 1.1.15-11.1 +- Fix CVE-2016-7035 +- Resolves: rhbz#1374776 + +* Thu Sep 22 2016 Ken Gaillot - 1.1.15-11 +- Sanitize readable CIB output collected by crm_report +- Document crm_report --sos-mode option +- Speed up crm_report on Pacemaker Remote nodes +- Avoid sbd fencing when upgrading pacemaker_remote package +- Resolves: rhbz#1219188 +- Resolves: rhbz#1235434 +- Resolves: rhbz#1323544 +- Resolves: rhbz#1372009 + +* Mon Aug 15 2016 Ken Gaillot - 1.1.15-10 +- Only clear remote node operation history on startup +- Resend a lost shutdown request +- Correctly detect and report invalid configurations +- Don't include manual page for resource agent that isn't included +- Resolves: rhbz#1288929 +- Resolves: rhbz#1310486 +- Resolves: rhbz#1352039 + +* Fri Aug 5 2016 Ken Gaillot - 1.1.15-9 +- Make crm_mon XML schema handle multiple-active resources +- Resolves: rhbz#1364500 + +* Wed Aug 3 2016 Ken Gaillot - 1.1.15-8 +- Quote timestamp-format correctly in alert_snmp.sh.sample +- Unregister CIB callbacks correctly +- Print resources section heading consistently in crm_mon output +- Resolves: rhbz#773656 +- Resolves: rhbz#1361533 + +* Tue Jul 26 2016 Ken Gaillot - 1.1.15-7 +- Avoid null dereference +- Resolves: rhbz#1290592 + +* Tue Jul 26 2016 Ken Gaillot - 1.1.15-6 +- Fix transition failure with start-then-stop order constraint + unfencing +- Resolves: rhbz#1290592 + +* Fri Jul 1 2016 Ken Gaillot - 1.1.15-5 +- Update spec file for toolchain hardening +- Resolves: rhbz#1242258 + +* Tue Jun 28 2016 Ken Gaillot - 1.1.15-4 +- Take advantage of toolchain hardening +- Resolves: rhbz#1242258 + +* Wed Jun 22 2016 Ken Gaillot - 1.1.15-3 +- Rebase to upstream e174ec84857e087210b9dacee3318f8203176129 (1.1.15) +- Resolves: rhbz#1304771 + Resolves: rhbz#1303765 + Resolves: rhbz#1327469 + Resolves: rhbz#1337688 + Resolves: rhbz#1345876 + Resolves: rhbz#1346726 + +* Fri Jun 10 2016 Ken Gaillot - 1.1.15-2 +- Rebase to upstream 25920dbdbc7594fc944a963036996f724c63a8b8 (1.1.15-rc4) +- Resolves: rhbz#1304771 + Resolves: rhbz#773656 + Resolves: rhbz#1240330 + Resolves: rhbz#1281450 + Resolves: rhbz#1286316 + Resolves: rhbz#1287315 + Resolves: rhbz#1323544 + +* Tue May 31 2016 Ken Gaillot - 1.1.15-1 +- Rebase to upstream 2c148ac30dfcc2cfb91dc367ed469b6f227a8abc (1.1.15-rc3+) +- Resolves: rhbz#1304771 + Resolves: rhbz#1040685 + Resolves: rhbz#1219188 + Resolves: rhbz#1235434 + Resolves: rhbz#1268313 + Resolves: rhbz#1284069 + Resolves: rhbz#1287868 + Resolves: rhbz#1288929 + Resolves: rhbz#1312094 + Resolves: rhbz#1314157 + Resolves: rhbz#1321711 + Resolves: 
rhbz#1338623 + +* Thu Feb 18 2016 Ken Gaillot - 1.1.14-11 +- Rebase to upstream 2cccd43d6b7f2525d406251e14ef37626e29c51f (1.1.14+) +- Resolves: rhbz#1304771 + Resolves: rhbz#1207388 + Resolves: rhbz#1240330 + Resolves: rhbz#1281450 + Resolves: rhbz#1284069 + Resolves: rhbz#1286316 + Resolves: rhbz#1287315 + Resolves: rhbz#1287868 + Resolves: rhbz#1288929 + Resolves: rhbz#1303765 +- This also updates the packaging to follow upstream more closely, + most importantly moving some files from the pacemaker package to + pacemaker-cli (including XML schemas, SNMP MIB, attrd_updater command, + most ocf:pacemaker resource agents, and related man pages), + and deploying /etc/sysconfig/crm_mon. + +* Thu Oct 08 2015 Andrew Beekhof - 1.1.13-10 +- More improvements when updating and deleting meta attributes +- Resolves: rhbz#1267265 + +* Mon Oct 05 2015 Andrew Beekhof - 1.1.13-9 +- Fix regression when updating child meta attributes +- Resolves: rhbz#1267265 + +* Wed Sep 16 2015 Andrew Beekhof - 1.1.13-8 +- Fix regression when setting attributes for remote nodes +- Resolves: rhbz#1206647 + +* Thu Sep 10 2015 Andrew Beekhof - 1.1.13-7 +- Additional upstream patches +- Resolves: rhbz#1234680 + +* Wed Jul 22 2015 Andrew Beekhof - 1.1.13-6 +- Correctly apply and build patches +- Resolves: rhbz#1234680 + +* Wed Jul 22 2015 Andrew Beekhof - 1.1.13-5 +- Sync with upstream 63f8e9a +- Resolves: rhbz#1234680 + +* Mon Jul 20 2015 Andrew Beekhof - 1.1.13-4 +- Sync with upstream 63f8e9a +- Resolves: rhbz#1234680 + +* Fri Jun 26 2015 Andrew Beekhof - 1.1.13-3 +- New upstream tarball 44eb2ddf8d4f8fc05256aae2abc9fbf3ae4d1fbc +- Resolves: rhbz#1234680 + +* Thu Jun 11 2015 David Vossel - 1.1.13-2 +- Adds nagios metadata. + + Resolves: rhbz#1203053 + +* Tue May 12 2015 Andrew Beekhof - 1.1.13-0.1 +- New upstream tarball 8ae45302394b039fb098e150f156df29fc0cb576 + +* Wed Mar 18 2015 David Vossel - 1.1.12-25 +- Convince systemd to shut down dbus after pacemaker. + + Resolves: rhbz#1198886 + +* Wed Mar 18 2015 David Vossel - 1.1.12-23 +- Ensure for 'B with A' that B can not run if A can not run. + + Resolves: rhbz#1194475 + +* Thu Jan 15 2015 Andrew Beekhof - 1.1.12-22 +- Fix segfault encountered with orphaned remote node connections + + Resolves: rhbz#1176210 + +* Thu Jan 15 2015 Andrew Beekhof - 1.1.12-21 +- Fix use-after-free in CLI tool when restarting a resource + +* Tue Jan 13 2015 Andrew Beekhof - 1.1.12-20 +- Expose the -N/--node option for attrd_updater to allow attributes to + be set for other nodes + +* Sun Jan 11 2015 David Vossel - 1.1.12-19 +- Imply stop on actions within containers during host fencing +- acl correctly implement the reference acl direct + + Resolves: rhbz#1117341 + +* Tue Jan 6 2015 David Vossel - 1.1.12-18 +- clone order constraint require-all option. +- fix memory leaks in crmd and pacemakerd + + Resolves: rhbz#1176210 + +* Tue Dec 16 2014 David Vossel - 1.1.12-15 +- Include ipc and pacemaker remote related upstream fixes.
+ +* Wed Nov 26 2014 Andrew Beekhof - 1.1.12-13 +- Update patch level to upstream a433de6 +- Ensure we wait for long running systemd stop operations to complete + Resolves: rhbz#1165423 + +* Tue Nov 18 2014 Andrew Beekhof - 1.1.12-11 +- Update patch level to upstream 7dd9022 +- Ensure all internal caches are updated when nodes are removed from the cluster + Resolves: rhbz#1162727 + +* Wed Nov 05 2014 Andrew Beekhof - 1.1.12-10 +- Update patch level to upstream 98b6688 +- Support an intelligent resource restart operation +- Exclusive discovery implies running the resource is only possible on the listed nodes + +* Wed Nov 05 2014 Andrew Beekhof - 1.1.12-9 +- Update patch level to upstream fb94901 +- Prevent blocking by performing systemd reloads asynchronously + +* Tue Oct 28 2014 Andrew Beekhof - 1.1.12-8 +- Repair the ability to start when sbd is not enabled + +* Mon Oct 27 2014 Andrew Beekhof - 1.1.12-7 +- Update patch level to upstream afa0f33 + - Resolve coverity defects + +* Fri Oct 24 2014 Andrew Beekhof - 1.1.12-5 +- Update patch level to upstream 031e46c + - Prevent glib assert triggered by timers being removed from mainloop more than once + - Allow rsc discovery to be disabled in certain situations + - Allow remote-nodes to be placed in maintenance mode + - Improved sbd integration + +* Thu Oct 16 2014 Andrew Beekhof - 1.1.12-4 +- Add install dependency on sbd + +* Wed Oct 01 2014 Andrew Beekhof - 1.1.12-3 +- Update patch level to upstream be1e835 + Resolves: rhbz#1147989 + +* Fri Sep 19 2014 Fabio M. Di Nitto - 1.1.12-2 +- Enable build on s390x + Resolves: rhbz#1140917 + +* Mon Sep 08 2014 Andrew Beekhof - 1.1.12-1 +- Rebase to upstream a14efad51ca8f1e3742fd8520e051cd7a0864f04 (1.1.12+) + Resolves: rhbz#1059626 + +* Fri Jul 04 2014 Andrew Beekhof - 1.1.10-32 + +- Fix: lrmd: Handle systemd reporting 'done' before a resource is actually stopped + Resolves: rhbz#1111747 + +* Thu Apr 17 2014 David Vossel - 1.1.10-31 + +- fencing: Fence using all required devices +- fencing: Execute all required fencing devices regardless of what topology level they are at +- fencing: default to 'off' when agent does not advertise 'reboot' in metadata + Resolves: rhbz#1078078 + +* Mon Apr 14 2014 Andrew Beekhof 1.1.10-30 + +- crmd: Do not erase the status section for unfenced nodes +- crmd: Correctly react to successful unfencing operations +- crmd: Report unsuccessful unfencing operations +- crmd: Do not overwrite existing node state when fencing completes +- fencing: Correctly record which peer performed the fencing operation +- fencing: Automatically switch from 'list' to 'status' to 'static-list' if those actions are not advertised in the metadata +- fencing: Filter self-fencing at the peers to allow unfencing to work correctly +- pengine: Automatically re-unfence a node if the fencing device definition changes +- pengine: Fencing devices default to only requiring quorum in order to start +- pengine: Delay unfencing until after we know the state of all resources that require unfencing +- pengine: Ensure unfencing occurs before fencing devices are (re-)probed +- pengine: Ensure unfencing only happens once, even if the transition is interrupted +- pengine: Do not unfence nodes that are offline, unclean or shutting down +- pengine: Unfencing is based on device probes, there is no need to unfence when normal resources are found active +- logging: daemons always get a log file, unless explicitly set to configured 'none' +- lrmd: Expose logging variables expected by OCF agents +- crm_report: Suppress
logging errors after the target directory has been compressed +- crm_resource: Wait for the correct number of replies when cleaning up resources + Resolves: rhbz#1078078 + +* Tue Mar 25 2014 David Vossel - 1.1.10-29 + +- Low: controld: Remove '-q 0' from default dlm_controld arguments + Resolves: rhbz#1064519 + +* Tue Mar 25 2014 David Vossel - 1.1.10-28 + +- pengine: fixes invalid transition caused by clones with more than 10 instances + Resolves: rhbz#1078504 + +* Fri Feb 28 2014 Andrew Beekhof - 1.1.10-27 + +- crm_resource: Prevent use-of-NULL +- systemd: Prevent use-of-NULL when determining if an agent exists +- Fencing: Remove shadow definition and use of variable 'progress' + Resolves: rhbz#1070916 + +* Thu Feb 27 2014 Andrew Beekhof - 1.1.10-26 + +- Run automated regression tests after every build +- Fencing: Send details of stonith_api_time() and stonith_api_kick() to syslog +- Fencing: Pass the correct options when looking up the history by node name +- Fencing: stonith_api_time_helper now returns when the most recent fencing operation completed +- crm_report: Additional dlm detail if dlm_controld is running +- crmd: Gracefully handle actions that cannot be initiated +- pengine: Gracefully handle bad values for XML_ATTR_TRANSITION_MAGIC + Resolves: rhbz#1070916 + +* Tue Feb 25 2014 David Vossel - 1.1.10-25 + +- pengine: cl#5187 - Prevent resources in an anti-colocation from even temporarily running on the same node + Resolves: rhbz#1069284 + +* Thu Feb 20 2014 David Vossel - 1.1.10-24 + +- controld: handling startup fencing within the controld agent, not the dlm + Resolves: rhbz#1064519 +- controld: Do not consider the dlm up until the address list is present + Resolves: rhbz#1067536 + +* Wed Feb 12 2014 Andrew Beekhof - 1.1.10-23 + +- mcp: Tell systemd not to respawn us if we return 100 +- services: Detect missing agents and permission errors before forking +- Use native DBus library for systemd support to avoid problematic use of threads + Resolves: rhbz#720543 (aka.
1057697) + +* Fri Dec 27 2013 Daniel Mach - 1.1.10-22 +- Mass rebuild 2013-12-27 + +* Wed Dec 04 2013 David Vossel - 1.1.10-21 + +- Fix: Removes unnecessary newlines in crm_resource -O output + Resolves: rhbz#720543 + +* Thu Nov 14 2013 Andrew Beekhof - 1.1.10-20 + +- Fix: tools: Fixes formatting of remote-nodes in crm_mon and crm_simulate +- Fix: Corosync: Attempt to retrieve a peer's node name if it is not already known + Resolves: rhbz#720543 + +* Thu Nov 14 2013 David Vossel - 1.1.10-19 +- Fix: controld: Use the correct variant of dlm_controld for + corosync-2 clusters + + Resolves: rhbz#1028627 + +* Thu Nov 07 2013 David Vossel - 1.1.10-18 + +- High: remote: Add support for ipv6 into pacemaker_remote daemon + Resolves: rhbz#720543 + +* Wed Nov 06 2013 Andrew Beekhof - 1.1.10-17 + + Resolves: rhbz#720543 + +- Fix: core: Do not enable blackbox for cli tools +- Fix: Command-line tools should stop after an assertion failure +- Fix: crmd: Don't add node_state to cib, if we have not seen or fenced this node yet +- Fix: crmd: Correctly update expected state when the previous DC shuts down +- Fix: crmd: Cache rsc_info retrieved from lrmd and pacemaker_remoted +- Fix: crmd: Pad internal lrmd rsc_info and metadata retrieval timeout +- Fix: crm_attribute: Detect orphaned remote-nodes when setting attributes +- Fix: crm_mon: Prevent use-of-NULL when ping resources do not define a host list +- Fix: crm_report: Record the output of the collector +- Fix: crm_report: Do not print garbage when collecting from the local node +- Fix: crm_resource: Wait for all replies when cleaning up resources +- Fix: fencing: Do not broadcast suicide if the on action is being executed +- Fix: fencing: Allow fencing for node after topology entries are deleted +- Fix: fencing: Deep copy current topology level list on remote op +- Fix: lrmd: Correctly cancel monitor actions for lsb/systemd/service resources on cleaning up +- Fix: pengine: Don't prevent clones from running due to dependent resources +- Fix: pengine: Probe containers not expected to be up +- Fix: ipc: Raise the default buffer size to 128k +- Fix: ipc: Use the higher of the configured buffer size or the default +- Fix: iso8601: Prevent dates from jumping backwards a day in some timezones +- Fix: remote: Properly version the remote connection protocol +- Fix: remote: Handle endian changes between client and server and improve forward compatibility + Resolves: rhbz#720543 + +* Mon Oct 07 2013 Andrew Beekhof - 1.1.10-16 + +- Remove unsupported resource agent +- Log: crmd: Supply arguments in the correct order +- Fix: crm_report: Correctly redirect error message to /dev/null +- Fix: Bug rhbz#1011618 - Consistently use 'Slave' as the role for unpromoted master/slave resources +- Fix: pengine: Location constraints with role=Started should prevent masters from running at all +- Fix: crm_resource: Observe --master modifier for --move +- Provide a meaningful error if --master is used for primitives and groups +- Fix: Fencing: Observe pcmk_host_list during automatic unfencing + Resolves: rhbz#996576 + +* Fri Sep 27 2013 David Vossel - 1.1.10-15 + + Fix: crmd: Allow transient attributes to be set on remote-nodes. + + Fix: pengine: Handle orphaned remote-nodes properly + + Low: cts: Add RemoteLXC regression test.
+ + Resolves: rhbz#1006465 + Resolves: rhbz#1006471 + +* Fri Aug 23 2013 Andrew Beekhof - 1.1.10-14 + + Fix: xml: Location constraints are allowed to specify a role + + Bug rhbz#902407 - crm_resource: Handle --ban for master/slave resources as advertised + Resolves: rhbz#902407 + +* Wed Aug 14 2013 Andrew Beekhof - 1.1.10-13 + + Fencing: Support agents that need the host to be unfenced at startup + Resolves: rhbz#996576 + + crm_report: Collect corosync quorum data + Resolves: rhbz#839342 + +* Thu Aug 08 2013 Andrew Beekhof - 1.1.10-12 +- Regenerate patches to have meaningful names + +* Thu Aug 08 2013 Andrew Beekhof - 1.1.10-11 + + Fix: systemd: Prevent glib assertion - only call g_error_free() with non-NULL arguments + + Fix: systemd: Prevent additional assertions in g_error_free + + Fix: logging: glib CRIT messages should not produce core files by default + + Doc: controld: Update the description + + Fix: pengine: Correctly account for the location preferences of things colocated with a group + + Fix: cib: Correctly log short-form xml diffs + + Fix: crmd: Correctly update the history cache when recurring ops change their return code + + Log: pengine: Better indicate when a resource has failed + + Log: crm_mon: Unmunge the output for failed operations + +* Fri Aug 02 2013 Andrew Beekhof - 1.1.10-10 + + Fix: pengine: Do not re-allocate clone instances that are blocked in the Stopped state + + Fix: pengine: Do not allow colocation with blocked clone instances + +* Thu Aug 01 2013 Andrew Beekhof - 1.1.10-9 + + Fix: crmd: Prevent crash by passing log arguments in the correct order + +* Thu Aug 01 2013 Andrew Beekhof - 1.1.10-8 + + Fix: pengine: Do not restart resources that depend on unmanaged resources + +* Thu Aug 01 2013 Andrew Beekhof - 1.1.10-7 + + Fix: crmd: Prevent recurring monitors being cancelled due to notify operations + +* Fri Jul 26 2013 Andrew Beekhof Pacemaker-1.1.10-6 +- Update source tarball to revision: 368c726 (Pacemaker-1.1.10-rc7) +- Changesets: 18 +- Diff: 9 files changed, 245 insertions(+), 170 deletions(-) + +- Features added since Pacemaker-1.1.10-rc7 + + crm_resource: Allow options to be set recursively + +- Changes since Pacemaker-1.1.10-rc7 + + Bug cl#5161 - crmd: Prevent memory leak in operation cache + + cib: Correctly read back archived configurations if the primary is corrupted + +* Mon Jul 22 2013 Andrew Beekhof - 1.1.10-5 +- Streamline spec file + +- Upstream patch for: + + cman: Only build migration tools for targets that may use them + + cib: Ensure we set up hacluster's groups in stand-alone mode + +- Update for new upstream tarball: Pacemaker-1.1.10-rc7 + + + Bug cl#5157 - Allow migration in the absence of some colocation constraints + + Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints + + Bug cl#5170 - Correctly support on-fail=block for clones + + crmd: CID#1036761 Dereference null return value + + crmd: cl#5164 - Fixes crmd crash when using pacemaker-remote + + crmd: Ensure operations for cleaned up resources don't block recovery + + crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons + + crmd: Properly handle recurring monitor operations for remote-node agent + + fencing: Correctly detect existing device entries when registering a new one + + logging: If SIGTRAP is sent before tracing is turned on, turn it on + + lrmd: Prevent use-of-NULL in client library + + pengine: cl#5128 - Support maintenance mode for a single node + + pengine: cl#5164 - Pengine segfault when
calculating transition with remote-nodes. + + pengine: Do the right thing when admins specify the internal resource instead of the clone + + systemd: Turn off auto-respawning of systemd services when the cluster starts them + +* Wed Jul 10 2013 David Vossel - 1.1.10-4 +- Fixes crmd crash when using pacemaker_remote. + +* Mon Jun 17 2013 Andrew Beekhof - 1.1.10-3 +- Update to upstream 838e41e + + + Feature: pengine: Allow active nodes in our current membership to be fenced without quorum + + Fix: attrd: Fixes deleted attributes during dc election + + Fix: corosync: Fall back to uname for local nodes + + Fix: crm_report: Find logs in compressed files + + Fix: pengine: If fencing is unavailable or disabled, block further recovery for resources that fail to stop + + Fix: systemd: Ensure we get shut down correctly by systemd + +* Sun Jun 09 2013 Andrew Beekhof - 1.1.10-2 +- Update for new upstream tarball: Pacemaker-1.1.10-rc4 + +- Features in Pacemaker-1.1.10-rc4: + + PE: Display a list of nodes on which stopped anonymous clones are not active instead of meaningless clone IDs + + crm_error: Add the ability to list and print error symbols + + crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove) + + crm_resource: Support OCF tracing when using --force-(check|start|stop) + +- Changes since Pacemaker-1.1.10-rc1 + + + Bug cl#5133 - pengine: Correctly observe on-fail=block for failed demote operation + + Bug cl#5152 - Correctly clean up fenced nodes during membership changes + + Bug cl#5153 - Correctly display clone failcounts in crm_mon + + Bug cl#5154 - Do not expire failures when on-fail=block is present + + Bug pengine: cl#5155 - Block the stop of resources if any depending resource is unmanaged + + crm_report: Correctly collect logs when 'uname -n' reports fully qualified names + + Check for and replace non-printing characters with their octal equivalent while exporting xml text + + Convert all exit codes to positive errno values + + Core: Ensure the blackbox is saved on abnormal program termination + + corosync: Detect the loss of members for which we only know the nodeid + + corosync: Nodes that can persist in sending CPG messages must be alive after all + + crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns + + crmd: Ensure all membership operations can complete while trying to cancel a transition + + crmd: Everyone who gets a fencing notification should mark the node as down + + crmd: Initiate node shutdown if another node claims to have successfully fenced us + + crm_resource: Gracefully fail when --force-* is attempted for stonith resources + + fencing: Restore the ability to manually confirm that fencing completed + + pengine: Correctly handle resources that recover before we operate on them + + pengine: Ensure per-node resource parameters are used during probes + + pengine: Implement the rest of get_timet_now() and rename to get_effective_time + + pengine: Mark unrunnable stop actions as "blocked" + + pengine: Re-initiate active recurring monitors that previously failed but have timed out + + xml: Restore the ability to embed comments in the cib + +* Wed Apr 17 2013 Andrew Beekhof - 1.1.10-1 +- Update for new upstream tarball: Pacemaker-1.1.10-rc1 +- Features added since Pacemaker-1.1.8 + + Performance enhancements for supporting 16 node clusters + + corosync: Use queues to avoid blocking when sending CPG messages + + ipc: Compress messages that exceed the configured IPC message limit + + ipc: Use queues to prevent
slow clients from blocking the server + + ipc: Use shared memory by default + + lrmd: Support nagios remote monitoring + + lrmd: Pacemaker Remote Daemon for extending pacemaker functionality outside corosync cluster. + + pengine: Check for master/slave resources that are not OCF agents + + pengine: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing + + pengine: Support for resource containers + + pengine: Support resources that require unfencing before start + +- Changes since Pacemaker-1.1.8 + + attrd: Correctly handle deletion of non-existent attributes + + Bug cl#5135 - Improved detection of the active cluster type + + Bug rhbz#913093 - Use crm_node instead of uname + + cib: Prevent ordering changes when applying xml diffs + + cib: Remove text nodes from cib replace operations + + crmd: Prevent election storms caused by getrusage() values being too close + + date/time: Bug cl#5118 - Correctly convert seconds-since-epoch to the current time + + fencing: Attempt to provide more information than just 'generic error' for failed actions + + fencing: Correctly record completed but previously unknown fencing operations + + fencing: Correctly terminate when all device options have been exhausted + + fencing: cov#739453 - String not null terminated + + fencing: Do not merge new fencing requests with stale ones from dead nodes + + fencing: Do not start fencing until entire device topology is found or query results timeout. + + fencing: Do not wait for the query timeout if all replies have arrived + + fencing: Fix passing of parameters from CMAN containing '=' + + fencing: Fix non-comparison when sorting devices by priority + + fencing: On failure, only try a topology device once from the remote level. + + fencing: Only try peers for non-topology based operations once + + fencing: Retry stonith device for duration of action's timeout period. + + ipc: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies + + mcp: Re-attach to existing pacemaker components when mcp fails + + pengine: Any location constraint for the slave role applies to all roles + + pengine: Bug cl#5101 - Ensure stop order is preserved for partially active groups + + pengine: Bug cl#5140 - Allow set members to be stopped when the subsequent set has require-all=false + + pengine: Bug cl#5143 - Prevent shuffling of anonymous master/slave instances + + pengine: Bug rhbz#880249 - Ensure orphan masters are demoted before being stopped + + pengine: Bug rhbz#880249 - Teach the PE how to recover masters into primitives + + pengine: cl#5025 - Automatically clear failcount for start/monitor failures after resource parameters change + + pengine: cl#5099 - Probe operation uses the timeout value from the minimum interval monitor by default (#bnc776386) + + pengine: cl#5111 - When clone/master child rsc has on-fail=stop, ensure all children stop on failure.
+ + pengine: cl#5142 - Do not delete orphaned children of an anonymous clone + + pengine: Correctly unpack active anonymous clones + + pengine: Ensure previous migrations are closed out before attempting another one + + pengine: rhbz#902459 - Remove rsc node status for orphan resources + + Replace the use of the insecure mktemp(3) with mkstemp(3) + +* Thu Apr 04 2013 David Vossel - 1.1.8-6 + Fixes deprecated use of gnutls 3.1 + +* Thu Apr 04 2013 David Vossel - 1.1.8-5 + Rebuilt for gnutls 3.1 + +* Thu Oct 25 2012 Andrew Beekhof - 1.1.8-4 +- Update for new upstream tarball: 5db5f53 + + + High: mcp: Re-attach to existing pacemaker components when pacemakerd fails + + High: pengine: cl#5111 - When clone/master child rsc has on-fail=stop, ensure all children stop on failure. + + High: Replace the use of the insecure mktemp(3) with mkstemp(3) + + High: Core: Correctly process XML diffs involving element removal + + High: PE: Correctly unpack active anonymous clones + + High: PE: Fix clone_zero() and clone_strip() for single character resource names + + High: IPC: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies + + High: PE: Bug cl#5101 - Ensure stop order is preserved for partially active groups + + High: fencing: On failure, only try a topology device once from the remote level. + + High: fencing: Retry stonith device for duration of action's timeout period. + + High: PE: Fix memory leak on processing message (bnc#780224) + + High: fencing: Support 'on_target' option in fencing device metadata for forcing unfence on target node + + High: PE: Support resources that require unfencing before start + + High: PE: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing + + High: mcp: Only define HA_DEBUGLOG to avoid agent calls to ocf_log printing everything twice + + High: fencing: Do not start fencing until entire device topology is found or query results timeout.
+ + High: Cluster: Allow cman and corosync 2.0 nodes to use a name other than uname() + +* Fri Sep 21 2012 Andrew Beekhof 1.1.8-3 +- Only build for i386 and x86_64 as directed + +* Fri Sep 21 2012 Andrew Beekhof 1.1.8-1 +- Rebuild for upstream 1.1.8 release +- Documentation disabled pending a functional publican/ImageMagick combination + +- Statistics: + Changesets: 1019 + Diff: 2107 files changed, 117258 insertions(+), 73606 deletions(-) + +- See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for full details + + + New IPC implementation from libqb + + New logging implementation from libqb + + Quieter - info, debug and trace logs are no longer sent to syslog + + Dropped dependency on cluster-glue + + Config and core directories no longer located in heartbeat directories + + Support for managing systemd services + + Rewritten local resource management daemon + + Version bumps for every shared library due to API cleanups + + Removes crm shell, install/use pcs shell and GUI instead + +* Fri Jul 20 2012 Fedora Release Engineering - 1.1.7-2.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Wed Mar 28 2012 Andrew Beekhof Pacemaker-1.1.7-2 +- Reinstate the ghost directive for /var/run/crm + +* Wed Mar 28 2012 Andrew Beekhof Pacemaker-1.1.7-1 +- Update source tarball to upstream release: Pacemaker-1.1.7 +- See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for details + +* Thu Feb 16 2012 Andrew Beekhof 1.1.7-0.3-7742926.git +- New upstream tarball: 7742926 +- Additional Provides and Obsoletes directives to enable upgrading from heartbeat +- Rebuild now that the Corosync CFG API has been removed + +* Thu Feb 02 2012 Andrew Beekhof 1.1.7-0.2-bc7c125.git +- Additional Provides and Obsoletes directives to enable upgrading from rgmanager + +* Thu Feb 02 2012 Andrew Beekhof 1.1.7-0.1-bc7c125.git +- New upstream tarball: bc7c125 +- Pre-release 1.1.7 build to deal with the removal of cman and support for corosync plugins +- Add libqb as a dependency + +* Fri Jan 13 2012 Fedora Release Engineering - 1.1.6-3.1 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Mon Sep 26 2011 Andrew Beekhof 1.1.6-3 +- New upstream tarball: 89678d4 +- Move man pages to the correct subpackages + +* Mon Sep 26 2011 Andrew Beekhof 1.1.6-2 +- Do not build in support for heartbeat, snmp, esmtp by default +- Create a package for cluster-unaware libraries to minimize our + footprint on non-cluster nodes +- Better package descriptions + +* Wed Sep 07 2011 Andrew Beekhof 1.1.6-1 +- Upstream release of 1.1.6 +- See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details + +- Disabled eSMTP and SNMP support. Painful to configure and rarely used.
+- Created cli sub-package for non-cluster usage
+
+* Thu Jul 21 2011 Petr Sabata - 1.1.5-3.2
+- Perl mass rebuild
+
+* Wed Jul 20 2011 Petr Sabata - 1.1.5-3.1
+- Perl mass rebuild
+
+* Mon Jul 11 2011 Andrew Beekhof 1.1.5-3
+- Rebuild for new snmp .so
+
+* Fri Jun 17 2011 Marcela Mašláňová - 1.1.5-2.2
+- Perl mass rebuild
+
+* Fri Jun 10 2011 Marcela Mašláňová - 1.1.5-2.1
+- Perl 5.14 mass rebuild
+
+* Wed Apr 27 2011 Andrew Beekhof 1.1.5-2
+- Mark /var/run directories with ghost directive
+  Resolves: rhbz#656654
+
+* Wed Apr 27 2011 Andrew Beekhof 1.1.5-1
+- New upstream release plus patches for CMAN integration
+
+* Tue Feb 08 2011 Fedora Release Engineering - 1.1.4-5.1
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Tue Jan 11 2011 Andrew Beekhof 1.1.4-5
+- Re-enable corosync and heartbeat support with correct bcond variable
+  usage
+
+* Wed Dec 8 2010 Fabio M. Di Nitto 1.1.4-4
+- Temporarily drop publican doc build
+
+* Wed Dec 8 2010 Fabio M. Di Nitto 1.1.4-3
+- Fix publican build on x86
+
+* Wed Dec 8 2010 Fabio M. Di Nitto 1.1.4-2
+- Drop double source entry and 22Mb from the srpm
+
+* Mon Nov 15 2010 Andrew Beekhof 1.1.4-1
+- Upstream release of 1.1.4
+- See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details
+
+* Wed Sep 29 2010 jkeating - 1.1.3-1.1
+- Rebuilt for gcc bug 634757
+
+* Tue Sep 21 2010 Andrew Beekhof - 1.1.3-1
+- Upstream release of 1.1.3
+ + High: crmd: Use the correct define/size for lrm resource IDs
+ + High: crmd: Bug lf#2458 - Ensure stop actions always have the relevant resource attributes
+ + High: crmd: Ensure we activate the DC timer if we detect an alternate DC
+ + High: mcp: Correctly initialize the string containing the list of active daemons
+ + High: mcp: Fix the expansion of the pid file in the init script
+ + High: mcp: Tell chkconfig we need to shut down early on
+ + High: PE: Bug lf#2476 - Repair on-fail=block for groups and primitive resources
+ + High: PE: Do not demote resources because something that requires it can't run
+ + High: PE: Rewrite the ordering constraint logic for simplicity, clarity and maintainability
+ + High: PE: Wait until stonith is available, don't fall back to shutdown for nodes requesting termination
+ + High: PE: Prevent segfault by ensuring the arguments to do_calculations() are initialized
+ + High: stonith: Bug lf#2461 - Prevent segfault by not looking up operations if the hashtable hasn't been initialized yet
+ + High: Stonith: Bug lf#2473 - Ensure stonith operations complete within the timeout and are terminated if they run too long
+ + High: stonith: Bug lf#2473 - Gracefully handle remote operations that arrive late (after we've done notifications)
+ + High: stonith: Bug lf#2473 - Add the timeout at the top level where the daemon is looking for it
+ + High: stonith: Bug lf#2473 - Ensure timeouts are included for fencing operations
+ + High: Stonith: Use the timeout specified by the user
+ + High: Tools: Bug lf#2456 - Fix assertion failure in crm_resource
+
+* Mon Jul 26 2010 Andrew Beekhof - 1.1.3-0.1-b3cb4f4a30ae.hg
+- Pre-release version of 1.1.3
+ + High: ais: Bug lf#2401 - Improved processing when the peer crmd processes join/leave
+ + High: ais: Fix list of active processes sent to clients (bnc#603685)
+ + High: ais: Move the code for finding uid before the fork so that the child does no logging
+ + High: ais: Resolve coverity CONSTANT_EXPRESSION_RESULT defects
+ + High: cib: Also free query result for xpath operations that return more than one hit
+ + High: cib: Attempt to resolve memory corruption when forking a child to write the cib to disk
+ + High: cib: Correctly free memory when writing out the cib to disk
+ + High: cib: Fix the application of unversioned diffs
+ + High: cib: Remove old developmental error logging
+ + High: cib: Restructure the 'valid peer' check for deciding which instructions to ignore
+ + High: Core: Bug lf#2401 - Backed out changeset 6e6980376f01
+ + High: Core: Correctly unpack HA_Messages containing multiple entries with the same name
+ + High: Core: crm_count_member() should only track nodes that have the full stack up
+ + High: Core: New developmental logging system inspired by the kernel and a PoC from Lars Ellenberg
+ + High: crmd: All nodes should see status updates, not just the DC
+ + High: crmd: Allow non-DC nodes to clear failcounts too
+ + High: crmd: Base DC election on process relative uptime
+ + High: crmd: Bug lf#2439 - cancel_op() can also return HA_RSCBUSY
+ + High: crmd: Bug lf#2439 - Handle asynchronous notification of resource deletion events
+ + High: crmd: Fix assertion failure when performing async resource failures
+ + High: crmd: Fix handling of async resource deletion results
+ + High: crmd: Include the action for crm graph operations
+ + High: crmd: Make sure the membership cache is accurate after a successful fencing operation
+ + High: crmd: Make sure we always poke the FSA after a transition to clear any TE_HALT actions
+ + High: crmd: Offer crm-level membership once the peer starts the crmd process
+ + High: crmd: Only need to request quorum update for plugin based clusters
+ + High: crmd: Prevent everyone from losing DC elections by correctly initializing all relevant variables
+ + High: crmd: Prevent segmentation fault
+ + High: crmd: Several fixes for async resource delete
+ + High: mcp: Add missing headers when built without heartbeat support
+ + High: mcp: New master control process for (re)spawning pacemaker daemons
+ + High: PE: Avoid creating invalid ordering constraints for probes that are not needed
+ + High: PE: Bug lf#1959 - Failed unmanaged resources should not prevent other services from shutting down
+ + High: PE: Bug lf#2422 - Ordering dependencies on partially active groups not observed properly
+ + High: PE: Bug lf#2424 - Use notify operation definition if it exists in the configuration
+ + High: PE: Bug lf#2433 - No services should be stopped until probes finish
+ + High: PE: Bug lf#2453 - Enforce clone ordering in the absence of colocation constraints
+ + High: PE: Correctly detect when there is a real failcount that expired and needs to be cleared
+ + High: PE: Correctly handle pseudo action creation
+ + High: PE: Correctly order clone startup after group/clone start
+ + High: PE: Fix colocation for interleaved clones
+ + High: PE: Fix colocation with partially active groups
+ + High: PE: Fix potential use-after-free defect from coverity
+ + High: PE: Fix previous merge
+ + High: PE: Fix use-after-free in order_actions() reported by valgrind
+ + High: PE: Prevent endless loop when looking for operation definitions in the configuration
+ + High: Resolve coverity RESOURCE_LEAK defects
+ + High: Shell: Complete the transition to using crm_attribute instead of crm_failcount and crm_standby
+ + High: stonith: Advertise stonith-ng options in the metadata
+ + High: stonith: Correctly parse pcmk_host_list parameters that appear on a single line
+ + High: stonith: Map poweron/poweroff back to on/off expected by the stonith tool from cluster-glue
+ + High: stonith: Pass the configuration to the stonith program via environment variables (bnc#620781)
+ + High: Support starting plugin-based Pacemaker clusters with the MCP as well
+ + High: tools: crm_report - corosync.conf won't necessarily contain the text 'pacemaker' anymore
+ + High: tools: crm_simulate - Resolve coverity USE_AFTER_FREE defect
+ + High: Tools: Drop the 'pingd' daemon and resource agent in favor of ocf:pacemaker:ping
+ + High: Tools: Fix recently introduced use-of-NULL
+ + High: Tools: Fix use-after-free defect from coverity
+
+* Wed Jul 21 2010 David Malcolm - 1.1.2-5.1
+- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild
+
+* Fri Jul 9 2010 Dan Horák - 1.1.2-5
+- re-enable AIS cluster on s390(x)
+
+* Fri Jul 9 2010 Dan Horák - 1.1.2-4
+- AIS cluster not available on s390(x)
+
+* Mon Jun 21 2010 Andrew Beekhof - 1.1.2-3
+- publican is only available as a dependency on i386/x86_64 machines
+
+* Fri Jun 11 2010 Andrew Beekhof - 1.1.2-2
+- Resolves rhbz#602239 - Added patch to documentation so that it passes validation
+- High: Core: Bug lf#2401 - Backed out changeset 6e6980376f01
+
+* Tue Jun 01 2010 Marcela Maslanova - 1.1.2-1.1
+- Mass rebuild with perl-5.12.0
+
+* Wed May 12 2010 Andrew Beekhof - 1.1.2-1
+- Update the tarball from the upstream 1.1.2 release
+ + High: ais: Bug lf#2340 - Force rogue child processes to terminate after waiting 2.5 minutes
+ + High: ais: Bug lf#2359 - Default expected votes to 2 inside Corosync/OpenAIS plugin
+ + High: ais: Bug lf#2359 - expected-quorum-votes not correctly updated after membership change
+ + High: ais: Bug rhbz#525552 - Move non-threadsafe calls to setenv() to after the fork()
+ + High: ais: Do not count votes from offline nodes and calculate current votes before sending quorum data
+ + High: ais: Ensure the list of active processes sent to clients is always up-to-date
+ + High: ais: Fix previous commit, actually return a result in get_process_list()
+ + High: ais: Fix two more uses of getpwnam() in non-thread-safe locations
+ + High: ais: Look for the correct conf variable for turning on file logging
+ + High: ais: Need to find a better and thread-safe way to set core_uses_pid. Disable for now.
+ + High: ais: Use the threadsafe version of getpwnam
+ + High: Core: Bug lf#2414 - Prevent use-after-free reported by valgrind when doing xpath based deletions
+ + High: Core: Bump the feature set due to the new failcount expiry feature
+ + High: Core: Fix memory leak in replace_xml_child() reported by valgrind
+ + High: Core: Fix memory leaks exposed by valgrind
+ + High: crmd: Bug 2401 - Improved detection of partially active peers
+ + High: crmd: Bug bnc#578644 - Improve handling of cancelled operations caused by resource cleanup
+ + High: crmd: Bug lf#2379 - Ensure the cluster terminates when the PE is not available
+ + High: crmd: Bug lf#2414 - Prevent use-after-free of the PE connection after it dies
+ + High: crmd: Bug lf#2414 - Prevent use-after-free of the stonith-ng connection
+ + High: crmd: Do not allow the target_rc to be misused by resource agents
+ + High: crmd: Do not ignore action timeouts based on FSA state
+ + High: crmd: Ensure we don't get stuck in S_PENDING if we lose an election to someone that never talks to us again
+ + High: crmd: Fix memory leaks exposed by valgrind
+ + High: crmd: Remove race condition that could lead to multiple instances of a clone being active on a machine
+ + High: crmd: Send erase_status_tag() calls to the local CIB when the DC is fenced, since there is no DC to accept them
+ + High: crmd: Use global fencing notifications to prevent secondary fencing operations of the DC
+ + High: fencing: Account for stonith_get_info() always returning a pointer to the same static buffer
+ + High: PE: Allow startup probes to be disabled - their calculation is a major bottleneck for very large clusters
+ + High: PE: Bug lf#2317 - Avoid needless restart of primitive depending on a clone
+ + High: PE: Bug lf#2358 - Fix master-master anti-colocation
+ + High: PE: Bug lf#2361 - Ensure clones observe mandatory ordering constraints if the LHS is unrunnable
+ + High: PE: Bug lf#2383 - Combine failcounts for all instances of an anonymous clone on a host
+ + High: PE: Bug lf#2384 - Fix intra-set colocation and ordering
+ + High: PE: Bug lf#2403 - Enforce mandatory promotion (colocation) constraints
+ + High: PE: Bug lf#2412 - Correctly locate clone instances by their prefix
+ + High: PE: Correctly implement optional colocation between primitives and clone resources
+ + High: PE: Do not be so quick to pull the trigger on nodes that are coming up
+ + High: PE: Fix memory leaks exposed by valgrind
+ + High: PE: Fix memory leaks reported by valgrind
+ + High: PE: Repair handling of unordered groups in RHS ordering constraints
+ + High: PE: Rewrite native_merge_weights() to fix a use-after-free
+ + High: PE: Suppress duplicate ordering constraints to achieve orders of magnitude speed increases for large clusters
+ + High: Shell: add support for xml in cli
+ + High: Shell: always reload status if working with the cluster (bnc#590035)
+ + High: Shell: check timeouts also against the default-action-timeout property
+ + High: Shell: Default to using the status section from the live CIB (bnc#592762)
+ + High: Shell: edit multiple meta_attributes sets in resource management (lf#2315)
+ + High: Shell: enable comments (lf#2221)
+ + High: Shell: implement new cibstatus interface and commands (bnc#580492)
+ + High: Shell: improve configure commit (lf#2336)
+ + High: Shell: new cibstatus import command (bnc#585471)
+ + High: Shell: new configure filter command
+ + High: Shell: restore error reporting in options
+ + High: Shell: split shell into modules
+ + High: Shell: support for the utilization element (old patch for the new structure)
+ + High: Shell: update previous node lookup procedure to include the id where necessary
+ + High: Tools: crm_mon - fix memory leaks exposed by valgrind
+
+* Thu Feb 11 2010 Andrew Beekhof - 1.1.1-0.1-60b7753f7310.hg
+- Update the tarball from upstream to version 60b7753f7310
+ + First public release of the 1.1 series
+
+* Wed Dec 9 2009 Andrew Beekhof - 1.0.5-5
+- Include patch of changeset 66b7bfd467f3:
+  Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf
+
+* Thu Oct 29 2009 Andrew Beekhof - 1.0.5-4
+- Include the fixes from CoroSync integration testing
+- Move the resource templates - they are not documentation
+- Ensure documentation is placed in a standard location
+- Exclude documentation that is included elsewhere in the package
+
+- Update the tarball from upstream to version ee19d8e83c2a
+ + High: cib: Correctly clean up when both plaintext and tls remote ports are requested
+ + High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisons
+ + High: PE: Bug lf#2197 - Allow master instance placement to be influenced by colocation constraints
+ + High: PE: Make sure promote/demote pseudo actions are created correctly
+ + High: PE: Prevent target-role from promoting more than master-max instances
+ + High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage
+ + High: ais: Prevent deadlock - don't try to release IPC message if the connection failed
+ + High: cib: For validation errors, send back the full CIB so the client can display the errors
+ + High: cib: Prevent use-after-free for remote plaintext connections
+ + High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat
+ + High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled
+ + High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change
+ + High: PE: Bug lf#2170 - stop-all-resources option had no effect
+ + High: PE: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which cannot
+ + High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined
+ + High: PE: Do not include master score if it would prevent allocation
+ + High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms)
+ + High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync
+ + High: ais: Gracefully handle changes to the AIS nodeid
+ + High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE
+ + High: crmd: Prevent use-after-free with LOG_DEBUG_3
+ + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672)
+ + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm
+ + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild
+ + Medium: PE: Bug lf#2178 - Indicate unmanaged clones
+ + Medium: PE: Bug lf#2180 - Include node information for all failed ops
+ + Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint
+ + Medium: PE: Correctly log resources that would like to start but cannot
+ + Medium: PE: Stop ptest from logging to syslog
+ + Medium: ais: Include version details in plugin name
+ + Medium: crmd: Requery the resource metadata after every start operation
+
+* Fri Oct 9 2009 Fabio M. Di Nitto - 1.0.5-3
+- rebuilt with new net-snmp
+
+* Fri Aug 21 2009 Tomas Mraz - 1.0.5-2.1
+- rebuilt with new openssl
+
+* Wed Aug 19 2009 Andrew Beekhof - 1.0.5-2
+- Add versioned perl dependency as specified by
+  https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl
+- No longer remove RPATH data; it prevented us from finding libperl.so, and no other
+  libraries were being hardcoded
+- Compile in support for heartbeat
+- Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements
+  depending on which stacks are supported
+
+* Mon Aug 17 2009 Andrew Beekhof - 1.0.5-1
+- Add dependency on resource-agents
+- Use the version of the configure macro that supplies --prefix, --libdir, etc
+- Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final)
+ + High: Tools: crm_resource - Advertise --move instead of --migrate
+ + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater
+ + Medium: crmd: Note that dc-deadtime can be used to mask the brokenness of some switches
+
+* Tue Aug 11 2009 Ville Skyttä - 1.0.5-0.7.c9120a53a6ae.hg
+- Use bzipped upstream tarball.
+
+* Wed Jul 29 2009 Andrew Beekhof - 1.0.5-0.6.c9120a53a6ae.hg
+- Add back missing build auto* dependencies
+- Minor cleanups to the install directive
+
+* Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.5.c9120a53a6ae.hg
+- Add a leading zero to the revision when alphatag is used
+
+* Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.4.c9120a53a6ae.hg
+- Incorporate the feedback from the cluster-glue review
+- Realistically, the version is a 1.0.5 pre-release
+- Use the global directive instead of define for variables
+- Use the haclient/hacluster group/user instead of daemon
+- Use the _configure macro
+- Fix install dependencies
+
+* Fri Jul 24 2009 Andrew Beekhof - 1.0.4-3
+- Include an AUTHORS and license file in each package
+- Change the library package name to pacemaker-libs to be more
+  Fedora compliant
+- Remove execute permissions from xml related files
+- Reference the new cluster-glue devel package name
+- Update the tarball from upstream to version c9120a53a6ae
+ + High: PE: Only prevent migration if the clone dependency is stopping/starting on the target node
+ + High: PE: Bug 2160 - Don't shuffle clones due to colocation
+ + High: PE: New implementation of the resource migration (not stop/start) logic
+ + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options
+ + Medium: PE: Prevent use-of-NULL in find_first_action()
+ + Low: Build: Include licensing files
+
+* Tue Jul 14 2009 Andrew Beekhof - 1.0.4-2
+- Reference authors from the project AUTHORS file instead of listing in description
+- Change Source0 to reference the project's Mercurial repo
+- Cleaned up the summaries and descriptions
+- Incorporate the results of Fedora package self-review
+
+* Tue Jul 14 2009 Andrew Beekhof - 1.0.4-1
+- Initial checkin