Browse Source

pacemaker initial package creation

Signed-off-by: basebuilder_pel7ppc64bebuilder0 <basebuilder@powerel.org>
master
basebuilder_pel7ppc64bebuilder0 7 years ago
parent
commit
df4eea222a
  1. 99
      SOURCES/001-new-behavior.patch
  2. 7624
      SOURCES/002-fixes.patch
  3. 157
      SOURCES/003-cleanup.patch
  4. 419
      SOURCES/004-cleanup.patch
  5. 62
      SOURCES/005-cleanup.patch
  6. 296
      SOURCES/006-leaks.patch
  7. 92
      SOURCES/007-bundles.patch
  8. 145
      SOURCES/008-quorum.patch
  9. 97
      SOURCES/009-crm_resource.patch
  10. 34
      SOURCES/010-crm_master.patch
  11. 28
      SOURCES/lrmd-protocol-version.patch
  12. 25
      SOURCES/rhbz-url.patch
  13. 2011
      SPECS/pacemaker.spec

99
SOURCES/001-new-behavior.patch

@ -0,0 +1,99 @@
From d0278eca6f2f8d4e707f73d12b4f8161f07e42fe Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 2 Nov 2017 18:26:03 -0500
Subject: [PATCH 1/2] Feature: tools: enable new crm_resource
--cleanup/--refresh behavior

it was temporarily disabled by 3576364
---
tools/crm_resource.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 92255df..356bb05 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -212,14 +212,11 @@ static struct crm_option long_options[] = {
},
{
"cleanup", no_argument, NULL, 'C',
-#if 0
- // new behavior disabled until 2.0.0
"\t\tDelete failed operations from a resource's history allowing its current state to be rechecked.\n"
"\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
},
{
"refresh", no_argument, NULL, 'R',
-#endif
"\t\tDelete resource's history (including failures) so its current state is rechecked.\n"
"\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
"\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be cleaned"
@@ -384,7 +381,6 @@ static struct crm_option long_options[] = {
{"un-migrate", no_argument, NULL, 'U', NULL, pcmk_option_hidden},
{"un-move", no_argument, NULL, 'U', NULL, pcmk_option_hidden},
- {"refresh", 0, 0, 'R', NULL, pcmk_option_hidden}, // remove this line for 2.0.0
{"reprobe", no_argument, NULL, 'P', NULL, pcmk_option_hidden},
{"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph},
@@ -645,7 +641,7 @@ main(int argc, char **argv)
if (cib_file == NULL) {
require_crmd = TRUE;
}
- just_errors = FALSE; // disable until 2.0.0
+ just_errors = TRUE;
rsc_cmd = 'C';
find_flags = pe_find_renamed|pe_find_anon;
break;
--
1.8.3.1


From b48ceeb041cee65a9b93b9b76235e475fa1a128f Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Mon, 16 Oct 2017 09:45:18 -0500
Subject: [PATCH 2/2] Feature: crmd: default record-pending to TRUE

---
crmd/lrm.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/crmd/lrm.c b/crmd/lrm.c
index eb4e16e..36dc076 100644
--- a/crmd/lrm.c
+++ b/crmd/lrm.c
@@ -2061,25 +2061,22 @@ stop_recurring_actions(gpointer key, gpointer value, gpointer user_data)
static void
record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op)
{
+ const char *record_pending = NULL;
+
CRM_CHECK(node_name != NULL, return);
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(op != NULL, return);
- if (op->op_type == NULL
+ if ((op->op_type == NULL) || (op->params == NULL)
|| safe_str_eq(op->op_type, CRMD_ACTION_CANCEL)
|| safe_str_eq(op->op_type, CRMD_ACTION_DELETE)) {
return;
}
- if (op->params == NULL) {
+ // defaults to true
+ record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING);
+ if (record_pending && !crm_is_true(record_pending)) {
return;
-
- } else {
- const char *record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING);
-
- if (record_pending == NULL || crm_is_true(record_pending) == FALSE) {
- return;
- }
}
op->call_id = -1;
--
1.8.3.1

7624
SOURCES/002-fixes.patch

File diff suppressed because it is too large Load Diff

157
SOURCES/003-cleanup.patch

@ -0,0 +1,157 @@
From c2d5c19a863f407a034a63f2877eb5faf7036d59 Mon Sep 17 00:00:00 2001
From: "Gao,Yan" <ygao@suse.com>
Date: Fri, 8 Dec 2017 14:47:40 +0100
Subject: [PATCH 1/2] Refactor: tools: crm_resource - Functionize cleaning up
resource failures

---
tools/crm_resource.c | 26 ++------------------------
tools/crm_resource.h | 3 +++
tools/crm_resource_runtime.c | 36 ++++++++++++++++++++++++++++++++++++
3 files changed, 41 insertions(+), 24 deletions(-)

diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index f93f688..4ddcef4 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1094,31 +1094,9 @@ main(int argc, char **argv)
} else if (rsc_cmd == 'C' && just_errors) {
crmd_replies_needed = 0;
- for (xmlNode *xml_op = __xml_first_child(data_set.failed); xml_op != NULL;
- xml_op = __xml_next(xml_op)) {
-
- const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
- const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
- const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
- const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
-
- if(resource_name == NULL) {
- continue;
- } else if(host_uname && safe_str_neq(host_uname, node)) {
- continue;
- } else if(rsc_id && safe_str_neq(rsc_id, resource_name)) {
- continue;
- } else if(operation && safe_str_neq(operation, task)) {
- continue;
- } else if(interval && safe_str_neq(interval, task_interval)) {
- continue;
- }
- crm_debug("Erasing %s failure for %s (%s detected) on %s",
- task, rsc->id, resource_name, node);
- rc = cli_resource_delete(crmd_channel, node, rsc, task,
- task_interval, &data_set);
- }
+ rc = cli_resource_delete_failures(crmd_channel, host_uname, rsc, operation,
+ interval, &data_set);
if(rsc && (rc == pcmk_ok) && (BE_QUIET == FALSE)) {
/* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index 0b8dd2a..e28c9ef 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -76,6 +76,9 @@ int cli_resource_search(resource_t *rsc, const char *requested_name,
int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
resource_t *rsc, const char *operation,
const char *interval, pe_working_set_t *data_set);
+int cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname,
+ resource_t *rsc, const char *operation,
+ const char *interval, pe_working_set_t *data_set);
int cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib);
int cli_resource_move(resource_t *rsc, const char *rsc_id,
const char *host_name, cib_t *cib,
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index ffe4e5d..d250a07 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -655,6 +655,42 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
return rc;
}
+int
+cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname,
+ resource_t *rsc, const char *operation,
+ const char *interval, pe_working_set_t *data_set)
+{
+ int rc = pcmk_ok;
+
+ for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
+ xml_op = __xml_next(xml_op)) {
+
+ const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
+ const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
+ const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
+ const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
+
+ if(resource_name == NULL) {
+ continue;
+ } else if(host_uname && safe_str_neq(host_uname, node)) {
+ continue;
+ } else if(rsc->id && safe_str_neq(rsc->id, resource_name)) {
+ continue;
+ } else if(operation && safe_str_neq(operation, task)) {
+ continue;
+ } else if(interval && safe_str_neq(interval, task_interval)) {
+ continue;
+ }
+
+ crm_debug("Erasing %s failure for %s (%s detected) on %s",
+ task, rsc->id, resource_name, node);
+ rc = cli_resource_delete(crmd_channel, node, rsc, task,
+ task_interval, data_set);
+ }
+
+ return rc;
+}
+
void
cli_resource_check(cib_t * cib_conn, resource_t *rsc)
{
--
1.8.3.1


From 170ec0afcddb01fcfb8c2e8c86bc0e53594a42f9 Mon Sep 17 00:00:00 2001
From: "Gao,Yan" <ygao@suse.com>
Date: Fri, 8 Dec 2017 16:22:54 +0100
Subject: [PATCH 2/2] Fix: tools: crm_resource --cleanup for non-primitive
resources

---
tools/crm_resource_runtime.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)

diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index d250a07..1048636 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -662,6 +662,24 @@ cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname,
{
int rc = pcmk_ok;
+ if (rsc == NULL) {
+ return -ENXIO;
+
+ } else if (rsc->children) {
+ GListPtr lpc = NULL;
+
+ for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
+ resource_t *child = (resource_t *) lpc->data;
+
+ rc = cli_resource_delete_failures(crmd_channel, host_uname, child, operation,
+ interval, data_set);
+ if(rc != pcmk_ok) {
+ return rc;
+ }
+ }
+ return pcmk_ok;
+ }
+
for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
xml_op = __xml_next(xml_op)) {
--
1.8.3.1

419
SOURCES/004-cleanup.patch

@ -0,0 +1,419 @@
From 7a813755269f00d7b815e819636841af991762c0 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Mon, 11 Dec 2017 12:23:06 -0600
Subject: [PATCH] Fix: tools: crm_resource --cleanup

The new "failures only" mode of crm_resource --cleanup had multiple issues,
including not working without --resource specified, comparing a
user-provided interval string against a milliseconds interval, and
considering no interval specified as all intervals rather than 0
but only when clearing LRM history entries.
---
tools/crm_resource.c | 35 +++---
tools/crm_resource.h | 9 +-
tools/crm_resource_runtime.c | 258 ++++++++++++++++++++++++++++++-------------
3 files changed, 202 insertions(+), 100 deletions(-)

diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 4ddcef4..5152004 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1092,14 +1092,20 @@ main(int argc, char **argv)
rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id,
prop_name, cib_conn, &data_set);
- } else if (rsc_cmd == 'C' && just_errors) {
+ } else if ((rsc_cmd == 'C') && rsc) {
+ if (do_force == FALSE) {
+ rsc = uber_parent(rsc);
+ }
crmd_replies_needed = 0;
- rc = cli_resource_delete_failures(crmd_channel, host_uname, rsc, operation,
- interval, &data_set);
+ crm_debug("%s of %s (%s requested) on %s",
+ (just_errors? "Clearing failures" : "Re-checking the state"),
+ rsc->id, rsc_id, (host_uname? host_uname : "all hosts"));
+ rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation,
+ interval, just_errors, &data_set);
- if(rsc && (rc == pcmk_ok) && (BE_QUIET == FALSE)) {
- /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */
+ if ((rc == pcmk_ok) && !BE_QUIET) {
+ // Show any reasons why resource might stay stopped
cli_resource_check(cib_conn, rsc);
}
@@ -1107,22 +1113,9 @@ main(int argc, char **argv)
start_mainloop();
}
- } else if ((rsc_cmd == 'C') && rsc) {
- if(do_force == FALSE) {
- rsc = uber_parent(rsc);
- }
-
- crm_debug("Re-checking the state of %s (%s requested) on %s",
- rsc->id, rsc_id, host_uname);
- crmd_replies_needed = 0;
- rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation,
- interval, &data_set);
-
- if(rc == pcmk_ok && BE_QUIET == FALSE) {
- /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */
- cli_resource_check(cib_conn, rsc);
- }
-
+ } else if (rsc_cmd == 'C' && just_errors) {
+ rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval,
+ &data_set);
if (rc == pcmk_ok) {
start_mainloop();
}
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index e28c9ef..0ac51f2 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -75,10 +75,11 @@ int cli_resource_search(resource_t *rsc, const char *requested_name,
pe_working_set_t *data_set);
int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
resource_t *rsc, const char *operation,
- const char *interval, pe_working_set_t *data_set);
-int cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname,
- resource_t *rsc, const char *operation,
- const char *interval, pe_working_set_t *data_set);
+ const char *interval, bool just_failures,
+ pe_working_set_t *data_set);
+int cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name,
+ const char *operation, const char *interval,
+ pe_working_set_t *data_set);
int cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib);
int cli_resource_move(resource_t *rsc, const char *rsc_id,
const char *host_name, cib_t *cib,
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 1048636..bdebb0b 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -532,15 +532,129 @@ rsc_fail_name(resource_t *rsc)
return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
}
+static int
+clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname,
+ const char *rsc_id, pe_working_set_t *data_set)
+{
+ int rc = pcmk_ok;
+
+ /* Erase the resource's entire LRM history in the CIB, even if we're only
+ * clearing a single operation's fail count. If we erased only entries for a
+ * single operation, we might wind up with a wrong idea of the current
+ * resource state, and we might not re-probe the resource.
+ */
+ rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id,
+ TRUE, data_set);
+ if (rc != pcmk_ok) {
+ return rc;
+ }
+ crmd_replies_needed++;
+
+ crm_trace("Processing %d mainloop inputs", crmd_replies_needed);
+ while (g_main_context_iteration(NULL, FALSE)) {
+ crm_trace("Processed mainloop input, %d still remaining",
+ crmd_replies_needed);
+ }
+
+ if (crmd_replies_needed < 0) {
+ crmd_replies_needed = 0;
+ }
+ return rc;
+}
+
+static int
+clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
+ const char *rsc_id, const char *operation,
+ const char *interval, pe_working_set_t *data_set)
+{
+ int rc = pcmk_ok;
+ const char *failed_value = NULL;
+ const char *interval_ms_str = NULL;
+ GHashTable *rscs = NULL;
+ GHashTableIter iter;
+
+ /* Create a hash table to use as a set of resources to clean. This lets us
+ * clean each resource only once (per node) regardless of how many failed
+ * operations it has.
+ */
+ rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
+
+ // Normalize interval to milliseconds for comparison to history entry
+ if (operation) {
+ interval_ms_str = crm_strdup_printf("%llu", crm_get_interval(interval));
+ }
+
+ for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
+ xml_op = __xml_next(xml_op)) {
+
+ // No resource specified means all resources match
+ failed_value = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
+ if (rsc_id == NULL) {
+ rsc_id = failed_value;
+ } else if (safe_str_neq(rsc_id, failed_value)) {
+ continue;
+ }
+
+ // Host name should always have been provided by this point
+ failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
+ if (safe_str_neq(node_name, failed_value)) {
+ continue;
+ }
+
+ // No operation specified means all operations match
+ if (operation) {
+ failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
+ if (safe_str_neq(operation, failed_value)) {
+ continue;
+ }
+
+ // Interval (if operation was specified) defaults to 0 (not all)
+ failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
+ if (safe_str_neq(interval_ms_str, failed_value)) {
+ continue;
+ }
+ }
+
+ g_hash_table_add(rscs, (gpointer) rsc_id);
+ }
+
+ g_hash_table_iter_init(&iter, rscs);
+ while (g_hash_table_iter_next(&iter, (gpointer *) &rsc_id, NULL)) {
+ crm_debug("Erasing failures of %s on %s", rsc_id, node_name);
+ rc = clear_rsc_history(crmd_channel, node_name, rsc_id, data_set);
+ if (rc != pcmk_ok) {
+ return rc;
+ }
+ }
+ g_hash_table_destroy(rscs);
+ return rc;
+}
+
+static int
+clear_rsc_fail_attrs(resource_t *rsc, const char *operation,
+ const char *interval, node_t *node)
+{
+ int rc = pcmk_ok;
+ int attr_options = attrd_opt_none;
+ char *rsc_name = rsc_fail_name(rsc);
+
+ if (is_remote_node(node)) {
+ attr_options |= attrd_opt_remote;
+ }
+ rc = attrd_clear_delegate(NULL, node->details->uname, rsc_name, operation,
+ interval, NULL, attr_options);
+ free(rsc_name);
+ return rc;
+}
+
int
cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
resource_t *rsc, const char *operation,
- const char *interval, pe_working_set_t *data_set)
+ const char *interval, bool just_failures,
+ pe_working_set_t *data_set)
{
int rc = pcmk_ok;
node_t *node = NULL;
- char *rsc_name = NULL;
- int attr_options = attrd_opt_none;
if (rsc == NULL) {
return -ENXIO;
@@ -552,8 +666,8 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
resource_t *child = (resource_t *) lpc->data;
rc = cli_resource_delete(crmd_channel, host_uname, child, operation,
- interval, data_set);
- if(rc != pcmk_ok) {
+ interval, just_failures, data_set);
+ if (rc != pcmk_ok) {
return rc;
}
}
@@ -585,8 +699,13 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
node = (node_t *) lpc->data;
if (node->details->online) {
- cli_resource_delete(crmd_channel, node->details->uname, rsc,
- operation, interval, data_set);
+ rc = cli_resource_delete(crmd_channel, node->details->uname,
+ rsc, operation, interval,
+ just_failures, data_set);
+ }
+ if (rc != pcmk_ok) {
+ g_list_free(nodes);
+ return rc;
}
}
@@ -611,102 +730,91 @@ cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
if (crmd_channel == NULL) {
printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n",
rsc->id, host_uname);
- return rc;
- }
+ return pcmk_ok;
+ }
- /* Erase the resource's entire LRM history in the CIB, even if we're only
- * clearing a single operation's fail count. If we erased only entries for a
- * single operation, we might wind up with a wrong idea of the current
- * resource state, and we might not re-probe the resource.
- */
- rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id,
- TRUE, data_set);
+ rc = clear_rsc_fail_attrs(rsc, operation, interval, node);
if (rc != pcmk_ok) {
- printf("Unable to clean up %s history on %s: %s\n",
- rsc->id, host_uname, pcmk_strerror(rc));
+ printf("Unable to clean up %s failures on %s: %s\n",
+ rsc->id, host_uname, pcmk_strerror(rc));
return rc;
}
- crmd_replies_needed++;
- crm_trace("Processing %d mainloop inputs", crmd_replies_needed);
- while(g_main_context_iteration(NULL, FALSE)) {
- crm_trace("Processed mainloop input, %d still remaining",
- crmd_replies_needed);
- }
-
- if(crmd_replies_needed < 0) {
- crmd_replies_needed = 0;
- }
-
- rsc_name = rsc_fail_name(rsc);
- if (is_remote_node(node)) {
- attr_options |= attrd_opt_remote;
+ if (just_failures) {
+ rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation,
+ interval, data_set);
+ } else {
+ rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set);
}
- rc = attrd_clear_delegate(NULL, host_uname, rsc_name, operation, interval,
- NULL, attr_options);
if (rc != pcmk_ok) {
- printf("Cleaned %s history on %s, but unable to clear failures: %s\n",
+ printf("Cleaned %s failures on %s, but unable to clean history: %s\n",
rsc->id, host_uname, pcmk_strerror(rc));
} else {
printf("Cleaned up %s on %s\n", rsc->id, host_uname);
}
- free(rsc_name);
-
return rc;
}
int
-cli_resource_delete_failures(crm_ipc_t *crmd_channel, const char *host_uname,
- resource_t *rsc, const char *operation,
- const char *interval, pe_working_set_t *data_set)
+cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name,
+ const char *operation, const char *interval,
+ pe_working_set_t *data_set)
{
+ int attr_options = attrd_opt_none;
int rc = pcmk_ok;
+ const char *display_name = node_name? node_name : "all nodes";
- if (rsc == NULL) {
- return -ENXIO;
-
- } else if (rsc->children) {
- GListPtr lpc = NULL;
+ if (crmd_channel == NULL) {
+ printf("Dry run: skipping clean-up of %s due to CIB_file\n",
+ display_name);
+ return pcmk_ok;
+ }
+ crmd_replies_needed = 0;
- for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
- resource_t *child = (resource_t *) lpc->data;
+ if (node_name) {
+ node_t *node = pe_find_node(data_set->nodes, node_name);
- rc = cli_resource_delete_failures(crmd_channel, host_uname, child, operation,
- interval, data_set);
- if(rc != pcmk_ok) {
- return rc;
- }
+ if (node == NULL) {
+ CMD_ERR("Unknown node: %s", node_name);
+ return -ENXIO;
+ }
+ if (is_remote_node(node)) {
+ attr_options |= attrd_opt_remote;
}
- return pcmk_ok;
}
- for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
- xml_op = __xml_next(xml_op)) {
-
- const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
- const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
- const char *task_interval = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
- const char *resource_name = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
+ rc = attrd_clear_delegate(NULL, node_name, NULL, operation, interval,
+ NULL, attr_options);
+ if (rc != pcmk_ok) {
+ printf("Unable to clean up all failures on %s: %s\n",
+ display_name, pcmk_strerror(rc));
+ return rc;
+ }
- if(resource_name == NULL) {
- continue;
- } else if(host_uname && safe_str_neq(host_uname, node)) {
- continue;
- } else if(rsc->id && safe_str_neq(rsc->id, resource_name)) {
- continue;
- } else if(operation && safe_str_neq(operation, task)) {
- continue;
- } else if(interval && safe_str_neq(interval, task_interval)) {
- continue;
+ if (node_name) {
+ rc = clear_rsc_failures(crmd_channel, node_name, NULL,
+ operation, interval, data_set);
+ if (rc != pcmk_ok) {
+ printf("Cleaned all resource failures on %s, but unable to clean history: %s\n",
+ node_name, pcmk_strerror(rc));
+ return rc;
}
+ } else {
+ for (GList *iter = data_set->nodes; iter; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
- crm_debug("Erasing %s failure for %s (%s detected) on %s",
- task, rsc->id, resource_name, node);
- rc = cli_resource_delete(crmd_channel, node, rsc, task,
- task_interval, data_set);
+ rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL,
+ operation, interval, data_set);
+ if (rc != pcmk_ok) {
+ printf("Cleaned all resource failures on all nodes, but unable to clean history on %s: %s\n",
+ node->details->uname, pcmk_strerror(rc));
+ return rc;
+ }
+ }
}
- return rc;
+ printf("Cleaned up all resources on %s\n", display_name);
+ return pcmk_ok;
}
void
--
1.8.3.1

62
SOURCES/005-cleanup.patch

@ -0,0 +1,62 @@
From a2305469012b5fe3713427412c12459085ed61a1 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 12 Dec 2017 10:02:22 -0600
Subject: [PATCH] Fix: tools: crm_resource --cleanup with no resource specified

7a813755 failed to completely fix --cleanup without --resource
---
tools/crm_resource_runtime.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index bdebb0b..79f8b98 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -569,6 +569,7 @@ clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
{
int rc = pcmk_ok;
const char *failed_value = NULL;
+ const char *failed_id = NULL;
const char *interval_ms_str = NULL;
GHashTable *rscs = NULL;
GHashTableIter iter;
@@ -587,11 +588,14 @@ clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
xml_op = __xml_next(xml_op)) {
+ failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
+ if (failed_id == NULL) {
+ // Malformed history entry, should never happen
+ continue;
+ }
+
// No resource specified means all resources match
- failed_value = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
- if (rsc_id == NULL) {
- rsc_id = failed_value;
- } else if (safe_str_neq(rsc_id, failed_value)) {
+ if (rsc_id && safe_str_neq(rsc_id, failed_id)) {
continue;
}
@@ -615,13 +619,13 @@ clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
}
}
- g_hash_table_add(rscs, (gpointer) rsc_id);
+ g_hash_table_add(rscs, (gpointer) failed_id);
}
g_hash_table_iter_init(&iter, rscs);
- while (g_hash_table_iter_next(&iter, (gpointer *) &rsc_id, NULL)) {
- crm_debug("Erasing failures of %s on %s", rsc_id, node_name);
- rc = clear_rsc_history(crmd_channel, node_name, rsc_id, data_set);
+ while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
+ crm_debug("Erasing failures of %s on %s", failed_id, node_name);
+ rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set);
if (rc != pcmk_ok) {
return rc;
}
--
1.8.3.1

296
SOURCES/006-leaks.patch

@ -0,0 +1,296 @@
From 5042a3b19a2f2bfa3d09b4d1029f53e6b674918b Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 14 Dec 2017 09:16:47 -0600
Subject: [PATCH 1/5] Test: CTS: remove dead code

makes static analysis happy
---
cts/CTSaudits.py | 1 -
cts/environment.py | 1 -
cts/remote.py | 5 +----
cts/watcher.py | 6 +++---
4 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py
index aff897f..d9fbeb9 100755
--- a/cts/CTSaudits.py
+++ b/cts/CTSaudits.py
@@ -190,7 +190,6 @@ class DiskAudit(ClusterAudit):
if answer and answer == "n":
raise ValueError("Disk full on %s" % (node))
- ret = 0
elif remaining_mb < 100 or used_percent > 90:
self.CM.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node))
diff --git a/cts/environment.py b/cts/environment.py
index 75a18c8..6c4831c 100644
--- a/cts/environment.py
+++ b/cts/environment.py
@@ -182,7 +182,6 @@ class Environment:
else:
raise ValueError("Unknown stack: "+name)
- sys.exit(1)
def get_stack_short(self):
# Create the Cluster Manager object
diff --git a/cts/remote.py b/cts/remote.py
index 8c36918..7cef40e 100644
--- a/cts/remote.py
+++ b/cts/remote.py
@@ -220,10 +220,7 @@ class RemoteExec:
if not silent:
for err in errors:
- if stdout == 3:
- result.append("error: "+err)
- else:
- self.debug("cmd: stderr: %s" % err)
+ self.debug("cmd: stderr: %s" % err)
if stdout == 0:
if not silent and result:
diff --git a/cts/watcher.py b/cts/watcher.py
index de032f7..42685ad 100644
--- a/cts/watcher.py
+++ b/cts/watcher.py
@@ -337,19 +337,19 @@ class LogWatcher(RemoteExec):
self.kind = kind
else:
raise
- self.kind = self.Env["LogWatcher"]
+ #self.kind = self.Env["LogWatcher"]
if log:
self.filename = log
else:
raise
- self.filename = self.Env["LogFileName"]
+ #self.filename = self.Env["LogFileName"]
if hosts:
self.hosts = hosts
else:
raise
- self.hosts = self.Env["nodes"]
+ #self.hosts = self.Env["nodes"]
if trace_lw:
self.debug_level = 3
--
1.8.3.1


From 570929eba229558b1a6900ffc54e4d5ee4150f74 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 14 Dec 2017 09:23:03 -0600
Subject: [PATCH 2/5] Refactor: pengine: validate more function arguments

not an issue with current code, but makes static analysis happy
---
pengine/clone.c | 3 ++-
pengine/utilization.c | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/pengine/clone.c b/pengine/clone.c
index 99bac7e..e81dbc8 100644
--- a/pengine/clone.c
+++ b/pengine/clone.c
@@ -955,6 +955,7 @@ is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e
node_t *node = NULL;
enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current);
+ CRM_CHECK(child_rsc && local_node, return FALSE);
if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
/* We only want instances that haven't failed */
node = child_rsc->fns->location(child_rsc, NULL, current);
@@ -965,7 +966,7 @@ is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e
return FALSE;
}
- if (node && local_node && node->details == local_node->details) {
+ if (node && (node->details == local_node->details)) {
return TRUE;
} else if (node) {
diff --git a/pengine/utilization.c b/pengine/utilization.c
index f42c85d..05f8d78 100644
--- a/pengine/utilization.c
+++ b/pengine/utilization.c
@@ -341,6 +341,7 @@ process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_
{
int alloc_details = scores_log_level + 1;
+ CRM_CHECK(rsc && prefer && data_set, return);
if (safe_str_neq(data_set->placement_strategy, "default")) {
GHashTableIter iter;
GListPtr colocated_rscs = NULL;
--
1.8.3.1


From db2fdc9a452fef11d397e25202fde8ba1bad4cd3 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 14 Dec 2017 10:36:20 -0600
Subject: [PATCH 3/5] Low: libcrmservice: avoid memory leak on DBus error

---
lib/services/dbus.c | 47 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 37 insertions(+), 10 deletions(-)

diff --git a/lib/services/dbus.c b/lib/services/dbus.c
index fb3e867..58df927 100644
--- a/lib/services/dbus.c
+++ b/lib/services/dbus.c
@@ -23,6 +23,15 @@ struct db_getall_data {
void (*callback)(const char *name, const char *value, void *userdata);
};
+static void
+free_db_getall_data(struct db_getall_data *data)
+{
+ free(data->target);
+ free(data->object);
+ free(data->name);
+ free(data);
+}
+
DBusConnection *
pcmk_dbus_connect(void)
{
@@ -196,6 +205,20 @@ pcmk_dbus_send_recv(DBusMessage *msg, DBusConnection *connection,
return reply;
}
+/*!
+ * \internal
+ * \brief Send a DBus message with a callback for the reply
+ *
+ * \param[in] msg DBus message to send
+ * \param[in,out] connection DBus connection to send on
+ * \param[in] done Function to call when pending call completes
+ * \param[in] user_data Data to pass to done callback
+ *
+ * \return Handle for reply on success, NULL on error
+ * \note The caller can assume that the done callback is called always and
+ * only when the return value is non-NULL. (This allows the caller to
+ * know where it should free dynamically allocated user_data.)
+ */
DBusPendingCall *
pcmk_dbus_send(DBusMessage *msg, DBusConnection *connection,
void(*done)(DBusPendingCall *pending, void *user_data),
@@ -359,11 +382,7 @@ pcmk_dbus_lookup_result(DBusMessage *reply, struct db_getall_data *data)
}
cleanup:
- free(data->target);
- free(data->object);
- free(data->name);
- free(data);
-
+ free_db_getall_data(data);
return output;
}
@@ -424,11 +443,19 @@ pcmk_dbus_get_property(DBusConnection *connection, const char *target,
query_data->name = strdup(name);
}
- if(query_data->callback) {
- DBusPendingCall* _pending;
- _pending = pcmk_dbus_send(msg, connection, pcmk_dbus_lookup_cb, query_data, timeout);
- if (pending != NULL) {
- *pending = _pending;
+ if (query_data->callback) {
+ DBusPendingCall *local_pending;
+
+ local_pending = pcmk_dbus_send(msg, connection, pcmk_dbus_lookup_cb,
+ query_data, timeout);
+ if (local_pending == NULL) {
+ // pcmk_dbus_lookup_cb() was not called in this case
+ free_db_getall_data(query_data);
+ query_data = NULL;
+ }
+
+ if (pending) {
+ *pending = local_pending;
}
} else {
--
1.8.3.1


From 4a774710ec7269ec3a1427ae09fc6ca435c66e92 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 14 Dec 2017 12:44:04 -0600
Subject: [PATCH 4/5] Build: systemd unit files: restore DBus dependency

06e2e26 removed the unit files' DBus dependency on the advice of a
systemd developer, but it is necessary
---
lrmd/pacemaker_remote.service.in | 3 +++
mcp/pacemaker.service.in | 4 ++++
2 files changed, 7 insertions(+)

diff --git a/lrmd/pacemaker_remote.service.in b/lrmd/pacemaker_remote.service.in
index d5717f6..1c596e1 100644
--- a/lrmd/pacemaker_remote.service.in
+++ b/lrmd/pacemaker_remote.service.in
@@ -2,8 +2,11 @@
Description=Pacemaker Remote Service
Documentation=man:pacemaker_remoted http://clusterlabs.org/doc/en-US/Pacemaker/1.1-pcs/html/Pacemaker_Remote/index.html
+# See main pacemaker unit file for descriptions of why these are needed
After=network.target
After=time-sync.target
+After=dbus.service
+Wants=dbus.service
After=resource-agents-deps.target
Wants=resource-agents-deps.target
After=syslog.service
diff --git a/mcp/pacemaker.service.in b/mcp/pacemaker.service.in
index 516de0f..e532ea2 100644
--- a/mcp/pacemaker.service.in
+++ b/mcp/pacemaker.service.in
@@ -14,6 +14,10 @@ After=network.target
# and failure timestamps, so wait until it's done.
After=time-sync.target
+# Managing systemd resources requires DBus.
+After=dbus.service
+Wants=dbus.service
+
# Some OCF resources may have dependencies that aren't managed by the cluster;
# these must be started before Pacemaker and stopped after it. The
# resource-agents package provides this target, which lets system adminstrators
--
1.8.3.1


From 69de188a7263ba66afa0e8a3a46a64f07a7facca Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 14 Dec 2017 16:05:12 -0600
Subject: [PATCH 5/5] Low: attrd: avoid small memory leak at start-up

introduced by 3518544
---
attrd/commands.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/attrd/commands.c b/attrd/commands.c
index 0a20b26..20bd82f 100644
--- a/attrd/commands.c
+++ b/attrd/commands.c
@@ -539,6 +539,7 @@ attrd_broadcast_protocol()
crm_xml_add(attrd_op, F_ATTRD_VALUE, ATTRD_PROTOCOL_VERSION);
crm_xml_add_int(attrd_op, F_ATTRD_IS_PRIVATE, 1);
attrd_client_update(attrd_op);
+ free_xml(attrd_op);
}
void
--
1.8.3.1

92
SOURCES/007-bundles.patch

@ -0,0 +1,92 @@
From 2ce5fc46463ff7b9a5a2c68602d8c5b35a7c37d7 Mon Sep 17 00:00:00 2001
From: Andrew Beekhof <andrew@beekhof.net>
Date: Tue, 16 Jan 2018 19:05:31 +1100
Subject: [PATCH 1/2] Bug rhbz#1519812 - Prevent notify actions from causing
--wait to hang

---
tools/crm_resource_runtime.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 22bdebf..189d1b3 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1343,10 +1343,19 @@ done:
return rc;
}
-#define action_is_pending(action) \
- ((is_set((action)->flags, pe_action_optional) == FALSE) \
- && (is_set((action)->flags, pe_action_runnable) == TRUE) \
- && (is_set((action)->flags, pe_action_pseudo) == FALSE))
+static inline int action_is_pending(action_t *action)
+{
+ if(is_set(action->flags, pe_action_optional)) {
+ return FALSE;
+ } else if(is_set(action->flags, pe_action_runnable) == FALSE) {
+ return FALSE;
+ } else if(is_set(action->flags, pe_action_pseudo)) {
+ return FALSE;
+ } else if(safe_str_eq("notify", action->task)) {
+ return FALSE;
+ }
+ return TRUE;
+}
/*!
* \internal
@@ -1362,7 +1371,9 @@ actions_are_pending(GListPtr actions)
GListPtr action;
for (action = actions; action != NULL; action = action->next) {
- if (action_is_pending((action_t *) action->data)) {
+ action_t *a = (action_t *)action->data;
+ if (action_is_pending(a)) {
+ crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
return TRUE;
}
}
--
1.8.3.1


From ef15ea4f687e7f9ba1f8a99548ee1e0bf9d4b50a Mon Sep 17 00:00:00 2001
From: Andrew Beekhof <andrew@beekhof.net>
Date: Mon, 22 Jan 2018 21:18:46 +1100
Subject: [PATCH 2/2] Fix: rhbz#1527072 - Correctly observe colocation
constraints with bundles in the Master role

---
pengine/container.c | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/pengine/container.c b/pengine/container.c
index f5d916c..15d094d 100644
--- a/pengine/container.c
+++ b/pengine/container.c
@@ -486,10 +486,18 @@ container_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc, rsc_colocatio
} else {
node_t *chosen = tuple->docker->fns->location(tuple->docker, NULL, FALSE);
- if (chosen != NULL && is_set_recursive(tuple->docker, pe_rsc_block, TRUE) == FALSE) {
- pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight);
- allocated_rhs = g_list_prepend(allocated_rhs, chosen);
+ if (chosen == NULL || is_set_recursive(tuple->docker, pe_rsc_block, TRUE)) {
+ continue;
+ }
+ if(constraint->role_rh >= RSC_ROLE_MASTER && tuple->child == NULL) {
+ continue;
}
+ if(constraint->role_rh >= RSC_ROLE_MASTER && tuple->child->next_role < RSC_ROLE_MASTER) {
+ continue;
+ }
+
+ pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight);
+ allocated_rhs = g_list_prepend(allocated_rhs, chosen);
}
}
--
1.8.3.1

145
SOURCES/008-quorum.patch

@ -0,0 +1,145 @@
From 7c322f4b9a7f36eba1d3ca74d7dd8fe1093ca7bd Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Mon, 22 Jan 2018 11:38:22 -0600
Subject: [PATCH] Low: crmd: quorum gain should always cause new transition

0b689055 aborted the transition on quorum loss, but quorum can also be acquired
without triggering a new transition, if corosync gives quorum without a node
joining (e.g. forced via corosync-cmapctl, or perhaps via heuristics).

This aborts the transition when quorum is gained, but only after a 5-second
delay, if the transition has not been aborted in that time. This avoids an
unnecessary abort in the vast majority of cases where an abort is already done,
and it allows some time for all nodes to connect when quorum is gained, rather
than immediately fencing remaining unseen nodes.
---
crmd/membership.c | 22 +++++++++++++++++-----
crmd/te_utils.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
crmd/tengine.h | 2 ++
3 files changed, 65 insertions(+), 7 deletions(-)

diff --git a/crmd/membership.c b/crmd/membership.c
index c36dbed..4f2fa8a 100644
--- a/crmd/membership.c
+++ b/crmd/membership.c
@@ -438,12 +438,24 @@ crm_update_quorum(gboolean quorum, gboolean force_update)
fsa_register_cib_callback(call_id, FALSE, NULL, cib_quorum_update_complete);
free_xml(update);
- /* If a node not running any resources is cleanly shut down and drops us
- * below quorum, we won't necessarily abort the transition, so abort it
- * here to be safe.
+ /* Quorum changes usually cause a new transition via other activity:
+ * quorum gained via a node joining will abort via the node join,
+ * and quorum lost via a node leaving will usually abort via resource
+ * activity and/or fencing.
+ *
+ * However, it is possible that nothing else causes a transition (e.g.
+ * someone forces quorum via corosync-cmapctl, or quorum is lost due to
+ * a node in standby shutting down cleanly), so here ensure a new
+ * transition is triggered.
*/
- if (quorum == FALSE) {
- abort_transition(INFINITY, tg_restart, "Quorum loss", NULL);
+ if (quorum) {
+ /* If quorum was gained, abort after a short delay, in case multiple
+ * nodes are joining around the same time, so the one that brings us
+ * to quorum doesn't cause all the remaining ones to be fenced.
+ */
+ abort_after_delay(INFINITY, tg_restart, "Quorum gained", 5000);
+ } else {
+ abort_transition(INFINITY, tg_restart, "Quorum lost", NULL);
}
}
fsa_has_quorum = quorum;
diff --git a/crmd/te_utils.c b/crmd/te_utils.c
index dab02d3..8d105dc 100644
--- a/crmd/te_utils.c
+++ b/crmd/te_utils.c
@@ -530,6 +530,46 @@ trigger_graph_processing(const char *fn, int line)
mainloop_set_trigger(transition_trigger);
}
+static struct abort_timer_s {
+ bool aborted;
+ guint id;
+ int priority;
+ enum transition_action action;
+ const char *text;
+} abort_timer = { 0, };
+
+static gboolean
+abort_timer_popped(gpointer data)
+{
+ if (abort_timer.aborted == FALSE) {
+ abort_transition(abort_timer.priority, abort_timer.action,
+ abort_timer.text, NULL);
+ }
+ abort_timer.id = 0;
+ return FALSE; // do not immediately reschedule timer
+}
+
+/*!
+ * \internal
+ * \brief Abort transition after delay, if not already aborted in that time
+ *
+ * \param[in] abort_text Must be literal string
+ */
+void
+abort_after_delay(int abort_priority, enum transition_action abort_action,
+ const char *abort_text, guint delay_ms)
+{
+ if (abort_timer.id) {
+ // Timer already in progress, stop and reschedule
+ g_source_remove(abort_timer.id);
+ }
+ abort_timer.aborted = FALSE;
+ abort_timer.priority = abort_priority;
+ abort_timer.action = abort_action;
+ abort_timer.text = abort_text;
+ abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, NULL);
+}
+
void
abort_transition_graph(int abort_priority, enum transition_action abort_action,
const char *abort_text, xmlNode * reason, const char *fn, int line)
@@ -557,6 +597,8 @@ abort_transition_graph(int abort_priority, enum transition_action abort_action,
break;
}
+ abort_timer.aborted = TRUE;
+
/* Make sure any queued calculations are discarded ASAP */
free(fsa_pe_ref);
fsa_pe_ref = NULL;
@@ -660,10 +702,12 @@ abort_transition_graph(int abort_priority, enum transition_action abort_action,
(transition_graph->complete? "true" : "false"));
} else {
+ const char *id = ID(reason);
+
do_crm_log(level, "Transition aborted by %s.%s '%s': %s "
CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s",
- TYPE(reason), ID(reason), (op? op : "change"), abort_text,
- add[0], add[1], add[2], fn, line, path,
+ TYPE(reason), (id? id : ""), (op? op : "change"),
+ abort_text, add[0], add[1], add[2], fn, line, path,
(transition_graph->complete? "true" : "false"));
}
}
diff --git a/crmd/tengine.h b/crmd/tengine.h
index 7205c16..6a75a08 100644
--- a/crmd/tengine.h
+++ b/crmd/tengine.h
@@ -59,6 +59,8 @@ extern void notify_crmd(crm_graph_t * graph);
# include <te_callbacks.h>
extern void trigger_graph_processing(const char *fn, int line);
+void abort_after_delay(int abort_priority, enum transition_action abort_action,
+ const char *abort_text, guint delay_ms);
extern void abort_transition_graph(int abort_priority, enum transition_action abort_action,
const char *abort_text, xmlNode * reason, const char *fn,
int line);
--
1.8.3.1

97
SOURCES/009-crm_resource.patch

@ -0,0 +1,97 @@
From 30eb9a980db152f6c803a35d3b261a563ad4ee75 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 24 Jan 2018 10:51:34 -0600
Subject: [PATCH 1/2] Low: tools: crm_resource --refresh should ignore
--operation and --interval

It already did when a resource was not specified.
Also update help text to clarify cleanup vs refresh.
---
tools/crm_resource.c | 26 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 3fbc6e1..d007668 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -212,14 +212,16 @@ static struct crm_option long_options[] = {
},
{
"cleanup", no_argument, NULL, 'C',
- "\t\tDelete failed operations from a resource's history allowing its current state to be rechecked.\n"
+ "\t\tIf resource has any past failures, clear its history and fail count.\n"
"\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
+ "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n"
+ "\t\t\t\tto allow current state to be rechecked.\n"
},
{
"refresh", no_argument, NULL, 'R',
"\t\tDelete resource's history (including failures) so its current state is rechecked.\n"
- "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
- "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be cleaned"
+ "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n"
+ "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed."
},
{
"set-parameter", required_argument, NULL, 'p',
@@ -438,7 +440,6 @@ main(int argc, char **argv)
bool require_resource = TRUE; /* whether command requires that resource be specified */
bool require_dataset = TRUE; /* whether command requires populated dataset instance */
bool require_crmd = FALSE; /* whether command requires connection to CRMd */
- bool just_errors = TRUE; /* whether cleanup command deletes all history or just errors */
int rc = pcmk_ok;
int is_ocf_rc = 0;
@@ -630,8 +631,7 @@ main(int argc, char **argv)
if (cib_file == NULL) {
require_crmd = TRUE;
}
- just_errors = FALSE;
- rsc_cmd = 'C';
+ rsc_cmd = 'R';
find_flags = pe_find_renamed|pe_find_anon;
break;
@@ -641,7 +641,6 @@ main(int argc, char **argv)
if (cib_file == NULL) {
require_crmd = TRUE;
}
- just_errors = TRUE;
rsc_cmd = 'C';
find_flags = pe_find_renamed|pe_find_anon;
break;
@@ -1092,7 +1091,14 @@ main(int argc, char **argv)
rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id,
prop_name, cib_conn, &data_set);
- } else if ((rsc_cmd == 'C') && rsc) {
+ } else if (((rsc_cmd == 'C') || (rsc_cmd == 'R')) && rsc) {
+ bool just_errors = TRUE;
+
+ if (rsc_cmd == 'R') {
+ just_errors = FALSE;
+ operation = NULL;
+ interval = 0;
+ }
if (do_force == FALSE) {
rsc = uber_parent(rsc);
}
@@ -1113,14 +1119,14 @@ main(int argc, char **argv)
start_mainloop();
}
- } else if (rsc_cmd == 'C' && just_errors) {
+ } else if (rsc_cmd == 'C') {
rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval,
&data_set);
if (rc == pcmk_ok) {
start_mainloop();
}
- } else if (rsc_cmd == 'C') {
+ } else if (rsc_cmd == 'R') {
#if HAVE_ATOMIC_ATTRD
const char *router_node = host_uname;
xmlNode *msg_data = NULL;
--
1.8.3.1

34
SOURCES/010-crm_master.patch

@ -0,0 +1,34 @@
From 18572d4e1e84c9d1f293b9a3082190133367154e Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 26 Jan 2018 12:31:09 -0600
Subject: [PATCH] Fix: tools: crm_master should always work on node attribute

Before ccbdb2a, crm_master would always set --node, thus ensuring crm_attribute
would treat the value as a node attribute. That commit removed that so that
crm_attribute could determine the local node name properly, but that introduced
an issue where the master value would be set as a cluster property instead of a
node attribute if --lifetime (or --node) was not set explicitly.

This fixes it by setting the default value of --lifetime explicitly.
---
tools/crm_master | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/tools/crm_master b/tools/crm_master
index 7e31cea..f4a0772 100755
--- a/tools/crm_master
+++ b/tools/crm_master
@@ -8,6 +8,10 @@ if [ $? != 0 ] ; then echo "crm_master - A convenience wrapper for crm_attribute
# Note the quotes around `$TEMP': they are essential!
eval set -- "$TEMP"
+# Explicitly set the (usual default) lifetime, so the attribute gets set as a
+# node attribute and not a cluster property.
+options="--lifetime forever"
+
while true ; do
case "$1" in
-N|--node|-U|--uname) options="$options $1 $2"; shift; shift;;
--
1.8.3.1

28
SOURCES/lrmd-protocol-version.patch

@ -0,0 +1,28 @@
From 8c497bc794e1e6a3ed188a548da771d768cef8f1 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 26 Oct 2016 11:18:17 -0500
Subject: [PATCH] Fix: lrmd: undo unnecessary LRMD protocol version change

The change breaks rolling upgrades in a cluster with Pacemaker Remote nodes,
and was never necessary. This introduces a divergence from upstream that
will need to be reconciled in the future.
---
include/crm/lrmd.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h
index 446b39c..a099315 100644
--- a/include/crm/lrmd.h
+++ b/include/crm/lrmd.h
@@ -38,7 +38,7 @@ typedef struct lrmd_key_value_s {
/* This should be bumped every time there is an incompatible change that
* prevents older clients from connecting to this version of the server.
*/
-#define LRMD_PROTOCOL_VERSION "1.1"
+#define LRMD_PROTOCOL_VERSION "1.0"
/* This is the version that the client version will actually be compared
* against. This should be identical to LRMD_PROTOCOL_VERSION. However, we
--
1.8.3.1

25
SOURCES/rhbz-url.patch

@ -0,0 +1,25 @@
From 9b74fb4d667cf187c1c80aeb39ff3b3c12846421 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 18 Apr 2017 14:17:38 -0500
Subject: [PATCH] Low: tools: show Red Hat bugzilla URL when using crm_report

---
tools/crm_report.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/crm_report.in b/tools/crm_report.in
index 26050a7..4715155 100755
--- a/tools/crm_report.in
+++ b/tools/crm_report.in
@@ -222,7 +222,7 @@ EOF
log "Collected results are available in $fname"
log " "
log "Please create a bug entry at"
- log " http://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"
+ log " https://bugzilla.redhat.com/"
log "Include a description of your problem and attach this tarball"
log " "
log "Thank you for taking time to create this report."
--
1.8.3.1

2011
SPECS/pacemaker.spec

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save