From faaaa3452ceec6afcc18cffc9beca3fe19841cce Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 3 Jan 2019 17:44:18 +0530
Subject: [PATCH 104/124] afr/shd: Cleanup self heal daemon resources during
 afr fini

We were not properly cleaning self-heal daemon resources
during afr fini. This patch will clean the same.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22151/

>Change-Id: I597860be6f781b195449e695d871b8667a418d5a
>updates: bz#1659708
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I7be981b9c2476c8cacadea6b14d74234f67b714f
BUG: 1471742
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167845
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 libglusterfs/src/syncop-utils.c          |  8 +++++
 xlators/cluster/afr/src/afr-self-heald.c |  2 ++
 xlators/cluster/afr/src/afr.c            | 57 ++++++++++++++++++++++++++++++++
 3 files changed, 67 insertions(+)

diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
index be03527..b842142 100644
--- a/libglusterfs/src/syncop-utils.c
+++ b/libglusterfs/src/syncop-utils.c
@@ -350,6 +350,11 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
     gf_boolean_t cond_init = _gf_false;
     gf_boolean_t mut_init = _gf_false;
     gf_dirent_t entries;
+    xlator_t *this = NULL;
+
+    if (frame) {
+        this = frame->this;
+    }
 
     /*For this functionality to be implemented in general, we need
      * synccond_t infra which doesn't block the executing thread. Until then
@@ -397,6 +402,9 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
 
         list_for_each_entry_safe(entry, tmp, &entries.list, list)
         {
+            if (this && this->cleanup_starting)
+                goto out;
+
             list_del_init(&entry->list);
             if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) {
                 gf_dirent_entry_free(entry);
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 7eb1207..8bc4720 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -373,6 +373,7 @@ afr_shd_sweep_prepare(struct subvol_healer *healer)
 
     time(&event->start_time);
     event->end_time = 0;
+    _mask_cancellation();
 }
 
 void
@@ -394,6 +395,7 @@ afr_shd_sweep_done(struct subvol_healer *healer)
 
     if (eh_save_history(shd->statistics[healer->subvol], history) < 0)
         GF_FREE(history);
+    _unmask_cancellation();
 }
 
 int
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 33258a0..a0a7551 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -611,13 +611,70 @@ init(xlator_t *this)
 out:
     return ret;
 }
+void
+afr_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
+{
+    int ret = -1;
+
+    if (!healer)
+        return;
+
+    if (healer->running) {
+        /*
+         * If there are any resources to cleanup, We need
+         * to do that gracefully using pthread_cleanup_push
+         */
+        ret = gf_thread_cleanup_xint(healer->thread);
+        if (ret)
+            gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED,
+                   "Failed to clean up healer threads.");
+        healer->thread = 0;
+    }
+    pthread_cond_destroy(&healer->cond);
+    pthread_mutex_destroy(&healer->mutex);
+}
+
+void
+afr_selfheal_daemon_fini(xlator_t *this)
+{
+    struct subvol_healer *healer = NULL;
+    afr_self_heald_t *shd = NULL;
+    afr_private_t *priv = NULL;
+    int i = 0;
+
+    priv = this->private;
+    if (!priv)
+        return;
+
+    shd = &priv->shd;
+    if (!shd->iamshd)
+        return;
+
+    for (i = 0; i < priv->child_count; i++) {
+        healer = &shd->index_healers[i];
+        afr_destroy_healer_object(this, healer);
 
+        healer = &shd->full_healers[i];
+        afr_destroy_healer_object(this, healer);
+
+        if (shd->statistics[i])
+            eh_destroy(shd->statistics[i]);
+    }
+    GF_FREE(shd->index_healers);
+    GF_FREE(shd->full_healers);
+    GF_FREE(shd->statistics);
+    if (shd->split_brain)
+        eh_destroy(shd->split_brain);
+}
 void
 fini(xlator_t *this)
 {
     afr_private_t *priv = NULL;
 
     priv = this->private;
+
+    afr_selfheal_daemon_fini(this);
+
     LOCK(&priv->lock);
     if (priv->timer != NULL) {
         gf_timer_call_cancel(this->ctx, priv->timer);
-- 
1.8.3.1