From 1029c27982d2f91cb2d3c4fcc19aa5171111dfb9 Mon Sep 17 00:00:00 2001 |
|
From: Jiffin Tony Thottan <jthottan@redhat.com> |
|
Date: Mon, 16 Oct 2017 14:24:29 +0530 |
|
Subject: [PATCH 054/124] Revert "glusterd: (storhaug) remove ganesha" |
|
|
|
This reverts commit 843e1b04b554ab887ec656ae7b468bb93ee4e2f7. |
|
|
|
Label: DOWNSTREAM ONLY |
|
|
|
Change-Id: I06b5450344c33f26da3d94b6f67051d41dfbba17 |
|
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com> |
|
Reviewed-on: https://code.engineering.redhat.com/gerrit/167103 |
|
Reviewed-by: Soumya Koduri <skoduri@redhat.com> |
|
Tested-by: RHGS Build Bot <nigelb@redhat.com> |
|
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com> |
|
--- |
|
cli/src/cli-cmd-global.c | 57 ++ |
|
cli/src/cli-cmd-parser.c | 122 ++- |
|
cli/src/cli-cmd.c | 3 +- |
|
cli/src/cli-rpc-ops.c | 82 ++ |
|
cli/src/cli.h | 4 + |
|
xlators/mgmt/glusterd/src/Makefile.am | 4 +- |
|
xlators/mgmt/glusterd/src/glusterd-errno.h | 2 +- |
|
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 915 +++++++++++++++++++++ |
|
xlators/mgmt/glusterd/src/glusterd-handler.c | 79 ++ |
|
xlators/mgmt/glusterd/src/glusterd-messages.h | 2 +- |
|
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 45 +- |
|
.../mgmt/glusterd/src/glusterd-snapshot-utils.c | 196 +++++ |
|
xlators/mgmt/glusterd/src/glusterd-store.h | 2 + |
|
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 37 + |
|
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 7 + |
|
xlators/mgmt/glusterd/src/glusterd.h | 22 + |
|
16 files changed, 1568 insertions(+), 11 deletions(-) |
|
create mode 100644 xlators/mgmt/glusterd/src/glusterd-ganesha.c |
|
|
|
diff --git a/cli/src/cli-cmd-global.c b/cli/src/cli-cmd-global.c |
|
index d0729ac..270b76f 100644 |
|
--- a/cli/src/cli-cmd-global.c |
|
+++ b/cli/src/cli-cmd-global.c |
|
@@ -36,6 +36,10 @@ int |
|
cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word, |
|
const char **words, int wordcount); |
|
|
|
+int |
|
+cli_cmd_ganesha_cbk(struct cli_state *state, struct cli_cmd_word *word, |
|
+ const char **words, int wordcount); |
|
+ |
|
struct cli_cmd global_cmds[] = { |
|
{ |
|
"global help", |
|
@@ -48,6 +52,11 @@ struct cli_cmd global_cmds[] = { |
|
cli_cmd_get_state_cbk, |
|
"Get local state representation of mentioned daemon", |
|
}, |
|
+ { |
|
+ "nfs-ganesha {enable| disable} ", |
|
+ cli_cmd_ganesha_cbk, |
|
+ "Enable/disable NFS-Ganesha support", |
|
+ }, |
|
{NULL, NULL, NULL}}; |
|
|
|
int |
|
@@ -89,6 +98,54 @@ out: |
|
} |
|
|
|
int |
|
+cli_cmd_ganesha_cbk(struct cli_state *state, struct cli_cmd_word *word, |
|
+ const char **words, int wordcount) |
|
+ |
|
+{ |
|
+ int sent = 0; |
|
+ int parse_error = 0; |
|
+ int ret = -1; |
|
+ rpc_clnt_procedure_t *proc = NULL; |
|
+ call_frame_t *frame = NULL; |
|
+ dict_t *options = NULL; |
|
+ cli_local_t *local = NULL; |
|
+ char *op_errstr = NULL; |
|
+ |
|
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GANESHA]; |
|
+ |
|
+ frame = create_frame(THIS, THIS->ctx->pool); |
|
+ if (!frame) |
|
+ goto out; |
|
+ |
|
+ ret = cli_cmd_ganesha_parse(state, words, wordcount, &options, &op_errstr); |
|
+ if (ret) { |
|
+ if (op_errstr) { |
|
+ cli_err("%s", op_errstr); |
|
+ GF_FREE(op_errstr); |
|
+ } else |
|
+ cli_usage_out(word->pattern); |
|
+ parse_error = 1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ CLI_LOCAL_INIT(local, words, frame, options); |
|
+ |
|
+ if (proc->fn) { |
|
+ ret = proc->fn(frame, THIS, options); |
|
+ } |
|
+ |
|
+out: |
|
+ if (ret) { |
|
+ cli_cmd_sent_status_get(&sent); |
|
+ if ((sent == 0) && (parse_error == 0)) |
|
+ cli_out("Setting global option failed"); |
|
+ } |
|
+ |
|
+ CLI_STACK_DESTROY(frame); |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word, |
|
const char **words, int wordcount) |
|
{ |
|
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c |
|
index d9ccba1..cd9c445 100644 |
|
--- a/cli/src/cli-cmd-parser.c |
|
+++ b/cli/src/cli-cmd-parser.c |
|
@@ -1694,7 +1694,7 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words, |
|
} |
|
} |
|
|
|
- if ((strcmp (key, "cluster.brick-multiplex") == 0)) { |
|
+ if ((strcmp(key, "cluster.brick-multiplex") == 0)) { |
|
question = |
|
"Brick-multiplexing is supported only for " |
|
"OCS converged or independent mode. Also it is " |
|
@@ -1703,11 +1703,12 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words, |
|
"are running before this option is modified." |
|
"Do you still want to continue?"; |
|
|
|
- answer = cli_cmd_get_confirmation (state, question); |
|
+ answer = cli_cmd_get_confirmation(state, question); |
|
if (GF_ANSWER_NO == answer) { |
|
- gf_log ("cli", GF_LOG_ERROR, "Operation " |
|
- "cancelled, exiting"); |
|
- *op_errstr = gf_strdup ("Aborted by user."); |
|
+ gf_log("cli", GF_LOG_ERROR, |
|
+ "Operation " |
|
+ "cancelled, exiting"); |
|
+ *op_errstr = gf_strdup("Aborted by user."); |
|
ret = -1; |
|
goto out; |
|
} |
|
@@ -5848,3 +5849,114 @@ out: |
|
|
|
return ret; |
|
} |
|
+ |
|
+/* Parsing global option for NFS-Ganesha config |
|
+ * gluster nfs-ganesha enable/disable */ |
|
+ |
|
+int32_t |
|
+cli_cmd_ganesha_parse(struct cli_state *state, const char **words, |
|
+ int wordcount, dict_t **options, char **op_errstr) |
|
+{ |
|
+ dict_t *dict = NULL; |
|
+ int ret = -1; |
|
+ char *key = NULL; |
|
+ char *value = NULL; |
|
+ char *w = NULL; |
|
+ char *opwords[] = {"enable", "disable", NULL}; |
|
+ const char *question = NULL; |
|
+ gf_answer_t answer = GF_ANSWER_NO; |
|
+ |
|
+ GF_ASSERT(words); |
|
+ GF_ASSERT(options); |
|
+ |
|
+ dict = dict_new(); |
|
+ |
|
+ if (!dict) |
|
+ goto out; |
|
+ |
|
+ if (wordcount != 2) |
|
+ goto out; |
|
+ |
|
+ key = (char *)words[0]; |
|
+ value = (char *)words[1]; |
|
+ |
|
+ if (!key || !value) { |
|
+ cli_out("Usage : nfs-ganesha <enable/disable>"); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = gf_strip_whitespace(value, strlen(value)); |
|
+ if (ret == -1) |
|
+ goto out; |
|
+ |
|
+ if (strcmp(key, "nfs-ganesha")) { |
|
+ gf_asprintf(op_errstr, |
|
+ "Global option: error: ' %s '" |
|
+ "is not a valid global option.", |
|
+ key); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ w = str_getunamb(value, opwords); |
|
+ if (!w) { |
|
+ cli_out( |
|
+ "Invalid global option \n" |
|
+ "Usage : nfs-ganesha <enable/disable>"); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ question = |
|
+ "Enabling NFS-Ganesha requires Gluster-NFS to be" |
|
+ " disabled across the trusted pool. Do you " |
|
+ "still want to continue?\n"; |
|
+ |
|
+ if (strcmp(value, "enable") == 0) { |
|
+ answer = cli_cmd_get_confirmation(state, question); |
|
+ if (GF_ANSWER_NO == answer) { |
|
+ gf_log("cli", GF_LOG_ERROR, |
|
+ "Global operation " |
|
+ "cancelled, exiting"); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ cli_out("This will take a few minutes to complete. Please wait .."); |
|
+ |
|
+ ret = dict_set_str(dict, "key", key); |
|
+ if (ret) { |
|
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set on key failed"); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = dict_set_str(dict, "value", value); |
|
+ if (ret) { |
|
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set on value failed"); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = dict_set_str(dict, "globalname", "All"); |
|
+ if (ret) { |
|
+ gf_log(THIS->name, GF_LOG_ERROR, |
|
+ "dict set on global" |
|
+ " key failed."); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = dict_set_int32(dict, "hold_global_locks", _gf_true); |
|
+ if (ret) { |
|
+ gf_log(THIS->name, GF_LOG_ERROR, |
|
+ "dict set on global key " |
|
+ "failed."); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ *options = dict; |
|
+out: |
|
+ if (ret) |
|
+ dict_unref(dict); |
|
+ |
|
+ return ret; |
|
+} |
|
diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c |
|
index 2ee8b1b..8c06905 100644 |
|
--- a/cli/src/cli-cmd.c |
|
+++ b/cli/src/cli-cmd.c |
|
@@ -366,7 +366,8 @@ cli_cmd_submit(struct rpc_clnt *rpc, void *req, call_frame_t *frame, |
|
unsigned timeout = 0; |
|
|
|
if ((GLUSTER_CLI_PROFILE_VOLUME == procnum) || |
|
- (GLUSTER_CLI_HEAL_VOLUME == procnum)) |
|
+ (GLUSTER_CLI_HEAL_VOLUME == procnum) || |
|
+ (GLUSTER_CLI_GANESHA == procnum)) |
|
timeout = cli_ten_minutes_timeout; |
|
else |
|
timeout = cli_default_conn_timeout; |
|
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c |
|
index 12e7fcc..736cd18 100644 |
|
--- a/cli/src/cli-rpc-ops.c |
|
+++ b/cli/src/cli-rpc-ops.c |
|
@@ -2207,6 +2207,62 @@ out: |
|
return ret; |
|
} |
|
|
|
+int |
|
+gf_cli_ganesha_cbk(struct rpc_req *req, struct iovec *iov, int count, |
|
+ void *myframe) |
|
+{ |
|
+ gf_cli_rsp rsp = { |
|
+ 0, |
|
+ }; |
|
+ int ret = -1; |
|
+ dict_t *dict = NULL; |
|
+ |
|
+ GF_ASSERT(myframe); |
|
+ |
|
+ if (-1 == req->rpc_status) { |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); |
|
+ if (ret < 0) { |
|
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, |
|
+ "Failed to decode xdr response"); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ gf_log("cli", GF_LOG_DEBUG, "Received resp to ganesha"); |
|
+ |
|
+ dict = dict_new(); |
|
+ |
|
+ if (!dict) { |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); |
|
+ if (ret) |
|
+ goto out; |
|
+ |
|
+ if (rsp.op_ret) { |
|
+ if (strcmp(rsp.op_errstr, "")) |
|
+ cli_err("nfs-ganesha: failed: %s", rsp.op_errstr); |
|
+ else |
|
+ cli_err("nfs-ganesha: failed"); |
|
+ } |
|
+ |
|
+ else { |
|
+ cli_out("nfs-ganesha : success "); |
|
+ } |
|
+ |
|
+ ret = rsp.op_ret; |
|
+ |
|
+out: |
|
+ if (dict) |
|
+ dict_unref(dict); |
|
+ cli_cmd_broadcast_response(ret); |
|
+ return ret; |
|
+} |
|
+ |
|
char * |
|
is_server_debug_xlator(void *myframe) |
|
{ |
|
@@ -4880,6 +4936,31 @@ out: |
|
} |
|
|
|
int32_t |
|
+gf_cli_ganesha(call_frame_t *frame, xlator_t *this, void *data) |
|
+{ |
|
+ gf_cli_req req = {{ |
|
+ 0, |
|
+ }}; |
|
+ int ret = 0; |
|
+ dict_t *dict = NULL; |
|
+ |
|
+ if (!frame || !this || !data) { |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ dict = data; |
|
+ |
|
+ ret = cli_to_glusterd(&req, frame, gf_cli_ganesha_cbk, |
|
+ (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_GANESHA, |
|
+ this, cli_rpc_prog, NULL); |
|
+out: |
|
+ gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret); |
|
+ |
|
+ return ret; |
|
+} |
|
+ |
|
+int32_t |
|
gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data) |
|
{ |
|
gf_cli_req req = {{ |
|
@@ -12214,6 +12295,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { |
|
[GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", gf_cli_sys_exec}, |
|
[GLUSTER_CLI_SNAP] = {"SNAP", gf_cli_snapshot}, |
|
[GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER VOLUME", gf_cli_barrier_volume}, |
|
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", gf_cli_ganesha}, |
|
[GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", gf_cli_get_vol_opt}, |
|
[GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot}, |
|
[GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", gf_cli_attach_tier}, |
|
diff --git a/cli/src/cli.h b/cli/src/cli.h |
|
index b79a0a2..37e4d9d 100644 |
|
--- a/cli/src/cli.h |
|
+++ b/cli/src/cli.h |
|
@@ -282,6 +282,10 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words, |
|
int wordcount, dict_t **options, char **op_errstr); |
|
|
|
int32_t |
|
+cli_cmd_ganesha_parse(struct cli_state *state, const char **words, |
|
+ int wordcount, dict_t **options, char **op_errstr); |
|
+ |
|
+int32_t |
|
cli_cmd_get_state_parse(struct cli_state *state, const char **words, |
|
int wordcount, dict_t **options, char **op_errstr); |
|
|
|
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am |
|
index c8dd8e3..5fe5156 100644 |
|
--- a/xlators/mgmt/glusterd/src/Makefile.am |
|
+++ b/xlators/mgmt/glusterd/src/Makefile.am |
|
@@ -10,7 +10,7 @@ glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS) $(LIB_DL) |
|
glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \ |
|
glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \ |
|
glusterd-store.c glusterd-handshake.c glusterd-pmap.c \ |
|
- glusterd-volgen.c glusterd-rebalance.c \ |
|
+ glusterd-volgen.c glusterd-rebalance.c glusterd-ganesha.c \ |
|
glusterd-quota.c glusterd-bitrot.c glusterd-geo-rep.c \ |
|
glusterd-replace-brick.c glusterd-log-ops.c glusterd-tier.c \ |
|
glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \ |
|
@@ -52,6 +52,8 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \ |
|
-I$(CONTRIBDIR)/mount -I$(CONTRIBDIR)/userspace-rcu \ |
|
-DSBIN_DIR=\"$(sbindir)\" -DDATADIR=\"$(localstatedir)\" \ |
|
-DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\" \ |
|
+ -DCONFDIR=\"$(localstatedir)/run/gluster/shared_storage/nfs-ganesha\" \ |
|
+ -DGANESHA_PREFIX=\"$(libexecdir)/ganesha\" \ |
|
-DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) |
|
|
|
|
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-errno.h b/xlators/mgmt/glusterd/src/glusterd-errno.h |
|
index 7e1575b..c74070e 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-errno.h |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-errno.h |
|
@@ -27,7 +27,7 @@ enum glusterd_op_errno { |
|
EG_ISSNAP = 30813, /* Volume is a snap volume */ |
|
EG_GEOREPRUN = 30814, /* Geo-Replication is running */ |
|
EG_NOTTHINP = 30815, /* Bricks are not thinly provisioned */ |
|
- EG_NOGANESHA = 30816, /* obsolete ganesha is not enabled */ |
|
+ EG_NOGANESHA = 30816, /* Global ganesha is not enabled */ |
|
}; |
|
|
|
#endif |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c |
|
new file mode 100644 |
|
index 0000000..fac16e6 |
|
--- /dev/null |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c |
|
@@ -0,0 +1,915 @@ |
|
+/* |
|
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com> |
|
+ This file is part of GlusterFS. |
|
+ |
|
+ This file is licensed to you under your choice of the GNU Lesser |
|
+ General Public License, version 3 or any later version (LGPLv3 or |
|
+ later), or the GNU General Public License, version 2 (GPLv2), in all |
|
+ cases as published by the Free Software Foundation. |
|
+*/ |
|
+ |
|
+#include <glusterfs/common-utils.h> |
|
+#include "glusterd.h" |
|
+#include "glusterd-op-sm.h" |
|
+#include "glusterd-store.h" |
|
+#include "glusterd-utils.h" |
|
+#include "glusterd-nfs-svc.h" |
|
+#include "glusterd-volgen.h" |
|
+#include "glusterd-messages.h" |
|
+#include <glusterfs/syscall.h> |
|
+ |
|
+#include <ctype.h> |
|
+ |
|
+int |
|
+start_ganesha(char **op_errstr); |
|
+ |
|
+typedef struct service_command { |
|
+ char *binary; |
|
+ char *service; |
|
+ int (*action)(struct service_command *, char *); |
|
+} service_command; |
|
+ |
|
+/* parsing_ganesha_ha_conf will allocate the returned string |
|
+ * to be freed (GF_FREE) by the caller |
|
+ * return NULL if error or not found */ |
|
+static char * |
|
+parsing_ganesha_ha_conf(const char *key) |
|
+{ |
|
+#define MAX_LINE 1024 |
|
+ char scratch[MAX_LINE * 2] = { |
|
+ 0, |
|
+ }; |
|
+ char *value = NULL, *pointer = NULL, *end_pointer = NULL; |
|
+ FILE *fp; |
|
+ |
|
+ fp = fopen(GANESHA_HA_CONF, "r"); |
|
+ if (fp == NULL) { |
|
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, |
|
+ "couldn't open the file %s", GANESHA_HA_CONF); |
|
+ goto end_ret; |
|
+ } |
|
+ while ((pointer = fgets(scratch, MAX_LINE, fp)) != NULL) { |
|
+ /* Read config file until we get matching "^[[:space:]]*key" */ |
|
+ if (*pointer == '#') { |
|
+ continue; |
|
+ } |
|
+ while (isblank(*pointer)) { |
|
+ pointer++; |
|
+ } |
|
+ if (strncmp(pointer, key, strlen(key))) { |
|
+ continue; |
|
+ } |
|
+ pointer += strlen(key); |
|
+ /* key found : if we fail to parse, we'll return an error |
|
+ * rather than trying next one |
|
+ * - supposition : conf file is bash compatible : no space |
|
+ * around the '=' */ |
|
+ if (*pointer != '=') { |
|
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, |
|
+ GD_MSG_GET_CONFIG_INFO_FAILED, "Parsing %s failed at key %s", |
|
+ GANESHA_HA_CONF, key); |
|
+ goto end_close; |
|
+ } |
|
+ pointer++; /* jump the '=' */ |
|
+ |
|
+ if (*pointer == '"' || *pointer == '\'') { |
|
+ /* dont get the quote */ |
|
+ pointer++; |
|
+ } |
|
+ end_pointer = pointer; |
|
+ /* stop at the next closing quote or blank/newline */ |
|
+ do { |
|
+ end_pointer++; |
|
+ } while (!(*end_pointer == '\'' || *end_pointer == '"' || |
|
+ isspace(*end_pointer) || *end_pointer == '\0')); |
|
+ *end_pointer = '\0'; |
|
+ |
|
+ /* got it. copy it and return */ |
|
+ value = gf_strdup(pointer); |
|
+ break; |
|
+ } |
|
+ |
|
+end_close: |
|
+ fclose(fp); |
|
+end_ret: |
|
+ return value; |
|
+} |
|
+ |
|
+static int |
|
+sc_systemctl_action(struct service_command *sc, char *command) |
|
+{ |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, sc->binary, command, sc->service, NULL); |
|
+ return runner_run(&runner); |
|
+} |
|
+ |
|
+static int |
|
+sc_service_action(struct service_command *sc, char *command) |
|
+{ |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, sc->binary, sc->service, command, NULL); |
|
+ return runner_run(&runner); |
|
+} |
|
+ |
|
+static int |
|
+manage_service(char *action) |
|
+{ |
|
+ struct stat stbuf = { |
|
+ 0, |
|
+ }; |
|
+ int i = 0; |
|
+ int ret = 0; |
|
+ struct service_command sc_list[] = {{.binary = "/usr/bin/systemctl", |
|
+ .service = "nfs-ganesha", |
|
+ .action = sc_systemctl_action}, |
|
+ {.binary = "/sbin/invoke-rc.d", |
|
+ .service = "nfs-ganesha", |
|
+ .action = sc_service_action}, |
|
+ {.binary = "/sbin/service", |
|
+ .service = "nfs-ganesha", |
|
+ .action = sc_service_action}, |
|
+ {.binary = NULL}}; |
|
+ |
|
+ while (sc_list[i].binary != NULL) { |
|
+ ret = sys_stat(sc_list[i].binary, &stbuf); |
|
+ if (ret == 0) { |
|
+ gf_msg_debug(THIS->name, 0, "%s found.", sc_list[i].binary); |
|
+ if (strcmp(sc_list[i].binary, "/usr/bin/systemctl") == 0) |
|
+ ret = sc_systemctl_action(&sc_list[i], action); |
|
+ else |
|
+ ret = sc_service_action(&sc_list[i], action); |
|
+ |
|
+ return ret; |
|
+ } |
|
+ i++; |
|
+ } |
|
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_UNRECOGNIZED_SVC_MNGR, |
|
+ "Could not %s NFS-Ganesha.Service manager for distro" |
|
+ " not recognized.", |
|
+ action); |
|
+ return ret; |
|
+} |
|
+ |
|
+/* |
|
+ * Check if the cluster is a ganesha cluster or not * |
|
+ */ |
|
+gf_boolean_t |
|
+glusterd_is_ganesha_cluster() |
|
+{ |
|
+ int ret = -1; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ xlator_t *this = NULL; |
|
+ gf_boolean_t ret_bool = _gf_false; |
|
+ |
|
+ this = THIS; |
|
+ GF_VALIDATE_OR_GOTO("ganesha", this, out); |
|
+ priv = this->private; |
|
+ GF_VALIDATE_OR_GOTO(this->name, priv, out); |
|
+ |
|
+ ret = dict_get_str_boolean(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, |
|
+ _gf_false); |
|
+ if (ret == _gf_true) { |
|
+ ret_bool = _gf_true; |
|
+ gf_msg_debug(this->name, 0, "nfs-ganesha is enabled for the cluster"); |
|
+ } else |
|
+ gf_msg_debug(this->name, 0, "nfs-ganesha is disabled for the cluster"); |
|
+ |
|
+out: |
|
+ return ret_bool; |
|
+} |
|
+ |
|
+/* Check if ganesha.enable is set to 'on', that checks if |
|
+ * a particular volume is exported via NFS-Ganesha */ |
|
+gf_boolean_t |
|
+glusterd_check_ganesha_export(glusterd_volinfo_t *volinfo) |
|
+{ |
|
+ char *value = NULL; |
|
+ gf_boolean_t is_exported = _gf_false; |
|
+ int ret = 0; |
|
+ |
|
+ ret = glusterd_volinfo_get(volinfo, "ganesha.enable", &value); |
|
+ if ((ret == 0) && value) { |
|
+ if (strcmp(value, "on") == 0) { |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "ganesha.enable set" |
|
+ " to %s", |
|
+ value); |
|
+ is_exported = _gf_true; |
|
+ } |
|
+ } |
|
+ return is_exported; |
|
+} |
|
+ |
|
+/* * |
|
+ * The below function is called as part of commit phase for volume set option |
|
+ * "ganesha.enable". If the value is "on", it creates export configuration file |
|
+ * and then exports the volume via a dbus command. In case of "off", the volume |
|
+ * will be already unexported during stage phase, so it will remove the conf |
|
+ * file from shared storage |
|
+ */ |
|
+int |
|
+glusterd_check_ganesha_cmd(char *key, char *value, char **errstr, dict_t *dict) |
|
+{ |
|
+ int ret = 0; |
|
+ char *volname = NULL; |
|
+ |
|
+ GF_ASSERT(key); |
|
+ GF_ASSERT(value); |
|
+ GF_ASSERT(dict); |
|
+ |
|
+ if ((strcmp(key, "ganesha.enable") == 0)) { |
|
+ if ((strcmp(value, "on")) && (strcmp(value, "off"))) { |
|
+ gf_asprintf(errstr, |
|
+ "Invalid value" |
|
+ " for volume set command. Use on/off only."); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ if (strcmp(value, "on") == 0) { |
|
+ ret = glusterd_handle_ganesha_op(dict, errstr, key, value); |
|
+ |
|
+ } else if (is_origin_glusterd(dict)) { |
|
+ ret = dict_get_str(dict, "volname", &volname); |
|
+ if (ret) { |
|
+ gf_msg("glusterd-ganesha", GF_LOG_ERROR, errno, |
|
+ GD_MSG_DICT_GET_FAILED, "Unable to get volume name"); |
|
+ goto out; |
|
+ } |
|
+ ret = manage_export_config(volname, "off", errstr); |
|
+ } |
|
+ } |
|
+out: |
|
+ if (ret) { |
|
+ gf_msg("glusterd-ganesha", GF_LOG_ERROR, 0, |
|
+ GD_MSG_NFS_GNS_OP_HANDLE_FAIL, |
|
+ "Handling NFS-Ganesha" |
|
+ " op failed."); |
|
+ } |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr) |
|
+{ |
|
+ int ret = -1; |
|
+ int value = -1; |
|
+ gf_boolean_t option = _gf_false; |
|
+ char *str = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ xlator_t *this = NULL; |
|
+ |
|
+ GF_ASSERT(dict); |
|
+ this = THIS; |
|
+ GF_ASSERT(this); |
|
+ priv = this->private; |
|
+ GF_ASSERT(priv); |
|
+ |
|
+ value = dict_get_str_boolean(dict, "value", _gf_false); |
|
+ if (value == -1) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, |
|
+ "value not present."); |
|
+ goto out; |
|
+ } |
|
+ /* This dict_get will fail if the user had never set the key before */ |
|
+ /*Ignoring the ret value and proceeding */ |
|
+ ret = dict_get_str(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str); |
|
+ if (ret == -1) { |
|
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_DICT_GET_FAILED, |
|
+ "Global dict not present."); |
|
+ ret = 0; |
|
+ goto out; |
|
+ } |
|
+ /* Validity of the value is already checked */ |
|
+ ret = gf_string2boolean(str, &option); |
|
+ /* Check if the feature is already enabled, fail in that case */ |
|
+ if (value == option) { |
|
+ gf_asprintf(op_errstr, "nfs-ganesha is already %sd.", str); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ if (value) { |
|
+ ret = start_ganesha(op_errstr); |
|
+ if (ret) { |
|
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NFS_GNS_START_FAIL, |
|
+ "Could not start NFS-Ganesha"); |
|
+ } |
|
+ } else { |
|
+ ret = stop_ganesha(op_errstr); |
|
+ if (ret) |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "Could not stop " |
|
+ "NFS-Ganesha."); |
|
+ } |
|
+ |
|
+out: |
|
+ |
|
+ if (ret) { |
|
+ if (!(*op_errstr)) { |
|
+ *op_errstr = gf_strdup("Error, Validation Failed"); |
|
+ gf_msg_debug(this->name, 0, "Error, Cannot Validate option :%s", |
|
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL); |
|
+ } else { |
|
+ gf_msg_debug(this->name, 0, "Error, Cannot Validate option"); |
|
+ } |
|
+ } |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+glusterd_op_set_ganesha(dict_t *dict, char **errstr) |
|
+{ |
|
+ int ret = 0; |
|
+ xlator_t *this = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ char *key = NULL; |
|
+ char *value = NULL; |
|
+ char *next_version = NULL; |
|
+ |
|
+ this = THIS; |
|
+ GF_ASSERT(this); |
|
+ GF_ASSERT(dict); |
|
+ |
|
+ priv = this->private; |
|
+ GF_ASSERT(priv); |
|
+ |
|
+ ret = dict_get_str(dict, "key", &key); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, |
|
+ "Couldn't get key in global option set"); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = dict_get_str(dict, "value", &value); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, |
|
+ "Couldn't get value in global option set"); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = glusterd_handle_ganesha_op(dict, errstr, key, value); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NFS_GNS_SETUP_FAIL, |
|
+ "Initial NFS-Ganesha set up failed"); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ ret = dict_set_dynstr_with_alloc(priv->opts, |
|
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL, value); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED, |
|
+ "Failed to set" |
|
+ " nfs-ganesha in dict."); |
|
+ goto out; |
|
+ } |
|
+ ret = glusterd_get_next_global_opt_version_str(priv->opts, &next_version); |
|
+ if (ret) { |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "Could not fetch " |
|
+ " global op version"); |
|
+ goto out; |
|
+ } |
|
+ ret = dict_set_str(priv->opts, GLUSTERD_GLOBAL_OPT_VERSION, next_version); |
|
+ if (ret) |
|
+ goto out; |
|
+ |
|
+ ret = glusterd_store_options(this, priv->opts); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_FAIL, |
|
+ "Failed to store options"); |
|
+ goto out; |
|
+ } |
|
+ |
|
+out: |
|
+ gf_msg_debug(this->name, 0, "returning %d", ret); |
|
+ return ret; |
|
+} |
|
+ |
|
+/* Following function parse GANESHA_HA_CONF |
|
+ * The sample file looks like below, |
|
+ * HA_NAME="ganesha-ha-360" |
|
+ * HA_VOL_NAME="ha-state" |
|
+ * HA_CLUSTER_NODES="server1,server2" |
|
+ * VIP_rhs_1="10.x.x.x" |
|
+ * VIP_rhs_2="10.x.x.x." */ |
|
+ |
|
+/* Check if the localhost is listed as one of nfs-ganesha nodes */ |
|
+gf_boolean_t |
|
+check_host_list(void) |
|
+{ |
|
+ glusterd_conf_t *priv = NULL; |
|
+ char *hostname, *hostlist; |
|
+ gf_boolean_t ret = _gf_false; |
|
+ xlator_t *this = NULL; |
|
+ |
|
+ this = THIS; |
|
+ priv = THIS->private; |
|
+ GF_ASSERT(priv); |
|
+ |
|
+ hostlist = parsing_ganesha_ha_conf("HA_CLUSTER_NODES"); |
|
+ if (hostlist == NULL) { |
|
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_GET_CONFIG_INFO_FAILED, |
|
+ "couldn't get HA_CLUSTER_NODES from file %s", GANESHA_HA_CONF); |
|
+ return _gf_false; |
|
+ } |
|
+ |
|
+ /* Hostlist is a comma separated list now */ |
|
+ hostname = strtok(hostlist, ","); |
|
+ while (hostname != NULL) { |
|
+ ret = gf_is_local_addr(hostname); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_NFS_GNS_HOST_FOUND, |
|
+ "ganesha host found " |
|
+ "Hostname is %s", |
|
+ hostname); |
|
+ break; |
|
+ } |
|
+ hostname = strtok(NULL, ","); |
|
+ } |
|
+ |
|
+ GF_FREE(hostlist); |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+manage_export_config(char *volname, char *value, char **op_errstr) |
|
+{ |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ int ret = -1; |
|
+ |
|
+ GF_ASSERT(volname); |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/create-export-ganesha.sh", |
|
+ CONFDIR, value, volname, NULL); |
|
+ ret = runner_run(&runner); |
|
+ |
|
+ if (ret) |
|
+ gf_asprintf(op_errstr, |
|
+ "Failed to create" |
|
+ " NFS-Ganesha export config file."); |
|
+ |
|
+ return ret; |
|
+} |
|
+ |
|
+/* Exports and unexports a particular volume via NFS-Ganesha */ |
|
+int |
|
+ganesha_manage_export(dict_t *dict, char *value, char **op_errstr) |
|
+{ |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ int ret = -1; |
|
+ glusterd_volinfo_t *volinfo = NULL; |
|
+ dict_t *vol_opts = NULL; |
|
+ char *volname = NULL; |
|
+ xlator_t *this = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ gf_boolean_t option = _gf_false; |
|
+ |
|
+ runinit(&runner); |
|
+ this = THIS; |
|
+ GF_ASSERT(this); |
|
+ priv = this->private; |
|
+ |
|
+ GF_ASSERT(value); |
|
+ GF_ASSERT(dict); |
|
+ GF_ASSERT(priv); |
|
+ |
|
+ ret = dict_get_str(dict, "volname", &volname); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, |
|
+ "Unable to get volume name"); |
|
+ goto out; |
|
+ } |
|
+ ret = gf_string2boolean(value, &option); |
|
+ if (ret == -1) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, |
|
+ "invalid value."); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = glusterd_volinfo_find(volname, &volinfo); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, |
|
+ FMTSTR_CHECK_VOL_EXISTS, volname); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = glusterd_check_ganesha_export(volinfo); |
|
+ if (ret && option) { |
|
+ gf_asprintf(op_errstr, |
|
+ "ganesha.enable " |
|
+ "is already 'on'."); |
|
+ ret = -1; |
|
+ goto out; |
|
+ |
|
+ } else if (!option && !ret) { |
|
+ gf_asprintf(op_errstr, |
|
+ "ganesha.enable " |
|
+ "is already 'off'."); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ /* Check if global option is enabled, proceed only then */ |
|
+ ret = dict_get_str_boolean(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, |
|
+ _gf_false); |
|
+ if (ret == -1) { |
|
+ gf_msg_debug(this->name, 0, |
|
+ "Failed to get " |
|
+ "global option dict."); |
|
+ gf_asprintf(op_errstr, |
|
+ "The option " |
|
+ "nfs-ganesha should be " |
|
+ "enabled before setting ganesha.enable."); |
|
+ goto out; |
|
+ } |
|
+ if (!ret) { |
|
+ gf_asprintf(op_errstr, |
|
+ "The option " |
|
+ "nfs-ganesha should be " |
|
+ "enabled before setting ganesha.enable."); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ /* * |
|
+ * Create the export file from the node where ganesha.enable "on" |
|
+ * is executed |
|
+ * */ |
|
+ if (option) { |
|
+ ret = manage_export_config(volname, "on", op_errstr); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EXPORT_FILE_CREATE_FAIL, |
|
+ "Failed to create" |
|
+ "export file for NFS-Ganesha\n"); |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ |
|
+ if (check_host_list()) { |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/dbus-send.sh", CONFDIR, |
|
+ value, volname, NULL); |
|
+ ret = runner_run(&runner); |
|
+ if (ret) { |
|
+ gf_asprintf(op_errstr, |
|
+ "Dynamic export" |
|
+ " addition/deletion failed." |
|
+ " Please see log file for details"); |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ |
|
+ vol_opts = volinfo->dict; |
|
+ ret = dict_set_dynstr_with_alloc(vol_opts, "features.cache-invalidation", |
|
+ value); |
|
+ if (ret) |
|
+ gf_asprintf(op_errstr, |
|
+ "Cache-invalidation could not" |
|
+ " be set to %s.", |
|
+ value); |
|
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); |
|
+ if (ret) |
|
+ gf_asprintf(op_errstr, "failed to store volinfo for %s", |
|
+ volinfo->volname); |
|
+ |
|
+out: |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+tear_down_cluster(gf_boolean_t run_teardown) |
|
+{ |
|
+ int ret = 0; |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ struct stat st = { |
|
+ 0, |
|
+ }; |
|
+ DIR *dir = NULL; |
|
+ struct dirent *entry = NULL; |
|
+ struct dirent scratch[2] = { |
|
+ { |
|
+ 0, |
|
+ }, |
|
+ }; |
|
+ char path[PATH_MAX] = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ if (run_teardown) { |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", |
|
+ "teardown", CONFDIR, NULL); |
|
+ ret = runner_run(&runner); |
|
+ /* * |
|
+         * Remove all the entries in CONFDIR except ganesha.conf and |
|
+ * ganesha-ha.conf |
|
+ */ |
|
+ dir = sys_opendir(CONFDIR); |
|
+ if (!dir) { |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "Failed to open directory %s. " |
|
+ "Reason : %s", |
|
+ CONFDIR, strerror(errno)); |
|
+ ret = 0; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); |
|
+ while (entry) { |
|
+ snprintf(path, PATH_MAX, "%s/%s", CONFDIR, entry->d_name); |
|
+ ret = sys_lstat(path, &st); |
|
+ if (ret == -1) { |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "Failed to stat entry %s :" |
|
+ " %s", |
|
+ path, strerror(errno)); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ if (strcmp(entry->d_name, "ganesha.conf") == 0 || |
|
+ strcmp(entry->d_name, "ganesha-ha.conf") == 0) |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ " %s is not required" |
|
+ " to remove", |
|
+ path); |
|
+ else if (S_ISDIR(st.st_mode)) |
|
+ ret = recursive_rmdir(path); |
|
+ else |
|
+ ret = sys_unlink(path); |
|
+ |
|
+ if (ret) { |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ " Failed to remove %s. " |
|
+ "Reason : %s", |
|
+ path, strerror(errno)); |
|
+ } |
|
+ |
|
+ gf_msg_debug(THIS->name, 0, "%s %s", |
|
+ ret ? "Failed to remove" : "Removed", entry->d_name); |
|
+ GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); |
|
+ } |
|
+ |
|
+ ret = sys_closedir(dir); |
|
+ if (ret) { |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "Failed to close dir %s. Reason :" |
|
+ " %s", |
|
+ CONFDIR, strerror(errno)); |
|
+ } |
|
+ } |
|
+ |
|
+out: |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+setup_cluster(gf_boolean_t run_setup) |
|
+{ |
|
+ int ret = 0; |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ if (run_setup) { |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", "setup", |
|
+ CONFDIR, NULL); |
|
+ ret = runner_run(&runner); |
|
+ } |
|
+ return ret; |
|
+} |
|
+ |
|
+static int |
|
+teardown(gf_boolean_t run_teardown, char **op_errstr) |
|
+{ |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ int ret = 1; |
|
+ glusterd_volinfo_t *volinfo = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ dict_t *vol_opts = NULL; |
|
+ |
|
+ priv = THIS->private; |
|
+ |
|
+ ret = tear_down_cluster(run_teardown); |
|
+ if (ret == -1) { |
|
+ gf_asprintf(op_errstr, |
|
+ "Cleanup of NFS-Ganesha" |
|
+ " HA config failed."); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", "cleanup", |
|
+ CONFDIR, NULL); |
|
+ ret = runner_run(&runner); |
|
+ if (ret) |
|
+ gf_msg_debug(THIS->name, 0, |
|
+ "Could not clean up" |
|
+ " NFS-Ganesha related config"); |
|
+ |
|
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list) |
|
+ { |
|
+ vol_opts = volinfo->dict; |
|
+ /* All the volumes exported via NFS-Ganesha will be |
|
+ unexported, hence setting the appropriate keys */ |
|
+ ret = dict_set_str(vol_opts, "features.cache-invalidation", "off"); |
|
+ if (ret) |
|
+ gf_msg(THIS->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED, |
|
+ "Could not set features.cache-invalidation " |
|
+ "to off for %s", |
|
+ volinfo->volname); |
|
+ |
|
+ ret = dict_set_str(vol_opts, "ganesha.enable", "off"); |
|
+ if (ret) |
|
+ gf_msg(THIS->name, GF_LOG_WARNING, errno, GD_MSG_DICT_SET_FAILED, |
|
+ "Could not set ganesha.enable to off for %s", |
|
+ volinfo->volname); |
|
+ |
|
+ ret = glusterd_store_volinfo(volinfo, |
|
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT); |
|
+ if (ret) |
|
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL, |
|
+ "failed to store volinfo for %s", volinfo->volname); |
|
+ } |
|
+out: |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+stop_ganesha(char **op_errstr) |
|
+{ |
|
+ int ret = 0; |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", |
|
+ "--setup-ganesha-conf-files", CONFDIR, "no", NULL); |
|
+ ret = runner_run(&runner); |
|
+ if (ret) { |
|
+ gf_asprintf(op_errstr, |
|
+ "removal of symlink ganesha.conf " |
|
+ "in /etc/ganesha failed"); |
|
+ } |
|
+ |
|
+ if (check_host_list()) { |
|
+ ret = manage_service("stop"); |
|
+ if (ret) |
|
+ gf_asprintf(op_errstr, |
|
+ "NFS-Ganesha service could not" |
|
+ "be stopped."); |
|
+ } |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+start_ganesha(char **op_errstr) |
|
+{ |
|
+ int ret = -1; |
|
+ dict_t *vol_opts = NULL; |
|
+ glusterd_volinfo_t *volinfo = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ runner_t runner = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ priv = THIS->private; |
|
+ GF_ASSERT(priv); |
|
+ |
|
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list) |
|
+ { |
|
+ vol_opts = volinfo->dict; |
|
+ /* Gluster-nfs has to be disabled across the trusted pool */ |
|
+ /* before attempting to start nfs-ganesha */ |
|
+ ret = dict_set_str(vol_opts, NFS_DISABLE_MAP_KEY, "on"); |
|
+ if (ret) |
|
+ goto out; |
|
+ |
|
+ ret = glusterd_store_volinfo(volinfo, |
|
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT); |
|
+ if (ret) { |
|
+ *op_errstr = gf_strdup( |
|
+ "Failed to store the " |
|
+ "Volume information"); |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ |
|
+ /* If the nfs svc is not initialized it means that the service is not |
|
+ * running, hence we can skip the process of stopping gluster-nfs |
|
+ * service |
|
+ */ |
|
+ if (priv->nfs_svc.inited) { |
|
+ ret = priv->nfs_svc.stop(&(priv->nfs_svc), SIGKILL); |
|
+ if (ret) { |
|
+ ret = -1; |
|
+ gf_asprintf(op_errstr, |
|
+ "Gluster-NFS service could" |
|
+ "not be stopped, exiting."); |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ |
|
+ if (check_host_list()) { |
|
+ runinit(&runner); |
|
+ runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", |
|
+ "--setup-ganesha-conf-files", CONFDIR, "yes", NULL); |
|
+ ret = runner_run(&runner); |
|
+ if (ret) { |
|
+ gf_asprintf(op_errstr, |
|
+ "creation of symlink ganesha.conf " |
|
+ "in /etc/ganesha failed"); |
|
+ goto out; |
|
+ } |
|
+ ret = manage_service("start"); |
|
+ if (ret) |
|
+ gf_asprintf(op_errstr, |
|
+ "NFS-Ganesha failed to start." |
|
+ "Please see log file for details"); |
|
+ } |
|
+ |
|
+out: |
|
+ return ret; |
|
+} |
|
+ |
|
+static int |
|
+pre_setup(gf_boolean_t run_setup, char **op_errstr) |
|
+{ |
|
+ int ret = 0; |
|
+ |
|
+ ret = check_host_list(); |
|
+ |
|
+ if (ret) { |
|
+ ret = setup_cluster(run_setup); |
|
+ if (ret == -1) |
|
+ gf_asprintf(op_errstr, |
|
+ "Failed to set up HA " |
|
+ "config for NFS-Ganesha. " |
|
+ "Please check the log file for details"); |
|
+ } |
|
+ |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+glusterd_handle_ganesha_op(dict_t *dict, char **op_errstr, char *key, |
|
+ char *value) |
|
+{ |
|
+ int32_t ret = -1; |
|
+ gf_boolean_t option = _gf_false; |
|
+ |
|
+ GF_ASSERT(dict); |
|
+ GF_ASSERT(op_errstr); |
|
+ GF_ASSERT(key); |
|
+ GF_ASSERT(value); |
|
+ |
|
+ if (strcmp(key, "ganesha.enable") == 0) { |
|
+ ret = ganesha_manage_export(dict, value, op_errstr); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ } |
|
+ |
|
+ /* It is possible that the key might not be set */ |
|
+ ret = gf_string2boolean(value, &option); |
|
+ if (ret == -1) { |
|
+ gf_asprintf(op_errstr, "Invalid value in key-value pair."); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ if (strcmp(key, GLUSTERD_STORE_KEY_GANESHA_GLOBAL) == 0) { |
|
+ /* * |
|
+ * The set up/teardown of pcs cluster should be performed only |
|
+         * once. This will be done on the node in which the cli command |
|
+ * 'gluster nfs-ganesha <enable/disable>' got executed. So that |
|
+ * node should part of ganesha HA cluster |
|
+ */ |
|
+ if (option) { |
|
+ ret = pre_setup(is_origin_glusterd(dict), op_errstr); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ } else { |
|
+ ret = teardown(is_origin_glusterd(dict), op_errstr); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ |
|
+out: |
|
+ return ret; |
|
+} |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c |
|
index de44af7..528993c 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c |
|
@@ -1911,6 +1911,83 @@ glusterd_op_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx, |
|
return ret; |
|
} |
|
|
|
+int |
|
+__glusterd_handle_ganesha_cmd(rpcsvc_request_t *req) |
|
+{ |
|
+ int32_t ret = -1; |
|
+ gf_cli_req cli_req = {{ |
|
+ 0, |
|
+ }}; |
|
+ dict_t *dict = NULL; |
|
+ glusterd_op_t cli_op = GD_OP_GANESHA; |
|
+ char *op_errstr = NULL; |
|
+ char err_str[2048] = { |
|
+ 0, |
|
+ }; |
|
+ xlator_t *this = NULL; |
|
+ |
|
+ this = THIS; |
|
+ GF_ASSERT(this); |
|
+ |
|
+ GF_ASSERT(req); |
|
+ |
|
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); |
|
+ if (ret < 0) { |
|
+ snprintf(err_str, sizeof(err_str), |
|
+ "Failed to decode " |
|
+ "request received from cli"); |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s", |
|
+ err_str); |
|
+ req->rpc_err = GARBAGE_ARGS; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ if (cli_req.dict.dict_len) { |
|
+ /* Unserialize the dictionary */ |
|
+ dict = dict_new(); |
|
+ if (!dict) { |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, |
|
+ &dict); |
|
+ if (ret < 0) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, |
|
+ "failed to " |
|
+ "unserialize req-buffer to dictionary"); |
|
+ snprintf(err_str, sizeof(err_str), |
|
+ "Unable to decode " |
|
+ "the command"); |
|
+ goto out; |
|
+ } else { |
|
+ dict->extra_stdfree = cli_req.dict.dict_val; |
|
+ } |
|
+ } |
|
+ |
|
+ gf_msg_trace(this->name, 0, "Received global option request"); |
|
+ |
|
+ ret = glusterd_op_begin_synctask(req, GD_OP_GANESHA, dict); |
|
+out: |
|
+ if (ret) { |
|
+ if (err_str[0] == '\0') |
|
+ snprintf(err_str, sizeof(err_str), "Operation failed"); |
|
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str); |
|
+ } |
|
+ if (op_errstr) |
|
+ GF_FREE(op_errstr); |
|
+ if (dict) |
|
+ dict_unref(dict); |
|
+ |
|
+ return ret; |
|
+} |
|
+ |
|
+int |
|
+glusterd_handle_ganesha_cmd(rpcsvc_request_t *req) |
|
+{ |
|
+ return glusterd_big_locked_handler(req, __glusterd_handle_ganesha_cmd); |
|
+} |
|
+ |
|
static int |
|
__glusterd_handle_reset_volume(rpcsvc_request_t *req) |
|
{ |
|
@@ -6644,6 +6721,8 @@ rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = { |
|
[GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", |
|
GLUSTER_CLI_BARRIER_VOLUME, |
|
glusterd_handle_barrier, NULL, 0, DRC_NA}, |
|
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", GLUSTER_CLI_GANESHA, |
|
+ glusterd_handle_ganesha_cmd, NULL, 0, DRC_NA}, |
|
[GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, |
|
glusterd_handle_get_vol_opt, NULL, 0, DRC_NA}, |
|
[GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h |
|
index 1a4bd54..9558480 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h |
|
@@ -297,7 +297,7 @@ GLFS_MSGID( |
|
GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL, |
|
GD_MSG_LOCALTIME_LOGGING_ENABLE, GD_MSG_LOCALTIME_LOGGING_DISABLE, |
|
GD_MSG_PORTS_EXHAUSTED, GD_MSG_CHANGELOG_GET_FAIL, |
|
- GD_MSG_MANAGER_FUNCTION_FAILED, |
|
+ GD_MSG_MANAGER_FUNCTION_FAILED, GD_MSG_NFS_GANESHA_DISABLED, |
|
GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL); |
|
|
|
#endif /* !_GLUSTERD_MESSAGES_H_ */ |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c |
|
index 12d857a..a630c48 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c |
|
@@ -1176,6 +1176,13 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr) |
|
if (ret) |
|
goto out; |
|
|
|
+ if ((strcmp(key, "ganesha.enable") == 0) && |
|
+ (strcmp(value, "off") == 0)) { |
|
+ ret = ganesha_manage_export(dict, "off", op_errstr); |
|
+ if (ret) |
|
+ goto out; |
|
+ } |
|
+ |
|
ret = glusterd_check_quota_cmd(key, value, errstr, sizeof(errstr)); |
|
if (ret) |
|
goto out; |
|
@@ -1677,6 +1684,20 @@ glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr) |
|
goto out; |
|
} |
|
|
|
+ /* * |
|
+ * If key ganesha.enable is set, then volume should be unexported from |
|
+ * ganesha server. Also it is a volume-level option, perform only when |
|
+ * volume name not equal to "all"(in other words if volinfo != NULL) |
|
+ */ |
|
+ if (volinfo && (!strcmp(key, "all") || !strcmp(key, "ganesha.enable"))) { |
|
+ if (glusterd_check_ganesha_export(volinfo)) { |
|
+ ret = ganesha_manage_export(dict, "off", op_errstr); |
|
+ if (ret) |
|
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL, |
|
+ "Could not reset ganesha.enable key"); |
|
+ } |
|
+ } |
|
+ |
|
if (strcmp(key, "all")) { |
|
exists = glusterd_check_option_exists(key, &key_fixed); |
|
if (exists == -1) { |
|
@@ -2393,6 +2414,15 @@ glusterd_op_reset_volume(dict_t *dict, char **op_rspstr) |
|
} |
|
} |
|
|
|
+ if (!strcmp(key, "ganesha.enable") || !strcmp(key, "all")) { |
|
+ if (glusterd_check_ganesha_export(volinfo)) { |
|
+ ret = manage_export_config(volname, "off", op_rspstr); |
|
+ if (ret) |
|
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL, |
|
+ "Could not reset ganesha.enable key"); |
|
+ } |
|
+ } |
|
+ |
|
out: |
|
GF_FREE(key_fixed); |
|
if (quorum_action) |
|
@@ -2964,6 +2994,10 @@ glusterd_op_set_volume(dict_t *dict, char **errstr) |
|
} |
|
} |
|
|
|
+ ret = glusterd_check_ganesha_cmd(key, value, errstr, dict); |
|
+ if (ret == -1) |
|
+ goto out; |
|
+ |
|
if (!is_key_glusterd_hooks_friendly(key)) { |
|
ret = glusterd_check_option_exists(key, &key_fixed); |
|
GF_ASSERT(ret); |
|
@@ -4494,7 +4528,8 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx) |
|
|
|
case GD_OP_SYNC_VOLUME: |
|
case GD_OP_COPY_FILE: |
|
- case GD_OP_SYS_EXEC: { |
|
+ case GD_OP_SYS_EXEC: |
|
+ case GD_OP_GANESHA: { |
|
dict_copy(dict, req_dict); |
|
} break; |
|
|
|
@@ -5944,6 +5979,10 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr, |
|
ret = glusterd_op_stage_set_volume(dict, op_errstr); |
|
break; |
|
|
|
+ case GD_OP_GANESHA: |
|
+ ret = glusterd_op_stage_set_ganesha(dict, op_errstr); |
|
+ break; |
|
+ |
|
case GD_OP_RESET_VOLUME: |
|
ret = glusterd_op_stage_reset_volume(dict, op_errstr); |
|
break; |
|
@@ -6074,7 +6113,9 @@ glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr, |
|
case GD_OP_SET_VOLUME: |
|
ret = glusterd_op_set_volume(dict, op_errstr); |
|
break; |
|
- |
|
+ case GD_OP_GANESHA: |
|
+ ret = glusterd_op_set_ganesha(dict, op_errstr); |
|
+ break; |
|
case GD_OP_RESET_VOLUME: |
|
ret = glusterd_op_reset_volume(dict, op_errstr); |
|
break; |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c |
|
index 2958443..041946d 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c |
|
@@ -3788,6 +3788,148 @@ out: |
|
return ret; |
|
} |
|
|
|
+/* * |
|
+ * Here there are two possibilities, either destination is snapshot or |
|
+ * clone. In the case of snapshot nfs_ganesha export file will be copied |
|
+ * to snapdir. If it is clone , then new export file will be created for |
|
+ * the clone in the GANESHA_EXPORT_DIRECTORY, replacing occurrences of |
|
+ * volname with clonename |
|
+ */ |
|
+int |
|
+glusterd_copy_nfs_ganesha_file(glusterd_volinfo_t *src_vol, |
|
+ glusterd_volinfo_t *dest_vol) |
|
+{ |
|
+ int32_t ret = -1; |
|
+ char snap_dir[PATH_MAX] = { |
|
+ 0, |
|
+ }; |
|
+ char src_path[PATH_MAX] = { |
|
+ 0, |
|
+ }; |
|
+ char dest_path[PATH_MAX] = { |
|
+ 0, |
|
+ }; |
|
+ char buffer[BUFSIZ] = { |
|
+ 0, |
|
+ }; |
|
+ char *find_ptr = NULL; |
|
+ char *buff_ptr = NULL; |
|
+ char *tmp_ptr = NULL; |
|
+ xlator_t *this = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ struct stat stbuf = { |
|
+ 0, |
|
+ }; |
|
+ FILE *src = NULL; |
|
+ FILE *dest = NULL; |
|
+ |
|
+ this = THIS; |
|
+ GF_VALIDATE_OR_GOTO("snapshot", this, out); |
|
+ priv = this->private; |
|
+ GF_VALIDATE_OR_GOTO(this->name, priv, out); |
|
+ |
|
+ GF_VALIDATE_OR_GOTO(this->name, src_vol, out); |
|
+ GF_VALIDATE_OR_GOTO(this->name, dest_vol, out); |
|
+ |
|
+ if (glusterd_check_ganesha_export(src_vol) == _gf_false) { |
|
+ gf_msg_debug(this->name, 0, |
|
+ "%s is not exported via " |
|
+ "NFS-Ganesha. Skipping copy of export conf.", |
|
+ src_vol->volname); |
|
+ ret = 0; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ if (src_vol->is_snap_volume) { |
|
+ GLUSTERD_GET_SNAP_DIR(snap_dir, src_vol->snapshot, priv); |
|
+ ret = snprintf(src_path, PATH_MAX, "%s/export.%s.conf", snap_dir, |
|
+ src_vol->snapshot->snapname); |
|
+ } else { |
|
+ ret = snprintf(src_path, PATH_MAX, "%s/export.%s.conf", |
|
+ GANESHA_EXPORT_DIRECTORY, src_vol->volname); |
|
+ } |
|
+ if (ret < 0 || ret >= PATH_MAX) |
|
+ goto out; |
|
+ |
|
+ ret = sys_lstat(src_path, &stbuf); |
|
+ if (ret) { |
|
+ /* |
|
+ * This code path is hit, only when the src_vol is being * |
|
+ * exported via NFS-Ganesha. So if the conf file is not * |
|
+ * available, we fail the snapshot operation. * |
|
+ */ |
|
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, |
|
+ "Stat on %s failed with %s", src_path, strerror(errno)); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ if (dest_vol->is_snap_volume) { |
|
+ memset(snap_dir, 0, PATH_MAX); |
|
+ GLUSTERD_GET_SNAP_DIR(snap_dir, dest_vol->snapshot, priv); |
|
+ ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf", |
|
+ snap_dir, dest_vol->snapshot->snapname); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ |
|
+ ret = glusterd_copy_file(src_path, dest_path); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, |
|
+ "Failed to copy %s in %s", src_path, dest_path); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ } else { |
|
+ ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf", |
|
+ GANESHA_EXPORT_DIRECTORY, dest_vol->volname); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ |
|
+ src = fopen(src_path, "r"); |
|
+ dest = fopen(dest_path, "w"); |
|
+ |
|
+ if (!src || !dest) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, |
|
+ "Failed to open %s", dest ? src_path : dest_path); |
|
+ ret = -1; |
|
+ goto out; |
|
+ } |
|
+ |
|
+ /* * |
|
+ * if the source volume is snapshot, the export conf file |
|
+         * consists of original volname |
|
+ */ |
|
+ if (src_vol->is_snap_volume) |
|
+ find_ptr = gf_strdup(src_vol->parent_volname); |
|
+ else |
|
+ find_ptr = gf_strdup(src_vol->volname); |
|
+ |
|
+ if (!find_ptr) |
|
+ goto out; |
|
+ |
|
+ /* Replacing volname with clonename */ |
|
+ while (fgets(buffer, BUFSIZ, src)) { |
|
+ buff_ptr = buffer; |
|
+ while ((tmp_ptr = strstr(buff_ptr, find_ptr))) { |
|
+ while (buff_ptr < tmp_ptr) |
|
+ fputc((int)*buff_ptr++, dest); |
|
+ fputs(dest_vol->volname, dest); |
|
+ buff_ptr += strlen(find_ptr); |
|
+ } |
|
+ fputs(buff_ptr, dest); |
|
+ memset(buffer, 0, BUFSIZ); |
|
+ } |
|
+ } |
|
+out: |
|
+ if (src) |
|
+ fclose(src); |
|
+ if (dest) |
|
+ fclose(dest); |
|
+ if (find_ptr) |
|
+ GF_FREE(find_ptr); |
|
+ |
|
+ return ret; |
|
+} |
|
+ |
|
int32_t |
|
glusterd_restore_geo_rep_files(glusterd_volinfo_t *snap_vol) |
|
{ |
|
@@ -3876,6 +4018,60 @@ out: |
|
return ret; |
|
} |
|
|
|
+int |
|
+glusterd_restore_nfs_ganesha_file(glusterd_volinfo_t *src_vol, |
|
+ glusterd_snap_t *snap) |
|
+{ |
|
+ int32_t ret = -1; |
|
+ char snap_dir[PATH_MAX] = ""; |
|
+ char src_path[PATH_MAX] = ""; |
|
+ char dest_path[PATH_MAX] = ""; |
|
+ xlator_t *this = NULL; |
|
+ glusterd_conf_t *priv = NULL; |
|
+ struct stat stbuf = { |
|
+ 0, |
|
+ }; |
|
+ |
|
+ this = THIS; |
|
+ GF_VALIDATE_OR_GOTO("snapshot", this, out); |
|
+ priv = this->private; |
|
+ GF_VALIDATE_OR_GOTO(this->name, priv, out); |
|
+ |
|
+ GF_VALIDATE_OR_GOTO(this->name, src_vol, out); |
|
+ GF_VALIDATE_OR_GOTO(this->name, snap, out); |
|
+ |
|
+ GLUSTERD_GET_SNAP_DIR(snap_dir, snap, priv); |
|
+ |
|
+ ret = snprintf(src_path, sizeof(src_path), "%s/export.%s.conf", snap_dir, |
|
+ snap->snapname); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ |
|
+ ret = sys_lstat(src_path, &stbuf); |
|
+ if (ret) { |
|
+ if (errno == ENOENT) { |
|
+ ret = 0; |
|
+ gf_msg_debug(this->name, 0, "%s not found", src_path); |
|
+ } else |
|
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED, |
|
+ "Stat on %s failed with %s", src_path, strerror(errno)); |
|
+ goto out; |
|
+ } |
|
+ |
|
+ ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf", |
|
+ GANESHA_EXPORT_DIRECTORY, src_vol->volname); |
|
+ if (ret < 0) |
|
+ goto out; |
|
+ |
|
+ ret = glusterd_copy_file(src_path, dest_path); |
|
+ if (ret) |
|
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, |
|
+ "Failed to copy %s in %s", src_path, dest_path); |
|
+ |
|
+out: |
|
+ return ret; |
|
+} |
|
+ |
|
/* Snapd functions */ |
|
int |
|
glusterd_is_snapd_enabled(glusterd_volinfo_t *volinfo) |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h |
|
index e60be6e..41d0001 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-store.h |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h |
|
@@ -118,6 +118,8 @@ typedef enum glusterd_store_ver_ac_ { |
|
#define GLUSTERD_STORE_KEY_VOL_MIGRATIONS_SKIPPED "migration-skipped" |
|
#define GLUSTERD_STORE_KEY_VOL_MIGRATION_RUN_TIME "migration-run-time" |
|
|
|
+#define GLUSTERD_STORE_KEY_GANESHA_GLOBAL "nfs-ganesha" |
|
+ |
|
int32_t |
|
glusterd_store_volinfo(glusterd_volinfo_t *volinfo, |
|
glusterd_volinfo_ver_ac_t ac); |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c |
|
index 86ef470..a0417ca 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c |
|
@@ -1823,6 +1823,18 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr) |
|
goto out; |
|
} |
|
|
|
+ ret = glusterd_check_ganesha_export(volinfo); |
|
+ if (ret) { |
|
+ ret = ganesha_manage_export(dict, "off", op_errstr); |
|
+ if (ret) { |
|
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, |
|
+ GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL, |
|
+ "Could not " |
|
+ "unexport volume via NFS-Ganesha"); |
|
+ ret = 0; |
|
+ } |
|
+ } |
|
+ |
|
if (glusterd_is_defrag_on(volinfo)) { |
|
snprintf(msg, sizeof(msg), |
|
"rebalance session is " |
|
@@ -2674,6 +2686,8 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr) |
|
xlator_t *this = NULL; |
|
glusterd_conf_t *conf = NULL; |
|
glusterd_svc_t *svc = NULL; |
|
+ char *str = NULL; |
|
+ gf_boolean_t option = _gf_false; |
|
|
|
this = THIS; |
|
GF_ASSERT(this); |
|
@@ -2731,6 +2745,29 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr) |
|
} |
|
} |
|
|
|
+ ret = dict_get_str(conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str); |
|
+ if (ret != 0) { |
|
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, |
|
+ "Global dict not present."); |
|
+ ret = 0; |
|
+ |
|
+ } else { |
|
+ ret = gf_string2boolean(str, &option); |
|
+ /* Check if the feature is enabled and set nfs-disable to true */ |
|
+ if (option) { |
|
+ gf_msg_debug(this->name, 0, "NFS-Ganesha is enabled"); |
|
+ /* Gluster-nfs should not start when NFS-Ganesha is enabled*/ |
|
+ ret = dict_set_str(volinfo->dict, NFS_DISABLE_MAP_KEY, "on"); |
|
+ if (ret) { |
|
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, |
|
+ "Failed to set nfs.disable for" |
|
+ "volume %s", |
|
+ volname); |
|
+ goto out; |
|
+ } |
|
+ } |
|
+ } |
|
+ |
|
ret = glusterd_start_volume(volinfo, flags, _gf_true); |
|
if (ret) |
|
goto out; |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c |
|
index d1244e4..13f423a 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c |
|
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c |
|
@@ -2597,6 +2597,13 @@ struct volopt_map_entry glusterd_volopt_map[] = { |
|
.voltype = "features/upcall", |
|
.op_version = GD_OP_VERSION_3_7_0, |
|
}, |
|
+ { |
|
+ .key = "ganesha.enable", |
|
+ .voltype = "features/ganesha", |
|
+ .value = "off", |
|
+ .option = "ganesha.enable", |
|
+ .op_version = GD_OP_VERSION_3_7_0, |
|
+ }, |
|
/* Lease translator options */ |
|
{ |
|
.key = "features.leases", |
|
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h |
|
index 67867f8..5135181 100644 |
|
--- a/xlators/mgmt/glusterd/src/glusterd.h |
|
+++ b/xlators/mgmt/glusterd/src/glusterd.h |
|
@@ -61,6 +61,9 @@ |
|
#define GLUSTERD_LOCALTIME_LOGGING_KEY "cluster.localtime-logging" |
|
#define GLUSTERD_DAEMON_LOG_LEVEL_KEY "cluster.daemon-log-level" |
|
|
|
+#define GANESHA_HA_CONF CONFDIR "/ganesha-ha.conf" |
|
+#define GANESHA_EXPORT_DIRECTORY CONFDIR "/exports" |
|
+ |
|
#define GLUSTERD_SNAPS_MAX_HARD_LIMIT 256 |
|
#define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90 |
|
#define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100 |
|
@@ -1356,6 +1359,25 @@ glusterd_op_stop_volume(dict_t *dict); |
|
int |
|
glusterd_op_delete_volume(dict_t *dict); |
|
int |
|
+glusterd_handle_ganesha_op(dict_t *dict, char **op_errstr, char *key, |
|
+ char *value); |
|
+int |
|
+glusterd_check_ganesha_cmd(char *key, char *value, char **errstr, dict_t *dict); |
|
+int |
|
+glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr); |
|
+int |
|
+glusterd_op_set_ganesha(dict_t *dict, char **errstr); |
|
+int |
|
+ganesha_manage_export(dict_t *dict, char *value, char **op_errstr); |
|
+gf_boolean_t |
|
+glusterd_is_ganesha_cluster(); |
|
+gf_boolean_t |
|
+glusterd_check_ganesha_export(glusterd_volinfo_t *volinfo); |
|
+int |
|
+stop_ganesha(char **op_errstr); |
|
+int |
|
+tear_down_cluster(gf_boolean_t run_teardown); |
|
+int |
|
manage_export_config(char *volname, char *value, char **op_errstr); |
|
|
|
int |
|
-- |
|
1.8.3.1 |
|
|
|
|