Browse Source

initial package creation

Signed-off-by: Toshaan Bharvani <toshaan@powerel.org>
master
Toshaan Bharvani 3 years ago
commit
433b33f037
  1. 1557
      SOURCES/0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
  2. 51
      SOURCES/0003-rpc-set-bind-insecure-to-off-by-default.patch
  3. 47
      SOURCES/0004-glusterd-spec-fixing-autogen-issue.patch
  4. 36
      SOURCES/0005-libglusterfs-glusterd-Fix-compilation-errors.patch
  5. 58
      SOURCES/0006-build-remove-ghost-directory-entries.patch
  6. 620
      SOURCES/0007-build-add-RHGS-specific-changes.patch
  7. 35
      SOURCES/0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
  8. 57
      SOURCES/0009-build-introduce-security-hardening-flags-in-gluster.patch
  9. 100
      SOURCES/0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
  10. 138
      SOURCES/0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
  11. 73
      SOURCES/0012-build-add-pretrans-check.patch
  12. 50
      SOURCES/0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
  13. 72
      SOURCES/0014-build-spec-file-conflict-resolution.patch
  14. 198
      SOURCES/0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch
  15. 42
      SOURCES/0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
  16. 37
      SOURCES/0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
  17. 56
      SOURCES/0018-cli-Add-message-for-user-before-modifying-brick-mult.patch
  18. 99
      SOURCES/0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
  19. 38
      SOURCES/0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch
  20. 66
      SOURCES/0021-cli-glusterfsd-remove-copyright-information.patch
  21. 40
      SOURCES/0022-cli-Remove-upstream-doc-reference.patch
  22. 148
      SOURCES/0023-hooks-remove-selinux-hooks.patch
  23. 50
      SOURCES/0024-glusterd-Make-localtime-logging-option-invisible-in-.patch
  24. 45
      SOURCES/0025-build-make-RHGS-version-available-for-server.patch
  25. 68
      SOURCES/0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch
  26. 50
      SOURCES/0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch
  27. 52
      SOURCES/0028-glusterd-Reset-op-version-for-features.shard-deletio.patch
  28. 39
      SOURCES/0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
  29. 42
      SOURCES/0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch
  30. 34
      SOURCES/0031-glusterd-turn-off-selinux-feature-in-downstream.patch
  31. 29
      SOURCES/0032-glusterd-update-gd-op-version-to-3_7_0.patch
  32. 83
      SOURCES/0033-build-add-missing-explicit-package-dependencies.patch
  33. 59
      SOURCES/0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch
  34. 41
      SOURCES/0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch
  35. 47
      SOURCES/0036-build-add-conditional-dependency-on-server-for-devel.patch
  36. 35
      SOURCES/0037-cli-change-the-warning-message.patch
  37. 230
      SOURCES/0038-spec-avoid-creation-of-temp-file-in-lua-script.patch
  38. 61
      SOURCES/0039-cli-fix-query-to-user-during-brick-mux-selection.patch
  39. 136
      SOURCES/0040-build-Remove-unsupported-test-cases-failing-consiste.patch
  40. 43
      SOURCES/0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch
  41. 123
      SOURCES/0042-spec-client-server-Builds-are-failing-on-rhel-6.patch
  42. 137
      SOURCES/0043-inode-don-t-dump-the-whole-table-to-CLI.patch
  43. 360
      SOURCES/0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch
  44. 126
      SOURCES/0045-glusterd-fix-txn-id-mem-leak.patch
  45. 98
      SOURCES/0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch
  46. 1652
      SOURCES/0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch
  47. 115
      SOURCES/0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch
  48. 49
      SOURCES/0049-transport-socket-log-shutdown-msg-occasionally.patch
  49. 142
      SOURCES/0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch
  50. 67
      SOURCES/0051-spec-update-rpm-install-condition.patch
  51. 299
      SOURCES/0052-geo-rep-IPv6-support.patch
  52. 575
      SOURCES/0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
  53. 1912
      SOURCES/0054-Revert-glusterd-storhaug-remove-ganesha.patch
  54. 1897
      SOURCES/0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch
  55. 229
      SOURCES/0056-common-ha-fixes-for-Debian-based-systems.patch
  56. 40
      SOURCES/0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
  57. 62
      SOURCES/0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
  58. 132
      SOURCES/0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
  59. 61
      SOURCES/0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
  60. 106
      SOURCES/0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch
  61. 59
      SOURCES/0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
  62. 144
      SOURCES/0063-glusterd-ganesha-update-cache-invalidation-properly-.patch
  63. 52
      SOURCES/0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch
  64. 58
      SOURCES/0065-ganesha-scripts-remove-dependency-over-export-config.patch
  65. 41
      SOURCES/0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
  66. 41
      SOURCES/0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
  67. 58
      SOURCES/0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
  68. 96
      SOURCES/0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
  69. 76
      SOURCES/0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
  70. 66
      SOURCES/0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
  71. 60
      SOURCES/0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
  72. 45
      SOURCES/0073-build-remove-ganesha-dependency-on-selinux-policy.patch
  73. 67
      SOURCES/0074-common-ha-enable-pacemaker-at-end-of-setup.patch
  74. 43
      SOURCES/0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch
  75. 44
      SOURCES/0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch
  76. 73
      SOURCES/0077-glusterd-ganesha-create-remove-export-file-only-from.patch
  77. 40
      SOURCES/0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch
  78. 93
      SOURCES/0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch
  79. 62
      SOURCES/0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch
  80. 50
      SOURCES/0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch
  81. 59
      SOURCES/0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
  82. 82
      SOURCES/0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
  83. 37
      SOURCES/0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch
  84. 8976
      SOURCES/0085-Revert-all-remove-code-which-is-not-being-considered.patch
  85. 3194
      SOURCES/0086-Revert-tiering-remove-the-translator-from-build-and-.patch
  86. 89
      SOURCES/0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch
  87. 74
      SOURCES/0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch
  88. 85
      SOURCES/0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
  89. 307
      SOURCES/0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch
  90. 110
      SOURCES/0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
  91. 41
      SOURCES/0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch
  92. 106
      SOURCES/0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch
  93. 45
      SOURCES/0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch
  94. 204
      SOURCES/0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch
  95. 94
      SOURCES/0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch
  96. 61
      SOURCES/0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch
  97. 4617
      SOURCES/0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch
  98. 119
      SOURCES/0099-client-fini-return-fini-after-rpc-cleanup.patch
  99. 179
      SOURCES/0100-clnt-rpc-ref-leak-during-disconnect.patch
  100. 162
      SOURCES/0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch
  101. Some files were not shown because too many files have changed in this diff Show More

1557
SOURCES/0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch

File diff suppressed because it is too large Load Diff

51
SOURCES/0003-rpc-set-bind-insecure-to-off-by-default.patch

@ -0,0 +1,51 @@
From 9b58731c83bc1ee9c5f2a3cd58a8f845cf09ee82 Mon Sep 17 00:00:00 2001
From: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Date: Mon, 21 Mar 2016 13:54:19 +0530
Subject: [PATCH 03/52] rpc: set bind-insecure to off by default

commit 243a5b429f225acb8e7132264fe0a0835ff013d5 turn's 'ON'
allow-insecure and bind-insecure by default.

Problem:
Now with newer versions we have bind-insecure 'ON' by default.
So, while upgrading subset of nodes from a trusted storage pool,
nodes which have older versions of glusterfs will expect
connection from secure ports only (since they still have
bind-insecure off) thus they reject connection from upgraded
nodes which now have insecure ports.

Hence we will run into connection issues between peers.

Solution:
This patch will turn bind-insecure 'OFF' by default to avoid
problem explained above.

Label: DOWNSTREAM ONLY

Change-Id: Id7a19b4872399d3b019243b0857c9c7af75472f7
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70313
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
rpc/rpc-lib/src/rpc-transport.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c
index f9cbdf1..4beaaf9 100644
--- a/rpc/rpc-lib/src/rpc-transport.c
+++ b/rpc/rpc-lib/src/rpc-transport.c
@@ -269,8 +269,8 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
else
trans->bind_insecure = 0;
} else {
- /* By default allow bind insecure */
- trans->bind_insecure = 1;
+ /* Turning off bind insecure by default*/
+ trans->bind_insecure = 0;
}
ret = dict_get_str(options, "transport-type", &type);
--
1.8.3.1

47
SOURCES/0004-glusterd-spec-fixing-autogen-issue.patch

@ -0,0 +1,47 @@
From aa73240892a7072be68772370fd95173e6e77d10 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 21 Mar 2016 17:07:00 +0530
Subject: [PATCH 04/52] glusterd/spec: fixing autogen issue

Backport of https://code.engineering.redhat.com/gerrit/#/c/59463/

Because of the incorrect build section, autogen.sh wasn't re-run during the rpm
build process. The `extras/Makefile.in` was not regenerated with the changes
made to `extras/Makefile.am` in the firewalld patch. This meant that
`extras/Makefile` was generated without the firewalld changes. So the firewalld
config wasn't installed during `make install` and rpmbuild later failed when it
failed to find `/usr/lib/firewalld/glusterfs.xml`

Label: DOWNSTREAM ONLY

>Reviewed-on: https://code.engineering.redhat.com/gerrit/59463

Change-Id: I498bcceeacbd839640282eb6467c9f1464505697
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70343
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index c655f16..f5c1f79 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -700,12 +700,7 @@ done
%build
-# RHEL6 and earlier need to manually replace config.guess and config.sub
-%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
-./autogen.sh
-%endif
-
-%configure \
+./autogen.sh && %configure \
%{?_with_asan} \
%{?_with_cmocka} \
%{?_with_debug} \
--
1.8.3.1

36
SOURCES/0005-libglusterfs-glusterd-Fix-compilation-errors.patch

@ -0,0 +1,36 @@
From 44f758a56c5c5ad340ebc6d6a6478e8712c2c101 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 21 Mar 2016 22:31:02 +0530
Subject: [PATCH 05/52] libglusterfs/glusterd: Fix compilation errors

1. Removed duplicate definition of GD_OP_VER_PERSISTENT_AFR_XATTRS introduced in
d367a88 where GD_OP_VER_PERSISTENT_AFR_XATTRS was redfined

2. Fixed incorrect op-version

Label: DOWNSTREAM ONLY

Change-Id: Icfa3206e8a41a11875641f57523732b80837f8f6
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70384
Reviewed-by: Nithya Balachandran <nbalacha@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-store.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 64447e7..51ca3d1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -967,7 +967,7 @@ glusterd_volume_exclude_options_write(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- if (conf->op_version >= GD_OP_VERSION_RHS_3_0) {
+ if (conf->op_version >= GD_OP_VERSION_3_7_0) {
snprintf(buf, sizeof(buf), "%d", volinfo->disperse_count);
ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT, buf);
if (ret)
--
1.8.3.1

58
SOURCES/0006-build-remove-ghost-directory-entries.patch

@ -0,0 +1,58 @@
From 1f28e008825ae291208a9e6c714dd642f715a2a1 Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Mon, 7 Apr 2014 15:24:10 +0530
Subject: [PATCH 06/52] build: remove ghost directory entries

ovirt requires hook directories for gluster management and ghost
directories are no more ghost entries

Label: DOWNSTREAM ONLY

Change-Id: Iaf1066ba0655619024f87eaaa039f0010578c567
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60133
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index f5c1f79..6be492e 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -799,15 +799,30 @@ install -D -p -m 0644 extras/glusterfs-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
%if ( 0%{!?_without_georeplication:1} )
-# geo-rep ghosts
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif
+%if ( 0%{!?_without_syslog:1} )
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
+install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \
+ %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
+%endif
+
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \
+ %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
+%endif
+
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
+install -D -p -m 0644 extras/logger.conf.example \
+ %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example
+%endif
+%endif
+
%if ( 0%{!?_without_server:1} )
-# the rest of the ghosts
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
--
1.8.3.1

620
SOURCES/0007-build-add-RHGS-specific-changes.patch

@ -0,0 +1,620 @@
From 7744475550cd27f58f536741e9c50c639d3b02d8 Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Thu, 6 Dec 2018 20:06:27 +0530
Subject: [PATCH 07/52] build: add RHGS specific changes

Label: DOWNSTREAM ONLY

Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1074947
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1097782
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1115267
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1221743
Change-Id: I08333334745adf2350e772c6454ffcfe9c08cb89
Reviewed-on: https://code.engineering.redhat.com/gerrit/24983
Reviewed-on: https://code.engineering.redhat.com/gerrit/25451
Reviewed-on: https://code.engineering.redhat.com/gerrit/25518
Reviewed-on: https://code.engineering.redhat.com/gerrit/25983
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60134
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 485 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 481 insertions(+), 4 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 6be492e..eb04491 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -95,9 +95,16 @@
%{?_without_server:%global _without_server --without-server}
# disable server components forcefully as rhel <= 6
-%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
+%if ( 0%{?rhel} )
+%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
+%global _without_server %{nil}
+%else
%global _without_server --without-server
%endif
+%endif
+
+%global _without_extra_xlators 1
+%global _without_regression_tests 1
# syslog
# if you wish to build rpms without syslog logging, compile like this
@@ -229,7 +236,8 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
-Release: 0.@PACKAGE_RELEASE@%{?dist}
+Release: @PACKAGE_RELEASE@%{?dist}
+ExcludeArch: i686
%endif
License: GPLv2 or LGPLv3+
URL: http://docs.gluster.org/
@@ -243,8 +251,6 @@ Source8: glusterfsd.init
Source0: @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
%endif
-BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
-
Requires(pre): shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires: systemd
@@ -384,7 +390,9 @@ This package provides cloudsync plugins for archival feature.
Summary: Development Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
+%if ( 0%{!?_without_extra_xlators:1} )
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
+%endif
%description devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -397,6 +405,7 @@ is in user space and easily manageable.
This package provides the development libraries and include files.
+%if ( 0%{!?_without_extra_xlators:1} )
%package extra-xlators
Summary: Extra Gluster filesystem Translators
# We need python-gluster rpm for gluster module's __init__.py in Python
@@ -415,6 +424,7 @@ is in user space and easily manageable.
This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
+%endif
%package fuse
Summary: Fuse client
@@ -440,6 +450,30 @@ is in user space and easily manageable.
This package provides support to FUSE based clients and inlcudes the
glusterfs(d) binary.
+%if ( 0%{!?_without_server:1} )
+%package ganesha
+Summary: NFS-Ganesha configuration
+Group: Applications/File
+
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
+Requires: nfs-ganesha-gluster, pcs, dbus
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+Requires: cman, pacemaker, corosync
+%endif
+
+%description ganesha
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over Infiniband RDMA
+or TCP/IP interconnect into one large parallel network file
+system. GlusterFS is one of the most sophisticated file systems in
+terms of features and extensibility. It borrows a powerful concept
+called Translators from GNU Hurd kernel. Much of the code in GlusterFS
+is in user space and easily manageable.
+
+This package provides the configuration and related files for using
+NFS-Ganesha as the NFS server using GlusterFS
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary: GlusterFS Geo-replication
@@ -541,6 +575,7 @@ is in user space and easily manageable.
This package provides support to ib-verbs library.
%endif
+%if ( 0%{!?_without_regression_tests:1} )
%package regression-tests
Summary: Development Tools
Requires: %{name}%{?_isa} = %{version}-%{release}
@@ -556,6 +591,7 @@ Requires: nfs-utils xfsprogs yajl psmisc bc
%description regression-tests
The Gluster Test Framework, is a suite of scripts used for
regression testing of Gluster.
+%endif
%if ( 0%{!?_without_ocf:1} )
%package resource-agents
@@ -1092,6 +1128,16 @@ exit 0
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
%{_tmpfilesdir}/gluster.conf
%endif
+%if ( 0%{?_without_extra_xlators:1} )
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
+%endif
+%if ( 0%{?_without_regression_tests:1} )
+%exclude %{_datadir}/glusterfs/run-tests.sh
+%exclude %{_datadir}/glusterfs/tests
+%endif
%files api
%exclude %{_libdir}/*.so
@@ -1134,12 +1180,14 @@ exit 0
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
+%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
+%endif
%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
@@ -1239,11 +1287,13 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
+%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%dir %{_datadir}/glusterfs
%{_datadir}/glusterfs/run-tests.sh
%{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
+%endif
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
@@ -1424,6 +1474,433 @@ exit 0
%endif
%endif
+##-----------------------------------------------------------------------------
+## All %pretrans should be placed here and keep them sorted
+##
+%if 0%{!?_without_server:1}
+%pretrans -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
+ echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
+ echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
+ echo "WARNING: Refer upgrade section of install guide for more details"
+ echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api-devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans fuse -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if 0%{?_can_georeplicate}
+%if ( 0%{!?_without_georeplication:1} )
+%pretrans geo-replication -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+%endif
+
+
+
+%pretrans libs -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if ( 0%{!?_without_rdma:1} )
+%pretrans rdma -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%if ( 0%{!?_without_ocf:1} )
+%pretrans resource-agents -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%pretrans server -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+
%changelog
* Wed Mar 6 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove unneeded ldconfig in scriptlets
--
1.8.3.1

35
SOURCES/0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch

@ -0,0 +1,35 @@
From 0ab54c5b274f29fcdd4787325c7183a84e875bbc Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Thu, 22 May 2014 08:37:27 +0530
Subject: [PATCH 08/52] secalert: remove setuid bit for fusermount-glusterfs

glusterfs-fuse: File /usr/bin/fusermount-glusterfs on x86_64 is setuid
root but is not on the setxid whitelist

Label: DOWNSTREAM ONLY

Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=989480
Change-Id: Icf6e5db72ae15ccc60b02be6713fb6c4f4c8a15f
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/25453
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60135
Tested-by: Milind Changire <mchangir@redhat.com>
---
contrib/fuse-util/Makefile.am | 1 -
1 file changed, 1 deletion(-)

diff --git a/contrib/fuse-util/Makefile.am b/contrib/fuse-util/Makefile.am
index abbc10e..a071c81 100644
--- a/contrib/fuse-util/Makefile.am
+++ b/contrib/fuse-util/Makefile.am
@@ -9,6 +9,5 @@ AM_CFLAGS = -Wall $(GF_CFLAGS)
install-exec-hook:
-chown root $(DESTDIR)$(bindir)/fusermount-glusterfs
- chmod u+s $(DESTDIR)$(bindir)/fusermount-glusterfs
CLEANFILES =
--
1.8.3.1

57
SOURCES/0009-build-introduce-security-hardening-flags-in-gluster.patch

@ -0,0 +1,57 @@
From 2adb5d540e9344149ae2591811ad34928775e6fd Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Wed, 3 Jun 2015 11:09:21 +0530
Subject: [PATCH 09/52] build: introduce security hardening flags in gluster

This patch introduces two of the security hardening compiler flags RELRO & PIE
in gluster codebase. Using _hardened_build as 1 doesn't guarantee the existence
of these flags in the compilation as different versions of RHEL have different
redhat-rpm-config macro. So the idea is to export these flags at spec file
level.

Label: DOWNSTREAM ONLY

Change-Id: I0a1a56d0a8f54f110d306ba5e55e39b1b073dc84
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/49780
Reviewed-by: Balamurugan Arumugam <barumuga@redhat.com>
Tested-by: Balamurugan Arumugam <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60137
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index eb04491..8a31a98 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -736,6 +736,25 @@ done
%build
+# In RHEL7 few hardening flags are available by default, however the RELRO
+# default behaviour is partial, convert to full
+%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
+LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
+export LDFLAGS
+%else
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
+LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
+%else
+#It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and
+ # -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
+CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
+LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
+%endif
+export CFLAGS
+export LDFLAGS
+%endif
+
./autogen.sh && %configure \
%{?_with_asan} \
%{?_with_cmocka} \
--
1.8.3.1

100
SOURCES/0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch

@ -0,0 +1,100 @@
From bf5906cbc9bf986c7495db792d098001e28c47e3 Mon Sep 17 00:00:00 2001
From: Niels de Vos <ndevos@redhat.com>
Date: Wed, 22 Apr 2015 15:39:59 +0200
Subject: [PATCH 10/52] spec: fix/add pre-transaction scripts for geo-rep and
cli packages

The cli subpackage never had a %pretrans script, this has been added
now.

The %pretrans script for geo-replication was never included in the RPM
package because it was disabled by an undefined macro (_can_georeplicate).
This macro is not used/set anywhere else and _without_georeplication
should take care of it anyway.

Note: This is a Red Hat Gluster Storage specific patch. Upstream
packaging guidelines do not allow these kind of 'features'.

Label: DOWNSTREAM ONLY

Change-Id: I16aab5bba72f1ed178f3bcac47f9d8ef767cfcef
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50491
Reviewed-on: https://code.engineering.redhat.com/gerrit/60138
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 43 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 41 insertions(+), 2 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 8a31a98..b70dbfc 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1626,6 +1626,47 @@ end
+%pretrans cli -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
%pretrans devel -p <lua>
if not posix.access("/bin/bash", "x") then
-- initial installation, no shell, no running glusterfsd
@@ -1708,7 +1749,6 @@ end
-%if 0%{?_can_georeplicate}
%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
@@ -1749,7 +1789,6 @@ if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
%endif
-%endif
--
1.8.3.1

138
SOURCES/0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch

@ -0,0 +1,138 @@
From 40eb62a8872ce061416e899fb6c0784b6253ab16 Mon Sep 17 00:00:00 2001
From: Niels de Vos <ndevos@redhat.com>
Date: Fri, 7 Dec 2018 14:05:21 +0530
Subject: [PATCH 11/52] rpm: glusterfs-devel for client-builds should not
depend on -server

glusterfs-devel for client-side packages should *not* include the
libgfdb.so symlink and libgfdb.pc file or any of the libchangelog
ones.

Label: DOWNSTREAM ONLY

Change-Id: Ifb4a9cf48841e5af5dd0a98b6de51e2ee469fc56
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/51019
Reviewed-by: Balamurugan Arumugam <barumuga@redhat.com>
Tested-by: Balamurugan Arumugam <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60139
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 86 +++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 62 insertions(+), 24 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index b70dbfc..1c631db 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -450,30 +450,6 @@ is in user space and easily manageable.
This package provides support to FUSE based clients and inlcudes the
glusterfs(d) binary.
-%if ( 0%{!?_without_server:1} )
-%package ganesha
-Summary: NFS-Ganesha configuration
-Group: Applications/File
-
-Requires: %{name}-server%{?_isa} = %{version}-%{release}
-Requires: nfs-ganesha-gluster, pcs, dbus
-%if ( 0%{?rhel} && 0%{?rhel} == 6 )
-Requires: cman, pacemaker, corosync
-%endif
-
-%description ganesha
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the configuration and related files for using
-NFS-Ganesha as the NFS server using GlusterFS
-%endif
-
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary: GlusterFS Geo-replication
@@ -1157,6 +1133,62 @@ exit 0
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
+%if 0%{?_without_server:1}
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
+%exclude %{_sysconfdir}/glusterfs/glusterd.vol
+%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
+%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
+%exclude %{_sysconfdir}/glusterfs/group-db-workload
+%exclude %{_sysconfdir}/glusterfs/group-distributed-virt
+%exclude %{_sysconfdir}/glusterfs/group-gluster-block
+%exclude %{_sysconfdir}/glusterfs/group-metadata-cache
+%exclude %{_sysconfdir}/glusterfs/group-nl-cache
+%exclude %{_sysconfdir}/glusterfs/group-virt.example
+%exclude %{_sysconfdir}/glusterfs/logger.conf.example
+%exclude %{_sysconfdir}/rsyslog.d/gluster.conf.example
+%exclude %{_prefix}/bin/glusterfind
+%exclude %{_prefix}/lib/firewalld/services/glusterfs.xml
+%exclude %{_prefix}/lib/systemd/system/glusterd.service
+%exclude %{_prefix}/lib/systemd/system/glusterfssharedstorage.service
+%exclude %{_prefix}/lib/tmpfiles.d/gluster.conf
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix-locks.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quotad.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/thin-arbiter.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
+%exclude %{_libexecdir}/glusterfs/*
+%exclude %{_sbindir}/conf.py
+%exclude %{_sbindir}/gcron.py
+%exclude %{_sbindir}/gf_attach
+%exclude %{_sbindir}/gfind_missing_files
+%exclude %{_sbindir}/glfsheal
+%exclude %{_sbindir}/gluster
+%exclude %{_sbindir}/gluster-setgfid2path
+%exclude %{_sbindir}/glusterd
+%exclude %{_sbindir}/snap_scheduler.py
+%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
+%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
+%exclude %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
+%exclude %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
+%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
+%exclude %{_sharedstatedir}/glusterd/*
+%endif
%files api
%exclude %{_libdir}/*.so
@@ -1190,7 +1222,13 @@ exit 0
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%{_libdir}/*.so
+%if ( 0%{?_without_server:1} )
+%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
+%exclude %{_libdir}/libgfchangelog.so
+%else
%{_libdir}/pkgconfig/libgfchangelog.pc
+%{_libdir}/libgfchangelog.so
+%endif
%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
--
1.8.3.1

73
SOURCES/0012-build-add-pretrans-check.patch

@ -0,0 +1,73 @@
From f054086daf4549a6227196fe37a57a7e49aa5849 Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Fri, 7 Dec 2018 14:13:40 +0530
Subject: [PATCH 12/52] build: add pretrans check

This patch adds pretrans check for client-xlators

NOTE: ganesha and python-gluster sub-packages are now obsolete

Label: DOWNSTREAM ONLY

Change-Id: I454016319832c11902c0ca79a79fbbcf8ac0a121
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50967
Reviewed-on: https://code.engineering.redhat.com/gerrit/60140
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 1c631db..a1ff6e0 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1704,6 +1704,45 @@ if not (ok == 0) then
end
+%pretrans client-xlators -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
%pretrans devel -p <lua>
if not posix.access("/bin/bash", "x") then
--
1.8.3.1

50
SOURCES/0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch

@ -0,0 +1,50 @@
From 39932e6bbc8de25813387bb1394cc7942b79ef46 Mon Sep 17 00:00:00 2001
From: anand <anekkunt@redhat.com>
Date: Wed, 18 Nov 2015 16:13:46 +0530
Subject: [PATCH 13/52] glusterd: fix info file checksum mismatch during
upgrade

peers are moving rejected state when upgrading from RHS2.1 to RHGS3.1.2
due to checksum mismatch.

Label: DOWNSTREAM ONLY

Change-Id: Ifea6b7dfe8477c7f17eefc5ca87ced58aaa21c84
Signed-off-by: anand <anekkunt@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/61774
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-store.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 51ca3d1..fb52957 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -1009,10 +1009,18 @@ glusterd_volume_exclude_options_write(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->op_version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
- if (ret)
- goto out;
+ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) {
+ snprintf (buf, sizeof (buf), "%d", volinfo->op_version);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
+ if (ret)
+ goto out;
+
+ snprintf (buf, sizeof (buf), "%d", volinfo->client_op_version);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
+ buf);
+ if (ret)
+ goto out;
+ }
snprintf(buf, sizeof(buf), "%d", volinfo->client_op_version);
ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
--
1.8.3.1

72
SOURCES/0014-build-spec-file-conflict-resolution.patch

@ -0,0 +1,72 @@
From f76d2370160c50a1f59d08a03a444254c289da60 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Fri, 7 Dec 2018 16:18:07 +0530
Subject: [PATCH 14/52] build: spec file conflict resolution

Missed conflict resolution for removing references to
gluster.conf.example as mentioned in patch titled:
packaging: gratuitous dependencies on rsyslog-mm{count,jsonparse}
by Kaleb

References to hook scripts S31ganesha-start.sh and
S31ganesha-reset.sh got lost in the downstream only
patch conflict resolution.

Commented blanket reference to %{_sharedsstatedir}/glusterd/*
in section %files server to avoid rpmbuild warning related to
multiple references to hook scripts and other files under
/var/lib/glusterd.

Label: DOWNSTREAM ONLY

Change-Id: I9d409f1595ab985ed9f79d9d4f4298877609ba17
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70535
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Tested-by: Rajesh Joseph <rjoseph@redhat.com>
---
glusterfs.spec.in | 21 +--------------------
1 file changed, 1 insertion(+), 20 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index a1ff6e0..8c57f57 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -96,9 +96,7 @@
# disable server components forcefully as rhel <= 6
%if ( 0%{?rhel} )
-%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
-%global _without_server %{nil}
-%else
+%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )))
%global _without_server --without-server
%endif
%endif
@@ -836,23 +834,6 @@ install -D -p -m 0644 extras/glusterfs-georep-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif
-%if ( 0%{!?_without_syslog:1} )
-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
-install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \
- %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
-%endif
-
-%if ( 0%{?rhel} && 0%{?rhel} == 6 )
-install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \
- %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
-%endif
-
-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
-install -D -p -m 0644 extras/logger.conf.example \
- %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example
-%endif
-%endif
-
%if ( 0%{!?_without_server:1} )
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
--
1.8.3.1

198
SOURCES/0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch

@ -0,0 +1,198 @@
From 3d0e09400dc21dbb5f76fd9ca4bfce3edad0d626 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Fri, 14 Oct 2016 12:53:27 +0530
Subject: [PATCH 15/52] build: randomize temp file names in pretrans scriptlets

Security issue CVE-2015-1795 mentions about possibility of file name
spoof attack for the %pretrans server scriptlet.
Since %pretrans scriptlets are executed only for server builds, we can
use os.tmpname() to randomize temporary file names for all %pretrans
scriptlets using this mechanism.

Label: DOWNSTREAM ONLY

Change-Id: Ic82433897432794b6d311d836355aa4bad886369
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/86187
Reviewed-by: Siddharth Sharma <siddharth@redhat.com>
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
glusterfs.spec.in | 84 +++++++++++++++++++++++++++++++------------------------
1 file changed, 48 insertions(+), 36 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 8c57f57..3a98822 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1549,9 +1549,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1590,9 +1591,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1631,9 +1633,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1672,9 +1675,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1712,9 +1716,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1752,9 +1757,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1793,9 +1799,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1835,9 +1842,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1877,9 +1885,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1919,9 +1928,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1962,9 +1972,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -2004,9 +2015,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
--
1.8.3.1

42
SOURCES/0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch

@ -0,0 +1,42 @@
From c283f15ac9bfb1c98ce95ed0000ebed81cd3b318 Mon Sep 17 00:00:00 2001
From: Poornima G <pgurusid@redhat.com>
Date: Wed, 26 Apr 2017 14:07:58 +0530
Subject: [PATCH 16/52] glusterd, parallel-readdir: Change the op-version of
parallel-readdir to 31100

Issue: Downstream 3.2 was released with op-version 31001, parallel-readdir
feature in upstream was released in 3.10 and hence with op-version 31000.
With this, parallel-readdir will be allowed in 3.2 cluster/clients as well.
But 3.2 didn't have parallel-readdir feature backported.

Fix:
Increase the op-version of parallel-readdir feature only in downstream
to 31100(3.3 highest op-version)

Label: DOWNSTREAM ONLY

Change-Id: I2640520985627f3a1cb4fb96e28350f8bb9b146c
Signed-off-by: Poornima G <pgurusid@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/104403
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index d07fc10..a31ecda 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2718,7 +2718,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.option = "parallel-readdir",
.value = "off",
.type = DOC,
- .op_version = GD_OP_VERSION_3_10_0,
+ .op_version = GD_OP_VERSION_3_11_0,
.validate_fn = validate_parallel_readdir,
.description = "If this option is enabled, the readdir operation "
"is performed in parallel on all the bricks, thus "
--
1.8.3.1

37
SOURCES/0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch

@ -0,0 +1,37 @@
From 5d3315a53611f23a69f88bc8266448e258e2e10f Mon Sep 17 00:00:00 2001
From: Samikshan Bairagya <sbairagy@redhat.com>
Date: Mon, 10 Jul 2017 11:54:52 +0530
Subject: [PATCH 17/52] glusterd: Revert op-version for
"cluster.max-brick-per-process"

The op-version for the "cluster.max-brick-per-process" option was
set to 3.12.0 in the upstream patch and was backported here:
https://code.engineering.redhat.com/gerrit/#/c/111799. This commit
reverts the op-version for this option to 3.11.1 instead.

Label: DOWNSTREAM ONLY

Change-Id: I23639cef43d41915eea0394d019b1e0796a99d7b
Signed-off-by: Samikshan Bairagya <sbairagy@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/111804
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index a31ecda..9a6fe9f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2794,7 +2794,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = GLUSTERD_BRICKMUX_LIMIT_KEY,
.voltype = "mgmt/glusterd",
.value = GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE,
- .op_version = GD_OP_VERSION_3_12_0,
+ .op_version = GD_OP_VERSION_3_11_1,
.validate_fn = validate_mux_limit,
.type = GLOBAL_DOC,
.description = "This option can be used to limit the number of brick "
--
1.8.3.1

56
SOURCES/0018-cli-Add-message-for-user-before-modifying-brick-mult.patch

@ -0,0 +1,56 @@
From 539626a64e5b8cfe05d42f5398073e8a57644073 Mon Sep 17 00:00:00 2001
From: Samikshan Bairagya <sbairagy@redhat.com>
Date: Wed, 9 Aug 2017 14:32:59 +0530
Subject: [PATCH 18/52] cli: Add message for user before modifying
brick-multiplex option

Users should be notified that the brick-multiplexing feature is
supported only for container workloads (CNS/CRS). It should also be
made known to users that it is advisable to either have all volumes
in stopped state or have no bricks running before modifying the
"brick-multiplex" option. This commit makes sure these messages
are displayed to the user before brick-multiplexing is enabled or
disabled.

Label: DOWNSTREAM ONLY

Change-Id: Ic40294b26c691ea03185c4d1fce840ef23f95718
Signed-off-by: Samikshan Bairagya <sbairagy@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/114793
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
cli/src/cli-cmd-parser.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index d9913f6..f148c59 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1698,6 +1698,24 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
}
}
+ if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
+ question = "Brick-multiplexing is supported only for "
+ "container workloads (CNS/CRS). Also it is "
+ "advised to make sure that either all "
+ "volumes are in stopped state or no bricks "
+ "are running before this option is modified."
+ "Do you still want to continue?";
+
+ answer = cli_cmd_get_confirmation (state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log ("cli", GF_LOG_ERROR, "Operation "
+ "cancelled, exiting");
+ *op_errstr = gf_strdup ("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
+ }
+
ret = dict_set_int32(dict, "count", wordcount - 3);
if (ret)
--
1.8.3.1

99
SOURCES/0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch

@ -0,0 +1,99 @@
From 8a3035bf612943694a3cd1c6a857bd009e84f55d Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Tue, 10 Oct 2017 09:58:24 +0530
Subject: [PATCH 19/52] build: launch glusterd upgrade after all new bits are
installed

Problem:
glusterd upgrade mode needs new bits from glusterfs-rdma which is
optional and causes the dependency graph to break since it is
not tied into glusterfs-server requirements

Solution:
Run glusterd upgrade mode after all new bits are installed
i.e. in %posttrans server section

Label: DOWNSTREAM ONLY

Change-Id: I356e02d0bf0eaaef43c20ce07b388262f63093a4
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/120094
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
---
glusterfs.spec.in | 51 +++++++++++++++++++++++++++++----------------------
1 file changed, 29 insertions(+), 22 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 3a98822..208a82d 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -946,28 +946,6 @@ fi
%firewalld_reload
%endif
-pidof -c -o %PPID -x glusterd &> /dev/null
-if [ $? -eq 0 ]; then
- kill -9 `pgrep -f gsyncd.py` &> /dev/null
-
- killall --wait glusterd &> /dev/null
- glusterd --xlator-option *.upgrade=on -N
-
- #Cleaning leftover glusterd socket file which is created by glusterd in
- #rpm_script_t context.
- rm -f %{_rundir}/glusterd.socket
-
- # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
- # so start it again
- %service_start glusterd
-else
- glusterd --xlator-option *.upgrade=on -N
-
- #Cleaning leftover glusterd socket file which is created by glusterd in
- #rpm_script_t context.
- rm -f %{_rundir}/glusterd.socket
-fi
-exit 0
%endif
##-----------------------------------------------------------------------------
@@ -2027,6 +2005,35 @@ os.remove(tmpname)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
+
+%posttrans server
+pidof -c -o %PPID -x glusterd &> /dev/null
+if [ $? -eq 0 ]; then
+ kill -9 `pgrep -f gsyncd.py` &> /dev/null
+
+ killall --wait -SIGTERM glusterd &> /dev/null
+
+ if [ "$?" != "0" ]; then
+ echo "killall failed while killing glusterd"
+ fi
+
+ glusterd --xlator-option *.upgrade=on -N
+
+ #Cleaning leftover glusterd socket file which is created by glusterd in
+ #rpm_script_t context.
+ rm -rf /var/run/glusterd.socket
+
+ # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
+ # so start it again
+ %service_start glusterd
+else
+ glusterd --xlator-option *.upgrade=on -N
+
+ #Cleaning leftover glusterd socket file which is created by glusterd in
+ #rpm_script_t context.
+ rm -rf /var/run/glusterd.socket
+fi
+
%endif
%changelog
--
1.8.3.1

38
SOURCES/0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch

@ -0,0 +1,38 @@
From 968e5e698a070f9e6905a86c9c8338c36fcfa339 Mon Sep 17 00:00:00 2001
From: moagrawa <moagrawa@redhat.com>
Date: Mon, 15 Jan 2018 18:21:27 +0530
Subject: [PATCH 20/52] spec: unpackaged files found for RHEL-7 client build

Problem: unpackaged files found for RHEL-7 client build

Solution: Update glusterfs.spec.in to exclude unpackaged files
Label: DOWNSTREAM ONLY

Change-Id: I761188a6a8447105b53bf3334ded963c645cab5b
Signed-off-by: moagrawa <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/127758
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 2 ++
1 file changed, 2 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 208a82d..ec06176 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1141,8 +1141,10 @@ exit 0
%exclude %{_sbindir}/gluster-setgfid2path
%exclude %{_sbindir}/glusterd
%exclude %{_sbindir}/snap_scheduler.py
+%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
+%endif
%exclude %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
%exclude %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
--
1.8.3.1

66
SOURCES/0021-cli-glusterfsd-remove-copyright-information.patch

@ -0,0 +1,66 @@
From fbc7f0e5ac8c292b865a8e02ceed2efa101d145c Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 12 Mar 2018 19:47:11 +0530
Subject: [PATCH 21/52] cli/glusterfsd: remove copyright information

There's no point of dumping upstream copyright information in --version.

Label: DOWNSTREAM ONLY

Change-Id: I3a10e30878698e1d53082936bbf22bca560a3896
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/132445
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
cli/src/cli.c | 11 +----------
glusterfsd/src/glusterfsd.c | 11 +----------
2 files changed, 2 insertions(+), 20 deletions(-)

diff --git a/cli/src/cli.c b/cli/src/cli.c
index 84ce0f4..08f117e 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -65,16 +65,7 @@ extern int connected;
/* using argp for command line parsing */
const char *argp_program_version =
- "" PACKAGE_NAME " " PACKAGE_VERSION
- "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION
- "\n"
- "Copyright (c) 2006-2016 Red Hat, Inc. "
- "<https://www.gluster.org/>\n"
- "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n"
- "It is licensed to you under your choice of the GNU Lesser\n"
- "General Public License, version 3 or any later version (LGPLv3\n"
- "or later), or the GNU General Public License, version 2 (GPLv2),\n"
- "in all cases as published by the Free Software Foundation.";
+ PACKAGE_NAME" "PACKAGE_VERSION;
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
struct rpc_clnt *global_quotad_rpc;
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 5d46b3d..c983882 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -86,16 +86,7 @@ static char argp_doc[] =
"--volfile-server=SERVER [MOUNT-POINT]\n"
"--volfile=VOLFILE [MOUNT-POINT]";
const char *argp_program_version =
- "" PACKAGE_NAME " " PACKAGE_VERSION
- "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION
- "\n"
- "Copyright (c) 2006-2016 Red Hat, Inc. "
- "<https://www.gluster.org/>\n"
- "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n"
- "It is licensed to you under your choice of the GNU Lesser\n"
- "General Public License, version 3 or any later version (LGPLv3\n"
- "or later), or the GNU General Public License, version 2 (GPLv2),\n"
- "in all cases as published by the Free Software Foundation.";
+ PACKAGE_NAME" "PACKAGE_VERSION;
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
static error_t
--
1.8.3.1

40
SOURCES/0022-cli-Remove-upstream-doc-reference.patch

@ -0,0 +1,40 @@
From 00db0c44d109e6f3e394487bf76ff28ba2eee7de Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Thu, 15 Mar 2018 12:56:02 +0530
Subject: [PATCH 22/52] cli: Remove upstream doc reference

...that is displayed while creating replica 2 volumes.

Label: DOWNSTREAM ONLY

Change-Id: I16b45c8ad3a33cdd2a464d84f51d006d8f568b23
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/132744
Reviewed-by: Karthik Subrahmanya <ksubrahm@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
cli/src/cli-cmd-parser.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index f148c59..760a10c 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -606,11 +606,8 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
"Replica 2 volumes are prone"
" to split-brain. Use "
"Arbiter or Replica 3 to "
- "avoid this. See: "
- "http://docs.gluster.org/en/latest/"
- "Administrator%20Guide/"
- "Split%20brain%20and%20ways%20to%20deal%20with%20it/."
- "\nDo you still want to "
+ "avoid this.\n"
+ "Do you still want to "
"continue?\n";
answer = cli_cmd_get_confirmation(state, question);
if (GF_ANSWER_NO == answer) {
--
1.8.3.1

148
SOURCES/0023-hooks-remove-selinux-hooks.patch

@ -0,0 +1,148 @@
From 421743b7cfa6a249544f6abb4cca5a612bd20ea1 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 11 Dec 2018 16:21:43 +0530
Subject: [PATCH 23/52] hooks: remove selinux hooks

Label: DOWNSTREAM ONLY

Change-Id: I810466a0ca99ab21f5a8eac8cdffbb18333d10ad
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/135800
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Jiffin Thottan <jthottan@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
configure.ac | 20 --------------------
extras/hook-scripts/Makefile.am | 2 +-
extras/hook-scripts/create/Makefile.am | 1 -
extras/hook-scripts/create/post/Makefile.am | 8 --------
extras/hook-scripts/delete/Makefile.am | 1 -
extras/hook-scripts/delete/pre/Makefile.am | 8 --------
glusterfs.spec.in | 2 --
7 files changed, 1 insertion(+), 41 deletions(-)
delete mode 100644 extras/hook-scripts/create/Makefile.am
delete mode 100644 extras/hook-scripts/create/post/Makefile.am
delete mode 100644 extras/hook-scripts/delete/Makefile.am
delete mode 100644 extras/hook-scripts/delete/pre/Makefile.am

diff --git a/configure.ac b/configure.ac
index 2f341de..0d06f5a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -214,10 +214,6 @@ AC_CONFIG_FILES([Makefile
extras/hook-scripts/add-brick/Makefile
extras/hook-scripts/add-brick/pre/Makefile
extras/hook-scripts/add-brick/post/Makefile
- extras/hook-scripts/create/Makefile
- extras/hook-scripts/create/post/Makefile
- extras/hook-scripts/delete/Makefile
- extras/hook-scripts/delete/pre/Makefile
extras/hook-scripts/start/Makefile
extras/hook-scripts/start/post/Makefile
extras/hook-scripts/set/Makefile
@@ -909,21 +905,6 @@ fi
AM_CONDITIONAL([BUILD_CLOUDSYNC], [test "x$BUILD_CLOUDSYNC" = "xyes"])
dnl end cloudsync section
-dnl SELinux feature enablement
-case $host_os in
- linux*)
- AC_ARG_ENABLE([selinux],
- AC_HELP_STRING([--disable-selinux],
- [Disable SELinux features]),
- [USE_SELINUX="${enableval}"], [USE_SELINUX="yes"])
- ;;
- *)
- USE_SELINUX=no
- ;;
-esac
-AM_CONDITIONAL(USE_SELINUX, test "x${USE_SELINUX}" = "xyes")
-dnl end of SELinux feature enablement
-
AC_CHECK_HEADERS([execinfo.h], [have_backtrace=yes])
if test "x${have_backtrace}" = "xyes"; then
AC_DEFINE(HAVE_BACKTRACE, 1, [define if found backtrace])
@@ -1599,7 +1580,6 @@ echo "XML output : $BUILD_XML_OUTPUT"
echo "Unit Tests : $BUILD_UNITTEST"
echo "Track priv ports : $TRACK_PRIVPORTS"
echo "POSIX ACLs : $BUILD_POSIX_ACLS"
-echo "SELinux features : $USE_SELINUX"
echo "firewalld-config : $BUILD_FIREWALLD"
echo "Events : $BUILD_EVENTS"
echo "EC dynamic support : $EC_DYNAMIC_SUPPORT"
diff --git a/extras/hook-scripts/Makefile.am b/extras/hook-scripts/Makefile.am
index 26059d7..771b37e 100644
--- a/extras/hook-scripts/Makefile.am
+++ b/extras/hook-scripts/Makefile.am
@@ -1,5 +1,5 @@
EXTRA_DIST = S40ufo-stop.py S56glusterd-geo-rep-create-post.sh
-SUBDIRS = add-brick create delete set start stop reset
+SUBDIRS = add-brick set start stop reset
scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/gsync-create/post/
if USE_GEOREP
diff --git a/extras/hook-scripts/create/Makefile.am b/extras/hook-scripts/create/Makefile.am
deleted file mode 100644
index b083a91..0000000
--- a/extras/hook-scripts/create/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = post
diff --git a/extras/hook-scripts/create/post/Makefile.am b/extras/hook-scripts/create/post/Makefile.am
deleted file mode 100644
index fd1892e..0000000
--- a/extras/hook-scripts/create/post/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-EXTRA_DIST = S10selinux-label-brick.sh
-
-scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/create/post/
-if WITH_SERVER
-if USE_SELINUX
-scripts_SCRIPTS = S10selinux-label-brick.sh
-endif
-endif
diff --git a/extras/hook-scripts/delete/Makefile.am b/extras/hook-scripts/delete/Makefile.am
deleted file mode 100644
index c98a05d..0000000
--- a/extras/hook-scripts/delete/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = pre
diff --git a/extras/hook-scripts/delete/pre/Makefile.am b/extras/hook-scripts/delete/pre/Makefile.am
deleted file mode 100644
index 4fbfbe7..0000000
--- a/extras/hook-scripts/delete/pre/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-EXTRA_DIST = S10selinux-del-fcontext.sh
-
-scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/delete/pre/
-if WITH_SERVER
-if USE_SELINUX
-scripts_SCRIPTS = S10selinux-del-fcontext.sh
-endif
-endif
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index ec06176..db50b8e 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1413,7 +1413,6 @@ exit 0
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
- %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post/S10selinux-label-brick.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
@@ -1422,7 +1421,6 @@ exit 0
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
%{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
- %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
--
1.8.3.1

50
SOURCES/0024-glusterd-Make-localtime-logging-option-invisible-in-.patch

@ -0,0 +1,50 @@
From 79c19f0c6d02228aa8cf4b9299afeb7e0b2ad0da Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 16 Apr 2018 17:44:19 +0530
Subject: [PATCH 24/52] glusterd: Make localtime-logging option invisible in
downstream

Label: DOWNSTREAM ONLY

Change-Id: Ie631edebb7e19152392bfd3c369a96e88796bd75
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/135754
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index dd3f9eb..cbbb5d9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -86,7 +86,7 @@ glusterd_all_vol_opts valid_all_vol_opts[] = {
* TBD: Discuss the default value for this. Maybe this should be a
* dynamic value depending on the memory specifications per node */
{GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
- {GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
+ /*{GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},*/
{GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
{NULL},
};
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 9a6fe9f..fed2864 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2850,10 +2850,11 @@ struct volopt_map_entry glusterd_volopt_map[] = {
"to have enabled when clients and/or bricks support "
"SELinux."},
{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
+ /*{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_DOC,
.op_version = GD_OP_VERSION_3_12_0,
- .validate_fn = validate_boolean},
+ .validate_fn = validate_boolean},*/
{.key = GLUSTERD_DAEMON_LOG_LEVEL_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_NO_DOC,
--
1.8.3.1

45
SOURCES/0025-build-make-RHGS-version-available-for-server.patch

@ -0,0 +1,45 @@
From 12ae1a9a62c2c94af44f55b03575ab8806bd22ee Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 23 Apr 2018 13:16:30 +0530
Subject: [PATCH 25/52] build: make RHGS version available for server

Make /usr/share/glusterfs/release available for the glusterfs-server package.
This file contains the RHGS release number for the release.

Label: DOWNSTREAM ONLY

Change-Id: I7485f77cfb8ca7f0f8363a20124900ae9ae8a528
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/137139
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
glusterfs.spec.in | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index db50b8e..bdb47ba 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -862,6 +862,10 @@ install -p -m 0744 -D extras/command-completion/gluster.bash \
%{buildroot}%{_sysconfdir}/bash_completion.d/gluster
%endif
+%if ( 0%{!?_without_server:1} )
+echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
+%endif
+
%clean
rm -rf %{buildroot}
@@ -1452,6 +1456,7 @@ exit 0
# Extra utility script
%dir %{_libexecdir}/glusterfs
+ %{_datadir}/glusterfs/release
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
--
1.8.3.1

68
SOURCES/0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch

@ -0,0 +1,68 @@
From a3538a7d1fb7674acdf0934847f4004d8fbc4709 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Tue, 11 Dec 2018 17:57:50 +0530
Subject: [PATCH 26/52] glusterd: Introduce daemon-log-level cluster wide
option

This option, applicable to the node level daemons can be very helpful in
controlling the log level of these services. Please note any daemon
which is started prior to setting the specific value of this option (if
not INFO) will need to go through a restart to have this change into
effect.

> upstream patch : https://review.gluster.org/#/c/20442/

Please note there's a difference in the downstream delta. The op-version
against this option is already tagged as 3_11_2 in RHGS 3.3.1 and hence
the same is retained. Marking this with the DOWNSTREAM ONLY label because of

Label: DOWNSTREAM ONLY

IMPORTANT:
This patch only sets .op_version in glusterd-volume-set.c to
GD_OP_VERSION_3_11_2 as per Atin's recommendation on
Tue, Dec 11, 2018 5:46pm IST

>Change-Id: I7f6d2620bab2b094c737f5cc816bc093e9c9c4c9
>fixes: bz#1597473
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>

Change-Id: I7f6d2620bab2b094c737f5cc816bc093e9c9c4c9
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/143137
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index b9da872..a278f18 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -104,6 +104,8 @@
#define GD_OP_VERSION_3_11_1 31101 /* Op-version for GlusterFS 3.11.1 */
+#define GD_OP_VERSION_3_11_2 31102 /* Op-version for GlusterFS 3.11.2 */
+
#define GD_OP_VERSION_3_12_0 31200 /* Op-version for GlusterFS 3.12.0 */
#define GD_OP_VERSION_3_12_2 31202 /* Op-version for GlusterFS 3.12.2 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index fed2864..84f2705 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2859,7 +2859,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "mgmt/glusterd",
.type = GLOBAL_NO_DOC,
.value = "INFO",
- .op_version = GD_OP_VERSION_5_0},
+ .op_version = GD_OP_VERSION_3_11_2},
{.key = "debug.delay-gen",
.voltype = "debug/delay-gen",
.option = "!debug",
--
1.8.3.1

50
SOURCES/0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch

@ -0,0 +1,50 @@
From 9be3c4745b161f1815f77cd19b550ac9795845f5 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Thu, 20 Sep 2018 22:01:05 +0530
Subject: [PATCH 27/52] glusterd: change op-version of fips-mode-rchecksum

..to GD_OP_VERSION_3_13_3 since GD_OP_VERSION_4_0_0 is not present in
rhgs-3.4.1

Label: DOWNSTREAM ONLY

Change-Id: I759272748177d174b15123faffc2305f7a5ec58f
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/150714
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index a278f18..4a82889 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -118,6 +118,8 @@
#define GD_OP_VERSION_3_13_2 31302 /* Op-version for GlusterFS 3.13.2 */
+#define GD_OP_VERSION_3_13_3 31303 /* Op-version for GlusterFS 3.13.3 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 84f2705..2bd0a9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2329,7 +2329,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.key = "storage.fips-mode-rchecksum",
.type = NO_DOC,
.voltype = "storage/posix",
- .op_version = GD_OP_VERSION_4_0_0,
+ .op_version = GD_OP_VERSION_3_13_3,
},
{
.option = "force-create-mode",
--
1.8.3.1

52
SOURCES/0028-glusterd-Reset-op-version-for-features.shard-deletio.patch

@ -0,0 +1,52 @@
From 64ffcf770c5c0087f8937b5235ed0ad5b0efe7f2 Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay <kdhananj@redhat.com>
Date: Wed, 12 Sep 2018 21:41:35 +0530
Subject: [PATCH 28/52] glusterd: Reset op-version for
"features.shard-deletion-rate"

The op-version for the "features.shard-deletion-rate" option was set to
4.2.0 in the upstream patch and backported at
e75be952569eb69325d5f505f7ab94aace31be52.
This commit reverts the op-version for this option to 3.13.3.

Label: DOWNSTREAM ONLY

Change-Id: Ie3d12f3119ad7a4b40d81bd8bd6ed591658e8371
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/154865
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index 4a82889..4d95f75 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -120,6 +120,8 @@
#define GD_OP_VERSION_3_13_3 31303 /* Op-version for GlusterFS 3.13.3 */
+#define GD_OP_VERSION_3_13_4 31304 /* Op-version for GlusterFS 3.13.4 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 2bd0a9c..2f3271f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2552,7 +2552,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
},
{.key = "features.shard-deletion-rate",
.voltype = "features/shard",
- .op_version = GD_OP_VERSION_5_0,
+ .op_version = GD_OP_VERSION_3_13_4,
.flags = VOLOPT_FLAG_CLIENT_OPT},
{
.key = "features.scrub-throttle",
--
1.8.3.1

39
SOURCES/0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch

@ -0,0 +1,39 @@
From b504052d003aa41fbd44eec286d1733b6f2a168e Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay <kdhananj@redhat.com>
Date: Tue, 6 Nov 2018 18:44:55 +0530
Subject: [PATCH 29/52] glusterd: Reset op-version for
"features.shard-lru-limit"

The op-version for the "features.shard-lru-limit" option was set to
4.2.0 in the upstream patch and backported at
41e7e33c6512e98a1567e5a5532d3898b59cfa98

This commit reverts the op-version for this option to 3.13.4.

Label: DOWNSTREAM ONLY

Change-Id: I7d3ed6b373851267c78fc6815a83bee2c0906413
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/155127
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Xavi Hernandez <xhernandez@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 2f3271f..4bf89a6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2546,7 +2546,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{
.key = "features.shard-lru-limit",
.voltype = "features/shard",
- .op_version = GD_OP_VERSION_5_0,
+ .op_version = GD_OP_VERSION_3_13_4,
.flags = VOLOPT_FLAG_CLIENT_OPT,
.type = NO_DOC,
},
--
1.8.3.1

42
SOURCES/0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch

@ -0,0 +1,42 @@
From 1d2d29396ee25f09c7d379a992ac9bd244e89c39 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Thu, 13 Dec 2018 14:28:57 +0530
Subject: [PATCH 30/52] selinux/glusterd : add "features.selinux" to
glusterd-volume-set.c

updates: #593
Change-Id: I38675ba4d47c8ba7f94cfb4734692683ddb3dcfd
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 4bf89a6..11265bf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1203,10 +1203,9 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "performance/io-threads",
.option = "pass-through",
.op_version = GD_OP_VERSION_4_1_0},
- {.key = "performance.least-rate-limit",
- .voltype = "performance/io-threads",
- .op_version = 1
- },
+ {.key = "performance.least-rate-limit",
+ .voltype = "performance/io-threads",
+ .op_version = 1},
/* Other perf xlators' options */
{.key = "performance.io-cache-pass-through",
@@ -2849,7 +2848,6 @@ struct volopt_map_entry glusterd_volopt_map[] = {
"trusted.gluster.selinux on the bricks. Recommended "
"to have enabled when clients and/or bricks support "
"SELinux."},
- {.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
/*{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_DOC,
--
1.8.3.1

34
SOURCES/0031-glusterd-turn-off-selinux-feature-in-downstream.patch

@ -0,0 +1,34 @@
From c3176144e531e22bfe97d0fef3b0e3e449fb1d32 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 16 Apr 2018 13:47:12 +0530
Subject: [PATCH 31/52] glusterd: turn off selinux feature in downstream

In RHGS 3.4.0 selinux feature was never meant to be qualified.

Label: DOWNSTREAM ONLY

Change-Id: I0cd5eb5207a757c8b6ef789980c061f211410bd5
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/135716
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 11265bf..d1244e4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2842,7 +2842,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = VKEY_FEATURES_SELINUX,
.voltype = "features/selinux",
.type = NO_DOC,
- .value = "on",
+ .value = "off",
.op_version = GD_OP_VERSION_3_11_0,
.description = "Convert security.selinux xattrs to "
"trusted.gluster.selinux on the bricks. Recommended "
--
1.8.3.1

29
SOURCES/0032-glusterd-update-gd-op-version-to-3_7_0.patch

@ -0,0 +1,29 @@
From bfa7055c3901b34a49f7933ea9edcf6465843de1 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Wed, 23 Jan 2019 14:22:00 +0530
Subject: [PATCH 32/52] glusterd: update gd-op-version to 3_7_0

Label: DOWNSTREAM ONLY

Change-Id: Ia6456134cd7e544a415692d09cd1ccbb6e02dd82
Signed-off-by: Milind Changire <mchangir@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-rebalance.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 6365b6e..e20e3c4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -1174,7 +1174,7 @@ glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
* 'force'
*/
ret = glusterd_check_client_op_version_support(
- volname, GD_OP_VERSION_3_6_0, NULL);
+ volname, GD_OP_VERSION_3_7_0, NULL);
if (ret) {
ret = gf_asprintf(op_errstr,
"Volume %s has one or "
--
1.8.3.1

83
SOURCES/0033-build-add-missing-explicit-package-dependencies.patch

@ -0,0 +1,83 @@
From 52e2d75c2c8e32d2e4f69840e34d21b39279284a Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Thu, 13 Dec 2018 12:46:56 +0530
Subject: [PATCH 33/52] build: add missing explicit package dependencies

Add dependencies for glusterfs-libs, and other packages.
This is an Errata Tool whine.

Label: DOWNSTREAM ONLY

Change-Id: Ieaadb6e4ffa84d1811aa740f7891855568ecbcbb
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158501
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index bdb47ba..9cd4372 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -323,6 +323,7 @@ and client framework.
Summary: GlusterFS api library
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description api
GlusterFS is a distributed file-system capable of scaling to several
@@ -340,6 +341,7 @@ Summary: Development Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-devel%{?_isa} = %{version}-%{release}
Requires: libacl-devel
+Requires: %{name}-api%{?_isa} = %{version}-%{release}
%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -391,6 +393,8 @@ Requires: %{name}%{?_isa} = %{version}-%{release}
%if ( 0%{!?_without_extra_xlators:1} )
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
%description devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -435,6 +439,7 @@ Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
Obsoletes: %{name}-client < %{version}-%{release}
Provides: %{name}-client = %{version}-%{release}
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description fuse
GlusterFS is a distributed file-system capable of scaling to several
@@ -459,6 +464,7 @@ Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
@@ -536,6 +542,7 @@ BuildRequires: libibverbs-devel
BuildRequires: librdmacm-devel >= 1.0.15
%endif
Requires: %{name}%{?_isa} = %{version}-%{release}
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description rdma
GlusterFS is a distributed file-system capable of scaling to several
@@ -664,6 +671,7 @@ This package provides the glusterfs thin-arbiter translator.
%package client-xlators
Summary: GlusterFS client-side translators
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
--
1.8.3.1

59
SOURCES/0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch

@ -0,0 +1,59 @@
From 463a920541a7579f2407f22597e4014494422804 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Mon, 17 Dec 2018 14:07:01 +0530
Subject: [PATCH 34/52] glusterd: introduce a new op-version for rhgs-3.4.3

This patch introduces a new op-version 31305 for rhgs-3.4.3 and
sets the max op-version to 31305.

For migrating profile commands (commit e68845ff7018e5d81d7979684b18e6eda449b088)
we used GD_OP_VERSION_6_0 in upstream. we are changing
it to GD_OP_VERSION_3_13_5 here.

Label: DOWNSTREAM ONLY

Change-Id: Ie3a05c70eb4e406889c468343f54e999b1218f19
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158795
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-handler.c | 4 ++--
2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index 4d95f75..6642ba0 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -122,6 +122,8 @@
#define GD_OP_VERSION_3_13_4 31304 /* Op-version for GlusterFS 3.13.4 */
+#define GD_OP_VERSION_3_13_5 31305 /* Op-version for GlusterFS 3.13.5 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 387643d..de44af7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3085,12 +3085,12 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
goto out;
}
- if (conf->op_version < GD_OP_VERSION_6_0) {
+ if (conf->op_version < GD_OP_VERSION_3_13_5) {
gf_msg_debug(this->name, 0,
"The cluster is operating at "
"version less than %d. Falling back "
"to op-sm framework.",
- GD_OP_VERSION_6_0);
+ GD_OP_VERSION_3_13_5);
ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
glusterd_friend_sm();
glusterd_op_sm();
--
1.8.3.1

41
SOURCES/0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch

@ -0,0 +1,41 @@
From 254033a80d85460675c921c272fb94bb7e9f67d4 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 18 Dec 2018 17:57:25 +0530
Subject: [PATCH 35/52] glusterd: tag rebalance mgmt_v3 command to op-version
31305

In upstream migrating rebalance command is tagged to op-version 60000
but in downstream the latest new op-version is 31305.

Label: DOWNSTREAM ONLY

Change-Id: I30bbad3efca29bf42b9a750581eb1aebc8a30ff9
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158943
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-rebalance.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index e20e3c4..ed5ded5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -573,12 +573,12 @@ __glusterd_handle_defrag_volume(rpcsvc_request_t *req)
} else
op = GD_OP_REBALANCE;
- if (priv->op_version < GD_OP_VERSION_6_0) {
+ if (priv->op_version < GD_OP_VERSION_3_13_5) {
gf_msg_debug(this->name, 0,
"The cluster is operating at "
"version less than %d. Falling back "
"to op-sm framework.",
- GD_OP_VERSION_6_0);
+ GD_OP_VERSION_3_13_5);
ret = glusterd_op_begin(req, op, dict, msg, sizeof(msg));
glusterd_friend_sm();
glusterd_op_sm();
--
1.8.3.1

47
SOURCES/0036-build-add-conditional-dependency-on-server-for-devel.patch

@ -0,0 +1,47 @@
From d6458c40706d8886187bd9c2016087a3a1eee882 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Wed, 19 Dec 2018 13:17:42 +0530
Subject: [PATCH 36/52] build: add conditional dependency on server for devel

Add conditional dependency on server for glusterfs-devel

Label: DOWNSTREAM ONLY

Change-Id: Icc45df3db137dbc03d240c1ac774b5c8735c5f2f
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/159030
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 7 +++++++
1 file changed, 7 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 9cd4372..9db5a34 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -394,7 +394,9 @@ Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+%if ( 0%{!?_without_server:1} )
Requires: %{name}-server%{?_isa} = %{version}-%{release}
+%endif
%description devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -2067,6 +2069,11 @@ fi
* Thu Feb 21 2019 Jiffin Tony Thottan <jthottan@redhat.com>
- Obsoleting gluster-gnfs package
+* Wed Dec 19 2018 Milind Changire <mchangir@redhat.com>
+- Add explicit package dependencies (#1656357)
+- Remove absolute paths from spec file (#1350745)
+- Do not package crypt.so for FIPS compliance (#1653224)
+
* Wed Nov 28 2018 Krutika Dhananjay <kdhananj@redhat.com>
- Install /var/lib/glusterd/groups/distributed-virt by default
--
1.8.3.1

35
SOURCES/0037-cli-change-the-warning-message.patch

@ -0,0 +1,35 @@
From 7e0342e0d01204f136b0bd28931a6313ea216649 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Wed, 6 Feb 2019 19:06:45 +0530
Subject: [PATCH 37/52] cli: change the warning message

This patch changes the warning message user gets, when enabling brick
multiplexing to reflect OCS instead of CNS/CRS.

Label: DOWNSTREAM ONLY

Change-Id: Id5fd87955d5a692f8e57560245f8b0cf9882e1da
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/162405
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
cli/src/cli-cmd-parser.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 760a10c..541dc62 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1697,7 +1697,7 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
question = "Brick-multiplexing is supported only for "
- "container workloads (CNS/CRS). Also it is "
+ "OCS converged or independent mode. Also it is "
"advised to make sure that either all "
"volumes are in stopped state or no bricks "
"are running before this option is modified."
--
1.8.3.1

230
SOURCES/0038-spec-avoid-creation-of-temp-file-in-lua-script.patch

@ -0,0 +1,230 @@
From a577dd0a3cbf435681f10d095a0dca0595c6a354 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Sat, 9 Feb 2019 14:01:28 +0530
Subject: [PATCH 38/52] spec: avoid creation of temp file in lua script

Avoiding creation of temporary file to execute bash shell script from a
lua scriptlet increases install time security.

Label: DOWNSTREAM ONLY

Change-Id: Ie5b9035f292402b18dea768aca8bc82a1e7fa615
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/162621
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 120 ++++++------------------------------------------------
1 file changed, 12 insertions(+), 108 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 9db5a34..df8d116 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1542,15 +1542,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1584,15 +1576,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1626,15 +1610,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1668,15 +1644,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1709,15 +1677,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1750,15 +1710,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1792,15 +1744,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1835,15 +1779,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1878,15 +1814,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1921,15 +1849,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1965,15 +1885,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -2008,15 +1920,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
--
1.8.3.1

61
SOURCES/0039-cli-fix-query-to-user-during-brick-mux-selection.patch

@ -0,0 +1,61 @@
From ec707e099d4e4338d1ea21560d367b02e6339532 Mon Sep 17 00:00:00 2001
From: Sunil Kumar Acharya <sheggodu@redhat.com>
Date: Mon, 1 Apr 2019 16:16:47 +0530
Subject: [PATCH 39/52] cli: fix query to user during brick-mux selection

Label: DOWNSTREAM ONLY

Change-Id: I59472066b917ea2b23de72bcd91dc3e275d5e055
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-parser.c | 33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 541dc62..d9ccba1 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1693,23 +1693,24 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
goto out;
}
}
- }
-
- if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
- question = "Brick-multiplexing is supported only for "
- "OCS converged or independent mode. Also it is "
- "advised to make sure that either all "
- "volumes are in stopped state or no bricks "
- "are running before this option is modified."
- "Do you still want to continue?";
- answer = cli_cmd_get_confirmation (state, question);
- if (GF_ANSWER_NO == answer) {
- gf_log ("cli", GF_LOG_ERROR, "Operation "
- "cancelled, exiting");
- *op_errstr = gf_strdup ("Aborted by user.");
- ret = -1;
- goto out;
+ if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
+ question =
+ "Brick-multiplexing is supported only for "
+ "OCS converged or independent mode. Also it is "
+ "advised to make sure that either all "
+ "volumes are in stopped state or no bricks "
+ "are running before this option is modified."
+ "Do you still want to continue?";
+
+ answer = cli_cmd_get_confirmation (state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log ("cli", GF_LOG_ERROR, "Operation "
+ "cancelled, exiting");
+ *op_errstr = gf_strdup ("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
}
}
--
1.8.3.1

136
SOURCES/0040-build-Remove-unsupported-test-cases-failing-consiste.patch

@ -0,0 +1,136 @@
From 79c74009892804419dce264399f3fde357d5b1c3 Mon Sep 17 00:00:00 2001
From: Susant Palai <spalai@redhat.com>
Date: Tue, 2 Apr 2019 11:07:03 +0530
Subject: [PATCH 40/52] build: Remove unsupported test cases failing
consistently

The following two test cases failing in downstream regression runs.
Hence removing them as they are not supported downstream.

tests/basic/cloudsync-sanity.t
tests/bugs/distribute/bug-882278.t

Label: DOWNSTREAM ONLY

Change-Id: Ie4b506639a017ec9910e44df1b721d9bfadf07b3
Signed-off-by: Susant Palai <spalai@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166662
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/basic/cloudsync-sanity.t | 22 ------------
tests/bugs/distribute/bug-882278.t | 73 --------------------------------------
2 files changed, 95 deletions(-)
delete mode 100644 tests/basic/cloudsync-sanity.t
delete mode 100755 tests/bugs/distribute/bug-882278.t

diff --git a/tests/basic/cloudsync-sanity.t b/tests/basic/cloudsync-sanity.t
deleted file mode 100644
index 3cf719d..0000000
--- a/tests/basic/cloudsync-sanity.t
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6,7,8,9};
-TEST $CLI volume set $V0 features.cloudsync enable;
-TEST $CLI volume start $V0;
-
-## Mount FUSE
-TEST $GFS -s $H0 --volfile-id $V0 $M1;
-
-# This test covers lookup, mkdir, mknod, symlink, link, rename,
-# create operations
-TEST $(dirname $0)/rpc-coverage.sh $M1
-
-cleanup;
diff --git a/tests/bugs/distribute/bug-882278.t b/tests/bugs/distribute/bug-882278.t
deleted file mode 100755
index 8cb5147..0000000
--- a/tests/bugs/distribute/bug-882278.t
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup
-
-# Is there a good reason to require --fqdn elsewhere? It's worse than useless
-# here.
-H0=$(hostname -s)
-
-function recreate {
- # The rm is necessary so we don't get fooled by leftovers from old runs.
- rm -rf $1 && mkdir -p $1
-}
-
-function count_lines {
- grep "$1" $2/* | wc -l
-}
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-## Start and create a volume
-TEST recreate ${B0}/${V0}-0
-TEST recreate ${B0}/${V0}-1
-TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1}
-TEST $CLI volume set $V0 cluster.nufa on
-
-function volinfo_field()
-{
- local vol=$1;
- local field=$2;
-
- $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Mount native
-special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1"
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0
-
-## Create a bunch of test files.
-for i in $(seq 0 99); do
- echo hello > $(printf $M0/file%02d $i)
-done
-
-## Make sure the files went to the right place. There might be link files in
-## the other brick, but they won't have any contents.
-EXPECT "0" count_lines hello ${B0}/${V0}-0
-EXPECT "100" count_lines hello ${B0}/${V0}-1
-
-if [ "$EXIT_EARLY" = "1" ]; then
- exit 0;
-fi
-
-## Finish up
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
--
1.8.3.1

43
SOURCES/0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch

@ -0,0 +1,43 @@
From c8f0ac9b429e1ff73a3e87247193c35c66212540 Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Tue, 2 Apr 2019 12:06:53 +0530
Subject: [PATCH 41/52] tests/geo-rep: Build failed in Jenkins for test
bug-1600145.t

Problem: the ((strcmp (key, "cluster.brick-multiplex") == 0))
comparison in cli/src/cli-cmd-parser.c is expecting
either yes or no confirmation from cli, which is not handled
in bug-1600145.t, causing test to wait till timeout and
then fail.

Solution: Passing yes as pipeline to
`gluster v set all cluster.brick-multiplex on` in bug-1600145.t

Label: DOWNSTREAM ONLY

Change-Id: I1a6c2a992b65380cea145fd1c46d22ec1251c77e
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166694
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Reviewed-by: Sunny Kumar <sunkumar@redhat.com>
---
tests/00-geo-rep/bug-1600145.t | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/00-geo-rep/bug-1600145.t b/tests/00-geo-rep/bug-1600145.t
index 1d38bf9..359bc4f 100644
--- a/tests/00-geo-rep/bug-1600145.t
+++ b/tests/00-geo-rep/bug-1600145.t
@@ -29,7 +29,7 @@ slave_mnt=$M1
##create_and_start_master_volume
TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2};
-gluster v set all cluster.brick-multiplex on
+yes | gluster v set all cluster.brick-multiplex on
TEST $CLI volume start $GMV0
##create_and_start_slave_volume
--
1.8.3.1

123
SOURCES/0042-spec-client-server-Builds-are-failing-on-rhel-6.patch

@ -0,0 +1,123 @@
From f25a92028ecc2018953a6375bba43a21d3a93566 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Thu, 4 Apr 2019 16:18:51 +0530
Subject: [PATCH 42/52] spec: (client|server) Builds are failing on rhel-6

Problem: 1) For server-rpm gluster build is throwing an error
undefined reference to `dlclose` on RHEL 6
2) For server-rpm build is throwing File Not found
errors for rot-13.so and symlink-cache.so
3) For client-rpms build is throwing an error
File Not found for all files with exclude
file in without_server check

Solution: 1) For server-rpm add LIB_DL link in Makefile
and remove reference for rot.so and symlink-cache.so
from glusterfs.spec.in
2) Remove exclude files list as they are not
being build

Label: DOWNSTREAM ONLY
Change-Id: I2b41604cbc8525b91231b0c5caee588c5d5d6b08
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166962
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 54 -----------------------------------
xlators/mgmt/glusterd/src/Makefile.am | 2 +-
2 files changed, 1 insertion(+), 55 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index df8d116..7c7f7c0 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1097,72 +1097,18 @@ exit 0
%{_tmpfilesdir}/gluster.conf
%endif
%if ( 0%{?_without_extra_xlators:1} )
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
%endif
%if ( 0%{?_without_regression_tests:1} )
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
%if 0%{?_without_server:1}
-%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
-%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
-%exclude %{_sysconfdir}/glusterfs/glusterd.vol
-%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
-%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
-%exclude %{_sysconfdir}/glusterfs/group-db-workload
-%exclude %{_sysconfdir}/glusterfs/group-distributed-virt
-%exclude %{_sysconfdir}/glusterfs/group-gluster-block
-%exclude %{_sysconfdir}/glusterfs/group-metadata-cache
-%exclude %{_sysconfdir}/glusterfs/group-nl-cache
-%exclude %{_sysconfdir}/glusterfs/group-virt.example
-%exclude %{_sysconfdir}/glusterfs/logger.conf.example
-%exclude %{_sysconfdir}/rsyslog.d/gluster.conf.example
-%exclude %{_prefix}/bin/glusterfind
-%exclude %{_prefix}/lib/firewalld/services/glusterfs.xml
-%exclude %{_prefix}/lib/systemd/system/glusterd.service
-%exclude %{_prefix}/lib/systemd/system/glusterfssharedstorage.service
-%exclude %{_prefix}/lib/tmpfiles.d/gluster.conf
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix-locks.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quotad.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/thin-arbiter.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
-%exclude %{_libexecdir}/glusterfs/*
-%exclude %{_sbindir}/conf.py
-%exclude %{_sbindir}/gcron.py
-%exclude %{_sbindir}/gf_attach
-%exclude %{_sbindir}/gfind_missing_files
-%exclude %{_sbindir}/glfsheal
-%exclude %{_sbindir}/gluster
-%exclude %{_sbindir}/gluster-setgfid2path
-%exclude %{_sbindir}/glusterd
-%exclude %{_sbindir}/snap_scheduler.py
%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
%endif
-%exclude %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
-%exclude %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
-%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
-%exclude %{_sharedstatedir}/glusterd/*
%endif
%files api
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 6d09e37..c8dd8e3 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -6,7 +6,7 @@ xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt
glusterd_la_CPPFLAGS = $(AM_CPPFLAGS) \
-DFILTERDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/filter\" \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\"
-glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS) $(LIB_DL)
glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \
glusterd-store.c glusterd-handshake.c glusterd-pmap.c \
--
1.8.3.1

137
SOURCES/0043-inode-don-t-dump-the-whole-table-to-CLI.patch

@ -0,0 +1,137 @@
From 416dfc70ef87400e1ddfd70e5b6e512d330b54a6 Mon Sep 17 00:00:00 2001
From: Sheetal Pamecha <sheetal.pamecha08@gmail.com>
Date: Tue, 2 Apr 2019 23:25:11 +0530
Subject: [PATCH 43/52] inode: don't dump the whole table to CLI

dumping the whole inode table detail to screen doesn't solve any
purpose. We should be getting only toplevel details on CLI, and
then if one wants to debug further, then they need to get to
'statedump' to get full details.

Patch on upstream master: https://review.gluster.org/#/c/glusterfs/+/22347/

BUG: 1578703
Change-Id: Ie7e7f5a67c1606e3c18ce21ee6df6c7e4550c211
Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166768
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-rpc-ops.c | 23 ++++++++++++++++++++++-
libglusterfs/src/inode.c | 13 +++++++++++++
2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 78043cd..12e7fcc 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -7606,15 +7606,24 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
uint32_t active_size = 0;
uint32_t lru_size = 0;
uint32_t purge_size = 0;
+ uint32_t lru_limit = 0;
int i = 0;
GF_ASSERT(dict);
GF_ASSERT(prefix);
+ snprintf(key, sizeof(key), "%s.lru_limit", prefix);
+ ret = dict_get_uint32(dict, key, &lru_limit);
+ if (ret)
+ goto out;
+ cli_out("LRU limit : %u", lru_limit);
+
snprintf(key, sizeof(key), "%s.active_size", prefix);
ret = dict_get_uint32(dict, key, &active_size);
if (ret)
goto out;
+
+#ifdef DEBUG
if (active_size != 0) {
cli_out("Active inodes:");
cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type");
@@ -7626,10 +7635,16 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
}
cli_out(" ");
+#else
+ cli_out("Active Inodes : %u", active_size);
+
+#endif
snprintf(key, sizeof(key), "%s.lru_size", prefix);
ret = dict_get_uint32(dict, key, &lru_size);
if (ret)
goto out;
+
+#ifdef DEBUG
if (lru_size != 0) {
cli_out("LRU inodes:");
cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type");
@@ -7640,11 +7655,15 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
cli_print_volume_status_inode_entry(dict, key);
}
cli_out(" ");
+#else
+ cli_out("LRU Inodes : %u", lru_size);
+#endif
snprintf(key, sizeof(key), "%s.purge_size", prefix);
ret = dict_get_uint32(dict, key, &purge_size);
if (ret)
goto out;
+#ifdef DEBUG
if (purge_size != 0) {
cli_out("Purged inodes:");
cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type");
@@ -7654,7 +7673,9 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
snprintf(key, sizeof(key), "%s.purge%d", prefix, i);
cli_print_volume_status_inode_entry(dict, key);
}
-
+#else
+ cli_out("Purge Inodes : %u", purge_size);
+#endif
out:
return;
}
diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
index 87f74e0..96ddea5 100644
--- a/libglusterfs/src/inode.c
+++ b/libglusterfs/src/inode.c
@@ -2598,6 +2598,11 @@ inode_table_dump_to_dict(inode_table_t *itable, char *prefix, dict_t *dict)
if (ret)
return;
+ snprintf(key, sizeof(key), "%s.itable.lru_limit", prefix);
+ ret = dict_set_uint32(dict, key, itable->lru_limit);
+ if (ret)
+ goto out;
+
snprintf(key, sizeof(key), "%s.itable.active_size", prefix);
ret = dict_set_uint32(dict, key, itable->active_size);
if (ret)
@@ -2613,6 +2618,13 @@ inode_table_dump_to_dict(inode_table_t *itable, char *prefix, dict_t *dict)
if (ret)
goto out;
+#ifdef DEBUG
+ /* Dumping inode details in dictionary and sending it to CLI is not
+ required as when a developer (or support team) asks for this command
+ output, they just want to get top level detail of inode table.
+ If one wants to debug, let them take statedump and debug, this
+ wouldn't be available in CLI during production setup.
+ */
list_for_each_entry(inode, &itable->active, list)
{
snprintf(key, sizeof(key), "%s.itable.active%d", prefix, count++);
@@ -2632,6 +2644,7 @@ inode_table_dump_to_dict(inode_table_t *itable, char *prefix, dict_t *dict)
snprintf(key, sizeof(key), "%s.itable.purge%d", prefix, count++);
inode_dump_to_dict(inode, key, dict);
}
+#endif
out:
pthread_mutex_unlock(&itable->lock);
--
1.8.3.1

360
SOURCES/0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch

@ -0,0 +1,360 @@
From bc6588890ce94101a63b861178cf38db5549d8a8 Mon Sep 17 00:00:00 2001
From: Ashish Pandey <aspandey@redhat.com>
Date: Wed, 28 Nov 2018 11:22:52 +0530
Subject: [PATCH 44/52] cluster/ec: Don't enqueue an entry if it is already
healing

Problem:
1 - heal-wait-qlength is by default 128. If shd is disabled
and we need to heal files, client side heal is needed.
If we access these files that will trigger the heal.
However, it has been observed that a file will be enqueued
multiple times in the heal wait queue, which in turn causes the
queue to be filled and prevents other files from being enqueued.

2 - While a file is going through healing and a write fop from
mount comes on that file, it sends write on all the bricks including
healing one. At the end it updates version and size on all the
bricks. However, it does not unset dirty flag on all the bricks,
even if this write fop was successful on all the bricks.
After healing completion this dirty flag remains set and never
gets cleaned up if SHD is disabled.

Solution:
1 - If an entry is already in queue or going through heal process,
don't enqueue next client side request to heal the same file.

2 - Unset dirty on all the bricks at the end if fop has succeeded on
all the bricks even if some of the bricks are going through heal.

backport of : https://review.gluster.org/#/c/glusterfs/+/21744/

Change-Id: Ia61ffe230c6502ce6cb934425d55e2f40dd1a727
BUG: 1600918
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166296
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/ec/bug-1236065.t | 1 -
xlators/cluster/ec/src/ec-common.c | 43 +++++++++------
xlators/cluster/ec/src/ec-common.h | 8 +++
xlators/cluster/ec/src/ec-heal.c | 104 +++++++++++++++++++++++++++++++-----
xlators/cluster/ec/src/ec-helpers.c | 1 +
xlators/cluster/ec/src/ec-types.h | 1 +
6 files changed, 127 insertions(+), 31 deletions(-)

diff --git a/tests/bugs/ec/bug-1236065.t b/tests/bugs/ec/bug-1236065.t
index 76d25d7..9181e73 100644
--- a/tests/bugs/ec/bug-1236065.t
+++ b/tests/bugs/ec/bug-1236065.t
@@ -85,7 +85,6 @@ TEST pidof glusterd
EXPECT "$V0" volinfo_field $V0 'Volume Name'
EXPECT 'Started' volinfo_field $V0 'Status'
EXPECT '7' online_brick_count
-
## cleanup
cd
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 8d65670..5183680 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -313,14 +313,15 @@ ec_check_status(ec_fop_data_t *fop)
gf_msg(fop->xl->name, GF_LOG_WARNING, 0, EC_MSG_OP_FAIL_ON_SUBVOLS,
"Operation failed on %d of %d subvolumes.(up=%s, mask=%s, "
- "remaining=%s, good=%s, bad=%s)",
+ "remaining=%s, good=%s, bad=%s, %s)",
gf_bits_count(ec->xl_up & ~(fop->remaining | fop->good)), ec->nodes,
ec_bin(str1, sizeof(str1), ec->xl_up, ec->nodes),
ec_bin(str2, sizeof(str2), fop->mask, ec->nodes),
ec_bin(str3, sizeof(str3), fop->remaining, ec->nodes),
ec_bin(str4, sizeof(str4), fop->good, ec->nodes),
ec_bin(str5, sizeof(str5), ec->xl_up & ~(fop->remaining | fop->good),
- ec->nodes));
+ ec->nodes),
+ ec_msg_str(fop));
if (fop->use_fd) {
if (fop->fd != NULL) {
ec_fheal(NULL, fop->xl, -1, EC_MINIMUM_ONE, ec_heal_report, NULL,
@@ -2371,37 +2372,47 @@ ec_update_info(ec_lock_link_t *link)
uint64_t dirty[2] = {0, 0};
uint64_t size;
ec_t *ec = NULL;
+ uintptr_t mask;
lock = link->lock;
ctx = lock->ctx;
ec = link->fop->xl->private;
/* pre_version[*] will be 0 if have_version is false */
- version[0] = ctx->post_version[0] - ctx->pre_version[0];
- version[1] = ctx->post_version[1] - ctx->pre_version[1];
+ version[EC_DATA_TXN] = ctx->post_version[EC_DATA_TXN] -
+ ctx->pre_version[EC_DATA_TXN];
+ version[EC_METADATA_TXN] = ctx->post_version[EC_METADATA_TXN] -
+ ctx->pre_version[EC_METADATA_TXN];
size = ctx->post_size - ctx->pre_size;
/* If we set the dirty flag for update fop, we have to unset it.
* If fop has failed on some bricks, leave the dirty as marked. */
+
if (lock->unlock_now) {
+ if (version[EC_DATA_TXN]) {
+ /*A data fop will have difference in post and pre version
+ *and for data fop we send writes on healing bricks also */
+ mask = lock->good_mask | lock->healing;
+ } else {
+ mask = lock->good_mask;
+ }
/* Ensure that nodes are up while doing final
* metadata update.*/
- if (!(ec->node_mask & ~lock->good_mask) &&
- !(ec->node_mask & ~ec->xl_up)) {
- if (ctx->dirty[0] != 0) {
- dirty[0] = -1;
+ if (!(ec->node_mask & ~(mask)) && !(ec->node_mask & ~ec->xl_up)) {
+ if (ctx->dirty[EC_DATA_TXN] != 0) {
+ dirty[EC_DATA_TXN] = -1;
}
- if (ctx->dirty[1] != 0) {
- dirty[1] = -1;
+ if (ctx->dirty[EC_METADATA_TXN] != 0) {
+ dirty[EC_METADATA_TXN] = -1;
}
/*If everything is fine and we already
*have version xattr set on entry, there
*is no need to update version again*/
- if (ctx->pre_version[0]) {
- version[0] = 0;
+ if (ctx->pre_version[EC_DATA_TXN]) {
+ version[EC_DATA_TXN] = 0;
}
- if (ctx->pre_version[1]) {
- version[1] = 0;
+ if (ctx->pre_version[EC_METADATA_TXN]) {
+ version[EC_METADATA_TXN] = 0;
}
} else {
link->optimistic_changelog = _gf_false;
@@ -2410,8 +2421,8 @@ ec_update_info(ec_lock_link_t *link)
memset(ctx->dirty, 0, sizeof(ctx->dirty));
}
- if ((version[0] != 0) || (version[1] != 0) || (dirty[0] != 0) ||
- (dirty[1] != 0)) {
+ if ((version[EC_DATA_TXN] != 0) || (version[EC_METADATA_TXN] != 0) ||
+ (dirty[EC_DATA_TXN] != 0) || (dirty[EC_METADATA_TXN] != 0)) {
ec_update_size_version(link, version, size, dirty);
return _gf_true;
}
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index 115e147..54aaa77 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -190,4 +190,12 @@ ec_lock_unlocked(call_frame_t *frame, void *cookie, xlator_t *this,
void
ec_update_fd_status(fd_t *fd, xlator_t *xl, int child_index,
int32_t ret_status);
+gf_boolean_t
+ec_is_entry_healing(ec_fop_data_t *fop);
+void
+ec_set_entry_healing(ec_fop_data_t *fop);
+void
+ec_reset_entry_healing(ec_fop_data_t *fop);
+char *
+ec_msg_str(ec_fop_data_t *fop);
#endif /* __EC_COMMON_H__ */
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index eaf80e0..1ca12c1 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -103,6 +103,48 @@ ec_sh_key_match(dict_t *dict, char *key, data_t *val, void *mdata)
}
/* FOP: heal */
+void
+ec_set_entry_healing(ec_fop_data_t *fop)
+{
+ ec_inode_t *ctx = NULL;
+ loc_t *loc = NULL;
+
+ if (!fop)
+ return;
+
+ loc = &fop->loc[0];
+ LOCK(&loc->inode->lock);
+ {
+ ctx = __ec_inode_get(loc->inode, fop->xl);
+ if (ctx) {
+ ctx->heal_count += 1;
+ }
+ }
+ UNLOCK(&loc->inode->lock);
+}
+
+void
+ec_reset_entry_healing(ec_fop_data_t *fop)
+{
+ ec_inode_t *ctx = NULL;
+ loc_t *loc = NULL;
+ int32_t heal_count = 0;
+ if (!fop)
+ return;
+
+ loc = &fop->loc[0];
+ LOCK(&loc->inode->lock);
+ {
+ ctx = __ec_inode_get(loc->inode, fop->xl);
+ if (ctx) {
+ ctx->heal_count += -1;
+ heal_count = ctx->heal_count;
+ }
+ }
+ UNLOCK(&loc->inode->lock);
+ GF_ASSERT(heal_count >= 0);
+}
+
uintptr_t
ec_heal_check(ec_fop_data_t *fop, uintptr_t *pgood)
{
@@ -2507,17 +2549,6 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
"Heal is not required for : %s ", uuid_utoa(loc->gfid));
goto out;
}
-
- msources = alloca0(ec->nodes);
- mhealed_sinks = alloca0(ec->nodes);
- ret = ec_heal_metadata(frame, ec, loc->inode, msources, mhealed_sinks);
- if (ret == 0) {
- mgood = ec_char_array_to_mask(msources, ec->nodes);
- mbad = ec_char_array_to_mask(mhealed_sinks, ec->nodes);
- } else {
- op_ret = -1;
- op_errno = -ret;
- }
sources = alloca0(ec->nodes);
healed_sinks = alloca0(ec->nodes);
if (IA_ISREG(loc->inode->ia_type)) {
@@ -2538,8 +2569,19 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
op_ret = -1;
op_errno = -ret;
}
+ msources = alloca0(ec->nodes);
+ mhealed_sinks = alloca0(ec->nodes);
+ ret = ec_heal_metadata(frame, ec, loc->inode, msources, mhealed_sinks);
+ if (ret == 0) {
+ mgood = ec_char_array_to_mask(msources, ec->nodes);
+ mbad = ec_char_array_to_mask(mhealed_sinks, ec->nodes);
+ } else {
+ op_ret = -1;
+ op_errno = -ret;
+ }
out:
+ ec_reset_entry_healing(fop);
if (fop->cbks.heal) {
fop->cbks.heal(fop->req_frame, fop, fop->xl, op_ret, op_errno,
ec_char_array_to_mask(participants, ec->nodes),
@@ -2650,11 +2692,33 @@ ec_handle_healers_done(ec_fop_data_t *fop)
ec_launch_heal(ec, heal_fop);
}
+gf_boolean_t
+ec_is_entry_healing(ec_fop_data_t *fop)
+{
+ ec_inode_t *ctx = NULL;
+ int32_t heal_count = 0;
+ loc_t *loc = NULL;
+
+ loc = &fop->loc[0];
+
+ LOCK(&loc->inode->lock);
+ {
+ ctx = __ec_inode_get(loc->inode, fop->xl);
+ if (ctx) {
+ heal_count = ctx->heal_count;
+ }
+ }
+ UNLOCK(&loc->inode->lock);
+ GF_ASSERT(heal_count >= 0);
+ return heal_count;
+}
+
void
ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
{
gf_boolean_t can_heal = _gf_true;
ec_t *ec = this->private;
+ ec_fop_data_t *fop_rel = NULL;
if (fop->req_frame == NULL) {
LOCK(&ec->lock);
@@ -2662,8 +2726,13 @@ ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
if ((ec->background_heals > 0) &&
(ec->heal_wait_qlen + ec->background_heals) >
(ec->heal_waiters + ec->healers)) {
- list_add_tail(&fop->healer, &ec->heal_waiting);
- ec->heal_waiters++;
+ if (!ec_is_entry_healing(fop)) {
+ list_add_tail(&fop->healer, &ec->heal_waiting);
+ ec->heal_waiters++;
+ ec_set_entry_healing(fop);
+ } else {
+ fop_rel = fop;
+ }
fop = __ec_dequeue_heals(ec);
} else {
can_heal = _gf_false;
@@ -2673,8 +2742,12 @@ ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
}
if (can_heal) {
- if (fop)
+ if (fop) {
+ if (fop->req_frame != NULL) {
+ ec_set_entry_healing(fop);
+ }
ec_launch_heal(ec, fop);
+ }
} else {
gf_msg_debug(this->name, 0,
"Max number of heals are "
@@ -2682,6 +2755,9 @@ ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
ec_fop_set_error(fop, EBUSY);
ec_heal_fail(ec, fop);
}
+ if (fop_rel) {
+ ec_heal_done(0, NULL, fop_rel);
+ }
}
void
diff --git a/xlators/cluster/ec/src/ec-helpers.c b/xlators/cluster/ec/src/ec-helpers.c
index e6b0359..43f6e3b 100644
--- a/xlators/cluster/ec/src/ec-helpers.c
+++ b/xlators/cluster/ec/src/ec-helpers.c
@@ -717,6 +717,7 @@ __ec_inode_get(inode_t *inode, xlator_t *xl)
memset(ctx, 0, sizeof(*ctx));
INIT_LIST_HEAD(&ctx->heal);
INIT_LIST_HEAD(&ctx->stripe_cache.lru);
+ ctx->heal_count = 0;
value = (uint64_t)(uintptr_t)ctx;
if (__inode_ctx_set(inode, xl, &value) != 0) {
GF_FREE(ctx);
diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
index f3d63ca..6ae4a2b 100644
--- a/xlators/cluster/ec/src/ec-types.h
+++ b/xlators/cluster/ec/src/ec-types.h
@@ -171,6 +171,7 @@ struct _ec_inode {
gf_boolean_t have_config;
gf_boolean_t have_version;
gf_boolean_t have_size;
+ int32_t heal_count;
ec_config_t config;
uint64_t pre_version[2];
uint64_t post_version[2];
--
1.8.3.1

126
SOURCES/0045-glusterd-fix-txn-id-mem-leak.patch

@ -0,0 +1,126 @@
From 6c004c6c8b8f98f56e186740881520b8364e6f85 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 18 Mar 2019 16:08:04 +0530
Subject: [PATCH 45/52] glusterd: fix txn-id mem leak

This commit ensures the following:
1. Don't send commit op request to the remote nodes when gluster v
status all is executed as for the status all transaction the local
commit gets the name of the volumes and remote commit ops are
technically a no-op. So no need for additional rpc requests.
2. In op state machine flow, if the transaction is in staged state and
op_info.skip_locking is true, then no need to set the txn id in the
priv->glusterd_txn_opinfo dictionary which never gets freed.

> Fixes: bz#1691164
> Change-Id: Ib6a9300ea29633f501abac2ba53fb72ff648c822
> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>

upstream patch: https://review.gluster.org/#/c/glusterfs/+/22388/

BUG: 1670415
Change-Id: Ib6a9300ea29633f501abac2ba53fb72ff648c822
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166449
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 26 ++++++++++++++++++++------
xlators/mgmt/glusterd/src/glusterd-syncop.c | 16 ++++++++++++++++
2 files changed, 36 insertions(+), 6 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index cbbb5d9..12d857a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -5652,6 +5652,9 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
dict_t *dict = NULL;
xlator_t *this = NULL;
uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
this = THIS;
GF_ASSERT(this);
@@ -5686,6 +5689,7 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
ret = -1;
goto out;
}
+ ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
if (ret) {
@@ -5704,6 +5708,12 @@ out:
gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ /* for no volname transactions, the txn_opinfo needs to be cleaned up
+ * as there's no unlock event triggered
+ */
+ if (txn_op_info.skip_locking)
+ ret = glusterd_clear_txn_opinfo(txn_id);
+
if (rsp_dict)
dict_unref(rsp_dict);
@@ -8159,12 +8169,16 @@ glusterd_op_sm()
"Unable to clear "
"transaction's opinfo");
} else {
- ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ if (!(event_type == GD_OP_EVENT_STAGE_OP &&
+ opinfo.state.state == GD_OP_STATE_STAGED &&
+ opinfo.skip_locking)) {
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ }
}
glusterd_destroy_op_event_ctx(event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 1741cf8..618d8bc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1392,6 +1392,8 @@ gd_commit_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
char *errstr = NULL;
struct syncargs args = {0};
int type = GF_QUOTA_OPTION_TYPE_NONE;
+ uint32_t cmd = 0;
+ gf_boolean_t origin_glusterd = _gf_false;
this = THIS;
GF_ASSERT(this);
@@ -1449,6 +1451,20 @@ commit_done:
gd_syncargs_init(&args, op_ctx);
synctask_barrier_init((&args));
peer_cnt = 0;
+ origin_glusterd = is_origin_glusterd(req_dict);
+
+ if (op == GD_OP_STATUS_VOLUME) {
+ ret = dict_get_uint32(req_dict, "cmd", &cmd);
+ if (ret)
+ goto out;
+
+ if (origin_glusterd) {
+ if ((cmd & GF_CLI_STATUS_ALL)) {
+ ret = 0;
+ goto out;
+ }
+ }
+ }
RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
--
1.8.3.1

98
SOURCES/0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch

@ -0,0 +1,98 @@
From a0661449cd8ba7b851fec473191733767f4541b8 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Thu, 28 Mar 2019 17:55:54 +0530
Subject: [PATCH 46/52] protocol/client: Do not fallback to anon-fd if fd is
not open

If an open comes on a file when a brick is down and after the brick comes up,
a fop comes on the fd, client xlator would still wind the fop on anon-fd
leading to wrong behavior of the fops in some cases.

Example:
If lk fop is issued on the fd just after the brick is up in the scenario above,
lk fop will be sent on anon-fd instead of failing it on that client xlator.
This lock will never be freed upon close of the fd as flush on anon-fd is
invalid and is not wound below server xlator.

As a fix, failing the fop unless the fd has FALLBACK_TO_ANON_FD flag.

>Upstream-patch: https://review.gluster.org/c/glusterfs/+/15804
>Change-Id: I77692d056660b2858e323bdabdfe0a381807cccc
>fixes bz#1390914

BUG: 1695057
Change-Id: Id656bea8dde14327212fbe7ecc97519dc5b32098
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166833
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/protocol/bug-1390914.t | 36 ++++++++++++++++++++++++++++
xlators/protocol/client/src/client-helpers.c | 8 ++++++-
2 files changed, 43 insertions(+), 1 deletion(-)
create mode 100644 tests/bugs/protocol/bug-1390914.t

diff --git a/tests/bugs/protocol/bug-1390914.t b/tests/bugs/protocol/bug-1390914.t
new file mode 100644
index 0000000..e3dab92
--- /dev/null
+++ b/tests/bugs/protocol/bug-1390914.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+cleanup;
+
+#test that fops are not wound on anon-fd when fd is not open on that brick
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0;
+
+TEST touch $M0/1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST fd_open 200 'w' "$M0/1"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#lk should only happen on 2 bricks, if there is a bug, it will plant a lock
+#with anon-fd on first-brick which will never be released because flush won't
+#be wound below server xlator for anon-fd
+TEST flock -x -n 200
+TEST fd_close 200
+
+TEST fd_open 200 'w' "$M0/1"
+#this lock will fail if there is a stale lock
+TEST flock -x -n 200
+TEST fd_close 200
+cleanup;
diff --git a/xlators/protocol/client/src/client-helpers.c b/xlators/protocol/client/src/client-helpers.c
index 55e87b3..2dd7106 100644
--- a/xlators/protocol/client/src/client-helpers.c
+++ b/xlators/protocol/client/src/client-helpers.c
@@ -419,7 +419,13 @@ client_get_remote_fd(xlator_t *this, fd_t *fd, int flags, int64_t *remote_fd)
{
fdctx = this_fd_get_ctx(fd, this);
if (!fdctx) {
- *remote_fd = GF_ANON_FD_NO;
+ if (fd->anonymous) {
+ *remote_fd = GF_ANON_FD_NO;
+ } else {
+ *remote_fd = -1;
+ gf_msg_debug(this->name, EBADF, "not a valid fd for gfid: %s",
+ uuid_utoa(fd->inode->gfid));
+ }
} else {
if (__is_fd_reopen_in_progress(fdctx))
*remote_fd = -1;
--
1.8.3.1

1652
SOURCES/0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch

File diff suppressed because it is too large Load Diff

115
SOURCES/0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch

@ -0,0 +1,115 @@
From 2449a1824c6f7b57889335caaeb09f4c5cb3efce Mon Sep 17 00:00:00 2001
From: Soumya Koduri <skoduri@redhat.com>
Date: Thu, 28 Mar 2019 14:59:00 +0530
Subject: [PATCH 48/52] gfapi: Unblock epoll thread for upcall processing

With commit#ad35193, we have made changes to offload
processing of upcall notifications to synctask so as not
to block epoll threads. However, it seems the issue wasn't
fully addressed.

In "glfs_cbk_upcall_data" -> "synctask_new1" after creating synctask
if there is no callback defined, the thread waits on synctask_join
till the syncfn is finished. So that way even with those changes,
epoll threads are blocked till the upcalls are processed.

Hence the right fix now is to define a callback function for that
synctask "glfs_cbk_upcall_syncop" so as to unblock epoll/notify threads
completely and the upcall processing can happen in parallel by synctask
threads.

Upstream references-
mainline : https://review.gluster.org/22436
release-6.0 : https://review.gluster.org/22459

Change-Id: I4d8645e3588fab2c3ca534e0112773aaab68a5dd
fixes: bz#1694565
Signed-off-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166586
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
api/src/glfs-fops.c | 42 ++++++++++++++++++++++++++++++++++--------
1 file changed, 34 insertions(+), 8 deletions(-)

diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
index 88cd32b..01ba60b 100644
--- a/api/src/glfs-fops.c
+++ b/api/src/glfs-fops.c
@@ -5714,6 +5714,16 @@ out:
}
static int
+glfs_upcall_syncop_cbk(int ret, call_frame_t *frame, void *opaque)
+{
+ struct upcall_syncop_args *args = opaque;
+
+ GF_FREE(args->upcall_data);
+ GF_FREE(args);
+ return 0;
+}
+
+static int
glfs_cbk_upcall_syncop(void *opaque)
{
struct upcall_syncop_args *args = opaque;
@@ -5770,15 +5780,13 @@ out:
GLFS_FREE(up_arg);
}
- return ret;
+ return 0;
}
static void
glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
{
- struct upcall_syncop_args args = {
- 0,
- };
+ struct upcall_syncop_args *args = NULL;
int ret = -1;
if (!fs || !upcall_data)
@@ -5789,16 +5797,34 @@ glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
goto out;
}
- args.fs = fs;
- args.upcall_data = upcall_data;
+ args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
+ glfs_mt_upcall_entry_t);
+ if (!args) {
+ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
+ "Upcall syncop args allocation failed.");
+ goto out;
+ }
+
+ /* Note: we are not taking any ref on fs here.
+ * Ideally applications have to unregister for upcall events
+ * or stop polling for upcall events before performing
+ * glfs_fini. And as for outstanding synctasks created, we wait
+ * for all syncenv threads to finish tasks before cleaning up the
+ * fs->ctx. Hence it seems safe to process these callback
+ * notification without taking any lock/ref.
+ */
+ args->fs = fs;
+ args->upcall_data = gf_memdup(upcall_data, sizeof(*upcall_data));
- ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop, NULL, NULL,
- &args);
+ ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop,
+ glfs_upcall_syncop_cbk, NULL, args);
/* should we retry incase of failure? */
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
"Synctak for Upcall event_type(%d) and gfid(%s) failed",
upcall_data->event_type, (char *)(upcall_data->gfid));
+ GF_FREE(args->upcall_data);
+ GF_FREE(args);
}
out:
--
1.8.3.1

49
SOURCES/0049-transport-socket-log-shutdown-msg-occasionally.patch

@ -0,0 +1,49 @@
From e205516ef874d617e4756856098bf10e17b0ea3d Mon Sep 17 00:00:00 2001
From: Raghavendra G <rgowdapp@redhat.com>
Date: Fri, 22 Mar 2019 10:40:45 +0530
Subject: [PATCH 49/52] transport/socket: log shutdown msg occasionally

>Change-Id: If3fc0884e7e2f45de2d278b98693b7a473220a5f
>Signed-off-by: Raghavendra G <rgowdapp@redhat.com>
>Fixes: bz#1691616

BUG: 1691620
Change-Id: If3fc0884e7e2f45de2d278b98693b7a473220a5f
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167088
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
rpc/rpc-transport/socket/src/socket.c | 4 ++--
rpc/rpc-transport/socket/src/socket.h | 1 +
2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 121d46b..f6de1d3 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -807,8 +807,8 @@ __socket_shutdown(rpc_transport_t *this)
gf_log(this->name, GF_LOG_DEBUG, "shutdown() returned %d. %s", ret,
strerror(errno));
} else {
- gf_log(this->name, GF_LOG_INFO, "intentional socket shutdown(%d)",
- priv->sock);
+ GF_LOG_OCCASIONALLY(priv->shutdown_log_ctr, this->name, GF_LOG_INFO,
+ "intentional socket shutdown(%d)", priv->sock);
}
return ret;
diff --git a/rpc/rpc-transport/socket/src/socket.h b/rpc/rpc-transport/socket/src/socket.h
index 32339d3..e1ccae2 100644
--- a/rpc/rpc-transport/socket/src/socket.h
+++ b/rpc/rpc-transport/socket/src/socket.h
@@ -219,6 +219,7 @@ typedef struct {
int keepalivecnt;
int timeout;
int log_ctr;
+ int shutdown_log_ctr;
/* ssl_error_required is used only during the SSL connection setup
* phase.
* It holds the error code returned by SSL_get_error() and is used to
--
1.8.3.1

142
SOURCES/0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch

@ -0,0 +1,142 @@
From 161a039f8088bf8ce7000d8175e3793219525179 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Thu, 28 Mar 2019 07:17:16 -0400
Subject: [PATCH 50/52] geo-rep: Fix syncing multiple rename of symlink

Problem:
Geo-rep fails to sync rename of symlink if it's
renamed multiple times if creation and rename
happened successively

Worker crash at slave:
Traceback (most recent call last):
File "/usr/libexec/glusterfs/python/syncdaemon/repce.py", in worker
res = getattr(self.obj, rmeth)(*in_data[2:])
File "/usr/libexec/glusterfs/python/syncdaemon/resource.py", in entry_ops
[ESTALE, EINVAL, EBUSY])
File "/usr/libexec/glusterfs/python/syncdaemon/syncdutils.py", in errno_wrap
return call(*arg)
File "/usr/libexec/glusterfs/python/syncdaemon/libcxattr.py", in lsetxattr
cls.raise_oserr()
File "/usr/libexec/glusterfs/python/syncdaemon/libcxattr.py", in raise_oserr
raise OSError(errn, os.strerror(errn))
OSError: [Errno 12] Cannot allocate memory

Geo-rep Behaviour:
1. SYMLINK doesn't record target path in changelog.
So while syncing SYMLINK, readlink is done on
master to get target path.

2. Geo-rep will create destination if source is not
present while syncing RENAME. Hence while syncing
RENAME of SYMLINK, target path is collected from
destination.

Cause:
If symlink is created and renamed multiple times, creation of
symlink is ignored, as it's no longer present on master at
that path. While symlink is renamed multiple times at master,
when syncing first RENAME of SYMLINK, both source and destination
are not present, hence the target path is not known. In this case,
while creating destination directly at slave, regular file
attributes were encoded into blob instead of symlink,
causing failure in gfid-access translator while decoding
blob.

Solution:
While syncing of RENAME of SYMLINK, when target is not known
and when src and destination is not present on the master,
don't create destination. Ignore the rename. It's ok to ignore.
If it's unliked, it's fine. If it's renamed to something else,
it will be synced then.

Backport of:
> Patch: https://review.gluster.org/22438
> Change-Id: Ibdfa495513b7c05b5370ab0b89c69a6802338d87
> BUG: 1693648
> Signed-off-by: Kotresh HR <khiremat@redhat.com>

Change-Id: Ibdfa495513b7c05b5370ab0b89c69a6802338d87
fixes: bz#1670429
Signed-off-by: Kotresh HR <khiremat@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167122
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
geo-replication/syncdaemon/resource.py | 23 ++++++++++++++---------
tests/00-geo-rep/georep-basic-dr-rsync.t | 1 +
tests/geo-rep.rc | 12 ++++++++++++
3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
index a2d0b16..c290d86 100644
--- a/geo-replication/syncdaemon/resource.py
+++ b/geo-replication/syncdaemon/resource.py
@@ -625,15 +625,20 @@ class Server(object):
# exist with different gfid.
if not matching_disk_gfid(gfid, entry):
if e['stat'] and not stat.S_ISDIR(e['stat']['mode']):
- if stat.S_ISLNK(e['stat']['mode']) and \
- e['link'] is not None:
- st1 = lstat(en)
- if isinstance(st1, int):
- (pg, bname) = entry2pb(en)
- blob = entry_pack_symlink(cls, gfid, bname,
- e['link'], e['stat'])
- elif not matching_disk_gfid(gfid, en):
- collect_failure(e, EEXIST, uid, gid, True)
+ if stat.S_ISLNK(e['stat']['mode']):
+ # src is not present, so don't sync symlink as
+ # we don't know target. It's ok to ignore. If
+ # it's unliked, it's fine. If it's renamed to
+ # something else, it will be synced then.
+ if e['link'] is not None:
+ st1 = lstat(en)
+ if isinstance(st1, int):
+ (pg, bname) = entry2pb(en)
+ blob = entry_pack_symlink(cls, gfid, bname,
+ e['link'],
+ e['stat'])
+ elif not matching_disk_gfid(gfid, en):
+ collect_failure(e, EEXIST, uid, gid, True)
else:
slink = os.path.join(pfx, gfid)
st = lstat(slink)
diff --git a/tests/00-geo-rep/georep-basic-dr-rsync.t b/tests/00-geo-rep/georep-basic-dr-rsync.t
index 4a03930..8b64370 100644
--- a/tests/00-geo-rep/georep-basic-dr-rsync.t
+++ b/tests/00-geo-rep/georep-basic-dr-rsync.t
@@ -110,6 +110,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/hybrid_chown_f1
#Check History Crawl.
TEST $GEOREP_CLI $master $slave stop
TEST create_data "history"
+TEST create_rename_symlink_case
TEST $GEOREP_CLI $master $slave start
EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Passive"
diff --git a/tests/geo-rep.rc b/tests/geo-rep.rc
index 396b4c4..d723129 100644
--- a/tests/geo-rep.rc
+++ b/tests/geo-rep.rc
@@ -19,6 +19,18 @@ function check_common_secret_file()
echo $?
}
+function create_rename_symlink_case()
+{
+ mkdir ${mastermnt}/MUL_REN_SYMLINK
+ cd ${mastermnt}/MUL_REN_SYMLINK
+ mkdir sym_dir1
+ ln -s "sym_dir1" sym1
+ mv sym1 sym2
+ mv sym2 sym3
+ mv sym3 sym4
+ cd -
+}
+
function create_data()
{
prefix=$1
--
1.8.3.1

67
SOURCES/0051-spec-update-rpm-install-condition.patch

@ -0,0 +1,67 @@
From 71f4d55770287288f39b31a0435916ac3d9f742b Mon Sep 17 00:00:00 2001
From: Sunil Kumar Acharya <sheggodu@redhat.com>
Date: Fri, 5 Apr 2019 22:27:52 +0530
Subject: [PATCH 51/52] spec: update rpm install condition

Update code to allow rpm install without gluster process shutdown.

Label: DOWNSTREAM ONLY

BUG: 1493284
Change-Id: I308e7e4629a2428927a6df34536e3cd645a54f8c
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167089
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 34 ----------------------------------
1 file changed, 34 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 7c7f7c0..0d57b49 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1629,40 +1629,6 @@ if not (ok == 0) then
end
-%pretrans devel -p <lua>
-if not posix.access("/bin/bash", "x") then
- -- initial installation, no shell, no running glusterfsd
- return 0
-end
-
--- TODO: move this completely to a lua script
--- For now, we write a temporary bash script and execute that.
-
-script = [[#!/bin/sh
-pidof -c -o %PPID -x glusterfsd &>/dev/null
-
-if [ $? -eq 0 ]; then
- pushd . > /dev/null 2>&1
- for volume in /var/lib/glusterd/vols/*; do cd $volume;
- vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
- volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
- if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
- exit 1;
- fi
- done
-
- popd > /dev/null 2>&1
- exit 1;
-fi
-]]
-
-ok, how, val = os.execute(script)
-if not (ok == 0) then
- error("Detected running glusterfs processes", ok)
-end
-
-
-
%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
-- initial installation, no shell, no running glusterfsd
--
1.8.3.1

299
SOURCES/0052-geo-rep-IPv6-support.patch

@ -0,0 +1,299 @@
From d7bb933742f4d9135621590bf13713633c549af1 Mon Sep 17 00:00:00 2001
From: Aravinda VK <avishwan@redhat.com>
Date: Thu, 14 Mar 2019 20:06:54 +0530
Subject: [PATCH 52/52] geo-rep: IPv6 support

`address_family=inet6` needs to be added while mounting master and
slave volumes in gverify script.

New option introduced to gluster cli(`--inet6`) which will be used
internally by geo-rep while calling `gluster volume info
--remote-host=<ipv6>`.

Backport of https://review.gluster.org/22363

Fixes: bz#1688231
Change-Id: I1e0d42cae07158df043e64a2f991882d8c897837
Signed-off-by: Aravinda VK <avishwan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167120
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli.c | 11 ++++++++++
cli/src/cli.h | 1 +
geo-replication/src/gverify.sh | 22 ++++++++++++++++----
geo-replication/syncdaemon/argsupgrade.py | 13 +++++++++++-
geo-replication/syncdaemon/gsyncd.py | 1 +
geo-replication/syncdaemon/subcmds.py | 9 +++++++--
xlators/mgmt/glusterd/src/glusterd-geo-rep.c | 30 ++++++++++++++++++++++++++--
7 files changed, 78 insertions(+), 9 deletions(-)

diff --git a/cli/src/cli.c b/cli/src/cli.c
index 08f117e..c33d152 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -433,6 +433,12 @@ cli_opt_parse(char *opt, struct cli_state *state)
return 0;
}
+ oarg = strtail(opt, "inet6");
+ if (oarg) {
+ state->address_family = "inet6";
+ return 0;
+ }
+
oarg = strtail(opt, "log-file=");
if (oarg) {
state->log_file = oarg;
@@ -679,6 +685,11 @@ cli_rpc_init(struct cli_state *state)
this = THIS;
cli_rpc_prog = &cli_prog;
+ /* If address family specified in CLI */
+ if (state->address_family) {
+ addr_family = state->address_family;
+ }
+
/* Connect to glusterd using the specified method, giving preference
* to a unix socket connection. If nothing is specified, connect to
* the default glusterd socket.
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 5df86d5..b79a0a2 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -136,6 +136,7 @@ struct cli_state {
gf_loglevel_t log_level;
char *glusterd_sock;
+ char *address_family;
};
struct cli_local {
diff --git a/geo-replication/src/gverify.sh b/geo-replication/src/gverify.sh
index d048de0..7c88f9f 100755
--- a/geo-replication/src/gverify.sh
+++ b/geo-replication/src/gverify.sh
@@ -94,6 +94,7 @@ echo $cmd_line;
function master_stats()
{
MASTERVOL=$1;
+ local inet6=$2;
local d;
local i;
local disk_size;
@@ -102,7 +103,12 @@ function master_stats()
local m_status;
d=$(mktemp -d -t ${0##*/}.XXXXXX 2>/dev/null);
- glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --volfile-id $MASTERVOL -l $master_log_file $d;
+ if [ "$inet6" = "inet6" ]; then
+ glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --xlator-option="transport.address-family=inet6" --volfile-id $MASTERVOL -l $master_log_file $d;
+ else
+ glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --volfile-id $MASTERVOL -l $master_log_file $d;
+ fi
+
i=$(get_inode_num $d);
if [[ "$i" -ne "1" ]]; then
echo 0:0;
@@ -124,12 +130,18 @@ function slave_stats()
SLAVEUSER=$1;
SLAVEHOST=$2;
SLAVEVOL=$3;
+ local inet6=$4;
local cmd_line;
local ver;
local status;
d=$(mktemp -d -t ${0##*/}.XXXXXX 2>/dev/null);
- glusterfs --xlator-option="*dht.lookup-unhashed=off" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d;
+ if [ "$inet6" = "inet6" ]; then
+ glusterfs --xlator-option="*dht.lookup-unhashed=off" --xlator-option="transport.address-family=inet6" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d;
+ else
+ glusterfs --xlator-option="*dht.lookup-unhashed=off" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d;
+ fi
+
i=$(get_inode_num $d);
if [[ "$i" -ne "1" ]]; then
echo 0:0;
@@ -167,6 +179,8 @@ function main()
log_file=$6
> $log_file
+ inet6=$7
+
# Use FORCE_BLOCKER flag in the error message to differentiate
# between the errors which the force command should bypass
@@ -204,8 +218,8 @@ function main()
fi;
ERRORS=0;
- master_data=$(master_stats $1);
- slave_data=$(slave_stats $2 $3 $4);
+ master_data=$(master_stats $1 ${inet6});
+ slave_data=$(slave_stats $2 $3 $4 ${inet6});
master_disk_size=$(echo $master_data | cut -f1 -d':');
slave_disk_size=$(echo $slave_data | cut -f1 -d':');
master_used_size=$(echo $master_data | cut -f2 -d':');
diff --git a/geo-replication/syncdaemon/argsupgrade.py b/geo-replication/syncdaemon/argsupgrade.py
index 4018143..7af4063 100644
--- a/geo-replication/syncdaemon/argsupgrade.py
+++ b/geo-replication/syncdaemon/argsupgrade.py
@@ -84,6 +84,10 @@ def upgrade():
# fail when it does stat to check the existence.
init_gsyncd_template_conf()
+ inet6 = False
+ if "--inet6" in sys.argv:
+ inet6 = True
+
if "--monitor" in sys.argv:
# python gsyncd.py --path=/bricks/b1
# --monitor -c gsyncd.conf
@@ -147,8 +151,11 @@ def upgrade():
user, hname = remote_addr.split("@")
+ if not inet6:
+ hname = gethostbyname(hname)
+
print(("ssh://%s@%s:gluster://127.0.0.1:%s" % (
- user, gethostbyname(hname), vol)))
+ user, hname, vol)))
sys.exit(0)
elif "--normalize-url" in sys.argv:
@@ -346,3 +353,7 @@ def upgrade():
if pargs.reset_sync_time:
sys.argv.append("--reset-sync-time")
+
+ if inet6:
+ # Add `--inet6` as first argument
+ sys.argv = [sys.argv[0], "--inet6"] + sys.argv[1:]
diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
index 037f351..effe0ce 100644
--- a/geo-replication/syncdaemon/gsyncd.py
+++ b/geo-replication/syncdaemon/gsyncd.py
@@ -47,6 +47,7 @@ def main():
sys.exit(0)
parser = ArgumentParser()
+ parser.add_argument("--inet6", action="store_true")
sp = parser.add_subparsers(dest="subcmd")
# Monitor Status File update
diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py
index 30050ec..4ece7e0 100644
--- a/geo-replication/syncdaemon/subcmds.py
+++ b/geo-replication/syncdaemon/subcmds.py
@@ -110,8 +110,13 @@ def subcmd_voluuidget(args):
ParseError = XET.ParseError if hasattr(XET, 'ParseError') else SyntaxError
- po = Popen(['gluster', '--xml', '--remote-host=' + args.host,
- 'volume', 'info', args.volname], bufsize=0,
+ cmd = ['gluster', '--xml', '--remote-host=' + args.host,
+ 'volume', 'info', args.volname]
+
+ if args.inet6:
+ cmd.append("--inet6")
+
+ po = Popen(cmd, bufsize=0,
stdin=None, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index 49baa58..0f40bea 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -76,6 +76,19 @@ static char *gsync_reserved_opts[] = {"gluster-command",
static char *gsync_no_restart_opts[] = {"checkpoint", "log_rsync_performance",
"log-rsync-performance", NULL};
+void
+set_gsyncd_inet6_arg(runner_t *runner)
+{
+ xlator_t *this = NULL;
+ char *af;
+ int ret;
+
+ this = THIS;
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret == 0)
+ runner_argprintf(runner, "--%s", af);
+}
+
int
__glusterd_handle_sys_exec(rpcsvc_request_t *req)
{
@@ -384,6 +397,7 @@ glusterd_urltransform_init(runner_t *runner, const char *transname)
{
runinit(runner);
runner_add_arg(runner, GSYNCD_PREFIX "/gsyncd");
+ set_gsyncd_inet6_arg(runner);
runner_argprintf(runner, "--%s-url", transname);
}
@@ -725,6 +739,7 @@ glusterd_get_slave_voluuid(char *slave_host, char *slave_vol, char *vol_uuid)
runinit(&runner);
runner_add_arg(&runner, GSYNCD_PREFIX "/gsyncd");
+ set_gsyncd_inet6_arg(&runner);
runner_add_arg(&runner, "--slavevoluuid-get");
runner_argprintf(&runner, "%s::%s", slave_host, slave_vol);
@@ -788,6 +803,7 @@ glusterd_gsync_get_config(char *master, char *slave, char *conf_path,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--config-get-all", NULL);
@@ -917,6 +933,7 @@ glusterd_gsync_get_status(char *master, char *slave, char *conf_path,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--status-get", NULL);
@@ -937,6 +954,7 @@ glusterd_gsync_get_param_file(char *prmfile, const char *param, char *master,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--config-get", NULL);
@@ -2811,6 +2829,7 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
char *slave_ip = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
+ char *af = NULL;
this = THIS;
GF_ASSERT(this);
@@ -2852,9 +2871,16 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
runner_argprintf(&runner, "%s", slave_vol);
runner_argprintf(&runner, "%d", ssh_port);
runner_argprintf(&runner, "%s", log_file_path);
- gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s",
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret)
+ af = "-";
+
+ runner_argprintf(&runner, "%s", af);
+
+ gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s %s",
runner.argv[0], runner.argv[1], runner.argv[2], runner.argv[3],
- runner.argv[4], runner.argv[5], runner.argv[6]);
+ runner.argv[4], runner.argv[5], runner.argv[6],
+ runner.argv[7]);
runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
synclock_unlock(&priv->big_lock);
ret = runner_run(&runner);
--
1.8.3.1

575
SOURCES/0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch

@ -0,0 +1,575 @@
From 1fb89973551937f34f24b45e07072a6ce6c30ff9 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Mon, 16 Oct 2017 14:18:31 +0530
Subject: [PATCH 053/124] Revert "packaging: (ganesha) remove glusterfs-ganesha
subpackage and related files)"

This reverts commit 0cf2963f12a8b540a7042605d8c79f638fdf6cee.

Label: DOWNSTREAM ONLY

Change-Id: Id6e7585021bd4dd78a59580cfa4838bdd4e539a0
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167102
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
configure.ac | 3 +
extras/Makefile.am | 3 +-
extras/ganesha/Makefile.am | 2 +
extras/ganesha/config/Makefile.am | 4 +
extras/ganesha/config/ganesha-ha.conf.sample | 19 ++++
extras/ganesha/scripts/Makefile.am | 4 +
extras/ganesha/scripts/create-export-ganesha.sh | 91 +++++++++++++++
extras/ganesha/scripts/dbus-send.sh | 60 ++++++++++
extras/ganesha/scripts/generate-epoch.py | 48 ++++++++
extras/hook-scripts/start/post/Makefile.am | 2 +-
extras/hook-scripts/start/post/S31ganesha-start.sh | 122 +++++++++++++++++++++
glusterfs.spec.in | 44 +++++++-
12 files changed, 396 insertions(+), 6 deletions(-)
create mode 100644 extras/ganesha/Makefile.am
create mode 100644 extras/ganesha/config/Makefile.am
create mode 100644 extras/ganesha/config/ganesha-ha.conf.sample
create mode 100644 extras/ganesha/scripts/Makefile.am
create mode 100755 extras/ganesha/scripts/create-export-ganesha.sh
create mode 100755 extras/ganesha/scripts/dbus-send.sh
create mode 100755 extras/ganesha/scripts/generate-epoch.py
create mode 100755 extras/hook-scripts/start/post/S31ganesha-start.sh

diff --git a/configure.ac b/configure.ac
index 0d06f5a..125ae29 100644
--- a/configure.ac
+++ b/configure.ac
@@ -196,6 +196,9 @@ AC_CONFIG_FILES([Makefile
extras/init.d/glustereventsd-Debian
extras/init.d/glustereventsd-Redhat
extras/init.d/glustereventsd-FreeBSD
+ extras/ganesha/Makefile
+ extras/ganesha/config/Makefile
+ extras/ganesha/scripts/Makefile
extras/systemd/Makefile
extras/systemd/glusterd.service
extras/systemd/glustereventsd.service
diff --git a/extras/Makefile.am b/extras/Makefile.am
index ff5ca9b..983f014 100644
--- a/extras/Makefile.am
+++ b/extras/Makefile.am
@@ -11,7 +11,8 @@ EditorModedir = $(docdir)
EditorMode_DATA = glusterfs-mode.el glusterfs.vim
SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \
- $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python
+ $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python \
+ ganesha
confdir = $(sysconfdir)/glusterfs
if WITH_SERVER
diff --git a/extras/ganesha/Makefile.am b/extras/ganesha/Makefile.am
new file mode 100644
index 0000000..542de68
--- /dev/null
+++ b/extras/ganesha/Makefile.am
@@ -0,0 +1,2 @@
+SUBDIRS = scripts config
+CLEANFILES =
diff --git a/extras/ganesha/config/Makefile.am b/extras/ganesha/config/Makefile.am
new file mode 100644
index 0000000..c729273
--- /dev/null
+++ b/extras/ganesha/config/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST= ganesha-ha.conf.sample
+
+confdir = $(sysconfdir)/ganesha
+conf_DATA = ganesha-ha.conf.sample
diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample
new file mode 100644
index 0000000..c22892b
--- /dev/null
+++ b/extras/ganesha/config/ganesha-ha.conf.sample
@@ -0,0 +1,19 @@
+# Name of the HA cluster created.
+# must be unique within the subnet
+HA_NAME="ganesha-ha-360"
+#
+# N.B. you may use short names or long names; you may not use IP addrs.
+# Once you select one, stay with it as it will be mildly unpleasant to
+# clean up if you switch later on. Ensure that all names - short and/or
+# long - are in DNS or /etc/hosts on all machines in the cluster.
+#
+# The subset of nodes of the Gluster Trusted Pool that form the ganesha
+# HA cluster. Hostname is specified.
+HA_CLUSTER_NODES="server1,server2,..."
+#HA_CLUSTER_NODES="server1.lab.redhat.com,server2.lab.redhat.com,..."
+#
+# Virtual IPs for each of the nodes specified above.
+VIP_server1="10.0.2.1"
+VIP_server2="10.0.2.2"
+#VIP_server1_lab_redhat_com="10.0.2.1"
+#VIP_server2_lab_redhat_com="10.0.2.2"
diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am
new file mode 100644
index 0000000..00a2c45
--- /dev/null
+++ b/extras/ganesha/scripts/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh
+
+scriptsdir = $(libexecdir)/ganesha
+scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh generate-epoch.py
diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh
new file mode 100755
index 0000000..1ffba42
--- /dev/null
+++ b/extras/ganesha/scripts/create-export-ganesha.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+#This script is called by glusterd when the user
+#tries to export a volume via NFS-Ganesha.
+#An export file specific to a volume
+#is created in GANESHA_DIR/exports.
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_DIR=${1%/}
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+declare -i EXPORT_ID
+
+function check_cmd_status()
+{
+ if [ "$1" != "0" ]
+ then
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
+ exit 1
+ fi
+}
+
+
+if [ ! -d "$GANESHA_DIR/exports" ];
+ then
+ mkdir $GANESHA_DIR/exports
+ check_cmd_status `echo $?`
+fi
+
+function write_conf()
+{
+echo -e "# WARNING : Using Gluster CLI will overwrite manual
+# changes made to this file. To avoid it, edit the
+# file and run ganesha-ha.sh --refresh-config."
+
+echo "EXPORT{"
+echo " Export_Id = 2;"
+echo " Path = \"/$VOL\";"
+echo " FSAL {"
+echo " name = "GLUSTER";"
+echo " hostname=\"localhost\";"
+echo " volume=\"$VOL\";"
+echo " }"
+echo " Access_type = RW;"
+echo " Disable_ACL = true;"
+echo ' Squash="No_root_squash";'
+echo " Pseudo=\"/$VOL\";"
+echo ' Protocols = "3", "4" ;'
+echo ' Transports = "UDP","TCP";'
+echo ' SecType = "sys";'
+echo " }"
+}
+if [ "$OPTION" = "on" ];
+then
+ if ! (cat $CONF | grep $VOL.conf\"$ )
+ then
+ write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
+ if [ "$count" = "1" ] ; then
+ EXPORT_ID=2
+ else
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ check_cmd_status `echo $?`
+ EXPORT_ID=EXPORT_ID+1
+ sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ check_cmd_status `echo $?`
+ fi
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ fi
+else
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
+fi
diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh
new file mode 100755
index 0000000..ec8d948
--- /dev/null
+++ b/extras/ganesha/scripts/dbus-send.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_DIR=${1%/}
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+
+function check_cmd_status()
+{
+ if [ "$1" != "0" ]
+ then
+ logger "dynamic export failed on node :${hostname -s}"
+ fi
+}
+
+#This function keeps track of export IDs and increments it with every new entry
+function dynamic_export_add()
+{
+ dbus-send --system \
+--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \
+string:"EXPORT(Path=/$VOL)"
+ check_cmd_status `echo $?`
+}
+
+#This function removes an export dynamically(uses the export_id of the export)
+function dynamic_export_remove()
+{
+ removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
+grep Export_Id | awk -F"[=,;]" '{print$2}'| tr -d '[[:space:]]'`
+ dbus-send --print-reply --system \
+--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
+ check_cmd_status `echo $?`
+}
+
+if [ "$OPTION" = "on" ];
+then
+ dynamic_export_add $@
+fi
+
+if [ "$OPTION" = "off" ];
+then
+ dynamic_export_remove $@
+fi
diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py
new file mode 100755
index 0000000..5db5e56
--- /dev/null
+++ b/extras/ganesha/scripts/generate-epoch.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+# Generates unique epoch value on each gluster node to be used by
+# nfs-ganesha service on that node.
+#
+# Configure 'EPOCH_EXEC' option to this script path in
+# '/etc/sysconfig/ganesha' file used by nfs-ganesha service.
+#
+# Construct epoch as follows -
+# first 32-bit contains the now() time
+# rest 32-bit value contains the local glusterd node uuid
+
+import time
+import binascii
+
+# Calculate the now() time into a 64-bit integer value
+def epoch_now():
+ epoch_time = int(time.mktime(time.localtime())) << 32
+ return epoch_time
+
+# Read glusterd UUID and extract first 32-bit of it
+def epoch_uuid():
+ file_name = '/var/lib/glusterd/glusterd.info'
+
+ for line in open(file_name):
+ if "UUID" in line:
+ glusterd_uuid = line.split('=')[1].strip()
+
+ uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-",""))
+
+ epoch_uuid = int(uuid_bin.encode('hex'), 32) & 0xFFFF0000
+ return epoch_uuid
+
+# Construct epoch as follows -
+# first 32-bit contains the now() time
+# rest 32-bit value contains the local glusterd node uuid
+epoch = (epoch_now() | epoch_uuid())
+print str(epoch)
+
+exit(0)
diff --git a/extras/hook-scripts/start/post/Makefile.am b/extras/hook-scripts/start/post/Makefile.am
index e32546d..792019d 100644
--- a/extras/hook-scripts/start/post/Makefile.am
+++ b/extras/hook-scripts/start/post/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh
+EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh S31ganesha-start.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/start/post/
if WITH_SERVER
diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
new file mode 100755
index 0000000..90ba6bc
--- /dev/null
+++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+PROGNAME="Sganesha-start"
+OPTSPEC="volname:,gd-workdir:"
+VOL=
+declare -i EXPORT_ID
+ganesha_key="ganesha.enable"
+GANESHA_DIR="/var/run/gluster/shared_storage/nfs-ganesha"
+CONF1="$GANESHA_DIR/ganesha.conf"
+GLUSTERD_WORKDIR=
+
+function parse_args ()
+{
+ ARGS=$(getopt -l $OPTSPEC -o "o" -name $PROGNAME $@)
+ eval set -- "$ARGS"
+
+ while true; do
+ case $1 in
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+}
+
+
+
+#This function generates a new export entry as export.volume_name.conf
+function write_conf()
+{
+echo -e "# WARNING : Using Gluster CLI will overwrite manual
+# changes made to this file. To avoid it, edit the
+# file, copy it over to all the NFS-Ganesha nodes
+# and run ganesha-ha.sh --refresh-config."
+
+echo "EXPORT{"
+echo " Export_Id = 2;"
+echo " Path = \"/$VOL\";"
+echo " FSAL {"
+echo " name = \"GLUSTER\";"
+echo " hostname=\"localhost\";"
+echo " volume=\"$VOL\";"
+echo " }"
+echo " Access_type = RW;"
+echo " Disable_ACL = true;"
+echo " Squash=\"No_root_squash\";"
+echo " Pseudo=\"/$VOL\";"
+echo " Protocols = \"3\", \"4\" ;"
+echo " Transports = \"UDP\",\"TCP\";"
+echo " SecType = \"sys\";"
+echo "}"
+}
+
+#It adds the export dynamically by sending dbus signals
+function export_add()
+{
+ dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+string:$GANESHA_DIR/exports/export.$VOL.conf string:"EXPORT(Export_Id=$EXPORT_ID)"
+
+}
+
+# based on src/scripts/ganeshactl/Ganesha/export_mgr.py
+function is_exported()
+{
+ local volume="${1}"
+
+ dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports \
+ | grep -w -q "/${volume}"
+
+ return $?
+}
+
+# Check the info file (contains the volume options) to see if Ganesha is
+# enabled for this volume.
+function ganesha_enabled()
+{
+ local volume="${1}"
+ local info_file="${GLUSTERD_WORKDIR}/vols/${VOL}/info"
+ local enabled="off"
+
+ enabled=$(grep -w ${ganesha_key} ${info_file} | cut -d"=" -f2)
+
+ [ "${enabled}" == "on" ]
+
+ return $?
+}
+
+parse_args $@
+
+if ganesha_enabled ${VOL} && ! is_exported ${VOL}
+then
+ if [ ! -e ${GANESHA_DIR}/exports/export.${VOL}.conf ]
+ then
+ #Remove export entry from nfs-ganesha.conf
+ sed -i /$VOL.conf/d $CONF1
+ write_conf ${VOL} > ${GANESHA_DIR}/exports/export.${VOL}.conf
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ EXPORT_ID=EXPORT_ID+1
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ sed -i s/Export_Id.*/"Export_Id=$EXPORT_ID;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF1
+ else
+ EXPORT_ID=$(grep ^[[:space:]]*Export_Id $GANESHA_DIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+ fi
+ export_add $VOL
+fi
+
+exit 0
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 0d57b49..dd7438c 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -299,7 +299,6 @@ Obsoletes: hekafs
Obsoletes: %{name}-common < %{version}-%{release}
Obsoletes: %{name}-core < %{version}-%{release}
Obsoletes: %{name}-ufo
-Obsoletes: %{name}-ganesha
%if ( 0%{!?_with_gnfs:1} )
Obsoletes: %{name}-gnfs
%endif
@@ -455,6 +454,30 @@ is in user space and easily manageable.
This package provides support to FUSE based clients and inlcudes the
glusterfs(d) binary.
+%if ( 0%{!?_without_server:1} )
+%package ganesha
+Summary: NFS-Ganesha configuration
+Group: Applications/File
+
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
+Requires: nfs-ganesha-gluster, pcs, dbus
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+Requires: cman, pacemaker, corosync
+%endif
+
+%description ganesha
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over Infiniband RDMA
+or TCP/IP interconnect into one large parallel network file
+system. GlusterFS is one of the most sophisticated file systems in
+terms of features and extensibility. It borrows a powerful concept
+called Translators from GNU Hurd kernel. Much of the code in GlusterFS
+is in user space and easily manageable.
+
+This package provides the configuration and related files for using
+NFS-Ganesha as the NFS server using GlusterFS
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary: GlusterFS Geo-replication
@@ -1111,6 +1134,12 @@ exit 0
%endif
%endif
+%if ( 0%{?_without_server:1} )
+#exclude ganesha related files
+%exclude %{_sysconfdir}/ganesha/*
+%exclude %{_libexecdir}/ganesha/*
+%endif
+
%files api
%exclude %{_libdir}/*.so
# libgfapi files
@@ -1273,6 +1302,12 @@ exit 0
%exclude %{_datadir}/glusterfs/tests/vagrant
%endif
+%if ( 0%{!?_without_server:1} )
+%files ganesha
+%{_sysconfdir}/ganesha/*
+%{_libexecdir}/ganesha/*
+%endif
+
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
@@ -1396,6 +1431,7 @@ exit 0
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
+ %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
@@ -1868,6 +1904,9 @@ fi
%endif
%changelog
+* Fri Apr 5 2019 Jiffin Tony Thottan <jthottan@redhat.com>
+- Adding ganesha bits back in gluster repository
+
* Wed Mar 6 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove unneeded ldconfig in scriptlets
- reported by Igor Gnatenko in Fedora
@@ -1960,9 +1999,6 @@ fi
* Thu Feb 16 2017 Niels de Vos <ndevos@redhat.com>
- Obsolete and Provide python-gluster for upgrading from glusterfs < 3.10
-* Tue Feb 7 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
-- remove ganesha (#1418417)
-
* Wed Feb 1 2017 Poornima G <pgurusid@redhat.com>
- Install /var/lib/glusterd/groups/metadata-cache by default
--
1.8.3.1

1912
SOURCES/0054-Revert-glusterd-storhaug-remove-ganesha.patch

File diff suppressed because it is too large Load Diff

1897
SOURCES/0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch

File diff suppressed because it is too large Load Diff

229
SOURCES/0056-common-ha-fixes-for-Debian-based-systems.patch

@ -0,0 +1,229 @@
From 2c1a83920b959a1ec170243d1eec71b1e2c074b0 Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Fri, 7 Apr 2017 09:09:29 -0400
Subject: [PATCH 056/124] common-ha: fixes for Debian-based systems

1) Debian-based systems don't have /usr/libexec/... and there is
a hard-coded invocation of /usr/libexec/ganesha/ganesha-ha.sh within
ganesha-ha.sh itself.
Fix: save $0 and use it instead for further invocations of self.

2) default shell is /bin/dash (not /bin/bash). Various runner_run()
invocations for ganesha used what amounts to
exec("sh /usr/$libexec/ganesha/ganesha-ha.sh ...);
which executes the script using the default shell, but there are
some bash-specific idioms that don't work if the shell is dash.
Fix: change to exec("/usr/$libexec/ganesha/ganesha-ha.sh ...); so that
the shebang forces the use of /bin/bash

3) Fedora and RHEL7 have merged /bin/ and /usr/bin, /bin is a symlink
to /usr/bin. Debian-based systems are not merged, and systemd systems
have /bin/systemctl. The logic to find .../bin/systemctl is backwards.
If the logic looks for /usr/bin/systemctl it will not find it on
Debian-based systems; if it looks for /bin/systemctl it will find it
on Fedora and RHEL by virtue of the symlink. (RHEL6 and others will
find their respective init regardless.)
Fix: change the logic to look for /bin/systemctl instead.

4) The logic for deciding to run systemctl (or not) is a bit silly.
Fix: simply invoke the found method via the function pointer in the
table.

Label: DOWNSTREAM ONLY

Change-Id: I33681b296a73aebb078bda6ac0d3a1d3b9770a21
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://review.gluster.org/17013
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Niels de Vos <ndevos@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: jiffin tony Thottan <jthottan@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167141
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 21 +++++++++---------
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 32 +++++++++++-----------------
2 files changed, 23 insertions(+), 30 deletions(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 6b011be..4b93f95 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -20,6 +20,7 @@
# ensure that the NFS GRACE DBUS signal is sent after the VIP moves to
# the new host.
+GANESHA_HA_SH=$(realpath $0)
HA_NUM_SERVERS=0
HA_SERVERS=""
HA_VOL_NAME="gluster_shared_storage"
@@ -68,9 +69,9 @@ function find_rhel7_conf
done
}
-if [ -z $CONFFILE ]
+if [ -z ${CONFFILE} ]
then
- find_rhel7_conf $OPTIONS
+ find_rhel7_conf ${OPTIONS}
fi
@@ -90,9 +91,9 @@ usage() {
determine_service_manager () {
- if [ -e "/usr/bin/systemctl" ];
+ if [ -e "/bin/systemctl" ];
then
- SERVICE_MAN="/usr/bin/systemctl"
+ SERVICE_MAN="/bin/systemctl"
elif [ -e "/sbin/invoke-rc.d" ];
then
SERVICE_MAN="/sbin/invoke-rc.d"
@@ -100,7 +101,7 @@ determine_service_manager () {
then
SERVICE_MAN="/sbin/service"
fi
- if [ "$SERVICE_MAN" == "DISTRO_NOT_FOUND" ]
+ if [ "${SERVICE_MAN}" == "DISTRO_NOT_FOUND" ]
then
echo "Service manager not recognized, exiting"
exit 1
@@ -113,21 +114,21 @@ manage_service ()
local new_node=${2}
local option=
- if [ "$action" == "start" ]; then
+ if [ "${action}" == "start" ]; then
option="yes"
else
option="no"
fi
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "/usr/libexec/ganesha/ganesha-ha.sh --setup-ganesha-conf-files $HA_CONFDIR $option"
+${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
- if [ "$SERVICE_MAN" == "/usr/bin/systemctl" ]
+ if [ "${SERVICE_MAN}" == "/bin/systemctl" ]
then
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "$SERVICE_MAN ${action} nfs-ganesha"
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha"
else
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "$SERVICE_MAN nfs-ganesha ${action}"
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} nfs-ganesha ${action}"
fi
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index fac16e6..81f794d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -122,12 +122,9 @@ sc_service_action(struct service_command *sc, char *command)
static int
manage_service(char *action)
{
- struct stat stbuf = {
- 0,
- };
int i = 0;
int ret = 0;
- struct service_command sc_list[] = {{.binary = "/usr/bin/systemctl",
+ struct service_command sc_list[] = {{.binary = "/bin/systemctl",
.service = "nfs-ganesha",
.action = sc_systemctl_action},
{.binary = "/sbin/invoke-rc.d",
@@ -139,15 +136,10 @@ manage_service(char *action)
{.binary = NULL}};
while (sc_list[i].binary != NULL) {
- ret = sys_stat(sc_list[i].binary, &stbuf);
+ ret = sys_access(sc_list[i].binary, X_OK);
if (ret == 0) {
gf_msg_debug(THIS->name, 0, "%s found.", sc_list[i].binary);
- if (strcmp(sc_list[i].binary, "/usr/bin/systemctl") == 0)
- ret = sc_systemctl_action(&sc_list[i], action);
- else
- ret = sc_service_action(&sc_list[i], action);
-
- return ret;
+ return sc_list[i].action(&sc_list[i], action);
}
i++;
}
@@ -449,7 +441,7 @@ manage_export_config(char *volname, char *value, char **op_errstr)
GF_ASSERT(volname);
runinit(&runner);
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/create-export-ganesha.sh",
+ runner_add_args(&runner, GANESHA_PREFIX "/create-export-ganesha.sh",
CONFDIR, value, volname, NULL);
ret = runner_run(&runner);
@@ -558,8 +550,8 @@ ganesha_manage_export(dict_t *dict, char *value, char **op_errstr)
}
if (check_host_list()) {
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/dbus-send.sh", CONFDIR,
- value, volname, NULL);
+ runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR, value,
+ volname, NULL);
ret = runner_run(&runner);
if (ret) {
gf_asprintf(op_errstr,
@@ -610,8 +602,8 @@ tear_down_cluster(gf_boolean_t run_teardown)
if (run_teardown) {
runinit(&runner);
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh",
- "teardown", CONFDIR, NULL);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh", "teardown",
+ CONFDIR, NULL);
ret = runner_run(&runner);
/* *
* Remove all the entries in CONFDIR expect ganesha.conf and
@@ -685,7 +677,7 @@ setup_cluster(gf_boolean_t run_setup)
if (run_setup) {
runinit(&runner);
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", "setup",
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh", "setup",
CONFDIR, NULL);
ret = runner_run(&runner);
}
@@ -714,7 +706,7 @@ teardown(gf_boolean_t run_teardown, char **op_errstr)
}
runinit(&runner);
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh", "cleanup",
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh", "cleanup",
CONFDIR, NULL);
ret = runner_run(&runner);
if (ret)
@@ -759,7 +751,7 @@ stop_ganesha(char **op_errstr)
};
runinit(&runner);
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh",
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh",
"--setup-ganesha-conf-files", CONFDIR, "no", NULL);
ret = runner_run(&runner);
if (ret) {
@@ -828,7 +820,7 @@ start_ganesha(char **op_errstr)
if (check_host_list()) {
runinit(&runner);
- runner_add_args(&runner, "sh", GANESHA_PREFIX "/ganesha-ha.sh",
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh",
"--setup-ganesha-conf-files", CONFDIR, "yes", NULL);
ret = runner_run(&runner);
if (ret) {
--
1.8.3.1

40
SOURCES/0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch

@ -0,0 +1,40 @@
From 16d298584c70138fd639281bc900838d7938aec9 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 22 Feb 2017 14:37:04 +0530
Subject: [PATCH 057/124] ganesha/scripts : Remove export entries from
ganesha.conf during cleanup

Label: DOWNSTREAM ONLY

Change-Id: I288f7c9ced23d258a7ce1242d8efe03a4bf6f746
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/16708
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167142
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 1 +
1 file changed, 1 insertion(+)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 4b93f95..7ba80b5 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -342,6 +342,7 @@ cleanup_ganesha_config ()
rm -f /etc/corosync/corosync.conf
rm -rf /etc/cluster/cluster.conf*
rm -rf /var/lib/pacemaker/cib/*
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $HA_CONFDIR/ganesha.conf
}
do_create_virt_ip_constraints()
--
1.8.3.1

62
SOURCES/0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch

@ -0,0 +1,62 @@
From 172f32058b1a7d2e42f373490853aef5dd72f02f Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 22 Feb 2017 14:20:41 +0530
Subject: [PATCH 058/124] glusterd/ganesha : During volume delete remove the
ganesha export configuration file

Label: DOWNSTREAM ONLY

Change-Id: I0363e7f4d7cefd3f1b3c4f91e495767ec52e230e
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/16707
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167143
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 9 +++++++++
2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 81f794d..6d72fda 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -445,7 +445,7 @@ manage_export_config(char *volname, char *value, char **op_errstr)
CONFDIR, value, volname, NULL);
ret = runner_run(&runner);
- if (ret)
+ if (ret && !(*op_errstr))
gf_asprintf(op_errstr,
"Failed to create"
" NFS-Ganesha export config file.");
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index a0417ca..81c668c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2936,6 +2936,15 @@ glusterd_op_delete_volume(dict_t *dict)
goto out;
}
+ if (glusterd_check_ganesha_export(volinfo)) {
+ ret = manage_export_config(volname, "off", NULL);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "Could not delete ganesha export conf file "
+ "for %s",
+ volname);
+ }
+
ret = glusterd_delete_volume(volinfo);
out:
gf_msg_debug(this->name, 0, "returning %d", ret);
--
1.8.3.1

132
SOURCES/0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch

@ -0,0 +1,132 @@
From 8b501d9dfbeecb3ffdc3cd11b7c74aa929356ed6 Mon Sep 17 00:00:00 2001
From: jiffin tony thottan <jthottan@redhat.com>
Date: Mon, 7 Dec 2015 14:38:54 +0530
Subject: [PATCH 059/124] glusterd/ganesha : throw proper error for "gluster
nfs-ganesha disable"

For the first time, or if "gluster nfs-ganesha enable" fails, the global option
"nfs-ganesha" won't be stored in glusterd's dictionary. In both cases the
"gluster nfs-ganesha disable" command throws the following error:
"nfs-ganesha: failed: nfs-ganesha is already (null)d."

Also this patch provides the missing prompt for nfs-ganesha disable in 3.10

Label: DOWNSTREAM ONLY

Change-Id: I7c9fd6dabedc0cfb14c5190b3554bc63a6bc0340
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/16791
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167144
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-parser.c | 33 +++++++++++++++++-----------
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 22 +++++--------------
2 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index cd9c445..f85958b 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -5908,20 +5908,27 @@ cli_cmd_ganesha_parse(struct cli_state *state, const char **words,
goto out;
}
- question =
- "Enabling NFS-Ganesha requires Gluster-NFS to be"
- " disabled across the trusted pool. Do you "
- "still want to continue?\n";
-
if (strcmp(value, "enable") == 0) {
- answer = cli_cmd_get_confirmation(state, question);
- if (GF_ANSWER_NO == answer) {
- gf_log("cli", GF_LOG_ERROR,
- "Global operation "
- "cancelled, exiting");
- ret = -1;
- goto out;
- }
+ question =
+ "Enabling NFS-Ganesha requires Gluster-NFS to be "
+ "disabled across the trusted pool. Do you "
+ "still want to continue?\n";
+ } else if (strcmp(value, "disable") == 0) {
+ question =
+ "Disabling NFS-Ganesha will tear down the entire "
+ "ganesha cluster across the trusted pool. Do you "
+ "still want to continue?\n";
+ } else {
+ ret = -1;
+ goto out;
+ }
+ answer = cli_cmd_get_confirmation(state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log("cli", GF_LOG_ERROR,
+ "Global operation "
+ "cancelled, exiting");
+ ret = -1;
+ goto out;
}
cli_out("This will take a few minutes to complete. Please wait ..");
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 6d72fda..1d17a33 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -252,8 +252,7 @@ int
glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr)
{
int ret = -1;
- int value = -1;
- gf_boolean_t option = _gf_false;
+ char *value = NULL;
char *str = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
@@ -264,8 +263,8 @@ glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr)
priv = this->private;
GF_ASSERT(priv);
- value = dict_get_str_boolean(dict, "value", _gf_false);
- if (value == -1) {
+ ret = dict_get_str(dict, "value", &value);
+ if (value == NULL) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"value not present.");
goto out;
@@ -273,22 +272,13 @@ glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr)
/* This dict_get will fail if the user had never set the key before */
/*Ignoring the ret value and proceeding */
ret = dict_get_str(priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_DICT_GET_FAILED,
- "Global dict not present.");
- ret = 0;
- goto out;
- }
- /* Validity of the value is already checked */
- ret = gf_string2boolean(str, &option);
- /* Check if the feature is already enabled, fail in that case */
- if (value == option) {
- gf_asprintf(op_errstr, "nfs-ganesha is already %sd.", str);
+ if (str ? strcmp(value, str) == 0 : strcmp(value, "disable") == 0) {
+ gf_asprintf(op_errstr, "nfs-ganesha is already %sd.", value);
ret = -1;
goto out;
}
- if (value) {
+ if (strcmp(value, "enable")) {
ret = start_ganesha(op_errstr);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NFS_GNS_START_FAIL,
--
1.8.3.1

61
SOURCES/0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch

@ -0,0 +1,61 @@
From 93635333d17a03078a6bf72771445e1bd9ebdc15 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Thu, 2 Mar 2017 12:22:30 +0530
Subject: [PATCH 060/124] ganesha/scripts : Stop ganesha process on all nodes
if cluster setup fails

During the staging phase of the volume option "nfs-ganesha", the symlink
"ganesha.conf" is created and the ganesha process is started. The cluster
setup happens during the commit phase of that option. So if the cluster setup
fails, the ganesha process is left running on all cluster nodes.

Label: DOWNSTREAM ONLY

Change-Id: Ib2cb85364b7ef5b702acb4826ffdf8e6f31a2acd
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/16823
Smoke: Gluster Build System <jenkins@build.gluster.org>
Tested-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: soumya k <skoduri@redhat.com>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167145
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 7ba80b5..db3f921 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -175,6 +175,13 @@ determine_servers()
fi
}
+stop_ganesha_all()
+{
+ local serverlist=${1}
+ for node in ${serverlist} ; do
+ manage_service "stop" ${node}
+ done
+}
setup_cluster()
{
@@ -191,6 +198,8 @@ setup_cluster()
pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --transport udpu ${servers}
if [ $? -ne 0 ]; then
logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
+ #set up failed stop all ganesha process and clean up symlinks in cluster
+ stop_ganesha_all ${servers}
exit 1;
fi
pcs cluster start --all
--
1.8.3.1

106
SOURCES/0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch

@ -0,0 +1,106 @@
From a766878e11a984680ed29f13aae713d464ec985e Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 19 Apr 2017 16:12:10 +0530
Subject: [PATCH 061/124] ganesha : allow refresh-config and volume
export/unexport in failover state

If ganesha is not running on one of the nodes in the HA cluster, then all dbus
commands sent to that ganesha server will fail. This results in both
refresh-config and volume export/unexport failures. This change will
gracefully handle those scenarios.

Label: DOWNSTREAM ONLY

Change-Id: I3f1b7b7ca98e54c273c266e56357d8e24dd1b14b
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/17081
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167146
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 6 ++----
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 24 +++++++++++++++---------
xlators/mgmt/glusterd/src/glusterd-messages.h | 2 +-
3 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index db3f921..f040ef6 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -275,8 +275,7 @@ string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on ${current_host}."
- exit 1
+ echo "Refresh-config failed on ${current_host}"
else
echo "Refresh-config completed on ${current_host}."
fi
@@ -297,8 +296,7 @@ string:"EXPORT(Export_Id=$export_id)" 2>&1)
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ] ; then
- echo "Error: refresh-config failed on localhost."
- exit 1
+ echo "Refresh-config failed on localhost."
else
echo "Success: refresh-config completed."
fi
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 1d17a33..ee8b588 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -540,15 +540,21 @@ ganesha_manage_export(dict_t *dict, char *value, char **op_errstr)
}
if (check_host_list()) {
- runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR, value,
- volname, NULL);
- ret = runner_run(&runner);
- if (ret) {
- gf_asprintf(op_errstr,
- "Dynamic export"
- " addition/deletion failed."
- " Please see log file for details");
- goto out;
+ /* Check whether ganesha is running on this node */
+ if (manage_service("status")) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GANESHA_NOT_RUNNING,
+ "Export failed, NFS-Ganesha is not running");
+ } else {
+ runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR,
+ value, volname, NULL);
+ ret = runner_run(&runner);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "Dynamic export"
+ " addition/deletion failed."
+ " Please see log file for details");
+ goto out;
+ }
}
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 9558480..c7b3ca8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -298,6 +298,6 @@ GLFS_MSGID(
GD_MSG_LOCALTIME_LOGGING_ENABLE, GD_MSG_LOCALTIME_LOGGING_DISABLE,
GD_MSG_PORTS_EXHAUSTED, GD_MSG_CHANGELOG_GET_FAIL,
GD_MSG_MANAGER_FUNCTION_FAILED, GD_MSG_NFS_GANESHA_DISABLED,
- GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL);
+ GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL);
#endif /* !_GLUSTERD_MESSAGES_H_ */
--
1.8.3.1

59
SOURCES/0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch

@ -0,0 +1,59 @@
From eb784a40a4f72e347945e0d66ac1a28389bb076c Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Fri, 28 Apr 2017 17:27:46 +0530
Subject: [PATCH 062/124] glusterd/ganesha : perform removal of ganesha.conf on
nodes only in ganesha cluster

Label: DOWNSTREAM ONLY

Change-Id: I864ecd9391adf80fb1fa6ad2f9891a9ce77135e7
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/17138
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167147
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index ee8b588..b743216 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -746,17 +746,16 @@ stop_ganesha(char **op_errstr)
0,
};
- runinit(&runner);
- runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh",
- "--setup-ganesha-conf-files", CONFDIR, "no", NULL);
- ret = runner_run(&runner);
- if (ret) {
- gf_asprintf(op_errstr,
- "removal of symlink ganesha.conf "
- "in /etc/ganesha failed");
- }
-
if (check_host_list()) {
+ runinit(&runner);
+ runner_add_args(&runner, GANESHA_PREFIX "/ganesha-ha.sh",
+ "--setup-ganesha-conf-files", CONFDIR, "no", NULL);
+ ret = runner_run(&runner);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "removal of symlink ganesha.conf "
+ "in /etc/ganesha failed");
+ }
ret = manage_service("stop");
if (ret)
gf_asprintf(op_errstr,
--
1.8.3.1

144
SOURCES/0063-glusterd-ganesha-update-cache-invalidation-properly-.patch

@ -0,0 +1,144 @@
From e5450c639915f4c29ae2ad480e4128b5845254cc Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Tue, 25 Apr 2017 16:36:40 +0530
Subject: [PATCH 063/124] glusterd/ganesha : update cache invalidation properly
during volume stop

As per the current code, during volume stop for a ganesha-enabled volume the
features.cache-invalidation option was turned "off" in ganesha_manage_export().
And it was never turned back "on" when the volume was started. It is not
desirable to modify the volume options during stop; this patch fixes the
above-mentioned issue.

Label: DOWNSTREAM ONLY

Change-Id: Iea9c62e5cda4f54805b41ea6055cf0c3652a634c
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/17111
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167148
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 33 ++++++++++++++-----------
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 +--
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 2 +-
xlators/mgmt/glusterd/src/glusterd.h | 3 ++-
4 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index b743216..1c2ba7a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -445,7 +445,8 @@ manage_export_config(char *volname, char *value, char **op_errstr)
/* Exports and unexports a particular volume via NFS-Ganesha */
int
-ganesha_manage_export(dict_t *dict, char *value, char **op_errstr)
+ganesha_manage_export(dict_t *dict, char *value,
+ gf_boolean_t update_cache_invalidation, char **op_errstr)
{
runner_t runner = {
0,
@@ -558,19 +559,21 @@ ganesha_manage_export(dict_t *dict, char *value, char **op_errstr)
}
}
- vol_opts = volinfo->dict;
- ret = dict_set_dynstr_with_alloc(vol_opts, "features.cache-invalidation",
- value);
- if (ret)
- gf_asprintf(op_errstr,
- "Cache-invalidation could not"
- " be set to %s.",
- value);
- ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- gf_asprintf(op_errstr, "failed to store volinfo for %s",
- volinfo->volname);
-
+ if (update_cache_invalidation) {
+ vol_opts = volinfo->dict;
+ ret = dict_set_dynstr_with_alloc(vol_opts,
+ "features.cache-invalidation", value);
+ if (ret)
+ gf_asprintf(op_errstr,
+ "Cache-invalidation could not"
+ " be set to %s.",
+ value);
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ gf_asprintf(op_errstr, "failed to store volinfo for %s",
+ volinfo->volname);
+ }
out:
return ret;
}
@@ -867,7 +870,7 @@ glusterd_handle_ganesha_op(dict_t *dict, char **op_errstr, char *key,
GF_ASSERT(value);
if (strcmp(key, "ganesha.enable") == 0) {
- ret = ganesha_manage_export(dict, value, op_errstr);
+ ret = ganesha_manage_export(dict, value, _gf_true, op_errstr);
if (ret < 0)
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index a630c48..52809a8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1178,7 +1178,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if ((strcmp(key, "ganesha.enable") == 0) &&
(strcmp(value, "off") == 0)) {
- ret = ganesha_manage_export(dict, "off", op_errstr);
+ ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
if (ret)
goto out;
}
@@ -1691,7 +1691,7 @@ glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
*/
if (volinfo && (!strcmp(key, "all") || !strcmp(key, "ganesha.enable"))) {
if (glusterd_check_ganesha_export(volinfo)) {
- ret = ganesha_manage_export(dict, "off", op_errstr);
+ ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
if (ret)
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
"Could not reset ganesha.enable key");
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 81c668c..de4eccb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1825,7 +1825,7 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr)
ret = glusterd_check_ganesha_export(volinfo);
if (ret) {
- ret = ganesha_manage_export(dict, "off", op_errstr);
+ ret = ganesha_manage_export(dict, "off", _gf_false, op_errstr);
if (ret) {
gf_msg(THIS->name, GF_LOG_WARNING, 0,
GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL,
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 5135181..e858ce4 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -1368,7 +1368,8 @@ glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr);
int
glusterd_op_set_ganesha(dict_t *dict, char **errstr);
int
-ganesha_manage_export(dict_t *dict, char *value, char **op_errstr);
+ganesha_manage_export(dict_t *dict, char *value,
+ gf_boolean_t update_cache_invalidation, char **op_errstr);
gf_boolean_t
glusterd_is_ganesha_cluster();
gf_boolean_t
--
1.8.3.1

52
SOURCES/0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch

@ -0,0 +1,52 @@
From 37bf4daca164cfcb260760ee2fd25d66f920dc7f Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 22 Feb 2017 18:26:30 +0530
Subject: [PATCH 064/124] glusterd/ganesha : return proper value in pre_setup()

Label: DOWNSTREAM ONLY

Change-Id: I6f7ce82488904c7d418ee078162f26f1ec81e9d9
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/16733
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
Tested-by: Raghavendra Talur <rtalur@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167149
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 1c2ba7a..d9fdfc6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -843,16 +843,15 @@ pre_setup(gf_boolean_t run_setup, char **op_errstr)
{
int ret = 0;
- ret = check_host_list();
-
- if (ret) {
+ if (check_host_list()) {
ret = setup_cluster(run_setup);
if (ret == -1)
gf_asprintf(op_errstr,
"Failed to set up HA "
"config for NFS-Ganesha. "
"Please check the log file for details");
- }
+ } else
+ ret = -1;
return ret;
}
--
1.8.3.1

58
SOURCES/0065-ganesha-scripts-remove-dependency-over-export-config.patch

@ -0,0 +1,58 @@
From 7a47c004b907ed5469b78d559cae6d151e4d626b Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Thu, 23 Feb 2017 16:21:52 +0530
Subject: [PATCH 065/124] ganesha/scripts : remove dependency over export
configuration file for unexport

Currently unexport is performed by reading the export id from the volume
configuration file, so unexport has a dependency on that file. This patch
performs the unexport with the help of the dbus command ShowExports, and it
will only unexport the share which was added via the cli.

Label: DOWNSTREAM ONLY

Change-Id: I6f3c9b2bb48f0328b18e9cc0e4b9356174afd596
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/16771
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167150
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/dbus-send.sh | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh
index ec8d948..9d613a0 100755
--- a/extras/ganesha/scripts/dbus-send.sh
+++ b/extras/ganesha/scripts/dbus-send.sh
@@ -41,8 +41,18 @@ string:"EXPORT(Path=/$VOL)"
#This function removes an export dynamically(uses the export_id of the export)
function dynamic_export_remove()
{
- removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F"[=,;]" '{print$2}'| tr -d '[[:space:]]'`
+ # Below bash fetch all the export from ShowExport command and search
+ # export entry based on path and then get its export entry.
+ # There are two possiblities for path, either entire volume will be
+ # exported or subdir. It handles both cases. But it remove only first
+ # entry from the list based on assumption that entry exported via cli
+ # has lowest export id value
+ removed_id=$(dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports | grep -B 1 -we \
+ "/"$VOL -e "/"$VOL"/" | grep uint16 | awk '{print $2}' \
+ | head -1)
+
dbus-send --print-reply --system \
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
--
1.8.3.1

41
SOURCES/0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch

@@ -0,0 +1,41 @@
From d91eadbbb3e2d02e7297214da394b0e232544386 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Tue, 2 May 2017 14:06:00 +0530
Subject: [PATCH 066/124] glusterd/ganesha : add proper NULL check in
manage_export_config

Label: DOWNSTREAM ONLY

Change-Id: I872b2b6b027f04e61f60ad85588f50e1ef2f988c
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/17150
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167151
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index d9fdfc6..fe0bffc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -435,7 +435,7 @@ manage_export_config(char *volname, char *value, char **op_errstr)
CONFDIR, value, volname, NULL);
ret = runner_run(&runner);
- if (ret && !(*op_errstr))
+ if (ret && op_errstr)
gf_asprintf(op_errstr,
"Failed to create"
" NFS-Ganesha export config file.");
--
1.8.3.1

41
SOURCES/0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch

@@ -0,0 +1,41 @@
From 1e5c6bb28894a57e5ca5ed7b4b3b5e05efecf7cd Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 3 May 2017 12:47:14 +0530
Subject: [PATCH 067/124] ganesha : minor improvments for commit e91cdf4
(17081)

Label: DOWNSTREAM ONLY

Change-Id: I3af13e081c5e46cc6f2c132e7a5106ac3355c850
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://review.gluster.org/17152
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167152
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index f040ef6..cedc3fa 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -275,7 +275,7 @@ string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ]; then
- echo "Refresh-config failed on ${current_host}"
+ echo "Refresh-config failed on ${current_host}. Please check logs on ${current_host}"
else
echo "Refresh-config completed on ${current_host}."
fi
--
1.8.3.1

58
SOURCES/0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch

@@ -0,0 +1,58 @@
From aabc623f99d22a2a9e1d52f3ca7de1dc5b49946d Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Tue, 13 Jun 2017 07:36:50 -0400
Subject: [PATCH 068/124] common-ha: surviving ganesha.nfsd not put in grace on
fail-over

Behavior change is seen in new HA in RHEL 7.4 Beta. Up to now clone
RAs have been created with "pcs resource create ... meta notify=true".
Their notify method is invoked with pre-start or post-stop when one of
the clone RAs is started or stopped.

In 7.4 Beta we observe that the notify method is not
invoked when one of the clones is stopped (or started).

Ken Gaillot, one of the pacemaker devs, wrote:
With the above command, pcs puts the notify=true meta-attribute
on the primitive instead of the clone. Looking at the pcs help,
that seems expected (--clone notify=true would put it on the clone,
meta notify=true puts it on the primitive). If you drop the "meta"
above, I think it will work again.

And indeed his suggested fix does work on both RHEL 7.4 Beta and RHEL
7.3 and presumably Fedora.

Label: DOWNSTREAM ONLY

Change-Id: Idbb539f1366df6d39f77431c357dff4e53a2df6d
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://review.gluster.org/17534
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167153
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index cedc3fa..537c965 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -445,7 +445,7 @@ setup_create_resources()
# ganesha-active crm_attribute
sleep 5
- pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone meta notify=true
+ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone notify=true
if [ $? -ne 0 ]; then
logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed"
fi
--
1.8.3.1

96
SOURCES/0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch

@@ -0,0 +1,96 @@
From 916a79ea78db264ceedd4ebdba794e488b82eceb Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Wed, 21 Jun 2017 10:01:20 -0400
Subject: [PATCH 069/124] common-ha: enable and disable selinux
ganesha_use_fusefs

Starting in Fedora 26 and RHEL 7.4 there are new targeted policies
in selinux which include a tuneable to allow ganesha.nfsd to access
the gluster (FUSE) shared_storage volume where ganesha maintains its
state.

N.B. rpm doesn't have a way to distinguish between RHEL 7.3 or 7.4
so it can't be enabled for RHEL at this time. /usr/sbin/semanage is
in policycoreutils-python in RHEL (versus policycoreutils-python-utils
in Fedora.) Once RHEL 7.4 GAs we may also wish to specify the version
for RHEL 7 explicitly, i.e.
Requires: selinux-policy >= 3.13.1-160.
But beware, the corresponding version in Fedora 26 seems to be
selinux-policy-3.13.1.258 or so. (Maybe earlier versions, but that's
what's currently in the F26 beta.)

release-3.10 is the upstream master branch for glusterfs-ganesha. For
release-3.11 and later storhaug needs a similar change, which is
tracked by https://github.com/linux-ha-storage/storhaug/issues/11

Maybe at some point we would want to consider migrating the targeted
policies for glusterfs (and nfs-ganesha) from selinux-policy to a
glusterfs-selinux (and nfs-ganesha-selinux) subpackage?

Label: DOWNSTREAM ONLY

Change-Id: I04a5443edd00636cbded59a2baddfa98095bf7ac
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://review.gluster.org/17597
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Reviewed-by: jiffin tony Thottan <jthottan@redhat.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167154
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index d748ebc..b01c94f 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -466,6 +466,11 @@ Requires: pcs, dbus
Requires: cman, pacemaker, corosync
%endif
+%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+Requires(post): policycoreutils-python-utils
+Requires(postun): policycoreutils-python-utils
+%endif
+
%description ganesha
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
@@ -923,6 +928,14 @@ exit 0
%systemd_post glustereventsd
%endif
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%post ganesha
+semanage boolean -m ganesha_use_fusefs --on
+exit 0
+%endif
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
if [ $1 -ge 1 ]; then
@@ -1055,6 +1068,14 @@ fi
exit 0
%endif
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%postun ganesha
+semanage boolean -m ganesha_use_fusefs --off
+exit 0
+%endif
+%endif
+
##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
--
1.8.3.1

76
SOURCES/0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch

@@ -0,0 +1,76 @@
From f410cd9f9b9455373a9612423558d8d0f83cd0fc Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Wed, 12 Jul 2017 07:43:51 -0400
Subject: [PATCH 070/124] packaging: glusterfs-ganesha update sometimes fails
semanage

Depending on how dnf orders updates, the updated version of
selinux-policy-targeted with ganesha_use_fusefs may not be updated
before the glusterfs-ganesha update execute its %post scriptlet
containing the `semanage ganesha_use_fusefs ...` command. In such
situations the semanage command (silently) fails.

Use a %trigger (and %triggerun) to run the scriptlet (again) after
selinux-policy-targeted with ganesha_use_fusefs has been installed
or updated.

Note: the %triggerun is probably unnecessary, but it doesn't hurt.

The release-3.10 branch is the "upstream master" for the glusterfs-
ganesha subpackage.

Note: to be merged after https://review.gluster.org/17806

Label: DOWNSTREAM ONLY

Change-Id: I1ad06d79fa1711e4abf038baf9f0a5b7bb665934
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://review.gluster.org/17756
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167155
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index b01c94f..1d99a3d 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1077,6 +1077,28 @@ exit 0
%endif
##-----------------------------------------------------------------------------
+## All %%trigger should be placed here and keep them sorted
+##
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%trigger ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --on
+exit 0
+%endif
+%endif
+
+##-----------------------------------------------------------------------------
+## All %%triggerun should be placed here and keep them sorted
+##
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%triggerun ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --off
+exit 0
+%endif
+%endif
+
+##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
%files
--
1.8.3.1

66
SOURCES/0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch

@@ -0,0 +1,66 @@
From 662c94f3b3173bf78465644e2e42e03efd9ea493 Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Mon, 17 Jul 2017 11:07:40 -0400
Subject: [PATCH 071/124] common-ha: enable and disable selinux
gluster_use_execmem

Starting in Fedora 26 and RHEL 7.4 there are new targeted policies in
selinux which include a tuneable to allow glusterd->ganesha-ha.sh->pcs
to access the pcs config, i.e. gluster-use-execmem.

Note. rpm doesn't have a way to distinguish between RHEL 7.3 or 7.4
or between 3.13.1-X and 3.13.1-Y so it can't be enabled for RHEL at
this time.

/usr/sbin/semanage is in policycoreutils-python in RHEL (versus
policycoreutils-python-utils in Fedora.)

Requires selinux-policy >= 3.13.1-160 in RHEL7. The corresponding
version in Fedora 26 seems to be selinux-policy-3.13.1-259 or so. (Maybe
earlier versions, but that's what was in F26 when I checked.)

Label: DOWNSTREAM ONLY

Change-Id: Ic474b3f7739ff5be1e99d94d00b55caae4ceb5a0
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://review.gluster.org/17806
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: soumya k <skoduri@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167156
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 537c965..f4400af 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -984,6 +984,9 @@ main()
usage
exit 0
fi
+
+ semanage boolean -m gluster_use_execmem --on
+
HA_CONFDIR=${1%/}; shift
local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
local node=""
@@ -1129,6 +1132,9 @@ $HA_CONFDIR/ganesha-ha.conf
;;
esac
+
+ semanage boolean -m gluster_use_execmem --off
+
}
main $*
--
1.8.3.1

60
SOURCES/0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch

@@ -0,0 +1,60 @@
From c147bbec10fc72b85301ab6a7580f15713b8a974 Mon Sep 17 00:00:00 2001
From: Ambarish <asoman@redhat.com>
Date: Tue, 12 Sep 2017 18:34:29 +0530
Subject: [PATCH 072/124] ganesha-ha: don't set SELinux booleans if SELinux is
disabled

semanage commands inside the ganesha-ha.sh script will fail if SELinux is
disabled. This patch introduces a check for whether SELinux is enabled,
and subsequently runs semanage commands only on SELinux-enabled systems.

Label: DOWNSTREAM ONLY

Change-Id: Ibee61cbb1d51a73e6c326b49bac5c7ce06feb310
Signed-off-by: Ambarish <asoman@redhat.com>
Reviewed-on: https://review.gluster.org/18264
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: jiffin tony Thottan <jthottan@redhat.com>
Reviewed-by: Daniel Gryniewicz <dang@redhat.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167157
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index f4400af..e1d3ea0 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -985,7 +985,9 @@ main()
exit 0
fi
- semanage boolean -m gluster_use_execmem --on
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --on
+ fi
HA_CONFDIR=${1%/}; shift
local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
@@ -1133,8 +1135,9 @@ $HA_CONFDIR/ganesha-ha.conf
esac
- semanage boolean -m gluster_use_execmem --off
-
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --off
+ fi
}
main $*
--
1.8.3.1

45
SOURCES/0073-build-remove-ganesha-dependency-on-selinux-policy.patch

@@ -0,0 +1,45 @@
From 52279c877264f41b522f747a986b937e6f054e2a Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Fri, 23 Jun 2017 20:43:16 +0530
Subject: [PATCH 073/124] build: remove ganesha dependency on selinux-policy

Problem:
Puddle creation fails with unresolved dependencies
unresolved deps:
*** selinux-policy >= 0:3.13.1-160

Solution:
We know a priori about the version at RHEL 7.4 is already the desired
version. So removing this explicit dependency *should* not be a gluster
test blocker.

Label: DOWNSTREAM ONLY

Change-Id: Id53ac0e41adc14704932787ee0dd3143e6615aaf
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/109945
Reviewed-by: Kaleb Keithley <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167158
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 1 +
1 file changed, 1 insertion(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 1d99a3d..e55e255 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -467,6 +467,7 @@ Requires: cman, pacemaker, corosync
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+Requires: selinux-policy >= 3.13.1-160
Requires(post): policycoreutils-python-utils
Requires(postun): policycoreutils-python-utils
%endif
--
1.8.3.1

67
SOURCES/0074-common-ha-enable-pacemaker-at-end-of-setup.patch

@@ -0,0 +1,67 @@
From bfbda24746bf11573b485baf534a5cf1373c6c89 Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Wed, 7 Jun 2017 08:15:48 -0400
Subject: [PATCH 074/124] common-ha: enable pacemaker at end of setup

Label: DOWNSTREAM ONLY

Change-Id: I3ccd59b67ed364bfc5d27e88321ab5b9f8d471fd
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/108431
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167159
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index e1d3ea0..d7dfb87 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -787,6 +787,22 @@ setup_state_volume()
}
+enable_pacemaker()
+{
+ while [[ ${1} ]]; do
+ if [ "${SERVICE_MAN}" == "/usr/bin/systemctl" ]; then
+${SECRET_PEM} root@${1} ${SERVICE_MAN} enable pacemaker"
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
+ else
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} pacemaker enable"
+ fi
+ shift
+ done
+}
+
+
addnode_state_volume()
{
local newnode=${1}; shift
@@ -1011,6 +1027,8 @@ main()
if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then
+ determine_service_manager
+
setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}"
setup_create_resources ${HA_SERVERS}
@@ -1019,6 +1037,8 @@ main()
setup_state_volume ${HA_SERVERS}
+ enable_pacemaker ${HA_SERVERS}
+
else
logger "insufficient servers for HA, aborting"
--
1.8.3.1

43
SOURCES/0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch

@@ -0,0 +1,43 @@
From 0a124b59c662c8f85fe6d184b839cbfe29d5e8ab Mon Sep 17 00:00:00 2001
From: Soumya Koduri <skoduri@redhat.com>
Date: Wed, 14 Jun 2017 15:20:22 +0530
Subject: [PATCH 075/124] common-ha: Fix an incorrect syntax during setup

There was an invalid line introduced as part of
https://code.engineering.redhat.com/gerrit/#/c/108431/

Detected by rpmdiff -
https://errata.devel.redhat.com/rpmdiff/show/175336?result_id=4796901

This change is to fix the same.

Label: DOWNSTREAM ONLY

Change-Id: I55cdd7d866cb175fb620dbbd2d02c36eab291a74
Signed-off-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/109017
Reviewed-by: Kaleb Keithley <kkeithle@redhat.com>
Tested-by: Kaleb Keithley <kkeithle@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167160
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 1 -
1 file changed, 1 deletion(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index d7dfb87..8302c5e 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -791,7 +791,6 @@ enable_pacemaker()
{
while [[ ${1} ]]; do
if [ "${SERVICE_MAN}" == "/usr/bin/systemctl" ]; then
-${SECRET_PEM} root@${1} ${SERVICE_MAN} enable pacemaker"
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
else
--
1.8.3.1

44
SOURCES/0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch

@@ -0,0 +1,44 @@
From a917a989232d2c72752f8a2cf27bad90b5acb83d Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Tue, 27 Feb 2018 15:35:30 +0530
Subject: [PATCH 076/124] glusterd/ganesha : change voltype for ganesha.enable
in volume option table

The voltype defined for ganesha.enable is features/ganesha. But ganesha xlator
was removed from client stack long back. Now it is defined as part of glusterd.
So reflecting the same on the volume option table.

Label: DOWNSTREAM ONLY

Upstream reference :
>patch link https://review.gluster.org/19639
>Change-Id: Ifedd7493020b77bd54edfdbdd9c799d93b24d0aa
>BUG: 1486542
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>

Change-Id: Ifedd7493020b77bd54edfdbdd9c799d93b24d0aa
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167161
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 13f423a..c8f6e67 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2599,7 +2599,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
},
{
.key = "ganesha.enable",
- .voltype = "features/ganesha",
+ .voltype = "mgmt/ganesha",
.value = "off",
.option = "ganesha.enable",
.op_version = GD_OP_VERSION_3_7_0,
--
1.8.3.1

73
SOURCES/0077-glusterd-ganesha-create-remove-export-file-only-from.patch

@@ -0,0 +1,73 @@
From 1e619b95e3f03e226fef135bfaeeca9b069eb978 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 14 Mar 2018 12:01:30 +0530
Subject: [PATCH 077/124] glusterd/ganesha : create/remove export file only
from the node which performs ganesha.enable

As part of "volume set ganesha.enable on", the ganesha export configuration file will be created/removed
using "create-export-ganesha.sh". This is performed from the nodes which are part of the ganesha cluster.
But it is not needed, since the file is saved in shared storage and consumed by the nodes in the ganesha cluster.

Label: DOWNSTREAM ONLY

Upstream Reference :
>patch link : https://review.gluster.org/#/c/19716/
>Change-Id: I2583899972b47d451a90711940293004a3af4690
>BUG: 1555195
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>

Change-Id: I2583899972b47d451a90711940293004a3af4690
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167162
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 3 ++-
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index fe0bffc..ff36476 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -530,7 +530,7 @@ ganesha_manage_export(dict_t *dict, char *value,
* Create the export file from the node where ganesha.enable "on"
* is executed
* */
- if (option) {
+ if (option && is_origin_glusterd(dict)) {
ret = manage_export_config(volname, "on", op_errstr);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EXPORT_FILE_CREATE_FAIL,
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 52809a8..10e2d48 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2415,7 +2415,8 @@ glusterd_op_reset_volume(dict_t *dict, char **op_rspstr)
}
if (!strcmp(key, "ganesha.enable") || !strcmp(key, "all")) {
- if (glusterd_check_ganesha_export(volinfo)) {
+ if (glusterd_check_ganesha_export(volinfo) &&
+ is_origin_glusterd(dict)) {
ret = manage_export_config(volname, "off", op_rspstr);
if (ret)
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index de4eccb..1ea8ba6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2936,7 +2936,7 @@ glusterd_op_delete_volume(dict_t *dict)
goto out;
}
- if (glusterd_check_ganesha_export(volinfo)) {
+ if (glusterd_check_ganesha_export(volinfo) && is_origin_glusterd(dict)) {
ret = manage_export_config(volname, "off", NULL);
if (ret)
gf_msg(this->name, GF_LOG_WARNING, 0, 0,
--
1.8.3.1

40
SOURCES/0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch

@@ -0,0 +1,40 @@
From 5daff948884b1b68ffcbc6ceea3c7affdb9700f4 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 4 Apr 2018 09:29:43 +0530
Subject: [PATCH 078/124] common-ha/scripts : pass the list of servers properly
to stop_ganesha_all()

Label: DOWNSTREAM ONLY

Upstream Reference :
>Change-Id: I6d92623cd9fb450d7a27f5acc61eca0b3cbc9b08
>BUG: 1563500
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
>Patch link : https://review.gluster.org/#/c/19816/

Change-Id: I6d92623cd9fb450d7a27f5acc61eca0b3cbc9b08
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167163
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 8302c5e..4e5799f 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -199,7 +199,7 @@ setup_cluster()
if [ $? -ne 0 ]; then
logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
#set up failed stop all ganesha process and clean up symlinks in cluster
- stop_ganesha_all ${servers}
+ stop_ganesha_all "${servers}"
exit 1;
fi
pcs cluster start --all
--
1.8.3.1

93
SOURCES/0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch

@@ -0,0 +1,93 @@
From 7e71723a46237f13a570961054b361dc1b34ab25 Mon Sep 17 00:00:00 2001
From: Soumya Koduri <skoduri@redhat.com>
Date: Thu, 19 Jan 2017 15:01:12 +0530
Subject: [PATCH 079/124] common-ha: All statd related files need to be owned
by rpcuser

Statd service is started as rpcuser by default. Hence the
files/directories needed by it under '/var/lib/nfs' should be
owned by the same user.

Note: This change is not in mainline as the cluster-bits
are being moved to storehaug project -
http://review.gluster.org/#/c/16349/
http://review.gluster.org/#/c/16333/

Label: DOWNSTREAM ONLY

Upstream Reference :
> Change-Id: I89fd06aa9700c5ce60026ac825da7c154d9f48fd
> BUG: 1414665
> Signed-off-by: Soumya Koduri <skoduri@redhat.com>
> Reviewed-on: http://review.gluster.org/16433
> Reviewed-by: jiffin tony Thottan <jthottan@redhat.com>
> Smoke: Gluster Build System <jenkins@build.gluster.org>
> Tested-by: Kaleb KEITHLEY <kkeithle@redhat.com>
> NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
> CentOS-regression: Gluster Build System <jenkins@build.gluster.org>

Change-Id: I89fd06aa9700c5ce60026ac825da7c154d9f48fd
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167164
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 4e5799f..4a98f32 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -756,9 +756,11 @@ setup_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
@@ -768,9 +770,11 @@ setup_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
@@ -830,9 +834,11 @@ addnode_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
@@ -842,9 +848,11 @@ addnode_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
--
1.8.3.1

62
SOURCES/0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch

@@ -0,0 +1,62 @@
From c5c6720c5186741a3b01a5ba2b34633fc1a00fc5 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Mon, 30 Apr 2018 12:35:01 +0530
Subject: [PATCH 080/124] glusterd/ganesha : Skip non-ganesha nodes properly
for ganesha HA set up

Label: DOWNSTREAM ONLY

Upstream reference:
>Patch link https://review.gluster.org/#/c/19949/
>Change-Id: Iff7bc3ead43e97847219c5a5cc8b967bf0967903
>BUG: 1573078
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>

Change-Id: Iff7bc3ead43e97847219c5a5cc8b967bf0967903
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167165
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index ff36476..d882105 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -842,17 +842,20 @@ static int
pre_setup(gf_boolean_t run_setup, char **op_errstr)
{
int ret = 0;
-
- if (check_host_list()) {
- ret = setup_cluster(run_setup);
- if (ret == -1)
+ if (run_setup) {
+ if (!check_host_list()) {
gf_asprintf(op_errstr,
- "Failed to set up HA "
- "config for NFS-Ganesha. "
- "Please check the log file for details");
- } else
- ret = -1;
-
+ "Running nfs-ganesha setup command "
+ "from node which is not part of ganesha cluster");
+ return -1;
+ }
+ }
+ ret = setup_cluster(run_setup);
+ if (ret == -1)
+ gf_asprintf(op_errstr,
+ "Failed to set up HA "
+ "config for NFS-Ganesha. "
+ "Please check the log file for details");
return ret;
}
--
1.8.3.1

50
SOURCES/0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch

@@ -0,0 +1,50 @@
From 3cb9ed7e20f59eec036908eed4cfdbc61e990ee2 Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@redhat.com>
Date: Tue, 11 Dec 2018 10:09:42 -0500
Subject: [PATCH 081/124] ganesha-ha: ensure pacemaker is enabled after setup

There appears to be a race between `pcs cluster setup ...` early
in the setup and the `systemctl enable pacemaker` at the end. The
`pcs cluster setup ...` disables pacemaker and corosync. (Now, in
pacemaker-1.1.18. Was it always the case?)

I am not able to reproduce this on my devel system. I speculate that
on a busy system that the `pcs cluster setup ...` disable may, under
the right conditions, not run until after the setup script enables
it. It must require the right alignment of the Sun, Moon, and all
the planets.

Regardless, we'll use the --enable option to `pcs cluster setup ...`
to ensure that the cluster (re)starts pacemaker.

Label: DOWNSTREAM ONLY

Change-Id: I771ff62c37426438b80e61651a8b4ecaf2d549c3
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167166
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 4a98f32..32af1ca 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -195,9 +195,9 @@ setup_cluster()
pcs cluster auth ${servers}
# pcs cluster setup --name ${name} ${servers}
- pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --transport udpu ${servers}
+ pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --enable --transport udpu ${servers}
if [ $? -ne 0 ]; then
- logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
+ logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --enable --transport udpu ${servers} failed"
#set up failed stop all ganesha process and clean up symlinks in cluster
stop_ganesha_all "${servers}"
exit 1;
--
1.8.3.1

59
SOURCES/0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch

@@ -0,0 +1,59 @@
From 6d6841a996a52488e8a18606f386bba0a12b4231 Mon Sep 17 00:00:00 2001
From: Soumya Koduri <skoduri@redhat.com>
Date: Fri, 18 Nov 2016 12:47:06 +0530
Subject: [PATCH 082/124] build: Add dependency on netstat for
glusterfs-ganesha pkg

portblock resource-agent needs netstat command but this dependency
should have been ideally added to resource-agents package. But the
fixes (bug1395594, bug1395596) are going to be available only
in the future RHEL 6.9 and RHEL 7.4 releases. Hence as an interim
workaround, we agreed to add this dependency for glusterfs-ganesha package.

label : DOWNSTREAM ONLY

Change-Id: I6ac1003103755d7534dd079c821bbaacd8dd94b8
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167167
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 14 ++++++++++++++
1 file changed, 14 insertions(+)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index e55e255..bc27058 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -466,6 +466,12 @@ Requires: pcs, dbus
Requires: cman, pacemaker, corosync
%endif
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
+# we need portblock resource-agent in 3.9.5 and later.
+Requires: resource-agents >= 3.9.5
+Requires: net-tools
+%endif
+
%if ( 0%{?fedora} && 0%{?fedora} > 25 )
Requires: selinux-policy >= 3.13.1-160
Requires(post): policycoreutils-python-utils
@@ -1951,6 +1957,14 @@ fi
%endif
%changelog
+* Sun Apr 7 2019 Soumya Koduri <skoduri@redhat.com>
+- As an interim fix add dependency on netstat(/net-tools) for glusterfs-ganesha package (#1395574)
+
+* Sun Apr 7 2019 Soumya Koduri <skoduri@redhat.com>
+- Add dependency on portblock resource agent for ganesha package (#1278336)
+- Fix incorrect Requires for portblock resource agent (#1278336)
+- Update version checks for portblock resource agent on RHEL (#1278336)
+
* Sat Apr 6 2019 Jiffin Tony Thottan <jthottan@redhat.com>
- Adding ganesha ha resources back in gluster repository
--
1.8.3.1

82
SOURCES/0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch

@@ -0,0 +1,82 @@
From a80743a3053798521ae4dd830adcde8bc7da11b6 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Tue, 20 Feb 2018 11:50:33 +0530
Subject: [PATCH 083/124] common-ha: enable and disable selinux
ganesha_use_fusefs

Adding missing changes in a downstream backport(https://code.engineering.redhat.com/gerrit/#/c/109845/)

Label: DOWNSTREAM ONLY

Change-Id: I59fd2fc2228ded9547c2d1e08c22f7a10c35f86f
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167244
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-by: Kaleb Keithley <kkeithle@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index bc27058..2149f86 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -472,11 +472,16 @@ Requires: resource-agents >= 3.9.5
Requires: net-tools
%endif
-%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%if ( 0%{?rhel} )
Requires: selinux-policy >= 3.13.1-160
+Requires(post): policycoreutils-python
+Requires(postun): policycoreutils-python
+%else
Requires(post): policycoreutils-python-utils
Requires(postun): policycoreutils-python-utils
%endif
+%endif
%description ganesha
GlusterFS is a distributed file-system capable of scaling to several
@@ -936,7 +941,7 @@ exit 0
%endif
%if ( 0%{!?_without_server:1} )
-%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
semanage boolean -m ganesha_use_fusefs --on
exit 0
@@ -1076,7 +1081,7 @@ exit 0
%endif
%if ( 0%{!?_without_server:1} )
-%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
semanage boolean -m ganesha_use_fusefs --off
exit 0
@@ -1087,7 +1092,7 @@ exit 0
## All %%trigger should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
-%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%trigger ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --on
exit 0
@@ -1098,7 +1103,7 @@ exit 0
## All %%triggerun should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
-%if ( 0%{?fedora} && 0%{?fedora} > 25 )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%triggerun ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --off
exit 0
--
1.8.3.1

37
SOURCES/0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch

@@ -0,0 +1,37 @@
From d7bee4a4ad0878003e19711e20994c42c4d2bd9e Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 9 Apr 2019 16:15:09 +0530
Subject: [PATCH 084/124] glusterd: Fix duplicate client_op_version in info
file

This must have been introduced while applying downstream only patches at
RHGS 3.5.0 branch.

Change-Id: I231249cca2a7bce29ef53cf95f9d2377b8203283
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167341
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-store.c | 5 -----
1 file changed, 5 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index fb52957..351bd9e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -1022,11 +1022,6 @@ glusterd_volume_exclude_options_write(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->client_op_version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
- buf);
- if (ret)
- goto out;
if (volinfo->caps) {
snprintf(buf, sizeof(buf), "%d", volinfo->caps);
ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_CAPS, buf);
--
1.8.3.1

8976
SOURCES/0085-Revert-all-remove-code-which-is-not-being-considered.patch

File diff suppressed because it is too large Load Diff

3194
SOURCES/0086-Revert-tiering-remove-the-translator-from-build-and-.patch

File diff suppressed because it is too large Load Diff

89
SOURCES/0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch

@@ -0,0 +1,89 @@
From 144f2eb56d1bbecc9c455065755f41ec81974e3e Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Sun, 7 Apr 2019 21:54:07 +0530
Subject: [PATCH 087/124] ganesha : fixing minor issues after the backport from
3.4

label : DOWNSTREAM ONLY

Change-Id: Ib0f6d8728d2e33da63ed4baab0bb981a0b06a8e0
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167168
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Kaleb Keithley <kkeithle@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 20 +++++++++++++++++---
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +-
2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index e0607ba..f6b823d 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -883,6 +883,15 @@ sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sha
install -D -p -m 0644 extras/glusterfs-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
+# ganesha ghosts
+%if ( 0%{!?_without_server:1} )
+mkdir -p %{buildroot}%{_sysconfdir}/ganesha
+touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
+mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
+touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
+touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
@@ -1197,7 +1206,7 @@ exit 0
%if ( 0%{?_without_server:1} )
#exclude ganesha related files
-%exclude %{_sysconfdir}/ganesha/*
+%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%exclude %{_libexecdir}/ganesha/*
%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
%endif
@@ -1376,9 +1385,15 @@ exit 0
%if ( 0%{!?_without_server:1} )
%files ganesha
-%{_sysconfdir}/ganesha/*
+%dir %{_libexecdir}/ganesha
+%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%{_libexecdir}/ganesha/*
%{_prefix}/lib/ocf/resource.d/heartbeat/*
+%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
+%ghost %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
+%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
+%ghost %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
+%ghost %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif
%if ( 0%{!?_without_ocf:1} )
@@ -1508,7 +1523,6 @@ exit 0
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
- %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index d882105..0a16925 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -278,7 +278,7 @@ glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr)
goto out;
}
- if (strcmp(value, "enable")) {
+ if (strcmp(value, "enable") == 0) {
ret = start_ganesha(op_errstr);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NFS_GNS_START_FAIL,
--
1.8.3.1

74
SOURCES/0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch

@@ -0,0 +1,74 @@
From bbcfd7e28b43845bac675dcc486bde09b0953f64 Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Thu, 11 Apr 2019 14:40:11 +0530
Subject: [PATCH 088/124] tier: fix failures noticed during tier start and tier
restart.

Problem 1: when tier is started using the tier start command,
the out put was skipped during a failure. failures don't have an
transaction id. this id was checked and if its missing then
it skips.

fix: had to remove the unnecessary jump for that case.

problem 2: When tier was restarted, the tierd doesn't come online.
This was because, there were a certain values that were supposed
to be stored in glusterd (gluster-store.c) which will be used
during restart to come to the original state.
These values were stored. as they were missing, tierd didn't come
online.

fix: store the value and make it available during the start.

Label: DOWNSTREAM ONLY

Change-Id: I7df898fa4c3b72fe8ded4adbf573307a59a37e5e
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167653
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
cli/src/cli-rpc-ops.c | 1 -
xlators/mgmt/glusterd/src/glusterd-store.c | 13 +++++++++++++
2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 736cd18..b167e26 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1973,7 +1973,6 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (ret) {
gf_log("cli", GF_LOG_WARNING, "failed to get %s from dict",
GF_REBALANCE_TID_KEY);
- goto out;
}
if (rsp.op_ret && strcmp(rsp.op_errstr, "")) {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 351bd9e..4889217 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -3336,6 +3336,19 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
break;
case GF_CLUSTER_TYPE_TIER:
+ if (volinfo->tier_info.cold_type ==
+ GF_CLUSTER_TYPE_DISPERSE)
+ volinfo->tier_info.cold_dist_leaf_count
+ = volinfo->disperse_count;
+ else
+ volinfo->tier_info.cold_dist_leaf_count
+ = glusterd_calc_dist_leaf_count (
+ volinfo->tier_info.
+ cold_replica_count,
+ 1);
+
+ break;
+
case GF_CLUSTER_TYPE_STRIPE:
case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
gf_msg(this->name, GF_LOG_CRITICAL, ENOTSUP,
--
1.8.3.1

85
SOURCES/0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch

@@ -0,0 +1,85 @@
From 8ba7e04362019ea2d0e80e67eb214d53dca58774 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Thu, 17 Nov 2016 12:44:38 +0530
Subject: [PATCH 089/124] glusterd/gNFS : On post upgrade to 3.2, disable gNFS
for all volumes

Currently on 3.2 gNFS is disabled for newly created volumes or old volumes
with default value. There will be volumes which have explicitly turned off
the nfs.disable option. This change disables gNFS even for those volumes as well.

label : DOWNSTREAM ONLY

Change-Id: I4ddeb23690271034b0bbb3fc50b359350b5eae87
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/90425
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167573
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 43 ++++++++++++++++++------------
1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 10e2d48..06ea8cf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2501,26 +2501,35 @@ glusterd_update_volumes_dict(glusterd_volinfo_t *volinfo)
GF_VALIDATE_OR_GOTO(this->name, conf, out);
/* 3.9.0 onwards gNFS will be disabled by default. In case of an upgrade
- * from anything below than 3.9.0 to 3.9.x the volume's dictionary will
- * not have 'nfs.disable' key set which means the same will not be set
- * to on until explicitly done. setnfs.disable to 'on' at op-version
- * bump up flow is the ideal way here. The same is also applicable for
- * transport.address-family where if the transport type is set to tcp
- * then transport.address-family is defaulted to 'inet'.
+ * from anything below than 3.9.0 to 3.9.x, the value for nfs.disable is
+ * set to 'on' for all volumes even if it is explicitly set to 'off' in
+ * previous version. This change is only applicable to downstream code.
+ * Setting nfs.disable to 'on' at op-version bump up flow is the ideal
+ * way here. The same is also applicable for transport.address-family
+ * where if the transport type is set to tcp then transport.address-family
+ * is defaulted to 'inet'.
*/
if (conf->op_version >= GD_OP_VERSION_3_9_0) {
- if (dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 1)) {
- ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
- "on");
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "Failed to set "
- "option ' NFS_DISABLE_MAP_KEY ' on "
- "volume %s",
- volinfo->volname);
- goto out;
- }
+ if (!(dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 0))) {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "Gluster NFS is"
+ " being deprecated in favor of NFS-Ganesha, "
+ "hence setting nfs.disable to 'on' for volume "
+ "%s. Please re-enable it if requires",
+ volinfo->volname);
+ }
+
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
+ "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "option ' NFS_DISABLE_MAP_KEY ' on "
+ "volume %s",
+ volinfo->volname);
+ goto out;
}
+
ret = dict_get_strn(volinfo->dict, "transport.address-family",
SLEN("transport.address-family"),
&address_family_str);
--
1.8.3.1

307
SOURCES/0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch

@@ -0,0 +1,307 @@
From 19210e4fc551893d1545e719fa26d9ad2d2f5cba Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Mon, 13 Nov 2017 18:41:58 +0530
Subject: [PATCH 090/124] Revert "build: conditionally build legacy gNFS server
and associated sub-packaging"

This reverts commit 83abcba6b42f94eb5a6495a634d4055362a9d79d.

label : DOWNSTREAM ONLY

Change-Id: If1c02d80b746e0a5b5e2c9a3625909158eff55d5
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167575
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-by: Kaleb Keithley <kkeithle@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
configure.ac | 13 -------
extras/LinuxRPM/Makefile.am | 4 +--
glusterfs.spec.in | 54 ++++++----------------------
xlators/Makefile.am | 5 +--
xlators/mgmt/glusterd/src/glusterd-nfs-svc.c | 27 ++++++--------
5 files changed, 24 insertions(+), 79 deletions(-)

diff --git a/configure.ac b/configure.ac
index 633e850..521671b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1359,18 +1359,6 @@ if test -n "$LIBAIO"; then
BUILD_LIBAIO=yes
fi
-dnl gnfs section
-BUILD_GNFS="no"
-AC_ARG_ENABLE([gnfs],
- AC_HELP_STRING([--enable-gnfs],
- [Enable legacy gnfs server xlator.]))
-if test "x${with_server}" = "xyes" -a "x$enable_gnfs" = "xyes"; then
- BUILD_GNFS="yes"
-fi
-AM_CONDITIONAL([BUILD_GNFS], [test x$BUILD_GNFS = xyes])
-AC_SUBST(BUILD_GNFS)
-dnl end gnfs section
-
dnl Check for userspace-rcu
PKG_CHECK_MODULES([URCU], [liburcu-bp], [],
[AC_CHECK_HEADERS([urcu-bp.h],
@@ -1624,7 +1612,6 @@ echo "EC dynamic support : $EC_DYNAMIC_SUPPORT"
echo "Use memory pools : $USE_MEMPOOL"
echo "Nanosecond m/atimes : $BUILD_NANOSECOND_TIMESTAMPS"
echo "Server components : $with_server"
-echo "Legacy gNFS server : $BUILD_GNFS"
echo "IPV6 default : $with_ipv6_default"
echo "Use TIRPC : $with_libtirpc"
echo "With Python : ${PYTHON_VERSION}"
diff --git a/extras/LinuxRPM/Makefile.am b/extras/LinuxRPM/Makefile.am
index f028537..61fd6da 100644
--- a/extras/LinuxRPM/Makefile.am
+++ b/extras/LinuxRPM/Makefile.am
@@ -18,7 +18,7 @@ autogen:
cd ../.. && \
rm -rf autom4te.cache && \
./autogen.sh && \
- ./configure --enable-gnfs --with-previous-options
+ ./configure --with-previous-options
prep:
$(MAKE) -C ../.. dist;
@@ -36,7 +36,7 @@ srcrpm:
mv rpmbuild/SRPMS/* .
rpms:
- rpmbuild --define '_topdir $(shell pwd)/rpmbuild' --with gnfs -bb rpmbuild/SPECS/glusterfs.spec
+ rpmbuild --define '_topdir $(shell pwd)/rpmbuild' -bb rpmbuild/SPECS/glusterfs.spec
mv rpmbuild/RPMS/*/* .
# EPEL-5 does not like new versions of rpmbuild and requires some
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index f6b823d..cb17eaa 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -52,11 +52,6 @@
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}
-# gnfs
-# if you wish to compile an rpm with the legacy gNFS server xlator
-# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with gnfs
-%{?_with_gnfs:%global _with_gnfs --enable-gnfs}
-
# ipv6default
# if you wish to compile an rpm with IPv6 default...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with ipv6default
@@ -153,7 +148,6 @@
%if 0%{?_without_server:1}
%global _without_events --disable-events
%global _without_georeplication --disable-georeplication
-%global _with_gnfs %{nil}
%global _without_tiering --disable-tiering
%global _without_ocf --without-ocf
%endif
@@ -525,25 +519,6 @@ is in userspace and easily manageable.
This package provides support to geo-replication.
%endif
-%if ( 0%{?_with_gnfs:1} )
-%package gnfs
-Summary: GlusterFS gNFS server
-Requires: %{name}%{?_isa} = %{version}-%{release}
-Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
-Requires: nfs-utils
-
-%description gnfs
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the glusterfs legacy gNFS server xlator
-%endif
-
%package libs
Summary: GlusterFS common libraries
@@ -659,6 +634,7 @@ Requires: %{name}-api%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server
Requires: lvm2
+Requires: nfs-utils
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%else
@@ -789,7 +765,6 @@ export LDFLAGS
%{?_with_cmocka} \
%{?_with_debug} \
%{?_with_firewalld} \
- %{?_with_gnfs} \
%{?_with_tmpfilesdir} \
%{?_with_tsan} \
%{?_with_valgrind} \
@@ -1286,17 +1261,6 @@ exit 0
%{_bindir}/fusermount-glusterfs
%endif
-%if ( 0%{?_with_gnfs:1} && 0%{!?_without_server:1} )
-%files gnfs
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs
- %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs/server.so
-%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
-%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
-%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
-%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
-%endif
-
%files thin-arbiter
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
@@ -1409,11 +1373,6 @@ exit 0
%config(noreplace) %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
-%exclude %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
-%exclude %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
-%if ( 0%{?_with_gnfs:1} )
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs/*
-%endif
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
@@ -1461,6 +1420,7 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
+ %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
@@ -1477,6 +1437,7 @@ exit 0
# /var/lib/glusterd, e.g. hookscripts, etc.
%ghost %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
+%ghost %attr(0600,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/options
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
@@ -1529,7 +1490,11 @@ exit 0
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
-%config(noreplace) %ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
+%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
+%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
+%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
+%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
@@ -1995,6 +1960,9 @@ fi
%endif
%changelog
+* Sun Apr 7 2019 Jiffin Tony Thottan <jthottan@redhat.com>
+- DOWNSTREAM ONLY - revert of 83abcb(gnfs in an optional subpackage)
+
* Sun Apr 7 2019 Soumya Koduri <skoduri@redhat.com>
- As an interim fix add dependency on netstat(/net-tools) for glusterfs-ganesha package (#1395574)
diff --git a/xlators/Makefile.am b/xlators/Makefile.am
index ef20cbb..be54eb3 100644
--- a/xlators/Makefile.am
+++ b/xlators/Makefile.am
@@ -1,12 +1,9 @@
-if BUILD_GNFS
- GNFS_DIR = nfs
-endif
DIST_SUBDIRS = cluster storage protocol performance debug features \
mount nfs mgmt system playground meta
SUBDIRS = cluster storage protocol performance debug features \
- mount ${GNFS_DIR} mgmt system playground meta
+ mount nfs mgmt system playground meta
EXTRA_DIST = xlator.sym
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
index 36e9052..3960031 100644
--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
@@ -18,6 +18,8 @@
#include "glusterd-messages.h"
#include "glusterd-svc-helper.h"
+static char *nfs_svc_name = "nfs";
+
static gf_boolean_t
glusterd_nfssvc_need_start()
{
@@ -41,6 +43,12 @@ glusterd_nfssvc_need_start()
return start;
}
+int
+glusterd_nfssvc_init(glusterd_svc_t *svc)
+{
+ return glusterd_svc_init(svc, nfs_svc_name);
+}
+
static int
glusterd_nfssvc_create_volfile()
{
@@ -49,7 +57,7 @@ glusterd_nfssvc_create_volfile()
};
glusterd_conf_t *conf = THIS->private;
- glusterd_svc_build_volfile_path(conf->nfs_svc.name, conf->workdir, filepath,
+ glusterd_svc_build_volfile_path(nfs_svc_name, conf->workdir, filepath,
sizeof(filepath));
return glusterd_create_global_volfile(build_nfs_graph, filepath, NULL);
}
@@ -60,7 +68,7 @@ glusterd_nfssvc_manager(glusterd_svc_t *svc, void *data, int flags)
int ret = -1;
if (!svc->inited) {
- ret = glusterd_svc_init(svc, "nfs");
+ ret = glusterd_nfssvc_init(svc);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_NFSSVC,
"Failed to init nfs service");
@@ -75,13 +83,6 @@ glusterd_nfssvc_manager(glusterd_svc_t *svc, void *data, int flags)
if (ret)
goto out;
- /* not an error, or a (very) soft error at best */
- if (sys_access(XLATORDIR "/nfs/server.so", R_OK) != 0) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
- "nfs/server.so xlator is not installed");
- goto out;
- }
-
ret = glusterd_nfssvc_create_volfile();
if (ret)
goto out;
@@ -155,14 +156,6 @@ glusterd_nfssvc_reconfigure()
priv = this->private;
GF_VALIDATE_OR_GOTO(this->name, priv, out);
- /* not an error, or a (very) soft error at best */
- if (sys_access(XLATORDIR "/nfs/server.so", R_OK) != 0) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
- "nfs/server.so xlator is not installed");
- ret = 0;
- goto out;
- }
-
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
{
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
--
1.8.3.1

110
SOURCES/0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch

@ -0,0 +1,110 @@
From ca3a4ebeddfef8c6909ff5388787a91ee52fd675 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Thu, 15 Dec 2016 17:14:01 +0530
Subject: [PATCH 091/124] glusterd/gNFS : explicitly set "nfs.disable" to "off"
after 3.2 upgrade

Gluster NFS was enabled by default for all volumes till 3.1. But 3.2 onwards
for the new volumes it will be disabled by setting "nfs.disable" to "on".
This patch will take care of existing volumes in such a way that if the
option is not configured, it will set "nfs.disable" to "off" during op-version
bump up.

Also this patch removes the warning message while enabling gluster NFS for
a volume.

label : DOWNSTREAM ONLY

Change-Id: Ib199c3180204f917791b4627c58d846750d18a5a
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/93146
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167574
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-parser.c | 16 ---------------
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 31 ++++++++++++------------------
2 files changed, 12 insertions(+), 35 deletions(-)

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index f85958b..92ceb8e 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1678,22 +1678,6 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
goto out;
}
}
- if ((!strcmp(key, "nfs.disable")) && (!strcmp(value, "off"))) {
- question =
- "Gluster NFS is being deprecated in favor "
- "of NFS-Ganesha Enter \"yes\" to continue "
- "using Gluster NFS";
- answer = cli_cmd_get_confirmation(state, question);
- if (GF_ANSWER_NO == answer) {
- gf_log("cli", GF_LOG_ERROR,
- "Operation "
- "cancelled, exiting");
- *op_errstr = gf_strdup("Aborted by user.");
- ret = -1;
- goto out;
- }
- }
-
if ((strcmp(key, "cluster.brick-multiplex") == 0)) {
question =
"Brick-multiplexing is supported only for "
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 06ea8cf..df8a6ab 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2502,32 +2502,25 @@ glusterd_update_volumes_dict(glusterd_volinfo_t *volinfo)
/* 3.9.0 onwards gNFS will be disabled by default. In case of an upgrade
* from anything below than 3.9.0 to 3.9.x, the value for nfs.disable is
- * set to 'on' for all volumes even if it is explicitly set to 'off' in
+ * set to 'off' for all volumes even if it is explicitly set to 'on' in
* previous version. This change is only applicable to downstream code.
- * Setting nfs.disable to 'on' at op-version bump up flow is the ideal
+ * Setting nfs.disable to 'off' at op-version bump up flow is the ideal
* way here. The same is also applicable for transport.address-family
* where if the transport type is set to tcp then transport.address-family
* is defaulted to 'inet'.
*/
if (conf->op_version >= GD_OP_VERSION_3_9_0) {
if (!(dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 0))) {
- gf_msg(this->name, GF_LOG_INFO, 0, 0,
- "Gluster NFS is"
- " being deprecated in favor of NFS-Ganesha, "
- "hence setting nfs.disable to 'on' for volume "
- "%s. Please re-enable it if requires",
- volinfo->volname);
- }
-
- ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
- "on");
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "Failed to set "
- "option ' NFS_DISABLE_MAP_KEY ' on "
- "volume %s",
- volinfo->volname);
- goto out;
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
+ "off");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "option ' NFS_DISABLE_MAP_KEY ' off "
+ "volume %s",
+ volinfo->volname);
+ goto out;
+ }
}
ret = dict_get_strn(volinfo->dict, "transport.address-family",
--
1.8.3.1

41
SOURCES/0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch

@ -0,0 +1,41 @@
From 82d7c8e057b9e22d13ca89f2a75e65a42878b7c3 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 2 Apr 2019 10:45:15 +0530
Subject: [PATCH 092/124] logging: Fix GF_LOG_OCCASSIONALLY API

GF_LOG_OCCASSIONALLY doesn't log on the first instance rather at every
42nd iteration, which isn't effective as in some cases we might not have
the code flow hitting the same log for as many as 42 times and we'd end
up suppressing the log.

>upstream fix : https://review.gluster.org/#/c/glusterfs/+/22475/
>Fixes: bz#1694925
>Change-Id: Iee293281d25a652b64df111d59b13de4efce06fa
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>

BUG: 1691620
Change-Id: Iee293281d25a652b64df111d59b13de4efce06fa
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167822
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/logging.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libglusterfs/src/glusterfs/logging.h b/libglusterfs/src/glusterfs/logging.h
index c81e19b..3655b1d 100644
--- a/libglusterfs/src/glusterfs/logging.h
+++ b/libglusterfs/src/glusterfs/logging.h
@@ -300,7 +300,7 @@ _gf_log_eh(const char *function, const char *fmt, ...)
/* Log once in GF_UNIVERSAL_ANSWER times */
#define GF_LOG_OCCASIONALLY(var, args...) \
- if (!(var++ % GF_UNIVERSAL_ANSWER)) { \
+ if (var++ == 0 || !((var - 1) % GF_UNIVERSAL_ANSWER)) { \
gf_log(args); \
}
--
1.8.3.1

106
SOURCES/0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch

@ -0,0 +1,106 @@
From 713f55b4a5cc582d06a10a1c9a0cdf71a4636a10 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 15 Apr 2019 07:57:40 +0530
Subject: [PATCH 093/124] glusterd: Change op-version of cache-invalidation in
quick-read

In upstream cache-invalidation option in quick read was introduced with
4.0. There are two problems with it:

1. The key being made duplicate to md-cache xlator, so setting the same
option actually enables this feature on both md-cache and quick-read.
2. As the op-version tagged to this key was GD_OP_VERSION_4_0_0, with a RHGS
3.5 cluster when a md-cache profile is set to a particular volume old
client fails to mount which is wrong.

Solving 1 without breaking backward compatibility in upstream is quite hard.
This patch addresses both the problems in downstream by (a) changing the
op-version to GD_OP_VERSION_6_0_0 and (b) renaming this key to
quick-read-cache-invalidation. The fix is currently made downstream only till a
proper solution is identified in upstream.

Label: DOWNSTREAM ONLY
BUG: 1697820
Change-Id: I1251424576d6ebbdb2a761400fd20f0aff0c80a2
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167823
Reviewed-by: Amar Tumballi Suryanarayan <amarts@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/basic/quick-read-with-upcall.t | 1 +
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 6 +++---
xlators/performance/quick-read/src/quick-read.c | 11 ++++++-----
3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/tests/basic/quick-read-with-upcall.t b/tests/basic/quick-read-with-upcall.t
index 318e93a..647dacf 100644
--- a/tests/basic/quick-read-with-upcall.t
+++ b/tests/basic/quick-read-with-upcall.t
@@ -58,6 +58,7 @@ EXPECT "$D0" cat $M1/test1.txt
sleep 60
EXPECT "$D1" cat $M1/test1.txt
+TEST $CLI volume set $V0 performance.quick-read-cache-invalidation on
TEST $CLI volume set $V0 performance.cache-invalidation on
TEST write_to "$M0/test2.txt" "$D0"
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index a877805..42ca9bb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1690,10 +1690,10 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.option = "cache-timeout",
.op_version = 1,
.flags = VOLOPT_FLAG_CLIENT_OPT},
- {.key = "performance.cache-invalidation",
+ {.key = "performance.quick-read-cache-invalidation",
.voltype = "performance/quick-read",
- .option = "cache-invalidation",
- .op_version = GD_OP_VERSION_4_0_0,
+ .option = "quick-read-cache-invalidation",
+ .op_version = GD_OP_VERSION_6_0,
.flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = "performance.ctime-invalidation",
.voltype = "performance/quick-read",
diff --git a/xlators/performance/quick-read/src/quick-read.c b/xlators/performance/quick-read/src/quick-read.c
index 244e8c8..59553c0 100644
--- a/xlators/performance/quick-read/src/quick-read.c
+++ b/xlators/performance/quick-read/src/quick-read.c
@@ -1218,8 +1218,8 @@ qr_reconfigure(xlator_t *this, dict_t *options)
GF_OPTION_RECONF("cache-timeout", conf->cache_timeout, options, int32, out);
- GF_OPTION_RECONF("cache-invalidation", conf->qr_invalidation, options, bool,
- out);
+ GF_OPTION_RECONF("quick-read-cache-invalidation", conf->qr_invalidation,
+ options, bool, out);
GF_OPTION_RECONF("ctime-invalidation", conf->ctime_invalidation, options,
bool, out);
@@ -1369,7 +1369,8 @@ qr_init(xlator_t *this)
GF_OPTION_INIT("cache-timeout", conf->cache_timeout, int32, out);
- GF_OPTION_INIT("cache-invalidation", conf->qr_invalidation, bool, out);
+ GF_OPTION_INIT("quick-read-cache-invalidation", conf->qr_invalidation, bool,
+ out);
GF_OPTION_INIT("cache-size", conf->cache_size, size_uint64, out);
if (!check_cache_size_ok(this, conf->cache_size)) {
@@ -1615,10 +1616,10 @@ struct volume_options qr_options[] = {
.flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
},
{
- .key = {"cache-invalidation"},
+ .key = {"quick-read-cache-invalidation"},
.type = GF_OPTION_TYPE_BOOL,
.default_value = "false",
- .op_version = {GD_OP_VERSION_4_0_0},
+ .op_version = {GD_OP_VERSION_6_0},
.flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
.description = "When \"on\", invalidates/updates the metadata cache,"
" on receiving the cache-invalidation notifications",
--
1.8.3.1

45
SOURCES/0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch

@ -0,0 +1,45 @@
From dab37dc78d21762ac3379ad505f8fc4ec996d0f7 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 9 Apr 2019 14:58:29 +0530
Subject: [PATCH 094/124] glusterd: load ctime in the client graph only if it's
not turned off

Considering ctime is a client side feature, we can't blindly load ctime
xlator into the client graph if it's explicitly turned off, that'd
result into backward compatibility issue where an old client can't mount
a volume configured on a server which is having ctime feature.

> Upstream patch : https://review.gluster.org/#/c/glusterfs/+/22536/
>Fixes: bz#1697907
>Change-Id: I6ae7b96d056073aa6746de9a449cf319786d45cc
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>

BUG: 1697820
Change-Id: I6ae7b96d056073aa6746de9a449cf319786d45cc
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167815
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-by: Amar Tumballi Suryanarayan <amarts@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volgen.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index ed24858..012f38e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -4358,7 +4358,8 @@ client_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
}
}
- if (conf->op_version >= GD_OP_VERSION_5_0) {
+ if (conf->op_version >= GD_OP_VERSION_5_0 &&
+ !dict_get_str_boolean(set_dict, "features.ctime", _gf_false)) {
xl = volgen_graph_add(graph, "features/utime", volname);
if (!xl) {
ret = -1;
--
1.8.3.1

204
SOURCES/0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch

@ -0,0 +1,204 @@
From cca418b78ec976aa69eacd56b0e6127ea7e3dd26 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Thu, 4 Apr 2019 15:31:56 +0530
Subject: [PATCH 095/124] cluster/afr: Remove local from owners_list on failure
of lock-acquisition

Backport of https://review.gluster.org/c/glusterfs/+/22515

When eager-lock lock acquisition fails because of say network failures, the
local is not being removed from owners_list, this leads to accumulation of
waiting frames and the application will hang because the waiting frames are
under the assumption that another transaction is in the process of acquiring
lock because owner-list is not empty. Handled this case as well in this patch.
Added asserts to make it easier to find these problems in future.

Change-Id: I3101393265e9827755725b1f2d94a93d8709e923
fixes: bz#1688395
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167859
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/replicate/bug-1696599-io-hang.t | 47 ++++++++++++++++++++++++++++++
xlators/cluster/afr/src/afr-common.c | 8 ++---
xlators/cluster/afr/src/afr-lk-common.c | 1 -
xlators/cluster/afr/src/afr-transaction.c | 19 +++++-------
xlators/cluster/afr/src/afr.h | 4 +--
5 files changed, 61 insertions(+), 18 deletions(-)
create mode 100755 tests/bugs/replicate/bug-1696599-io-hang.t

diff --git a/tests/bugs/replicate/bug-1696599-io-hang.t b/tests/bugs/replicate/bug-1696599-io-hang.t
new file mode 100755
index 0000000..869cdb9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1696599-io-hang.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+#Tests that local structures in afr are removed from granted/blocked list of
+#locks when inodelk fails on all bricks
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+#Trigger write and stop bricks so inodelks fail on all bricks leading to
+#lock failure condition
+echo abc >> $M0/file &
+
+TEST $CLI volume stop $V0
+TEST $CLI volume reset $V0 delay-gen
+wait
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+#Test that only one write succeeded, this tests that delay-gen worked as
+#expected
+echo abc >> $M0/file
+EXPECT "abc" cat $M0/file
+
+cleanup;
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 45b96e3..47a5d3a 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -5763,6 +5763,10 @@ afr_transaction_local_init(afr_local_t *local, xlator_t *this)
afr_private_t *priv = NULL;
priv = this->private;
+ INIT_LIST_HEAD(&local->transaction.wait_list);
+ INIT_LIST_HEAD(&local->transaction.owner_list);
+ INIT_LIST_HEAD(&local->ta_waitq);
+ INIT_LIST_HEAD(&local->ta_onwireq);
ret = afr_internal_lock_init(&local->internal_lock, priv->child_count);
if (ret < 0)
goto out;
@@ -5800,10 +5804,6 @@ afr_transaction_local_init(afr_local_t *local, xlator_t *this)
goto out;
ret = 0;
- INIT_LIST_HEAD(&local->transaction.wait_list);
- INIT_LIST_HEAD(&local->transaction.owner_list);
- INIT_LIST_HEAD(&local->ta_waitq);
- INIT_LIST_HEAD(&local->ta_onwireq);
out:
return ret;
}
diff --git a/xlators/cluster/afr/src/afr-lk-common.c b/xlators/cluster/afr/src/afr-lk-common.c
index 4091671..bc8eabe 100644
--- a/xlators/cluster/afr/src/afr-lk-common.c
+++ b/xlators/cluster/afr/src/afr-lk-common.c
@@ -397,7 +397,6 @@ afr_unlock_now(call_frame_t *frame, xlator_t *this)
int_lock->lk_call_count = call_count;
if (!call_count) {
- GF_ASSERT(!local->transaction.do_eager_unlock);
gf_msg_trace(this->name, 0, "No internal locks unlocked");
int_lock->lock_cbk(frame, this);
goto out;
diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
index 229820b..15f3a7e 100644
--- a/xlators/cluster/afr/src/afr-transaction.c
+++ b/xlators/cluster/afr/src/afr-transaction.c
@@ -372,6 +372,8 @@ afr_transaction_done(call_frame_t *frame, xlator_t *this)
}
local->transaction.unwind(frame, this);
+ GF_ASSERT(list_empty(&local->transaction.owner_list));
+ GF_ASSERT(list_empty(&local->transaction.wait_list));
AFR_STACK_DESTROY(frame);
return 0;
@@ -393,7 +395,7 @@ afr_lock_fail_shared(afr_local_t *local, struct list_head *list)
}
static void
-afr_handle_lock_acquire_failure(afr_local_t *local, gf_boolean_t locked)
+afr_handle_lock_acquire_failure(afr_local_t *local)
{
struct list_head shared;
afr_lock_t *lock = NULL;
@@ -414,13 +416,8 @@ afr_handle_lock_acquire_failure(afr_local_t *local, gf_boolean_t locked)
afr_lock_fail_shared(local, &shared);
local->transaction.do_eager_unlock = _gf_true;
out:
- if (locked) {
- local->internal_lock.lock_cbk = afr_transaction_done;
- afr_unlock(local->transaction.frame, local->transaction.frame->this);
- } else {
- afr_transaction_done(local->transaction.frame,
- local->transaction.frame->this);
- }
+ local->internal_lock.lock_cbk = afr_transaction_done;
+ afr_unlock(local->transaction.frame, local->transaction.frame->this);
}
call_frame_t *
@@ -619,7 +616,7 @@ afr_transaction_perform_fop(call_frame_t *frame, xlator_t *this)
failure_count = AFR_COUNT(local->transaction.failed_subvols,
priv->child_count);
if (failure_count == priv->child_count) {
- afr_handle_lock_acquire_failure(local, _gf_true);
+ afr_handle_lock_acquire_failure(local);
return 0;
} else {
lock = &local->inode_ctx->lock[local->transaction.type];
@@ -2092,7 +2089,7 @@ err:
local->op_ret = -1;
local->op_errno = op_errno;
- afr_handle_lock_acquire_failure(local, _gf_true);
+ afr_handle_lock_acquire_failure(local);
if (xdata_req)
dict_unref(xdata_req);
@@ -2361,7 +2358,7 @@ afr_internal_lock_finish(call_frame_t *frame, xlator_t *this)
} else {
lock = &local->inode_ctx->lock[local->transaction.type];
if (local->internal_lock.lock_op_ret < 0) {
- afr_handle_lock_acquire_failure(local, _gf_false);
+ afr_handle_lock_acquire_failure(local);
} else {
lock->event_generation = local->event_generation;
afr_changelog_pre_op(frame, this);
diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
index 2cc3797..e731cfa 100644
--- a/xlators/cluster/afr/src/afr.h
+++ b/xlators/cluster/afr/src/afr.h
@@ -1091,8 +1091,8 @@ afr_cleanup_fd_ctx(xlator_t *this, fd_t *fd);
#define AFR_FRAME_INIT(frame, op_errno) \
({ \
frame->local = mem_get0(THIS->local_pool); \
- if (afr_local_init(frame->local, THIS->private, &op_errno)) { \
- afr_local_cleanup(frame->local, THIS); \
+ if (afr_local_init(frame->local, frame->this->private, &op_errno)) { \
+ afr_local_cleanup(frame->local, frame->this); \
mem_put(frame->local); \
frame->local = NULL; \
}; \
--
1.8.3.1

94
SOURCES/0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch

@ -0,0 +1,94 @@
From 3bcffadcd77eebe6b4f7e5015ad41ec7c1d1ec3e Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawal@redhat.com>
Date: Thu, 11 Apr 2019 20:38:53 +0530
Subject: [PATCH 096/124] core: Brick is not able to detach successfully in
brick_mux environment

Problem: In brick_mux environment, while volumes are stopped in a
loop bricks are not detached successfully. Brick's are not
detached because xprtrefcnt has not become 0 for detached brick.
At the time of initiating brick detach process server_notify
saves xprtrefcnt on detach brick and once counter has become
0 then server_rpc_notify spawn a server_graph_janitor_threads
for cleanup brick resources.xprtrefcnt has not become 0 because
socket framework is not working due to assigning 0 as a fd for socket.
In commit dc25d2c1eeace91669052e3cecc083896e7329b2
there was a change in changelog fini to close htime_fd if htime_fd is not
negative, by default htime_fd is 0 so it close 0 also.

Solution: Initialize htime_fd to -1 after just allocate changelog_priv
by GF_CALLOC

> Fixes: bz#1699025
> Change-Id: I5f7ca62a0eb1c0510c3e9b880d6ab8af8d736a25
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (Cherry picked from commit b777d83001d8006420b6c7d2d88fe68950aa7e00)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22549/

Fixes: bz#1698919
Change-Id: Ib5b74aa0818235625f8aac7c23d4daa599da3fd1
Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167826
Tested-by: Mohit Agrawal <moagrawa@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../bug-1699025-brick-mux-detach-brick-fd-issue.t | 33 ++++++++++++++++++++++
xlators/features/changelog/src/changelog.c | 1 +
2 files changed, 34 insertions(+)
create mode 100644 tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t

diff --git a/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t b/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t
new file mode 100644
index 0000000..1acbaa8
--- /dev/null
+++ b/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume stop $V1
+# At the time initialize brick daemon it always keeps open
+# standard fd's (0, 1 , 2) so after stop 1 volume fd's should
+# be open
+nofds=$(ls -lrth /proc/`pgrep glusterfsd`/fd | grep dev/null | wc -l)
+TEST [ $((nofds)) -eq 3 ]
+
+cleanup
diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c
index 1f22a97..d9025f3 100644
--- a/xlators/features/changelog/src/changelog.c
+++ b/xlators/features/changelog/src/changelog.c
@@ -2740,6 +2740,7 @@ init(xlator_t *this)
GF_ATOMIC_INIT(priv->clntcnt, 0);
GF_ATOMIC_INIT(priv->xprtcnt, 0);
INIT_LIST_HEAD(&priv->xprt_list);
+ priv->htime_fd = -1;
ret = changelog_init_options(this, priv);
if (ret)
--
1.8.3.1

61
SOURCES/0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch

@ -0,0 +1,61 @@
From 302f3f87c9aa00c17ec3b49a81c8a4441d2bdf5f Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Mon, 15 Apr 2019 10:01:40 +0530
Subject: [PATCH 097/124] glusterd/tier: while doing an attach tier, the self
heal daemon is not getting started

Problem: on a replicated volume, if attach tier is done,
The shd will be restarted. But here the restart fails because of the
graph not getting generated properly. The dict which is used for graph
creation doesn't get the values copied properly in prepare_shd_volume_options()
glusterd_prepare_shd_volume_options_for_tier() fails and skips the copy.

This patch reverts the changes back to the way it was in 3.4 and
help in fixing the issue. Using the old dict_set_str works.

label: DOWNSTREAM ONLY

Change-Id: I21534ca177511e018ba76886e899b3b1a4ac4716
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167825
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volgen.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 012f38e..1f53beb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -4895,14 +4895,21 @@ glusterd_prepare_shd_volume_options_for_tier(glusterd_volinfo_t *volinfo,
dict_t *set_dict)
{
int ret = -1;
+ char *key = NULL;
- ret = volgen_set_shd_key_enable(set_dict, volinfo->tier_info.cold_type);
- if (ret)
- goto out;
+ key = volgen_get_shd_key (volinfo->tier_info.cold_type);
+ if (key) {
+ ret = dict_set_str (set_dict, key, "enable");
+ if (ret)
+ goto out;
+ }
- ret = volgen_set_shd_key_enable(set_dict, volinfo->tier_info.hot_type);
- if (ret)
- goto out;
+ key = volgen_get_shd_key (volinfo->tier_info.hot_type);
+ if (key) {
+ ret = dict_set_str (set_dict, key, "enable");
+ if (ret)
+ goto out;
+ }
out:
return ret;
}
--
1.8.3.1

4617
SOURCES/0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch

File diff suppressed because it is too large Load Diff

119
SOURCES/0099-client-fini-return-fini-after-rpc-cleanup.patch

@ -0,0 +1,119 @@
From d79cb2cdff6fe8d962c9ac095a7541ddf500302b Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Mon, 1 Apr 2019 14:44:20 +0530
Subject: [PATCH 099/124] client/fini: return fini after rpc cleanup

There is a race condition in rpc_transport later
and client fini.

Sequence of events to happen the race condition
1) When we want to destroy a graph, we send a parent down
event first
2) Once parent down received on a client xlator, we will
initiates a rpc disconnect
3) This will in turn generates a child down event.
4) When we process child down, we first do fini for
Every xlator
5) On successful return of fini, we delete the graph

Here after the step 5, there is a chance that the fini
on client might not be finished, because an rpc_transport
ref can race with the above sequence.

So we have to wait till all rpc's are successfully freed
before returning the fini from client

Backport of: https://review.gluster.org/#/c/glusterfs/+/22468/

>Change-Id: I20145662d71fb837e448a4d3210d1fcb2855f2d4
>fixes: bz#1659708
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I848bcfb9443467caed32bae0717244ab01b407fc
BUG: 1471742
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167831
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/protocol/client/src/client.c | 25 ++++++++++++++++++++-----
xlators/protocol/client/src/client.h | 6 ++++++
2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 19f5175..a372807 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -49,11 +49,12 @@ client_fini_complete(xlator_t *this)
if (!conf->destroy)
return 0;
- this->private = NULL;
-
- pthread_spin_destroy(&conf->fd_lock);
- pthread_mutex_destroy(&conf->lock);
- GF_FREE(conf);
+ pthread_mutex_lock(&conf->lock);
+ {
+ conf->fini_completed = _gf_true;
+ pthread_cond_broadcast(&conf->fini_complete_cond);
+ }
+ pthread_mutex_unlock(&conf->lock);
out:
return 0;
@@ -2721,6 +2722,7 @@ init(xlator_t *this)
goto out;
pthread_mutex_init(&conf->lock, NULL);
+ pthread_cond_init(&conf->fini_complete_cond, NULL);
pthread_spin_init(&conf->fd_lock, 0);
INIT_LIST_HEAD(&conf->saved_fds);
@@ -2779,6 +2781,7 @@ fini(xlator_t *this)
if (!conf)
return;
+ conf->fini_completed = _gf_false;
conf->destroy = 1;
if (conf->rpc) {
/* cleanup the saved-frames before last unref */
@@ -2786,6 +2789,18 @@ fini(xlator_t *this)
rpc_clnt_unref(conf->rpc);
}
+ pthread_mutex_lock(&conf->lock);
+ {
+ while (!conf->fini_completed)
+ pthread_cond_wait(&conf->fini_complete_cond, &conf->lock);
+ }
+ pthread_mutex_unlock(&conf->lock);
+
+ pthread_spin_destroy(&conf->fd_lock);
+ pthread_mutex_destroy(&conf->lock);
+ pthread_cond_destroy(&conf->fini_complete_cond);
+ GF_FREE(conf);
+
/* Saved Fds */
/* TODO: */
diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h
index f12fa61..8dcd72f 100644
--- a/xlators/protocol/client/src/client.h
+++ b/xlators/protocol/client/src/client.h
@@ -235,6 +235,12 @@ typedef struct clnt_conf {
* up, disconnects can be
* logged
*/
+
+ gf_boolean_t old_protocol; /* used only for old-protocol testing */
+ pthread_cond_t fini_complete_cond; /* Used to wait till we finsh the fini
+ compltely, ie client_fini_complete
+ to return*/
+ gf_boolean_t fini_completed;
} clnt_conf_t;
typedef struct _client_fd_ctx {
--
1.8.3.1

179
SOURCES/0100-clnt-rpc-ref-leak-during-disconnect.patch

@ -0,0 +1,179 @@
From 4d95e271a9042bf2d789a4d900ad263b6ea47681 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Wed, 23 Jan 2019 21:55:01 +0530
Subject: [PATCH 100/124] clnt/rpc: ref leak during disconnect.

During disconnect cleanup, we are not cancelling reconnect
timer, which causes a ref leak each time when a disconnect
happen.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22087/

>Change-Id: I9d05d1f368d080e04836bf6a0bb018bf8f7b5b8a
>updates: bz#1659708
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I5a2dbb17e663a4809bb4c435cacadbf0ab694a76
BUG: 1471742
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167844
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
libglusterfs/src/timer.c | 16 +++++++----
rpc/rpc-lib/src/rpc-clnt.c | 11 +++++++-
.../mgmt/glusterd/src/glusterd-snapshot-utils.c | 32 ++++++++++++++++++----
3 files changed, 47 insertions(+), 12 deletions(-)

diff --git a/libglusterfs/src/timer.c b/libglusterfs/src/timer.c
index d882543..2643c07 100644
--- a/libglusterfs/src/timer.c
+++ b/libglusterfs/src/timer.c
@@ -75,13 +75,13 @@ gf_timer_call_cancel(glusterfs_ctx_t *ctx, gf_timer_t *event)
if (ctx == NULL || event == NULL) {
gf_msg_callingfn("timer", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
"invalid argument");
- return 0;
+ return -1;
}
if (ctx->cleanup_started) {
gf_msg_callingfn("timer", GF_LOG_INFO, 0, LG_MSG_CTX_CLEANUP_STARTED,
"ctx cleanup started");
- return 0;
+ return -1;
}
LOCK(&ctx->lock);
@@ -93,10 +93,9 @@ gf_timer_call_cancel(glusterfs_ctx_t *ctx, gf_timer_t *event)
if (!reg) {
/* This can happen when cleanup may have just started and
* gf_timer_registry_destroy() sets ctx->timer to NULL.
- * Just bail out as success as gf_timer_proc() takes
- * care of cleaning up the events.
+ * gf_timer_proc() takes care of cleaning up the events.
*/
- return 0;
+ return -1;
}
LOCK(&reg->lock);
@@ -203,6 +202,13 @@ gf_timer_proc(void *data)
list_for_each_entry_safe(event, tmp, &reg->active, list)
{
list_del(&event->list);
+ /* TODO Possible resource leak
+ * Before freeing the event, we need to call the respective
+ * event functions and free any resources.
+ * For example, In case of rpc_clnt_reconnect, we need to
+ * unref rpc object which was taken when added to timer
+ * wheel.
+ */
GF_FREE(event);
}
}
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index 3f7bb3c..6f47515 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -495,6 +495,7 @@ rpc_clnt_connection_cleanup(rpc_clnt_connection_t *conn)
int unref = 0;
int ret = 0;
gf_boolean_t timer_unref = _gf_false;
+ gf_boolean_t reconnect_unref = _gf_false;
if (!conn) {
goto out;
@@ -514,6 +515,12 @@ rpc_clnt_connection_cleanup(rpc_clnt_connection_t *conn)
timer_unref = _gf_true;
conn->timer = NULL;
}
+ if (conn->reconnect) {
+ ret = gf_timer_call_cancel(clnt->ctx, conn->reconnect);
+ if (!ret)
+ reconnect_unref = _gf_true;
+ conn->reconnect = NULL;
+ }
conn->connected = 0;
conn->disconnected = 1;
@@ -533,6 +540,8 @@ rpc_clnt_connection_cleanup(rpc_clnt_connection_t *conn)
if (timer_unref)
rpc_clnt_unref(clnt);
+ if (reconnect_unref)
+ rpc_clnt_unref(clnt);
out:
return 0;
}
@@ -830,7 +839,7 @@ rpc_clnt_handle_disconnect(struct rpc_clnt *clnt, rpc_clnt_connection_t *conn)
pthread_mutex_lock(&conn->lock);
{
if (!conn->rpc_clnt->disabled && (conn->reconnect == NULL)) {
- ts.tv_sec = 10;
+ ts.tv_sec = 3;
ts.tv_nsec = 0;
rpc_clnt_ref(clnt);
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 041946d..b3c4158 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -3364,6 +3364,25 @@ out:
return ret;
}
+int
+glusterd_is_path_mounted(const char *path)
+{
+ FILE *mtab = NULL;
+ struct mntent *part = NULL;
+ int is_mounted = 0;
+
+ if ((mtab = setmntent("/etc/mtab", "r")) != NULL) {
+ while ((part = getmntent(mtab)) != NULL) {
+ if ((part->mnt_fsname != NULL) &&
+ (strcmp(part->mnt_dir, path)) == 0) {
+ is_mounted = 1;
+ break;
+ }
+ }
+ endmntent(mtab);
+ }
+ return is_mounted;
+}
/* This function will do unmount for snaps.
*/
int32_t
@@ -3388,14 +3407,11 @@ glusterd_snap_unmount(xlator_t *this, glusterd_volinfo_t *volinfo)
continue;
}
- /* Fetch the brick mount path from the brickinfo->path */
- ret = glusterd_get_brick_root(brickinfo->path, &brick_mount_path);
+ ret = glusterd_find_brick_mount_path(brickinfo->path,
+ &brick_mount_path);
if (ret) {
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BRICK_PATH_UNMOUNTED,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_MNTPATH_GET_FAIL,
"Failed to find brick_mount_path for %s", brickinfo->path);
- /* There is chance that brick path is already
- * unmounted. */
- ret = 0;
goto out;
}
/* unmount cannot be done when the brick process is still in
@@ -3440,6 +3456,10 @@ glusterd_umount(const char *path)
GF_ASSERT(this);
GF_ASSERT(path);
+ if (!glusterd_is_path_mounted(path)) {
+ return 0;
+ }
+
runinit(&runner);
snprintf(msg, sizeof(msg), "umount path %s", path);
runner_add_args(&runner, _PATH_UMOUNT, "-f", path, NULL);
--
1.8.3.1

162
SOURCES/0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch

@ -0,0 +1,162 @@
From 0021a4bbc9af2bfe28d4a79f76c3cd33f23dd118 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Fri, 5 Apr 2019 12:33:55 +0530
Subject: [PATCH 101/124] shd/mux: Fix coverity issues introduced by shd mux
patch

CID 1400475: Null pointer dereferences (FORWARD_NULL)
CID 1400474: Null pointer dereferences (FORWARD_NULL)
CID 1400471: Code maintainability issues (UNUSED_VALUE)
CID 1400470: Null pointer dereferences (FORWARD_NULL)
CID 1400469: Memory - illegal accesses (USE_AFTER_FREE)
CID 1400467: Code maintainability issues (UNUSED_VALUE)

Backport of: https://review.gluster.org/#/c/glusterfs/+/22514/

>Change-Id: I0ca1c733be335c6e5844f44850f8066626ac40d4
>updates: bz#789278
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I0425efca9ab5a95801eff9e99259219449a16380
BUG: 1471742
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167832
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
libglusterfs/src/graph.c | 21 +++++++++++++--------
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 6 ++++++
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 24 +++++++++++++++++-------
3 files changed, 36 insertions(+), 15 deletions(-)

diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index a492dd8..4c8b02d 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1470,7 +1470,9 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
goto out;
parent_graph = ctx->active;
graph = volfile_obj->graph;
- if (graph && graph->first)
+ if (!graph)
+ goto out;
+ if (graph->first)
xl = graph->first;
last_xl = graph->last_xl;
@@ -1591,12 +1593,10 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
parent_graph->leaf_count += graph->leaf_count;
parent_graph->id++;
+ volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t);
if (!volfile_obj) {
- volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t);
- if (!volfile_obj) {
- ret = -1;
- goto out;
- }
+ ret = -1;
+ goto out;
}
graph->used = 1;
@@ -1641,6 +1641,7 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
{
glusterfs_graph_t *oldvolfile_graph = NULL;
glusterfs_graph_t *newvolfile_graph = NULL;
+ char vol_id[NAME_MAX + 1];
int ret = -1;
@@ -1672,6 +1673,9 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
glusterfs_graph_prepare(newvolfile_graph, ctx, newvolfile_graph->first);
if (!is_graph_topology_equal(oldvolfile_graph, newvolfile_graph)) {
+ ret = snprintf(vol_id, sizeof(vol_id), "%s", volfile_obj->vol_id);
+ if (ret < 0)
+ goto out;
ret = glusterfs_process_svc_detach(ctx, volfile_obj);
if (ret) {
gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL,
@@ -1680,8 +1684,9 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
"old graph. Aborting the reconfiguration operation");
goto out;
}
- ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp,
- volfile_obj->vol_id, checksum);
+ volfile_obj = NULL;
+ ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id,
+ checksum);
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 937ea30..04a4b2e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -101,6 +101,8 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc);
ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s",
mux_conn->sockpath);
+ if (ret < 0)
+ goto out;
} else {
ret = mkdir_p(logdir, 0755, _gf_true);
if ((ret == -1) && (EEXIST != errno)) {
@@ -663,6 +665,10 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
glusterd_volinfo_ref(volinfo);
svc_proc->data = volinfo;
ret = glusterd_svc_stop(svc, sig);
+ if (ret) {
+ glusterd_volinfo_unref(volinfo);
+ goto out;
+ }
}
if (!empty && pid != -1) {
ret = glusterd_detach_svc(svc, volinfo, sig);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index e42703c..02945b1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -411,9 +411,14 @@ __gf_find_compatible_svc(gd_node_type daemon)
conf = THIS->private;
GF_VALIDATE_OR_GOTO("glusterd", conf, out);
- if (daemon == GD_NODE_SHD) {
- svc_procs = &conf->shd_procs;
- if (!svc_procs)
+ switch (daemon) {
+ case GD_NODE_SHD: {
+ svc_procs = &conf->shd_procs;
+ if (!svc_procs)
+ goto out;
+ } break;
+ default:
+ /* Add support for other client daemons here */
goto out;
}
@@ -540,11 +545,16 @@ __gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid)
if (!conf)
return NULL;
- if (daemon == GD_NODE_SHD) {
- svc_procs = &conf->shd_procs;
- if (!svc_proc)
+ switch (daemon) {
+ case GD_NODE_SHD: {
+ svc_procs = &conf->shd_procs;
+ if (!svc_procs)
+ return NULL;
+ } break;
+ default:
+ /* Add support for other client daemons here */
return NULL;
- } /* Can be moved to switch when mux is implemented for other daemon; */
+ }
cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
{
--
1.8.3.1

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save