From b528c21e6fedc9ac841942828b82e0c808da5efb Mon Sep 17 00:00:00 2001
From: Sheetal Pamecha <spamecha@redhat.com>
Date: Thu, 2 Jan 2020 12:05:12 +0530
Subject: [PATCH 393/449] afr: restore timestamp of files during metadata heal

During metadata heal, timestamps are currently restored
only for non-regular (char, block, etc.) files.
Extend this to regular files as well, since their
timestamps can also be updated via the touch command.
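
For reference, the restore step amounts to pushing the heal source's
atime/mtime onto every healed sink via a setattr. The sketch below is
illustrative only, not the verbatim AFR code: the function name, its
signature, and the direct syncop_setattr() call are assumptions (the
real logic lives in afr_selfheal_restore_time(), invoked in the hunk
below).

    #include <glusterfs/glusterfs.h> /* GF_SET_ATTR_*, struct iatt (assumed install path) */
    #include <glusterfs/syncop.h>    /* syncop_setattr() (assumed install path) */

    /* Sketch only: copy the heal source's atime/mtime onto each healed
     * sink by issuing a setattr that carries the source's stat. */
    static int
    restore_time_sketch(xlator_t **children, int child_count, loc_t *loc,
                        unsigned char *healed_sinks, struct iatt *source_stat)
    {
        int i = 0;
        int valid = GF_SET_ATTR_ATIME | GF_SET_ATTR_MTIME;

        for (i = 0; i < child_count; i++) {
            if (!healed_sinks[i])
                continue; /* only touch sinks selected for heal */
            syncop_setattr(children[i], loc, source_stat, valid,
                           NULL, NULL, NULL, NULL);
        }
        return 0;
    }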

> upstream patch link: https://review.gluster.org/#/c/glusterfs/+/23953/
> fixes: bz#1787274
> Change-Id: I26fe4fb6dff679422ba4698a7f828bf62ca7ca18
> Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>

BUG: 1761531
Change-Id: I26fe4fb6dff679422ba4698a7f828bf62ca7ca18
Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/202332
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 .../bug-1761531-metadata-heal-restore-time.t    | 74 ++++++++++++++++++++++
 xlators/cluster/afr/src/afr-self-heal-metadata.c |  8 +--
 2 files changed, 76 insertions(+), 6 deletions(-)
 create mode 100644 tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t

diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 0000000..7e24eae
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c
index ecfa791..f4e31b6 100644
--- a/xlators/cluster/afr/src/afr-self-heal-metadata.c
+++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c
@@ -421,12 +421,8 @@ afr_selfheal_metadata(call_frame_t *frame, xlator_t *this, inode_t *inode)
         if (ret)
             goto unlock;
 
-        /* Restore atime/mtime for files that don't need data heal as
-         * restoring timestamps happens only as a part of data-heal.
-         */
-        if (!IA_ISREG(locked_replies[source].poststat.ia_type))
-            afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
-                                      locked_replies);
+        afr_selfheal_restore_time(frame, this, inode, source, healed_sinks,
+                                  locked_replies);
 
         ret = afr_selfheal_undo_pending(
             frame, this, inode, sources, sinks, healed_sinks, undid_pending,
-- 
1.8.3.1