From 48f7be493588fdf5e99dff0c3b91327e07da05f3 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:48:34 +0530
Subject: [PATCH 242/255] Revert "tests/shd: Add test coverage for shd mux"

This reverts commit b7f832288d2d2e57231d90765afc049ad7cb2f9d.

BUG: 1471742
Change-Id: Ifccac5150f07b98006714e43c77c5a4b1fd38cb8
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175951
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/basic/glusterd-restart-shd-mux.t |  96 ---------------------
 tests/basic/shd-mux.t                  | 149 ---------------------------------
 tests/basic/volume-scale-shd-mux.t     | 112 -------------------------
 tests/volume.rc                        |  15 ----
 4 files changed, 372 deletions(-)
 delete mode 100644 tests/basic/glusterd-restart-shd-mux.t
 delete mode 100644 tests/basic/shd-mux.t
 delete mode 100644 tests/basic/volume-scale-shd-mux.t

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
deleted file mode 100644
index a50af9d..0000000
--- a/tests/basic/glusterd-restart-shd-mux.t
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=20
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-
-for i in $(seq 1 3); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-#Stop glusterd
-TEST pkill glusterd
-#Only glusterd is stopped, so the shd should keep running
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
-TEST glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-#Check that the thread count becomes number of ec volumes * bricks per volume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check that the thread count becomes number of afr volumes * bricks per volume (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-shd_pid=$(get_shd_mux_pid $V0)
-for i in $(seq 1 3); do
- afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
- ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
-done
-
-#Simulate a node reboot
-TEST pkill gluster
-#All gluster processes are killed, so there should be no shd
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-TEST glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-#Check that the thread count becomes number of ec volumes * bricks per volume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check that the thread count becomes number of afr volumes * bricks per volume (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-shd_pid=$(get_shd_mux_pid $V0)
-for i in $(seq 1 3); do
- afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
- ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
-done
-
-for i in $(seq 1 3); do
- TEST $CLI volume stop ${V0}_afr$i
- TEST $CLI volume stop ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}3
-
-TEST touch $M0/foo{1..100}
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
-
-TEST $CLI volume start ${V0} force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-TEST rm -rf $M0/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
-
-cleanup
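
The "^204$" expectation above is layout arithmetic, not a magic constant: $V0 has two replica-3 subvolumes, one brick was killed in each, and after touching 100 files every surviving brick of a degraded subvolume lists that subvolume's share of the files plus an entry for the root directory. A minimal sketch of the count, with illustrative variable names that are not part of the test harness:

    # 2 replica-3 subvolumes, 1 brick killed in each, 100 files touched.
    # Each of the 2 surviving bricks per subvolume reports its subvolume's
    # files plus the root directory, so the sum is independent of how DHT
    # distributes the files across the subvolumes.
    files=100
    subvols=2
    live_bricks_per_subvol=2
    expected=$(( live_bricks_per_subvol * files + live_bricks_per_subvol * subvols ))
    echo $expected    # 204
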
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
deleted file mode 100644
index e42a34a..0000000
--- a/tests/basic/shd-mux.t
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=16
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-shd_pid=$(get_shd_mux_pid $V0)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Create one more volume
-TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_1
-
-#Check whether the shd has been multiplexed
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_1 cluster.eager-lock off
-TEST $CLI volume set ${V0}_1 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_1 force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_1
-TEST $CLI volume delete ${V0}_1
-
-#Check that the stop succeeded and detached the volume without restarting the shd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check that the thread count returns to the earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-
-#Now create an ec volume and check that mux works
-TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_2
-
-#Check whether the shd has been multiplexed
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_2 cluster.eager-lock off
-TEST $CLI volume set ${V0}_2 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_2 force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_2
-TEST $CLI volume delete ${V0}_2
-
-#Check that the stop succeeded and detached the volume without restarting the shd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check that the ec-related thread count drops to zero
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check that the thread count returns to the earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-for i in $(seq 1 3); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-#Check that the thread count becomes number of ec volumes * bricks per volume (3*6=18)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check that the thread count becomes number of afr volumes * bricks per volume (4*6=24)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-#Delete the volumes
-for i in $(seq 1 3); do
- TEST $CLI volume stop ${V0}_afr$i
- TEST $CLI volume stop ${V0}_ec$i
- TEST $CLI volume delete ${V0}_afr$i
- TEST $CLI volume delete ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-cleanup
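
Every mux assertion in this test reduces to one invariant: all replicate and disperse volumes report the same glustershd PID, extracted with the same awk pattern that the get_shd_mux_pid helper (removed from tests/volume.rc below) used. A hand-run version of that check might look like this sketch:

    # Compare the self-heal daemon PID reported for two volumes; under
    # shd mux both should resolve to the same process.
    pid_v0=$($CLI volume status $V0 shd | awk '/Self-heal/{print $8}')
    pid_v1=$($CLI volume status ${V0}_1 shd | awk '/Self-heal/{print $8}')
    if [ "$pid_v0" = "$pid_v1" ]; then
        echo "multiplexed: PID $pid_v0 serves both volumes"
    fi

The "^404$" expectation for ${V0}_2 follows the same arithmetic as the replica case: the single disperse 6 subvolume keeps 4 bricks alive after two are killed, and each of the 4 lists 100 files plus the root directory, so 4 * 101 = 404.
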
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
deleted file mode 100644
index dd9cf83..0000000
--- a/tests/basic/volume-scale-shd-mux.t
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=6
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-
-for i in $(seq 1 2); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-#Check that the thread count becomes number of ec volumes * bricks per volume (2*6=12)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check that the thread count becomes number of afr volumes * bricks per volume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
-#Check that the thread count becomes number of afr volumes * bricks per volume plus 3 threads for the newly added bricks (3*6+3=21)
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Remove the bricks and check that the detach succeeds
-$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
-#Check that the thread count becomes number of ec volumes * bricks per volume plus 6 threads for the newly added bricks (2*6+6=18)
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-#Remove the bricks and check that the detach succeeds
-$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-
-for i in $(seq 1 2); do
- TEST $CLI volume stop ${V0}_afr$i
- TEST $CLI volume stop ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-
-TEST touch $M0/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-
-TEST $CLI volume start ${V0} force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-TEST rm -rf $M0/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-shd_pid=$(get_shd_mux_pid $V0)
-TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
-TEST $CLI volume start ${V0}_distribute1
-
-#Creating a non-replicate/non-ec volume should not have any effect on shd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
-
-TEST mkdir $B0/add/
-#Now convert the distributed volume to replicate
-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Scale down the volume
-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-TEST rm -rf $B0/add/
-TEST mkdir $B0/add/
-#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
-
-#Now convert the replica volume to distribute again and make sure the shd is now stopped
-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
-TEST rm -rf $B0/add/
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-cleanup
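
The thread-count expectations in this test lean on one property of the muxed shd: it parks one healer thread per brick of every replicate or disperse volume it serves, waiting in __afr_shd_healer_wait or __ec_shd_healer_wait respectively, so add-brick and remove-brick shift the count by exactly the number of bricks involved. The number_healer_threads_shd helper removed below counts those threads with pstack; an equivalent manual probe, as a sketch:

    # Count healer threads parked in the afr wait function inside the
    # single multiplexed glustershd process.
    shd_pid=$($CLI volume status $V0 shd | awk '/Self-heal/{print $8}')
    pstack $shd_pid | grep -c "__afr_shd_healer_wait"

grep -c gives the same result as the helper's grep | wc -l pipeline.
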
diff --git a/tests/volume.rc b/tests/volume.rc
index 6a78c37..022d972 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -913,21 +913,6 @@ function volgen_check_ancestry {
fi
}
-function get_shd_mux_pid {
- local volume=$1
- pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
- echo $pid
-}
-
-function shd_count {
- ps aux | grep "glustershd" | grep -v grep | wc -l
-}
-
-function number_healer_threads_shd {
- local pid=$(get_shd_mux_pid $1)
- pstack $pid | grep $2 | wc -l
-}
-
function get_mtime {
local time=$(get-mdata-xattr -m $1)
if [ $time == "-1" ];
--
1.8.3.1
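
As an aside on the removed shd_count helper: its ps | grep | grep -v grep | wc -l pipeline can be written more compactly with pgrep, assuming pgrep is available on the test hosts (the harness itself used ps):

    # -f matches against the full command line and -c prints the match
    # count; pgrep never reports its own process, so no grep -v is needed.
    pgrep -c -f glustershd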