#!/bin/sh
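
# Parse the MD RAID options from the kernel command line (rd.md.* and their
# deprecated rd_* aliases) and translate them into udev properties and
# initramfs hook scripts for the rest of the mdraid module.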
MD_UUID=$(getargs rd.md.uuid -d rd_MD_UUID=)
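# rd.md.uuid= may be given more than once; getargs returns the values as a
# space-separated list (e.g. MD_UUID="aaaa bbbb"), which the loops below
# iterate over.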
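# Skip MD activation entirely when no array UUID was requested and rd.auto is
# off, or when it was disabled explicitly (rd.md=0, or the deprecated rd_NO_MD).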
if ( [ -z "$MD_UUID" ] && ! getargbool 0 rd.auto ) || ! getargbool 1 rd.md -d -n rd_NO_MD; then
    info "rd.md=0: removing MD RAID activation"
    udevproperty rd_NO_MD=1
else
    # rewrite the md rules to only process the specified raid arrays
    if [ -n "$MD_UUID" ]; then
        for f in /etc/udev/rules.d/65-md-incremental*.rules; do
            [ -e "$f" ] || continue
            while read -r line || [ -n "$line" ]; do
                if [ "${line%%UUID CHECK}" != "$line" ]; then
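                    # "UUID CHECK" is a placeholder line in the stock rules
                    # file; replace it with rules that let only the requested
                    # arrays through. Illustrative output, assuming
                    # MD_UUID="aaaa bbbb":
                    #   IMPORT{program}="/sbin/mdadm --examine --export $tempnode"
                    #   ENV{MD_UUID}=="aaaa", GOTO="md_uuid_ok"
                    #   ENV{ID_FS_UUID}=="aaaa", GOTO="md_uuid_ok"
                    #   ENV{MD_UUID}=="bbbb", GOTO="md_uuid_ok"
                    #   ENV{ID_FS_UUID}=="bbbb", GOTO="md_uuid_ok"
                    #   GOTO="md_end"
                    #   LABEL="md_uuid_ok"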
                    printf 'IMPORT{program}="/sbin/mdadm --examine --export $tempnode"\n'
                    for uuid in $MD_UUID; do
                        printf 'ENV{MD_UUID}=="%s", GOTO="md_uuid_ok"\n' "$uuid"
                        printf 'ENV{ID_FS_UUID}=="%s", GOTO="md_uuid_ok"\n' "$uuid"
                    done
                    printf 'GOTO="md_end"\n'
                    printf 'LABEL="md_uuid_ok"\n'
                else
echo "$line"
                fi
            done < "${f}" > "${f}.new"
            mv "${f}.new" "$f"
        done
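        # Make the initqueue wait until every requested array has shown up.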
        for uuid in $MD_UUID; do
            wait_for_dev "/dev/disk/by-id/md-uuid-${uuid}"
        done
    fi
fi
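# If the initramfs ships an mdadm.conf and rd.md.conf is not disabled, flag it
# for the assembly rules and drop the mdraid-cleanup pre-pivot hook so the
# assembled arrays stay up across switch-root.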
if [ -e /etc/mdadm.conf ] && getargbool 1 rd.md.conf -d -n rd_NO_MDADMCONF; then
    udevproperty rd_MDADMCONF=1
    rm -f -- $hookdir/pre-pivot/*mdraid-cleanup.sh
fi
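# rd.md.conf=0: ignore any embedded mdadm.conf and install the mdraid-cleanup
# hook instead, tearing the incrementally assembled arrays down before
# switch-root.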
if ! getargbool 1 rd.md.conf -d -n rd_NO_MDADMCONF; then
    rm -f -- /etc/mdadm/mdadm.conf /etc/mdadm.conf
    ln -s $(command -v mdraid-cleanup) $hookdir/pre-pivot/31-mdraid-cleanup.sh 2>/dev/null
fi
# noiswmd nodmraid for anaconda / rc.sysinit compatibility
# note nodmraid really means nobiosraid, so we don't want MDIMSM then either
if ! getargbool 1 rd.md.imsm -d -n rd_NO_MDIMSM -n noiswmd -n nodmraid; then
    info "no MD RAID for imsm/isw raids"
    udevproperty rd_NO_MDIMSM=1
fi
# same thing with ddf containers
if ! getargbool 1 rd.md.ddf -n rd_NO_MDDDF -n noddfmd -n nodmraid; then
    info "no MD RAID for SNIA ddf raids"
    udevproperty rd_NO_MDDDF=1
fi
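# If this mdadm understands --offroot, export the flag so the other mdraid
# scripts can pass it along.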
strstr "$(mdadm --help-options 2>&1)" offroot && udevproperty rd_MD_OFFROOT=--offroot