From deba8115d2c2af26df42966b91ef04ff4dd79cde Mon Sep 17 00:00:00 2001
From: V3n3RiX
Date: Thu, 14 May 2020 11:09:11 +0100
Subject: gentoo resync : 14.05.2020

---
 sys-fs/lvm2/files/lvm.rc-2.02.184-r3 | 154 -----------------------------------
 sys-fs/lvm2/files/lvm.rc-2.03.05     | 132 ------------------------------
 2 files changed, 286 deletions(-)
 delete mode 100644 sys-fs/lvm2/files/lvm.rc-2.02.184-r3
 delete mode 100644 sys-fs/lvm2/files/lvm.rc-2.03.05

(limited to 'sys-fs/lvm2/files')

diff --git a/sys-fs/lvm2/files/lvm.rc-2.02.184-r3 b/sys-fs/lvm2/files/lvm.rc-2.02.184-r3
deleted file mode 100644
index b48efb0c99a8..000000000000
--- a/sys-fs/lvm2/files/lvm.rc-2.02.184-r3
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/sbin/openrc-run
-# Copyright 1999-2019 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-_get_lvm_path() {
-	local lvm_path=
-	for lvm_path in /bin/lvm /sbin/lvm ; do
-		[ -x "$lvm_path" ] && break
-	done
-	echo "${lvm_path}"
-}
-
-_need_lvmetad()
-{
-	local lvm_path="$(_get_lvm_path)"
-	[ ! -x "${lvm_path}" ] && return 1
-	${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmetad=1'
-}
-
-_need_lvmlockd()
-{
-	local lvm_path="$(_get_lvm_path)"
-	[ ! -x "${lvm_path}" ] && return 1
-	${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmlockd=1'
-}
-
-depend() {
-	before checkfs fsck
-	after modules device-mapper
-	# We may use lvmetad based on the configuration. If we added lvmetad
-	# support while lvm2 is running then we aren't dependent on it. For the
-	# more common case, if its disabled in the config we aren't dependent
-	# on it.
-	config /etc/lvm/lvm.conf
-	local _need=
-	if service_started; then
-		_need=$(service_get_value need)
-	else
-		if _need_lvmetad; then
-			_need="${_need} lvmetad"
-		fi
-		if _need_lvmlockd; then
-			_need="${_need} lvmlockd"
-		fi
-	fi
-	# Make sure you review /etc/conf.d/lvm as well!
-	# Depending on your system, it might also introduce udev & mdraid
-	need sysfs ${_need}
-}
-
-config='global { locking_dir = "/run/lock/lvm" }'
-
-dm_in_proc() {
-	local retval=0
-	for x in devices misc ; do
-		grep -qs 'device-mapper' /proc/${x}
-		retval=$((${retval} + $?))
-	done
-	return ${retval}
-}
-
-start() {
-	# LVM support for /usr, /home, /opt ....
-	# This should be done *before* checking local
-	# volumes, or they never get checked.
-
-	# NOTE: Add needed modules for LVM or RAID, etc
-	#       to /etc/modules.autoload if needed
-	lvm_path="$(_get_lvm_path)"
-	for lvm_path in /bin/lvm /sbin/lvm ; do
-		[ -x "$lvm_path" ] && break
-	done
-	if [ ! -x "$lvm_path" ]; then
-		eerror "Cannot find lvm binary in /sbin or /bin!"
-		return 1
-	fi
-	if [ -z "${CDBOOT}" ] ; then
-		if [ -e /proc/modules ] && ! dm_in_proc ; then
-			modprobe dm-mod 2>/dev/null
-		fi
-		if [ -d /proc/lvm ] || dm_in_proc ; then
-			ebegin "Setting up the Logical Volume Manager"
-			#still echo stderr for debugging
-			lvm_commands="#!${lvm_path}\n"
-			# Extra PV find pass because some devices might not have been available until very recently
-			lvm_commands="${lvm_commands}pvscan --config '${config}'\n"
-			# Now make the nodes
-			lvm_commands="${lvm_commands}vgscan --config '${config}' --mknodes\n"
-			# And turn them on!
-			lvm_commands="${lvm_commands}vgchange --config '${config}' --sysinit -a ly\n"
-			if _need_lvmlockd; then
-				# Start lockd VGs as required
-				lvm_commands="${lvm_commands}vgchange --config '${config}' --lock-start --lock-opt auto\n"
-			fi
-			# Order of this is important, have to work around dash and LVM readline
-			printf "%b\n" "${lvm_commands}" | $lvm_path /proc/self/fd/0 >/dev/null
-			eend $? "Failed to setup the LVM"
-		fi
-	fi
-}
-
-start_post()
-{
-	# Save if we needed lvmetad
-	if _need_lvmetad; then
-		service_set_value need lvmetad
-	fi
-}
-
-stop() {
-	for lvm_path in /bin/lvm /sbin/lvm ; do
-		[ -x "$lvm_path" ] && break
-	done
-	if [ ! -x "$lvm_path" ]; then
-		eerror "Cannot find lvm binary in /sbin or /bin!"
-		return 1
-	fi
-
-	# Stop LVM2
-	if [ -x /sbin/vgs ] && \
-	   [ -x /sbin/vgchange ] && \
-	   [ -x /sbin/lvchange ] && \
-	   [ -f /etc/lvmtab -o -d /etc/lvm ] && \
-	   [ -d /proc/lvm -o "`grep device-mapper /proc/misc 2>/dev/null`" ]
-	then
-		einfo "Shutting down the Logical Volume Manager"
-
-		VGS=$($lvm_path vgs --config "${config}" -o vg_name --noheadings --nosuffix --rows 2> /dev/null)
-
-		if [ "$VGS" ]
-		then
-			local _ending="eend"
-			[ "$RC_RUNLEVEL" = shutdown ] && _ending="ewend"
-			ebegin "  Shutting Down LVs & VGs"
-			#still echo stderr for debugging
-			lvm_commands="#!${lvm_path}\n"
-			# Extra PV find pass because some devices might not have been available until very recently
-			lvm_commands="${lvm_commands}lvchange --config '${config}' --sysinit -a ln ${VGS}\n"
-			# Now make the nodes
-			lvm_commands="${lvm_commands}vgchange --config '${config}' --sysinit -a ln ${VGS}\n"
-			# Order of this is important, have to work around dash and LVM readline
-			printf "%b\n" "${lvm_commands}" | $lvm_path /proc/self/fd/0 --config "${config}" >/dev/null
-			rc=$?
-			msg="Failed (possibly some LVs still needed for /usr or root)"
-			[ "$RC_RUNLEVEL" = shutdown ] && msg="${msg} [rc=$rc]" && rc=0
-			${_ending} $rc "${msg}"
-		fi
-
-		einfo "Finished shutting down the Logical Volume Manager"
-		return 0
-	fi
-}
-
-# vim:ts=4
diff --git a/sys-fs/lvm2/files/lvm.rc-2.03.05 b/sys-fs/lvm2/files/lvm.rc-2.03.05
deleted file mode 100644
index 7b7d90b98269..000000000000
--- a/sys-fs/lvm2/files/lvm.rc-2.03.05
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/sbin/openrc-run
-# Copyright 1999-2019 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-_get_lvm_path() {
-	local lvm_path=
-	for lvm_path in /bin/lvm /sbin/lvm ; do
-		[ -x "$lvm_path" ] && break
-	done
-	echo "${lvm_path}"
-}
-
-_need_lvmlockd()
-{
-	local lvm_path="$(_get_lvm_path)"
-	[ ! -x "${lvm_path}" ] && return 1
-	${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmlockd=1'
-}
-
-depend() {
-	before checkfs fsck
-	after modules device-mapper
-	config /etc/lvm/lvm.conf
-	local _need=
-	if service_started; then
-		_need=$(service_get_value need)
-	else
-		if _need_lvmlockd; then
-			_need="${_need} lvmlockd"
-		fi
-	fi
-	# Make sure you review /etc/conf.d/lvm as well!
-	# Depending on your system, it might also introduce udev & mdraid
-	need sysfs ${_need}
-}
-
-config='global { locking_dir = "/run/lock/lvm" }'
-
-dm_in_proc() {
-	local retval=0
-	for x in devices misc ; do
-		grep -qs 'device-mapper' /proc/${x}
-		retval=$((${retval} + $?))
-	done
-	return ${retval}
-}
-
-start() {
-	# LVM support for /usr, /home, /opt ....
-	# This should be done *before* checking local
-	# volumes, or they never get checked.
-
-	# NOTE: Add needed modules for LVM or RAID, etc
-	#       to /etc/modules.autoload if needed
-	lvm_path="$(_get_lvm_path)"
-	for lvm_path in /bin/lvm /sbin/lvm ; do
-		[ -x "$lvm_path" ] && break
-	done
-	if [ ! -x "$lvm_path" ]; then
-		eerror "Cannot find lvm binary in /sbin or /bin!"
-		return 1
-	fi
-	if [ -z "${CDBOOT}" ] ; then
-		if [ -e /proc/modules ] && ! dm_in_proc ; then
-			modprobe dm-mod 2>/dev/null
-		fi
-		if [ -d /proc/lvm ] || dm_in_proc ; then
-			ebegin "Setting up the Logical Volume Manager"
-			#still echo stderr for debugging
-			lvm_commands="#!${lvm_path}\n"
-			# Extra PV find pass because some devices might not have been available until very recently
-			lvm_commands="${lvm_commands}pvscan --config '${config}'\n"
-			# Now make the nodes
-			lvm_commands="${lvm_commands}vgscan --config '${config}' --mknodes\n"
-			# And turn them on!
-			lvm_commands="${lvm_commands}vgchange --config '${config}' --sysinit -a ly\n"
-			if _need_lvmlockd; then
-				# Start lockd VGs as required
-				lvm_commands="${lvm_commands}vgchange --config '${config}' --lock-start --lock-opt auto\n"
-			fi
-			# Order of this is important, have to work around dash and LVM readline
-			printf "%b\n" "${lvm_commands}" | $lvm_path /proc/self/fd/0 >/dev/null
-			eend $? "Failed to setup the LVM"
-		fi
-	fi
-}
-
-stop() {
-	for lvm_path in /bin/lvm /sbin/lvm ; do
-		[ -x "$lvm_path" ] && break
-	done
-	if [ ! -x "$lvm_path" ]; then
-		eerror "Cannot find lvm binary in /sbin or /bin!"
-		return 1
-	fi
-
-	# Stop LVM2
-	if [ -x /sbin/vgs ] && \
-	   [ -x /sbin/vgchange ] && \
-	   [ -x /sbin/lvchange ] && \
-	   [ -f /etc/lvmtab -o -d /etc/lvm ] && \
-	   [ -d /proc/lvm -o "`grep device-mapper /proc/misc 2>/dev/null`" ]
-	then
-		einfo "Shutting down the Logical Volume Manager"
-
-		VGS=$($lvm_path vgs --config "${config}" -o vg_name --noheadings --nosuffix --rows 2> /dev/null)
-
-		if [ "$VGS" ]
-		then
-			local _ending="eend"
-			[ "$RC_RUNLEVEL" = shutdown ] && _ending="ewend"
-			ebegin "  Shutting Down LVs & VGs"
-			#still echo stderr for debugging
-			lvm_commands="#!${lvm_path}\n"
-			# Extra PV find pass because some devices might not have been available until very recently
-			lvm_commands="${lvm_commands}lvchange --config '${config}' --sysinit -a ln ${VGS}\n"
-			# Now make the nodes
-			lvm_commands="${lvm_commands}vgchange --config '${config}' --sysinit -a ln ${VGS}\n"
-			# Order of this is important, have to work around dash and LVM readline
-			printf "%b\n" "${lvm_commands}" | $lvm_path /proc/self/fd/0 --config "${config}" >/dev/null
-			rc=$?
-			msg="Failed (possibly some LVs still needed for /usr or root)"
-			[ "$RC_RUNLEVEL" = shutdown ] && msg="${msg} [rc=$rc]" && rc=0
-			${_ending} $rc "${msg}"
-		fi
-
-		einfo "Finished shutting down the Logical Volume Manager"
-		return 0
-	fi
-}
-
-# vim:ts=4
--
cgit v1.2.3