| author | Matt Macy <mmacy@FreeBSD.org> | 2020-08-24 22:48:19 +0000 |
|---|---|---|
| committer | Matt Macy <mmacy@FreeBSD.org> | 2020-08-24 22:48:19 +0000 |
| commit | 3b0ce0e28db46d0403929aba45c682285e1ac217 (patch) | |
| tree | 91721e6e5518bd0d8113dee535898f2225443411 | /etc |
Diffstat (limited to 'etc')
40 files changed, 2117 insertions, 0 deletions
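Nearly all of the files added below are `*.in` templates; their Makefiles include `config/Substfiles.am` (not part of this diff) so that placeholders such as `@sysconfdir@`, `@sbindir@`, and `@initconfdir@` are filled in with the paths chosen at configure time. As a rough illustration only (the real rule and substitution list live in `config/Substfiles.am`, and the example install paths here are assumptions), the effect on one template is roughly:

```sh
# Illustrative sketch, not the project's actual rule: expand the @var@
# placeholders in one template using example install paths.
sed -e 's|@sysconfdir@|/etc|g' \
    -e 's|@sbindir@|/usr/sbin|g' \
    -e 's|@initconfdir@|/etc/default|g' \
    etc/default/zfs.in > etc/default/zfs
```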
diff --git a/etc/Makefile.am b/etc/Makefile.am new file mode 100644 index 0000000000000..ac71da9445d81 --- /dev/null +++ b/etc/Makefile.am @@ -0,0 +1,5 @@ +SUBDIRS = zfs sudoers.d +if BUILD_LINUX +SUBDIRS += default $(ZFS_INIT_SYSTEMD) $(ZFS_INIT_SYSV) $(ZFS_MODULE_LOAD) +endif +DIST_SUBDIRS = default init.d zfs systemd modules-load.d sudoers.d diff --git a/etc/default/.gitignore b/etc/default/.gitignore new file mode 100644 index 0000000000000..73304bc2cd4a8 --- /dev/null +++ b/etc/default/.gitignore @@ -0,0 +1 @@ +zfs diff --git a/etc/default/Makefile.am b/etc/default/Makefile.am new file mode 100644 index 0000000000000..0ec868e134841 --- /dev/null +++ b/etc/default/Makefile.am @@ -0,0 +1,5 @@ +include $(top_srcdir)/config/Substfiles.am + +initconf_SCRIPTS = zfs + +SUBSTFILES += $(initconf_SCRIPTS) diff --git a/etc/default/zfs.in b/etc/default/zfs.in new file mode 100644 index 0000000000000..3b6e5486dd337 --- /dev/null +++ b/etc/default/zfs.in @@ -0,0 +1,103 @@ +# ZoL userland configuration. + +# NOTE: This file is intended for sysv init and initramfs. +# Changing some of these settings may not make any difference on +# systemd-based setup, e.g. setting ZFS_MOUNT=no will not prevent systemd +# from launching zfs-mount.service during boot. +# See: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=901436 + +# To enable a boolean setting, set it to yes, on, true, or 1. +# Anything else will be interpreted as unset. + +# Run `zfs mount -a` during system start? +ZFS_MOUNT='yes' + +# Run `zfs unmount -a` during system stop? +ZFS_UNMOUNT='yes' + +# Run `zfs share -a` during system start? +# nb: The shareiscsi, sharenfs, and sharesmb dataset properties. +ZFS_SHARE='yes' + +# Run `zfs unshare -a` during system stop? +ZFS_UNSHARE='yes' + +# By default, a verbatim import of all pools is performed at boot based on the +# contents of the default zpool cache file. The contents of the cache are +# managed automatically by the 'zpool import' and 'zpool export' commands. +# +# By setting this to 'yes', the system will instead search all devices for +# pools and attempt to import them all at boot, even those that have been +# exported. Under this mode, the search path can be controlled by the +# ZPOOL_IMPORT_PATH variable and a list of pools that should not be imported +# can be listed in the ZFS_POOL_EXCEPTIONS variable. +# +# Note that importing all visible pools may include pools that you don't +# expect, such as those on removable devices and SANs, and those pools may +# proceed to mount themselves in places you do not want them to. The results +# can be unpredictable and possibly dangerous. Only enable this option if you +# understand this risk and have complete physical control over your system and +# SAN to prevent the insertion of malicious pools. +ZPOOL_IMPORT_ALL_VISIBLE='no' + +# Specify specific path(s) to look for device nodes and/or links for the +# pool import(s). See zpool(8) for more information about this variable. +# It supersedes the old USE_DISK_BY_ID which indicated that it would only +# try '/dev/disk/by-id'. +# The old variable will still work in the code, but is deprecated. +#ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id" + +# List of pools that should NOT be imported at boot +# when ZPOOL_IMPORT_ALL_VISIBLE is 'yes'. +# This is a space separated list. +#ZFS_POOL_EXCEPTIONS="test2" + +# Should the datasets be mounted verbosely? +# A mount counter will be used when mounting if set to 'yes'. +VERBOSE_MOUNT='no' + +# Should we allow overlay mounts? 
+# This is standard in Linux, but not ZFS which comes from Solaris where this +# is not allowed). +DO_OVERLAY_MOUNTS='no' + +# Any additional option to the 'zfs import' commandline? +# Include '-o' for each option wanted. +# You don't need to put '-f' in here, unless you want it ALL the time. +# Using the option 'zfsforce=1' on the grub/kernel command line will +# do the same, but on a case-to-case basis. +ZPOOL_IMPORT_OPTS="" + +# Full path to the ZFS cache file? +# See "cachefile" in zpool(8). +# The default is "@sysconfdir@/zfs/zpool.cache". +#ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" +# +# Setting ZPOOL_CACHE to an empty string ('') AND setting ZPOOL_IMPORT_OPTS to +# "-c @sysconfdir@/zfs/zpool.cache" will _enforce_ the use of a cache file. +# This is needed in some cases (extreme amounts of VDEVs, multipath etc). +# Generally, the use of a cache file is usually not recommended on Linux +# because it sometimes is more trouble than it's worth (laptops with external +# devices or when/if device nodes changes names). +#ZPOOL_IMPORT_OPTS="-c @sysconfdir@/zfs/zpool.cache" +#ZPOOL_CACHE="" + +# Any additional option to the 'zfs mount' command line? +# Include '-o' for each option wanted. +MOUNT_EXTRA_OPTIONS="" + +# Build kernel modules with the --enable-debug switch? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_ENABLE_DEBUG='no' + +# Build kernel modules with the --enable-debuginfo switch? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_ENABLE_DEBUGINFO='no' + +# Keep debugging symbols in kernel modules? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_DISABLE_STRIP='no' + +# Optional arguments for the ZFS Event Daemon (ZED). +# See zed(8) for more information on available options. +#ZED_ARGS="-M" diff --git a/etc/init.d/.gitignore b/etc/init.d/.gitignore new file mode 100644 index 0000000000000..43a673d55343d --- /dev/null +++ b/etc/init.d/.gitignore @@ -0,0 +1,5 @@ +zfs-import +zfs-mount +zfs-share +zfs-zed +zfs diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am new file mode 100644 index 0000000000000..9285a995a1cf1 --- /dev/null +++ b/etc/init.d/Makefile.am @@ -0,0 +1,7 @@ +include $(top_srcdir)/config/Substfiles.am + +EXTRA_DIST += README.md + +init_SCRIPTS = zfs-import zfs-mount zfs-share zfs-zed + +SUBSTFILES += $(init_SCRIPTS) diff --git a/etc/init.d/README.md b/etc/init.d/README.md new file mode 100644 index 0000000000000..ad7c053aacabb --- /dev/null +++ b/etc/init.d/README.md @@ -0,0 +1,72 @@ +DESCRIPTION + These script were written with the primary intention of being portable and + usable on as many systems as possible. + + This is, in practice, usually not possible. But the intention is there. + And it is a good one. + + They have been tested successfully on: + + * Debian GNU/Linux Wheezy + * Debian GNU/Linux Jessie + * Ubuntu Trusty + * CentOS 6.0 + * CentOS 6.6 + * Gentoo + +SUPPORT + If you find that they don't work for your platform, please report this + at the ZFS On Linux issue tracker at https://github.com/zfsonlinux/zfs/issues. + + Please include: + + * Distribution name + * Distribution version + * Where to find an install CD image + * Architecture + + If you have code to share that fixes the problem, that is much better. + But please remember to try your best keep portability in mind. If you + suspect that what you're writing/modifying won't work on anything else + than your distribution, please make sure to put that code in appropriate + if/else/fi code. 
+ + It currently MUST be bash (or fully compatible) for this to work. + + If you're making your own distribution and you want the scripts to + work on that, the biggest problem you'll (probably) have is the part + at the beginning of the "zfs-functions" file which sets up the + logging output. + +INSTALLING INIT SCRIPT LINKS + To setup the init script links in /etc/rc?.d manually on a Debian GNU/Linux + (or derived) system, run the following commands (the order is important!): + + update-rc.d zfs-import start 07 S . stop 07 0 1 6 . + update-rc.d zfs-mount start 02 2 3 4 5 . stop 06 0 1 6 . + update-rc.d zfs-zed start 07 2 3 4 5 . stop 08 0 1 6 . + update-rc.d zfs-share start 27 2 3 4 5 . stop 05 0 1 6 . + + To do the same on RedHat, Fedora and/or CentOS: + + chkconfig zfs-import + chkconfig zfs-mount + chkconfig zfs-zed + chkconfig zfs-share + + On Gentoo: + + rc-update add zfs-import boot + rc-update add zfs-mount boot + rc-update add zfs-zed default + rc-update add zfs-share default + + The idea here is to make sure all of the ZFS filesystems, including possibly + separate datasets like /var, are mounted before anything else is started. + + Then, ZED, which depends on /var, can be started. It will consume and act + on events that occurred before it started. ZED may also play a role in + sharing filesystems in the future, so it is important to start before the + 'share' service. + + Finally, we share filesystems configured with the share\* property. diff --git a/etc/init.d/zfs-import.in b/etc/init.d/zfs-import.in new file mode 100755 index 0000000000000..714cc6c089d74 --- /dev/null +++ b/etc/init.d/zfs-import.in @@ -0,0 +1,340 @@ +#!@DEFAULT_INIT_SHELL@ +# +# zfs-import This script will import ZFS pools +# +# chkconfig: 2345 01 99 +# description: This script will perform a verbatim import of ZFS pools +# during system boot. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-import +# Required-Start: mtab +# Required-Stop: $local_fs mtab +# Default-Start: S +# Default-Stop: 0 1 6 +# X-Start-Before: checkfs +# X-Stop-After: zfs-mount +# Short-Description: Import ZFS pools +# Description: Run the `zpool import` command. +### END INIT INFO +# +# NOTE: Not having '$local_fs' on Required-Start but only on Required-Stop +# is on purpose. If we have '$local_fs' in both (and X-Start-Before=checkfs) +# we get conflicts - import needs to be started extremely early, +# but not stopped too late. +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. @sysconfdir@/zfs/zfs-functions + +# ---------------------------------------------------- + +do_depend() +{ + before swap + after sysfs udev + keyword -lxc -openvz -prefix -vserver +} + +# Use the zpool cache file to import pools +do_verbatim_import() +{ + if [ -f "$ZPOOL_CACHE" ] + then + zfs_action "Importing ZFS pool(s)" \ + "$ZPOOL" import -c "$ZPOOL_CACHE" -N -a + fi +} + +# Support function to get a list of all pools, separated with ';' +find_pools() +{ + local CMD="$*" + local pools + + pools=$($CMD 2> /dev/null | \ + grep -E "pool:|^[a-zA-Z0-9]" | \ + sed 's@.*: @@' | \ + sort | \ + while read pool; do \ + echo -n "$pool;" + done) + + echo "${pools%%;}" # Return without the last ';'. 
+} + +# Find and import all visible pools, even exported ones +do_import_all_visible() +{ + local already_imported available_pools pool npools + local exception dir ZPOOL_IMPORT_PATH RET=0 r=1 + + # In case not shutdown cleanly. + [ -n "$init" ] && rm -f /etc/dfs/sharetab + + # Just simplify code later on. + if [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] + then + # It's something, but not 'yes' so it's no good to us. + unset USE_DISK_BY_ID + fi + + # Find list of already imported pools. + already_imported=$(find_pools "$ZPOOL" list -H -oname) + available_pools=$(find_pools "$ZPOOL" import) + + # Just in case - seen it happen (that a pool isn't visible/found + # with a simple "zpool import" but only when using the "-d" + # option or setting ZPOOL_IMPORT_PATH). + if [ -d "/dev/disk/by-id" ] + then + npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id) + if [ -n "$npools" ] + then + # Because we have found extra pool(s) here, which wasn't + # found 'normally', we need to force USE_DISK_BY_ID to + # make sure we're able to actually import it/them later. + USE_DISK_BY_ID='yes' + + if [ -n "$available_pools" ] + then + # Filter out duplicates (pools found with the simpl + # "zpool import" but which is also found with the + # "zpool import -d ..."). + npools=$(echo "$npools" | sed "s,$available_pools,,") + + # Add the list to the existing list of + # available pools + available_pools="$available_pools;$npools" + else + available_pools="$npools" + fi + fi + fi + + # Filter out any exceptions... + if [ -n "$ZFS_POOL_EXCEPTIONS" ] + then + local found="" + local apools="" + OLD_IFS="$IFS" ; IFS=";" + + for pool in $available_pools + do + for exception in $ZFS_POOL_EXCEPTIONS + do + [ "$pool" = "$exception" ] && continue 2 + found="$pool" + done + + if [ -n "$found" ] + then + if [ -n "$apools" ] + then + apools="$apools;$pool" + else + apools="$pool" + fi + fi + done + + IFS="$OLD_IFS" + available_pools="$apools" + fi + + # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set + # to something we can use later with the real import(s). We want to + # make sure we find all by* dirs, BUT by-vdev should be first (if it + # exists). + if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ] + then + local dirs + dirs="$(for dir in $(echo /dev/disk/by-*) + do + # Ignore by-vdev here - we want it first! + echo "$dir" | grep -q /by-vdev && continue + [ ! -d "$dir" ] && continue + + echo -n "$dir:" + done | sed 's,:$,,g')" + + if [ -d "/dev/disk/by-vdev" ] + then + # Add by-vdev at the beginning. + ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:" + fi + + # Help with getting LUKS partitions etc imported. + if [ -d "/dev/mapper" ]; then + if [ -n "$ZPOOL_IMPORT_PATH" ]; then + ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH:/dev/mapper:" + else + ZPOOL_IMPORT_PATH="/dev/mapper:" + fi + fi + + # ... and /dev at the very end, just for good measure. + ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev" + fi + + # Needs to be exported for "zpool" to catch it. + [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH + + # Mount all available pools (except those set in ZFS_POOL_EXCEPTIONS. + # + # If not interactive (run from init - variable init='/sbin/init') + # we get ONE line for all pools being imported, with just a dot + # as status for each pool. + # Example: Importing ZFS pool(s)... [OK] + # + # If it IS interactive (started from the shell manually), then we + # get one line per pool importing. 
+ # Example: Importing ZFS pool pool1 [OK] + # Importing ZFS pool pool2 [OK] + # [etc] + [ -n "$init" ] && zfs_log_begin_msg "Importing ZFS pool(s)" + OLD_IFS="$IFS" ; IFS=";" + for pool in $available_pools + do + [ -z "$pool" ] && continue + + # We have pools that haven't been imported - import them + if [ -n "$init" ] + then + # Not interactive - a dot for each pool. + # Except on Gentoo where this doesn't work. + zfs_log_progress_msg "." + else + # Interactive - one 'Importing ...' line per pool + zfs_log_begin_msg "Importing ZFS pool $pool" + fi + + # Import by using ZPOOL_IMPORT_PATH (either set above or in + # the config file) _or_ with the 'built in' default search + # paths. This is the preferred way. + "$ZPOOL" import -N ${ZPOOL_IMPORT_OPTS} "$pool" 2> /dev/null + r="$?" ; RET=$((RET + r)) + if [ "$r" -eq 0 ] + then + # Output success and process the next pool + [ -z "$init" ] && zfs_log_end_msg 0 + continue + fi + # We don't want a fail msg here, we're going to try import + # using the cache file soon and that might succeed. + [ ! -f "$ZPOOL_CACHE" ] && zfs_log_end_msg "$RET" + + if [ "$r" -gt 0 -a -f "$ZPOOL_CACHE" ] + then + # Failed to import without a cache file. Try WITH... + if [ -z "$init" ] && check_boolean "$VERBOSE_MOUNT" + then + # Interactive + Verbose = more information + zfs_log_progress_msg " using cache file" + fi + + "$ZPOOL" import -c "$ZPOOL_CACHE" -N ${ZPOOL_IMPORT_OPTS} \ + "$pool" 2> /dev/null + r="$?" ; RET=$((RET + r)) + if [ "$r" -eq 0 ] + then + [ -z "$init" ] && zfs_log_end_msg 0 + continue 3 # Next pool + fi + zfs_log_end_msg "$RET" + fi + done + [ -n "$init" ] && zfs_log_end_msg "$RET" + + IFS="$OLD_IFS" + [ -n "$already_imported" -a -z "$available_pools" ] && return 0 + + return "$RET" +} + +do_import() +{ + if check_boolean "$ZPOOL_IMPORT_ALL_VISIBLE" + then + do_import_all_visible + else + # This is the default option + do_verbatim_import + fi +} + +# Output the status and list of pools +do_status() +{ + check_module_loaded "zfs" || exit 0 + + "$ZPOOL" status && echo "" && "$ZPOOL" list +} + +do_start() +{ + if check_boolean "$VERBOSE_MOUNT" + then + zfs_log_begin_msg "Checking if ZFS userspace tools present" + fi + + if checksystem + then + check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0 + + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_begin_msg "Loading kernel ZFS infrastructure" + + if ! load_module "zfs" + then + check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 1 + return 5 + fi + check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0 + + do_import && udev_trigger # just to make sure we get zvols. + + return 0 + else + return 1 + fi +} + +# ---------------------------------------------------- + +if [ ! -e /sbin/openrc-run ] +then + case "$1" in + start) + do_start + ;; + stop) + # no-op + ;; + status) + do_status + ;; + force-reload|condrestart|reload|restart) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|status}" + exit 3 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + status() { do_status; } +fi diff --git a/etc/init.d/zfs-mount.in b/etc/init.d/zfs-mount.in new file mode 100755 index 0000000000000..9b400916f42ea --- /dev/null +++ b/etc/init.d/zfs-mount.in @@ -0,0 +1,227 @@ +#!@DEFAULT_INIT_SHELL@ +# +# zfs-mount This script will mount/umount the zfs filesystems. +# +# chkconfig: 2345 06 99 +# description: This script will mount/umount the zfs filesystems during +# system boot/shutdown. 
Configuration of which filesystems +# should be mounted is handled by the zfs 'mountpoint' and +# 'canmount' properties. See the zfs(8) man page for details. +# It is also responsible for all userspace zfs services. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-mount +# Required-Start: $local_fs zfs-import +# Required-Stop: $local_fs zfs-import +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# X-Stop-After: zfs-zed +# Short-Description: Mount ZFS filesystems and volumes +# Description: Run the `zfs mount -a` or `zfs umount -a` commands. +### END INIT INFO +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. @sysconfdir@/zfs/zfs-functions + +# ---------------------------------------------------- + +chkroot() { + while read line; do + set -- $line + if [ "$2" = "/" ]; then + return 0 + fi + done < /proc/self/mounts + + return 1 +} + +do_depend() +{ + # Try to allow people to mix and match fstab with ZFS in a way that makes sense. + if [ "$(mountinfo -s /)" = 'zfs' ] + then + before localmount + else + after localmount + fi + + # bootmisc will log to /var which may be a different zfs than root. + before bootmisc logger + + after zfs-import sysfs + use mtab + keyword -lxc -openvz -prefix -vserver +} + +# Mount all datasets/filesystems +do_mount() +{ + local verbose overlay i mntpt val + + check_boolean "$VERBOSE_MOUNT" && verbose=v + check_boolean "$DO_OVERLAY_MOUNTS" && overlay=O + + zfs_action "Mounting ZFS filesystem(s)" \ + "$ZFS" mount -a$verbose$overlay "$MOUNT_EXTRA_OPTIONS" + + # Require each volume/filesystem to have 'noauto' and no fsck + # option. This shouldn't really be necessary, as long as one + # can get zfs-import to run sufficiently early on in the boot + # process - before local mounts. This is just here in case/if + # this isn't possible. + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_begin_msg "Mounting volumes and filesystems registered in fstab" + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + dev=$(eval echo "$"FSTAB_dev_$i) + if ! in_mtab "$mntpt" && ! is_mounted "$mntpt" && [ -e "$dev" ] + then + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_progress_msg "$mntpt " + fsck "$dev" && mount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + + read_mtab "[[:space:]]zfs[[:space:]]" + read_fstab "[[:space:]]zfs[[:space:]]" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + if ! in_mtab "$mntpt" && ! is_mounted "$mntpt" + then + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_progress_msg "$mntpt " + mount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0 + + return 0 +} + +# Unmount all filesystems +do_unmount() +{ + local i var mntpt + + # This shouldn't really be necessary, as long as one can get + # zfs-import to run sufficiently late in the shutdown/reboot process + # - after unmounting local filesystems. This is just here in case/if + # this isn't possible. 
+ zfs_action "Unmounting ZFS filesystems" "$ZFS" unmount -a + + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_begin_msg "Unmounting volumes and filesystems registered in fstab" + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + dev=$(eval echo "$"FSTAB_dev_$i) + if in_mtab "$mntpt" + then + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_progress_msg "$mntpt " + umount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + + read_mtab "[[:space:]]zfs[[:space:]]" + read_fstab "[[:space:]]zfs[[:space:]]" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + if in_mtab "$mntpt"; then + check_boolean "$VERBOSE_MOUNT" && \ + zfs_log_progress_msg "$mntpt " + umount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0 + + return 0 +} + +do_start() +{ + check_boolean "$ZFS_MOUNT" || exit 0 + + check_module_loaded "zfs" || exit 0 + + # Ensure / exists in /proc/self/mounts. + # This should be handled by rc.sysinit but lets be paranoid. + if ! chkroot + then + mount -f / + fi + + do_mount +} + +do_stop() +{ + check_boolean "$ZFS_UNMOUNT" || exit 0 + + check_module_loaded "zfs" || exit 0 + + do_unmount +} + +# ---------------------------------------------------- + +if [ ! -e /sbin/openrc-run ] +then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + force-reload|condrestart|reload|restart|status) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } +fi diff --git a/etc/init.d/zfs-share.in b/etc/init.d/zfs-share.in new file mode 100755 index 0000000000000..3256d1d067f15 --- /dev/null +++ b/etc/init.d/zfs-share.in @@ -0,0 +1,85 @@ +#!@DEFAULT_INIT_SHELL@ +# +# zfs-share This script will network share zfs filesystems and volumes. +# +# chkconfig: 2345 30 99 +# description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-share +# Required-Start: $local_fs $network $remote_fs zfs-mount +# Required-Stop: $local_fs $network $remote_fs zfs-mount +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Should-Start: iscsi iscsitarget istgt scst @DEFAULT_INIT_NFS_SERVER@ samba samba4 zfs-mount zfs-zed +# Should-Stop: iscsi iscsitarget istgt scst @DEFAULT_INIT_NFS_SERVER@ samba samba4 zfs-mount zfs-zed +# Short-Description: Network share ZFS datasets and volumes. +# Description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +### END INIT INFO +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. 
@sysconfdir@/zfs/zfs-functions + +# ---------------------------------------------------- + +do_depend() +{ + after sysfs zfs-mount zfs-zed + keyword -lxc -openvz -prefix -vserver +} + +do_start() +{ + check_boolean "$ZFS_SHARE" || exit 0 + + check_module_loaded "zfs" || exit 0 + + zfs_action "Sharing ZFS filesystems" "$ZFS" share -a +} + +do_stop() +{ + check_boolean "$ZFS_UNSHARE" || exit 0 + + check_module_loaded "zfs" || exit 0 + + zfs_action "Unsharing ZFS filesystems" "$ZFS" unshare -a +} + +# ---------------------------------------------------- + +if [ ! -e /sbin/openrc-run ]; then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + force-reload|reload|restart|status) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } +fi diff --git a/etc/init.d/zfs-zed.in b/etc/init.d/zfs-zed.in new file mode 100755 index 0000000000000..6af9ee60c8c16 --- /dev/null +++ b/etc/init.d/zfs-zed.in @@ -0,0 +1,134 @@ +#!@DEFAULT_INIT_SHELL@ +# +# zfs-zed +# +# chkconfig: 2345 29 99 +# description: This script will start and stop the ZFS Event Daemon. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-zed +# Required-Start: zfs-mount +# Required-Stop: zfs-mount +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# X-Stop-After: zfs-share +# Short-Description: ZFS Event Daemon +# Description: zed monitors ZFS events. When a zevent is posted, zed +# will run any scripts that have been enabled for the +# corresponding zevent class. +### END INIT INFO +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. @sysconfdir@/zfs/zfs-functions + +ZED_NAME="zed" +ZED_PIDFILE="@runstatedir@/$ZED_NAME.pid" + +extra_started_commands="reload" + +# Exit if the package is not installed +[ -x "$ZED" ] || exit 0 + +# ---------------------------------------------------- + +do_depend() +{ + after zfs-mount localmount +} + +do_start() +{ + check_module_loaded "zfs" || exit 0 + + ZED_ARGS="$ZED_ARGS -p $ZED_PIDFILE" + + zfs_action "Starting ZFS Event Daemon" zfs_daemon_start \ + "$ZED_PIDFILE" "$ZED" "$ZED_ARGS" + return "$?" +} + +do_stop() +{ + local pools RET + check_module_loaded "zfs" || exit 0 + + zfs_action "Stopping ZFS Event Daemon" zfs_daemon_stop \ + "$ZED_PIDFILE" "$ZED" "$ZED_NAME" + if [ "$?" -eq "0" ] + then + # Let's see if we have any pools imported + pools=$("$ZPOOL" list -H -oname) + if [ -z "$pools" ] + then + # No pools imported, it is/should be safe/possible to + # unload modules. + zfs_action "Unloading modules" rmmod zfs zunicode \ + zavl zcommon znvpair zlua spl + return "$?" + fi + else + return "$?" + fi +} + +do_status() +{ + check_module_loaded "zfs" || exit 0 + + zfs_daemon_status "$ZED_PIDFILE" "$ZED" "$ZED_NAME" + return "$?" +} + +do_reload() +{ + check_module_loaded "zfs" || exit 0 + + zfs_action "Reloading ZFS Event Daemon" zfs_daemon_reload \ + "$ZED_PIDFILE" "$ZED_NAME" + return "$?" +} + +# ---------------------------------------------------- + +if [ ! 
-e /sbin/openrc-run ]; then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + status) + do_status + ;; + reload|force-reload) + do_reload + ;; + restart) + do_stop + do_start + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status|reload|restart}" + exit 1 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } + status() { do_status; } + reload() { do_reload; } +fi diff --git a/etc/modules-load.d/.gitignore b/etc/modules-load.d/.gitignore new file mode 100644 index 0000000000000..fee9217083375 --- /dev/null +++ b/etc/modules-load.d/.gitignore @@ -0,0 +1 @@ +*.conf diff --git a/etc/modules-load.d/Makefile.am b/etc/modules-load.d/Makefile.am new file mode 100644 index 0000000000000..8a2955767b1e6 --- /dev/null +++ b/etc/modules-load.d/Makefile.am @@ -0,0 +1,2 @@ +dist_modulesload_DATA = \ + zfs.conf diff --git a/etc/modules-load.d/zfs.conf b/etc/modules-load.d/zfs.conf new file mode 100644 index 0000000000000..44e1bb3ed906b --- /dev/null +++ b/etc/modules-load.d/zfs.conf @@ -0,0 +1,3 @@ +# The default behavior is to allow udev to load the kernel modules on demand. +# Uncomment the following line to unconditionally load them at boot. +#zfs diff --git a/etc/sudoers.d/Makefile.am b/etc/sudoers.d/Makefile.am new file mode 100644 index 0000000000000..6f7ac8dbfd619 --- /dev/null +++ b/etc/sudoers.d/Makefile.am @@ -0,0 +1,5 @@ +sudoersddir = $(sysconfdir)/sudoers.d +sudoersd_DATA = zfs + +EXTRA_DIST = \ + zfs diff --git a/etc/sudoers.d/zfs b/etc/sudoers.d/zfs new file mode 100644 index 0000000000000..82a25ba81ec77 --- /dev/null +++ b/etc/sudoers.d/zfs @@ -0,0 +1,9 @@ +## +## Allow any user to run `zpool iostat/status -c smart` in order +## to read basic SMART health statistics for a pool. +## +## CAUTION: Any syntax error introduced here will break sudo. 
+## Editing with 'visudo' is recommended: visudo -f /etc/sudoers.d/zfs +## + +# ALL ALL = (root) NOPASSWD: /usr/sbin/smartctl -a /dev/[hsv]d[a-z0-9]* diff --git a/etc/systemd/Makefile.am b/etc/systemd/Makefile.am new file mode 100644 index 0000000000000..7b47b93fc1059 --- /dev/null +++ b/etc/systemd/Makefile.am @@ -0,0 +1 @@ +SUBDIRS = system system-generators diff --git a/etc/systemd/system-generators/.gitignore b/etc/systemd/system-generators/.gitignore new file mode 100644 index 0000000000000..fc2ebc1a29502 --- /dev/null +++ b/etc/systemd/system-generators/.gitignore @@ -0,0 +1 @@ +zfs-mount-generator diff --git a/etc/systemd/system-generators/Makefile.am b/etc/systemd/system-generators/Makefile.am new file mode 100644 index 0000000000000..fee88dad8ca12 --- /dev/null +++ b/etc/systemd/system-generators/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Substfiles.am + +systemdgenerator_SCRIPTS = \ + zfs-mount-generator + +SUBSTFILES += $(systemdgenerator_SCRIPTS) diff --git a/etc/systemd/system-generators/zfs-mount-generator.in b/etc/systemd/system-generators/zfs-mount-generator.in new file mode 100755 index 0000000000000..fdef13cfa95ac --- /dev/null +++ b/etc/systemd/system-generators/zfs-mount-generator.in @@ -0,0 +1,450 @@ +#!/bin/sh + +# zfs-mount-generator - generates systemd mount units for zfs +# Copyright (c) 2017 Antonio Russo <antonio.e.russo@gmail.com> +# Copyright (c) 2020 InsanePrawn <insane.prawny@gmail.com> +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +set -e + +FSLIST="@sysconfdir@/zfs/zfs-list.cache" + +[ -d "${FSLIST}" ] || exit 0 + +do_fail() { + printf 'zfs-mount-generator: %s\n' "$*" > /dev/kmsg + exit 1 +} + +# test if $1 is in space-separated list $2 +is_known() { + query="$1" + IFS=' ' + # protect against special characters + set -f + for element in $2 ; do + if [ "$query" = "$element" ] ; then + return 0 + fi + done + return 1 +} + +# create dependency on unit file $1 +# of type $2, i.e. 
"wants" or "requires" +# in the target units from space-separated list $3 +create_dependencies() { + unitfile="$1" + suffix="$2" + # protect against special characters + set -f + for target in $3 ; do + target_dir="${dest_norm}/${target}.${suffix}/" + mkdir -p "${target_dir}" + ln -s "../${unitfile}" "${target_dir}" + done +} + +# see systemd.generator +if [ $# -eq 0 ] ; then + dest_norm="/tmp" +elif [ $# -eq 3 ] ; then + dest_norm="${1}" +else + do_fail "zero or three arguments required" +fi + + +# All needed information about each ZFS is available from +# zfs list -H -t filesystem -o <properties> +# cached in $FSLIST, and each line is processed by the following function: +# See the list below for the properties and their order + +process_line() { + + # zfs list -H -o name,... + # fields are tab separated + IFS="$(printf '\t')" + # protect against special characters in, e.g., mountpoints + set -f + # shellcheck disable=SC2086 + set -- $1 + dataset="${1}" + p_mountpoint="${2}" + p_canmount="${3}" + p_atime="${4}" + p_relatime="${5}" + p_devices="${6}" + p_exec="${7}" + p_readonly="${8}" + p_setuid="${9}" + p_nbmand="${10}" + p_encroot="${11}" + p_keyloc="${12}" + p_systemd_requires="${13}" + p_systemd_requiresmountsfor="${14}" + p_systemd_before="${15}" + p_systemd_after="${16}" + p_systemd_wantedby="${17}" + p_systemd_requiredby="${18}" + p_systemd_nofail="${19}" + p_systemd_ignore="${20}" + + # Minimal pre-requisites to mount a ZFS dataset + # By ordering before zfs-mount.service, we avoid race conditions. + after="zfs-import.target" + before="zfs-mount.service" + wants="zfs-import.target" + requires="" + requiredmounts="" + bindsto="" + wantedby="" + requiredby="" + noauto="off" + + if [ -n "${p_systemd_after}" ] && \ + [ "${p_systemd_after}" != "-" ] ; then + after="${p_systemd_after} ${after}" + fi + + if [ -n "${p_systemd_before}" ] && \ + [ "${p_systemd_before}" != "-" ] ; then + before="${p_systemd_before} ${before}" + fi + + if [ -n "${p_systemd_requires}" ] && \ + [ "${p_systemd_requires}" != "-" ] ; then + requires="Requires=${p_systemd_requires}" + fi + + if [ -n "${p_systemd_requiresmountsfor}" ] && \ + [ "${p_systemd_requiresmountsfor}" != "-" ] ; then + requiredmounts="RequiresMountsFor=${p_systemd_requiresmountsfor}" + fi + + # Handle encryption + if [ -n "${p_encroot}" ] && + [ "${p_encroot}" != "-" ] ; then + keyloadunit="zfs-load-key-$(systemd-escape "${p_encroot}").service" + if [ "${p_encroot}" = "${dataset}" ] ; then + keymountdep="" + if [ "${p_keyloc%%://*}" = "file" ] ; then + if [ -n "${requiredmounts}" ] ; then + keymountdep="${requiredmounts} '${p_keyloc#file://}'" + else + keymountdep="RequiresMountsFor='${p_keyloc#file://}'" + fi + keyloadscript="@sbindir@/zfs load-key \"${dataset}\"" + elif [ "${p_keyloc}" = "prompt" ] ; then + keyloadscript="\ +count=0;\ +while [ \$\$count -lt 3 ];do\ + systemd-ask-password --id=\"zfs:${dataset}\"\ + \"Enter passphrase for ${dataset}:\"|\ + @sbindir@/zfs load-key \"${dataset}\" && exit 0;\ + count=\$\$((count + 1));\ +done;\ +exit 1" + else + printf 'zfs-mount-generator: (%s) invalid keylocation\n' \ + "${dataset}" >/dev/kmsg + fi + keyloadcmd="\ +/bin/sh -c '\ +set -eu;\ +keystatus=\"\$\$(@sbindir@/zfs get -H -o value keystatus \"${dataset}\")\";\ +[ \"\$\$keystatus\" = \"unavailable\" ] || exit 0;\ +${keyloadscript}'" + keyunloadcmd="\ +/bin/sh -c '\ +set -eu;\ +keystatus=\"\$\$(@sbindir@/zfs get -H -o value keystatus \"${dataset}\")\";\ +[ \"\$\$keystatus\" = \"available\" ] || exit 0;\ +@sbindir@/zfs unload-key 
\"${dataset}\"'" + + + + # Generate the key-load .service unit + # + # Note: It is tempting to use a `<<EOF` style here-document for this, but + # bash requires a writable /tmp or $TMPDIR for that. This is not always + # available early during boot. + # + echo \ +"# Automatically generated by zfs-mount-generator + +[Unit] +Description=Load ZFS key for ${dataset} +SourcePath=${cachefile} +Documentation=man:zfs-mount-generator(8) +DefaultDependencies=no +Wants=${wants} +After=${after} +${requires} +${keymountdep} + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=${keyloadcmd} +ExecStop=${keyunloadcmd}" > "${dest_norm}/${keyloadunit}" + fi + # Update the dependencies for the mount file to want the + # key-loading unit. + wants="${wants}" + bindsto="BindsTo=${keyloadunit}" + after="${after} ${keyloadunit}" + fi + + # Prepare the .mount unit + + # skip generation of the mount unit if org.openzfs.systemd:ignore is "on" + if [ -n "${p_systemd_ignore}" ] ; then + if [ "${p_systemd_ignore}" = "on" ] ; then + return + elif [ "${p_systemd_ignore}" = "-" ] \ + || [ "${p_systemd_ignore}" = "off" ] ; then + : # This is OK + else + do_fail "invalid org.openzfs.systemd:ignore for ${dataset}" + fi + fi + + # Check for canmount=off . + if [ "${p_canmount}" = "off" ] ; then + return + elif [ "${p_canmount}" = "noauto" ] ; then + noauto="on" + elif [ "${p_canmount}" = "on" ] ; then + : # This is OK + else + do_fail "invalid canmount for ${dataset}" + fi + + # Check for legacy and blank mountpoints. + if [ "${p_mountpoint}" = "legacy" ] ; then + return + elif [ "${p_mountpoint}" = "none" ] ; then + return + elif [ "${p_mountpoint%"${p_mountpoint#?}"}" != "/" ] ; then + do_fail "invalid mountpoint for ${dataset}" + fi + + # Escape the mountpoint per systemd policy. 
+ mountfile="$(systemd-escape --path --suffix=mount "${p_mountpoint}")" + + # Parse options + # see lib/libzfs/libzfs_mount.c:zfs_add_options + opts="" + + # atime + if [ "${p_atime}" = on ] ; then + # relatime + if [ "${p_relatime}" = on ] ; then + opts="${opts},atime,relatime" + elif [ "${p_relatime}" = off ] ; then + opts="${opts},atime,strictatime" + else + printf 'zfs-mount-generator: (%s) invalid relatime\n' \ + "${dataset}" >/dev/kmsg + fi + elif [ "${p_atime}" = off ] ; then + opts="${opts},noatime" + else + printf 'zfs-mount-generator: (%s) invalid atime\n' \ + "${dataset}" >/dev/kmsg + fi + + # devices + if [ "${p_devices}" = on ] ; then + opts="${opts},dev" + elif [ "${p_devices}" = off ] ; then + opts="${opts},nodev" + else + printf 'zfs-mount-generator: (%s) invalid devices\n' \ + "${dataset}" >/dev/kmsg + fi + + # exec + if [ "${p_exec}" = on ] ; then + opts="${opts},exec" + elif [ "${p_exec}" = off ] ; then + opts="${opts},noexec" + else + printf 'zfs-mount-generator: (%s) invalid exec\n' \ + "${dataset}" >/dev/kmsg + fi + + # readonly + if [ "${p_readonly}" = on ] ; then + opts="${opts},ro" + elif [ "${p_readonly}" = off ] ; then + opts="${opts},rw" + else + printf 'zfs-mount-generator: (%s) invalid readonly\n' \ + "${dataset}" >/dev/kmsg + fi + + # setuid + if [ "${p_setuid}" = on ] ; then + opts="${opts},suid" + elif [ "${p_setuid}" = off ] ; then + opts="${opts},nosuid" + else + printf 'zfs-mount-generator: (%s) invalid setuid\n' \ + "${dataset}" >/dev/kmsg + fi + + # nbmand + if [ "${p_nbmand}" = on ] ; then + opts="${opts},mand" + elif [ "${p_nbmand}" = off ] ; then + opts="${opts},nomand" + else + printf 'zfs-mount-generator: (%s) invalid nbmand\n' \ + "${dataset}" >/dev/kmsg + fi + + if [ -n "${p_systemd_wantedby}" ] && \ + [ "${p_systemd_wantedby}" != "-" ] ; then + noauto="on" + if [ "${p_systemd_wantedby}" = "none" ] ; then + wantedby="" + else + wantedby="${p_systemd_wantedby}" + before="${before} ${wantedby}" + fi + fi + + if [ -n "${p_systemd_requiredby}" ] && \ + [ "${p_systemd_requiredby}" != "-" ] ; then + noauto="on" + if [ "${p_systemd_requiredby}" = "none" ] ; then + requiredby="" + else + requiredby="${p_systemd_requiredby}" + before="${before} ${requiredby}" + fi + fi + + # For datasets with canmount=on, a dependency is created for + # local-fs.target by default. To avoid regressions, this dependency + # is reduced to "wants" rather than "requires" when nofail is not "off". + # **THIS MAY CHANGE** + # noauto=on disables this behavior completely. + if [ "${noauto}" != "on" ] ; then + if [ "${p_systemd_nofail}" = "off" ] ; then + requiredby="local-fs.target" + before="${before} local-fs.target" + else + wantedby="local-fs.target" + if [ "${p_systemd_nofail}" != "on" ] ; then + before="${before} local-fs.target" + fi + fi + fi + + # Handle existing files: + # 1. We never overwrite existing files, although we may delete + # files if we're sure they were created by us. (see 5.) + # 2. We handle files differently based on canmount. Units with canmount=on + # always have precedence over noauto. This is enforced by the sort pipe + # in the loop around this function. + # It is important to use $p_canmount and not $noauto here, since we + # sort by canmount while other properties also modify $noauto, e.g. + # org.openzfs.systemd:wanted-by. + # 3. If no unit file exists for a noauto dataset, we create one. 
+ # Additionally, we use $noauto_files to track the unit file names + # (which are the systemd-escaped mountpoints) of all (exclusively) + # noauto datasets that had a file created. + # 4. If the file to be created is found in the tracking variable, + # we do NOT create it. + # 5. If a file exists for a noauto dataset, we check whether the file + # name is in the variable. If it is, we have multiple noauto datasets + # for the same mountpoint. In such cases, we remove the file for safety. + # To avoid further noauto datasets creating a file for this path again, + # we leave the file name in the tracking variable. + if [ -e "${dest_norm}/${mountfile}" ] ; then + if is_known "$mountfile" "$noauto_files" ; then + # if it's in $noauto_files, we must be noauto too. See 2. + printf 'zfs-mount-generator: removing duplicate noauto %s\n' \ + "${mountfile}" >/dev/kmsg + # See 5. + rm "${dest_norm}/${mountfile}" + else + # don't log for canmount=noauto + if [ "${p_canmount}" = "on" ] ; then + printf 'zfs-mount-generator: %s already exists. Skipping.\n' \ + "${mountfile}" >/dev/kmsg + fi + fi + # file exists; Skip current dataset. + return + else + if is_known "${mountfile}" "${noauto_files}" ; then + # See 4. + return + elif [ "${p_canmount}" = "noauto" ] ; then + noauto_files="${mountfile} ${noauto_files}" + fi + fi + + # Create the .mount unit file. + # + # (Do not use `<<EOF`-style here-documents for this, see warning above) + # + echo \ +"# Automatically generated by zfs-mount-generator + +[Unit] +SourcePath=${cachefile} +Documentation=man:zfs-mount-generator(8) + +Before=${before} +After=${after} +Wants=${wants} +${bindsto} +${requires} +${requiredmounts} + +[Mount] +Where=${p_mountpoint} +What=${dataset} +Type=zfs +Options=defaults${opts},zfsutil" > "${dest_norm}/${mountfile}" + + # Finally, create the appropriate dependencies + create_dependencies "${mountfile}" "wants" "$wantedby" + create_dependencies "${mountfile}" "requires" "$requiredby" + +} + +for cachefile in "${FSLIST}/"* ; do + # Sort cachefile's lines by canmount, "on" before "noauto" + # and feed each line into process_line + sort -t "$(printf '\t')" -k 3 -r "${cachefile}" | \ + ( # subshell is necessary for `sort|while read` and $noauto_files + noauto_files="" + while read -r fs ; do + process_line "${fs}" + done + ) +done diff --git a/etc/systemd/system/.gitignore b/etc/systemd/system/.gitignore new file mode 100644 index 0000000000000..efada54ad9321 --- /dev/null +++ b/etc/systemd/system/.gitignore @@ -0,0 +1,3 @@ +*.service +*.target +*.preset diff --git a/etc/systemd/system/50-zfs.preset.in b/etc/systemd/system/50-zfs.preset.in new file mode 100644 index 0000000000000..e4056a92cd985 --- /dev/null +++ b/etc/systemd/system/50-zfs.preset.in @@ -0,0 +1,9 @@ +# ZFS is enabled by default +enable zfs-import-cache.service +disable zfs-import-scan.service +enable zfs-import.target +enable zfs-mount.service +enable zfs-share.service +enable zfs-zed.service +enable zfs-volume-wait.service +enable zfs.target diff --git a/etc/systemd/system/Makefile.am b/etc/systemd/system/Makefile.am new file mode 100644 index 0000000000000..c374a52ac7db1 --- /dev/null +++ b/etc/systemd/system/Makefile.am @@ -0,0 +1,21 @@ +include $(top_srcdir)/config/Substfiles.am + +systemdpreset_DATA = \ + 50-zfs.preset + +systemdunit_DATA = \ + zfs-zed.service \ + zfs-import-cache.service \ + zfs-import-scan.service \ + zfs-mount.service \ + zfs-share.service \ + zfs-volume-wait.service \ + zfs-import.target \ + zfs-volumes.target \ + zfs.target + +SUBSTFILES += 
$(systemdpreset_DATA) $(systemdunit_DATA) + +install-data-hook: + $(MKDIR_P) "$(DESTDIR)$(systemdunitdir)" + ln -sf /dev/null "$(DESTDIR)$(systemdunitdir)/zfs-import.service" diff --git a/etc/systemd/system/zfs-import-cache.service.in b/etc/systemd/system/zfs-import-cache.service.in new file mode 100644 index 0000000000000..47c5b07f8ff03 --- /dev/null +++ b/etc/systemd/system/zfs-import-cache.service.in @@ -0,0 +1,20 @@ +[Unit] +Description=Import ZFS pools by cache file +Documentation=man:zpool(8) +DefaultDependencies=no +Requires=systemd-udev-settle.service +After=systemd-udev-settle.service +After=cryptsetup.target +After=multipathd.target +After=systemd-remount-fs.service +Before=zfs-import.target +ConditionPathExists=@sysconfdir@/zfs/zpool.cache +ConditionPathIsDirectory=/sys/module/zfs + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=@sbindir@/zpool import -c @sysconfdir@/zfs/zpool.cache -aN + +[Install] +WantedBy=zfs-import.target diff --git a/etc/systemd/system/zfs-import-scan.service.in b/etc/systemd/system/zfs-import-scan.service.in new file mode 100644 index 0000000000000..6520f32463dd4 --- /dev/null +++ b/etc/systemd/system/zfs-import-scan.service.in @@ -0,0 +1,19 @@ +[Unit] +Description=Import ZFS pools by device scanning +Documentation=man:zpool(8) +DefaultDependencies=no +Requires=systemd-udev-settle.service +After=systemd-udev-settle.service +After=cryptsetup.target +After=multipathd.target +Before=zfs-import.target +ConditionPathExists=!@sysconfdir@/zfs/zpool.cache +ConditionPathIsDirectory=/sys/module/zfs + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=@sbindir@/zpool import -aN -o cachefile=none + +[Install] +WantedBy=zfs-import.target diff --git a/etc/systemd/system/zfs-import.target.in b/etc/systemd/system/zfs-import.target.in new file mode 100644 index 0000000000000..8d78a7a960307 --- /dev/null +++ b/etc/systemd/system/zfs-import.target.in @@ -0,0 +1,6 @@ +[Unit] +Description=ZFS pool import target +Before=dracut-mount.service + +[Install] +WantedBy=zfs.target diff --git a/etc/systemd/system/zfs-mount.service.in b/etc/systemd/system/zfs-mount.service.in new file mode 100644 index 0000000000000..480f39a49769e --- /dev/null +++ b/etc/systemd/system/zfs-mount.service.in @@ -0,0 +1,18 @@ +[Unit] +Description=Mount ZFS filesystems +Documentation=man:zfs(8) +DefaultDependencies=no +After=systemd-udev-settle.service +After=zfs-import.target +After=systemd-remount-fs.service +Before=local-fs.target +Before=systemd-random-seed.service +ConditionPathIsDirectory=/sys/module/zfs + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=@sbindir@/zfs mount -a + +[Install] +WantedBy=zfs.target diff --git a/etc/systemd/system/zfs-share.service.in b/etc/systemd/system/zfs-share.service.in new file mode 100644 index 0000000000000..b720085874e58 --- /dev/null +++ b/etc/systemd/system/zfs-share.service.in @@ -0,0 +1,18 @@ +[Unit] +Description=ZFS file system shares +Documentation=man:zfs(8) +After=nfs-server.service nfs-kernel-server.service +After=smb.service +Before=rpc-statd-notify.service +Wants=zfs-mount.service +After=zfs-mount.service +PartOf=nfs-server.service nfs-kernel-server.service +PartOf=smb.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=@sbindir@/zfs share -a + +[Install] +WantedBy=zfs.target diff --git a/etc/systemd/system/zfs-volume-wait.service.in b/etc/systemd/system/zfs-volume-wait.service.in new file mode 100644 index 0000000000000..75bd9fcdd56cb --- /dev/null +++ b/etc/systemd/system/zfs-volume-wait.service.in @@ -0,0 +1,13 
@@ +[Unit] +Description=Wait for ZFS Volume (zvol) links in /dev +DefaultDependencies=no +After=systemd-udev-settle.service +After=zfs-import.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=@bindir@/zvol_wait + +[Install] +WantedBy=zfs-volumes.target diff --git a/etc/systemd/system/zfs-volumes.target.in b/etc/systemd/system/zfs-volumes.target.in new file mode 100644 index 0000000000000..5cb9a10f49c5a --- /dev/null +++ b/etc/systemd/system/zfs-volumes.target.in @@ -0,0 +1,7 @@ +[Unit] +Description=ZFS volumes are ready +After=zfs-volume-wait.service +Requires=zfs-volume-wait.service + +[Install] +WantedBy=zfs.target diff --git a/etc/systemd/system/zfs-zed.service.in b/etc/systemd/system/zfs-zed.service.in new file mode 100644 index 0000000000000..f4313625ee5ec --- /dev/null +++ b/etc/systemd/system/zfs-zed.service.in @@ -0,0 +1,11 @@ +[Unit] +Description=ZFS Event Daemon (zed) +Documentation=man:zed(8) + +[Service] +ExecStart=@sbindir@/zed -F +Restart=on-abort + +[Install] +Alias=zed.service +WantedBy=zfs.target diff --git a/etc/systemd/system/zfs.target.in b/etc/systemd/system/zfs.target.in new file mode 100644 index 0000000000000..4699463b0ddf3 --- /dev/null +++ b/etc/systemd/system/zfs.target.in @@ -0,0 +1,5 @@ +[Unit] +Description=ZFS startup target + +[Install] +WantedBy=multi-user.target diff --git a/etc/zfs/.gitignore b/etc/zfs/.gitignore new file mode 100644 index 0000000000000..1b2d752debdaf --- /dev/null +++ b/etc/zfs/.gitignore @@ -0,0 +1 @@ +zfs-functions diff --git a/etc/zfs/Makefile.am b/etc/zfs/Makefile.am new file mode 100644 index 0000000000000..b9123c176b93c --- /dev/null +++ b/etc/zfs/Makefile.am @@ -0,0 +1,15 @@ +include $(top_srcdir)/config/Substfiles.am + +pkgsysconfdir = $(sysconfdir)/zfs + +dist_pkgsysconf_DATA = \ + vdev_id.conf.alias.example \ + vdev_id.conf.sas_direct.example \ + vdev_id.conf.sas_switch.example \ + vdev_id.conf.multipath.example \ + vdev_id.conf.scsi.example + +pkgsysconf_SCRIPTS = \ + zfs-functions + +SUBSTFILES += $(pkgsysconf_SCRIPTS) diff --git a/etc/zfs/vdev_id.conf.alias.example b/etc/zfs/vdev_id.conf.alias.example new file mode 100644 index 0000000000000..33735b05b9566 --- /dev/null +++ b/etc/zfs/vdev_id.conf.alias.example @@ -0,0 +1,4 @@ +# by-vdev +# name fully qualified or base name of device link +alias d1 /dev/disk/by-id/wwn-0x5000c5002de3b9ca +alias d2 wwn-0x5000c5002def789e diff --git a/etc/zfs/vdev_id.conf.multipath.example b/etc/zfs/vdev_id.conf.multipath.example new file mode 100644 index 0000000000000..c1359d37d59c3 --- /dev/null +++ b/etc/zfs/vdev_id.conf.multipath.example @@ -0,0 +1,7 @@ +multipath yes + +# PCI_ID HBA PORT CHANNEL NAME +channel 85:00.0 1 A +channel 85:00.0 0 B +channel 86:00.0 1 A +channel 86:00.0 0 B diff --git a/etc/zfs/vdev_id.conf.sas_direct.example b/etc/zfs/vdev_id.conf.sas_direct.example new file mode 100644 index 0000000000000..d17ed149d89bc --- /dev/null +++ b/etc/zfs/vdev_id.conf.sas_direct.example @@ -0,0 +1,28 @@ +multipath no +topology sas_direct +phys_per_port 4 + +# Additionally create /dev/by-enclosure/ symlinks for enclosure devices +enclosure_symlinks yes + +# PCI_ID HBA PORT CHANNEL NAME +channel 85:00.0 1 A +channel 85:00.0 0 B +channel 86:00.0 1 C +channel 86:00.0 0 D + + +# Custom mapping for Channel A + +# Linux Mapped +# Slot Slot Channel +slot 1 7 A +slot 2 10 A +slot 3 3 A +slot 4 6 A + +# Default mapping for B, C, and D +slot 1 4 +slot 2 2 +slot 3 1 +slot 4 3 diff --git a/etc/zfs/vdev_id.conf.sas_switch.example b/etc/zfs/vdev_id.conf.sas_switch.example new file mode 
100644 index 0000000000000..b87d655274529 --- /dev/null +++ b/etc/zfs/vdev_id.conf.sas_switch.example @@ -0,0 +1,7 @@ +topology sas_switch + +# SWITCH PORT CHANNEL NAME +channel 1 A +channel 2 B +channel 3 C +channel 4 D diff --git a/etc/zfs/vdev_id.conf.scsi.example b/etc/zfs/vdev_id.conf.scsi.example new file mode 100644 index 0000000000000..b8c0ab2bf67d9 --- /dev/null +++ b/etc/zfs/vdev_id.conf.scsi.example @@ -0,0 +1,9 @@ +multipath no +topology scsi +phys_per_port 1 +# Usually scsi disks are numbered from 0, but this can be offset, to +# match the physical bay numbers, as follows: +first_bay_number 1 + +# PCI_ID HBA PORT CHANNEL NAME +channel 0c:00.0 0 Y diff --git a/etc/zfs/zfs-functions.in b/etc/zfs/zfs-functions.in new file mode 100644 index 0000000000000..c2ce6157c6e09 --- /dev/null +++ b/etc/zfs/zfs-functions.in @@ -0,0 +1,434 @@ +# This is a script with common functions etc used by zfs-import, zfs-mount, +# zfs-share and zfs-zed. +# +# It is _NOT_ to be called independently +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +PATH=/sbin:/bin:/usr/bin:/usr/sbin + +# Source function library +if [ -f /etc/rc.d/init.d/functions ]; then + # RedHat and derivates + . /etc/rc.d/init.d/functions +elif [ -L /etc/init.d/functions.sh ]; then + # Gentoo + . /etc/init.d/functions.sh +elif [ -f /lib/lsb/init-functions ]; then + # LSB, Debian GNU/Linux and derivates + . /lib/lsb/init-functions +fi + +# Of course the functions we need are called differently +# on different distributions - it would be way too easy +# otherwise!! +if type log_failure_msg > /dev/null 2>&1 ; then + # LSB functions - fall through + zfs_log_begin_msg() { log_begin_msg "$1"; } + zfs_log_end_msg() { log_end_msg "$1"; } + zfs_log_failure_msg() { log_failure_msg "$1"; } + zfs_log_progress_msg() { log_progress_msg "$1"; } +elif type success > /dev/null 2>&1 ; then + # Fedora/RedHat functions + zfs_set_ifs() { + # For some reason, the init function library have a problem + # with a changed IFS, so this function goes around that. + local tIFS="$1" + if [ -n "$tIFS" ] + then + TMP_IFS="$IFS" + IFS="$tIFS" + fi + } + + zfs_log_begin_msg() { echo -n "$1 "; } + zfs_log_end_msg() { + zfs_set_ifs "$OLD_IFS" + if [ "$1" -eq 0 ]; then + success + else + failure + fi + echo + zfs_set_ifs "$TMP_IFS" + } + zfs_log_failure_msg() { + zfs_set_ifs "$OLD_IFS" + failure + echo + zfs_set_ifs "$TMP_IFS" + } + zfs_log_progress_msg() { echo -n $"$1"; } +elif type einfo > /dev/null 2>&1 ; then + # Gentoo functions + zfs_log_begin_msg() { ebegin "$1"; } + zfs_log_end_msg() { eend "$1"; } + zfs_log_failure_msg() { eend "$1"; } +# zfs_log_progress_msg() { echo -n "$1"; } + zfs_log_progress_msg() { echo -n; } +else + # Unknown - simple substitutes. + zfs_log_begin_msg() { echo -n "$1"; } + zfs_log_end_msg() { + ret=$1 + if [ "$ret" -ge 1 ]; then + echo " failed!" 
+ else + echo " success" + fi + return "$ret" + } + zfs_log_failure_msg() { echo "$1"; } + zfs_log_progress_msg() { echo -n "$1"; } +fi + +# Paths to what we need +ZFS="@sbindir@/zfs" +ZED="@sbindir@/zed" +ZPOOL="@sbindir@/zpool" +ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" + +# Sensible defaults +ZFS_MOUNT='yes' +ZFS_UNMOUNT='yes' +ZFS_SHARE='yes' +ZFS_UNSHARE='yes' + +# Source zfs configuration, overriding the defaults +if [ -f @initconfdir@/zfs ]; then + . @initconfdir@/zfs +fi + +# ---------------------------------------------------- + +export ZFS ZED ZPOOL ZPOOL_CACHE ZFS_MOUNT ZFS_UNMOUNT ZFS_SHARE ZFS_UNSHARE + +zfs_action() +{ + local MSG="$1"; shift + local CMD="$*" + local ret + + zfs_log_begin_msg "$MSG " + $CMD + ret=$? + if [ "$ret" -eq 0 ]; then + zfs_log_end_msg $ret + else + zfs_log_failure_msg $ret + fi + + return $ret +} + +# Returns +# 0 if daemon has been started +# 1 if daemon was already running +# 2 if daemon could not be started +# 3 if unsupported +# +zfs_daemon_start() +{ + local PIDFILE="$1"; shift + local DAEMON_BIN="$1"; shift + local DAEMON_ARGS="$*" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --start --quiet --pidfile "$PIDFILE" \ + --exec "$DAEMON_BIN" --test > /dev/null || return 1 + + start-stop-daemon --start --quiet --exec "$DAEMON_BIN" -- \ + $DAEMON_ARGS || return 2 + + # On Debian GNU/Linux, there's a 'sendsigs' script that will + # kill basically everything quite early and zed is stopped + # much later than that. We don't want zed to be among them, + # so add the zed pid to list of pids to ignore. + if [ -f "$PIDFILE" -a -d /run/sendsigs.omit.d ] + then + ln -sf "$PIDFILE" /run/sendsigs.omit.d/zed + fi + elif type daemon > /dev/null 2>&1 ; then + # Fedora/RedHat functions + daemon --pidfile "$PIDFILE" "$DAEMON_BIN" $DAEMON_ARGS + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +# Returns +# 0 if daemon has been stopped +# 1 if daemon was already stopped +# 2 if daemon could not be stopped +# 3 if unsupported +# +zfs_daemon_stop() +{ + local PIDFILE="$1" + local DAEMON_BIN="$2" + local DAEMON_NAME="$3" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \ + --pidfile "$PIDFILE" --name "$DAEMON_NAME" + [ "$?" = 0 ] && rm -f "$PIDFILE" + + return $? + elif type killproc > /dev/null 2>&1 ; then + # Fedora/RedHat functions + killproc -p "$PIDFILE" "$DAEMON_NAME" + [ "$?" = 0 ] && rm -f "$PIDFILE" + + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +# Returns status +zfs_daemon_status() +{ + local PIDFILE="$1" + local DAEMON_BIN="$2" + local DAEMON_NAME="$3" + + if type status_of_proc > /dev/null 2>&1 ; then + # LSB functions + status_of_proc "$DAEMON_NAME" "$DAEMON_BIN" + return $? + elif type status > /dev/null 2>&1 ; then + # Fedora/RedHat functions + status -p "$PIDFILE" "$DAEMON_NAME" + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +zfs_daemon_reload() +{ + local PIDFILE="$1" + local DAEMON_NAME="$2" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --stop --signal 1 --quiet \ + --pidfile "$PIDFILE" --name "$DAEMON_NAME" + return $? + elif type killproc > /dev/null 2>&1 ; then + # Fedora/RedHat functions + killproc -p "$PIDFILE" "$DAEMON_NAME" -HUP + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +zfs_installed() +{ + if [ ! 
-x "$ZPOOL" ]; then + return 1 + else + # Test if it works (will catch missing/broken libs etc) + "$ZPOOL" -? > /dev/null 2>&1 + return $? + fi + + if [ ! -x "$ZFS" ]; then + return 2 + else + # Test if it works (will catch missing/broken libs etc) + "$ZFS" -? > /dev/null 2>&1 + return $? + fi + + return 0 +} + +# Trigger udev and wait for it to settle. +udev_trigger() +{ + if [ -x /sbin/udevadm ]; then + /sbin/udevadm trigger --action=change --subsystem-match=block + /sbin/udevadm settle + elif [ -x /sbin/udevsettle ]; then + /sbin/udevtrigger + /sbin/udevsettle + fi +} + +# Do a lot of checks to make sure it's 'safe' to continue with the import. +checksystem() +{ + if grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no|0)( |$)' /proc/cmdline; + then + # Called with zfs=(off|no|0) - bail because we don't + # want anything import, mounted or shared. + # HOWEVER, only do this if we're called at the boot up + # (from init), not if we're running interactively (as in + # from the shell - we know what we're doing). + [ -n "$init" ] && exit 3 + fi + + # Check if ZFS is installed. + zfs_installed || return 5 + + # Just make sure that /dev/zfs is created. + udev_trigger + + return 0 +} + +get_root_pool() +{ + set -- $(mount | grep ' on / ') + [ "$5" = "zfs" ] && echo "${1%%/*}" +} + +# Check if a variable is 'yes' (any case) or '1' +# Returns TRUE if set. +check_boolean() +{ + local var="$1" + + echo "$var" | grep -Eiq "^yes$|^on$|^true$|^1$" && return 0 || return 1 +} + +check_module_loaded() +{ + module="$1" + + [ -r "/sys/module/${module}/version" ] && return 0 || return 1 +} + +load_module() +{ + module="$1" + + # Load the zfs module stack + if ! check_module_loaded "$module"; then + if ! /sbin/modprobe "$module"; then + return 5 + fi + fi + return 0 +} + +# first parameter is a regular expression that filters mtab +read_mtab() +{ + local match="$1" + local fs mntpnt fstype opts rest TMPFILE + + # Unset all MTAB_* variables + unset $(env | grep ^MTAB_ | sed 's,=.*,,') + + while read -r fs mntpnt fstype opts rest; do + if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then + # * Fix problems (!?) in the mounts file. It will record + # 'rpool 1' as 'rpool\0401' instead of 'rpool\00401' + # which seems to be the correct (at least as far as + # 'printf' is concerned). + # * We need to use the external echo, because the + # internal one would interpret the backslash code + # (incorrectly), giving us a instead. + mntpnt=$(/bin/echo "$mntpnt" | sed "s,\\\0,\\\00,g") + fs=$(/bin/echo "$fs" | sed "s,\\\0,\\\00,") + + # Remove 'unwanted' characters. + mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \ + -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g') + fs=$(printf '%b\n' "$fs") + + # Set the variable. + eval export MTAB_$mntpnt=\"$fs\" + fi + done < /proc/self/mounts +} + +in_mtab() +{ + local mntpnt="$1" + # Remove 'unwanted' characters. + mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \ + -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g') + local var + + var="$(eval echo MTAB_$mntpnt)" + [ "$(eval echo "$""$var")" != "" ] + return "$?" 
+} + +# first parameter is a regular expression that filters fstab +read_fstab() +{ + local match="$1" + local i var TMPFILE + + # Unset all FSTAB_* variables + unset $(env | grep ^FSTAB_ | sed 's,=.*,,') + + i=0 + while read -r fs mntpnt fstype opts; do + echo "$fs" | egrep -qE '^#|^$' && continue + echo "$mntpnt" | egrep -qE '^none|^swap' && continue + echo "$fstype" | egrep -qE '^swap' && continue + + if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then + eval export FSTAB_dev_$i="$fs" + fs=$(printf '%b\n' "$fs" | sed 's,/,_,g') + eval export FSTAB_$i="$mntpnt" + + i=$((i + 1)) + fi + done < /etc/fstab +} + +in_fstab() +{ + local var + + var="$(eval echo FSTAB_$1)" + [ "${var}" != "" ] + return $? +} + +is_mounted() +{ + local mntpt="$1" + local line + + mount | \ + while read line; do + if echo "$line" | grep -q " on $mntpt "; then + # returns: + # 0 on unsuccessful match + # 1 on a successful match + return 1 + fi + done + + # The negation will flip the subshell return result where the default + # return value is 0 when a match is not found. + return $(( !$? )) +} |
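The `50-zfs.preset` file added above is what decides which of these systemd units a distribution enables by default (cache-file import on, scan import off). As a minimal sketch, assuming the units and the preset have already been installed into the distribution's usual systemd directories, applying those presets by hand would look something like:

```sh
# Sketch only: apply the vendor preset shipped as 50-zfs.preset and bring
# the ZFS services up. Assumes the unit files are already installed.
systemctl daemon-reload
systemctl preset zfs-import-cache.service zfs-import-scan.service \
    zfs-import.target zfs-mount.service zfs-share.service \
    zfs-zed.service zfs-volume-wait.service zfs.target
systemctl start zfs.target
```

On sysvinit/OpenRC systems the equivalent wiring is the `update-rc.d`, `chkconfig`, and `rc-update` commands quoted in `etc/init.d/README.md` above.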