Diffstat (limited to 'tests/zfs-tests/include/libtest.shlib')
-rw-r--r--	tests/zfs-tests/include/libtest.shlib	4194
1 file changed, 4194 insertions(+), 0 deletions(-)
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib new file mode 100644 index 0000000000000..1618c92bd57f9 --- /dev/null +++ b/tests/zfs-tests/include/libtest.shlib @@ -0,0 +1,4194 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved. +# Copyright (c) 2012, 2020, Delphix. All rights reserved. +# Copyright (c) 2017, Tim Chase. All rights reserved. +# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved. +# Copyright (c) 2017, Lawrence Livermore National Security LLC. +# Copyright (c) 2017, Datto Inc. All rights reserved. +# Copyright (c) 2017, Open-E Inc. All rights reserved. +# Use is subject to license terms. +# + +. ${STF_TOOLS}/include/logapi.shlib +. ${STF_SUITE}/include/math.shlib +. ${STF_SUITE}/include/blkdev.shlib + +. ${STF_SUITE}/include/tunables.cfg + +# +# Apply constrained path when available. This is required since the +# PATH may have been modified by sudo's secure_path behavior. +# +if [ -n "$STF_PATH" ]; then + PATH="$STF_PATH" +fi + +# +# Generic dot version comparison function +# +# Returns success when version $1 is greater than or equal to $2. +# +function compare_version_gte +{ + if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then + return 0 + else + return 1 + fi +} + +# Linux kernel version comparison function +# +# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version +# +# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ] +# +function linux_version +{ + typeset ver="$1" + + [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+") + + typeset version=$(echo $ver | cut -d '.' -f 1) + typeset major=$(echo $ver | cut -d '.' -f 2) + typeset minor=$(echo $ver | cut -d '.' -f 3) + + [[ -z "$version" ]] && version=0 + [[ -z "$major" ]] && major=0 + [[ -z "$minor" ]] && minor=0 + + echo $((version * 10000 + major * 100 + minor)) +} + +# Determine if this is a Linux test system +# +# Return 0 if platform Linux, 1 if otherwise + +function is_linux +{ + if [[ $(uname -o) == "GNU/Linux" ]]; then + return 0 + else + return 1 + fi +} + +# Determine if this is an illumos test system +# +# Return 0 if platform illumos, 1 if otherwise +function is_illumos +{ + if [[ $(uname -o) == "illumos" ]]; then + return 0 + else + return 1 + fi +} + +# Determine if this is a FreeBSD test system +# +# Return 0 if platform FreeBSD, 1 if otherwise + +function is_freebsd +{ + if [[ $(uname -o) == "FreeBSD" ]]; then + return 0 + else + return 1 + fi +} + +# Determine if this is a DilOS test system +# +# Return 0 if platform DilOS, 1 if otherwise + +function is_dilos +{ + typeset ID="" + [[ -f /etc/os-release ]] && . 
/etc/os-release + if [[ $ID == "dilos" ]]; then + return 0 + else + return 1 + fi +} + +# Determine if this is a 32-bit system +# +# Return 0 if platform is 32-bit, 1 if otherwise + +function is_32bit +{ + if [[ $(getconf LONG_BIT) == "32" ]]; then + return 0 + else + return 1 + fi +} + +# Determine if kmemleak is enabled +# +# Return 0 if kmemleak is enabled, 1 if otherwise + +function is_kmemleak +{ + if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then + return 0 + else + return 1 + fi +} + +# Determine whether a dataset is mounted +# +# $1 dataset name +# $2 filesystem type; optional - defaulted to zfs +# +# Return 0 if dataset is mounted; 1 if unmounted; 2 on error + +function ismounted +{ + typeset fstype=$2 + [[ -z $fstype ]] && fstype=zfs + typeset out dir name ret + + case $fstype in + zfs) + if [[ "$1" == "/"* ]] ; then + for out in $(zfs mount | awk '{print $2}'); do + [[ $1 == $out ]] && return 0 + done + else + for out in $(zfs mount | awk '{print $1}'); do + [[ $1 == $out ]] && return 0 + done + fi + ;; + ufs|nfs) + if is_freebsd; then + mount -pt $fstype | while read dev dir _t _flags; do + [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0 + done + else + out=$(df -F $fstype $1 2>/dev/null) + ret=$? + (($ret != 0)) && return $ret + + dir=${out%%\(*} + dir=${dir%% *} + name=${out##*\(} + name=${name%%\)*} + name=${name%% *} + + [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0 + fi + ;; + ext*) + out=$(df -t $fstype $1 2>/dev/null) + return $? + ;; + zvol) + if [[ -L "$ZVOL_DEVDIR/$1" ]]; then + link=$(readlink -f $ZVOL_DEVDIR/$1) + [[ -n "$link" ]] && \ + mount | grep -q "^$link" && \ + return 0 + fi + ;; + esac + + return 1 +} + +# Return 0 if a dataset is mounted; 1 otherwise +# +# $1 dataset name +# $2 filesystem type; optional - defaulted to zfs + +function mounted +{ + ismounted $1 $2 + (($? == 0)) && return 0 + return 1 +} + +# Return 0 if a dataset is unmounted; 1 otherwise +# +# $1 dataset name +# $2 filesystem type; optional - defaulted to zfs + +function unmounted +{ + ismounted $1 $2 + (($? == 1)) && return 0 + return 1 +} + +# split line on "," +# +# $1 - line to split + +function splitline +{ + echo $1 | sed "s/,/ /g" +} + +function default_setup +{ + default_setup_noexit "$@" + + log_pass +} + +function default_setup_no_mountpoint +{ + default_setup_noexit "$1" "$2" "$3" "yes" + + log_pass +} + +# +# Given a list of disks, setup storage pools and datasets. 
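+# A hypothetical usage sketch ($DISKS, $TESTPOOL, $TESTFS and $TESTVOL are
+# assumed to come from the test suite configuration, not defined here):
+#
+#	default_setup_noexit "$DISKS"			# pool + mounted $TESTPOOL/$TESTFS
+#	default_setup_noexit "$DISKS" "" "true"		# additionally creates $TESTVOL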
+# +function default_setup_noexit +{ + typeset disklist=$1 + typeset container=$2 + typeset volume=$3 + typeset no_mountpoint=$4 + log_note begin default_setup_noexit + + if is_global_zone; then + if poolexists $TESTPOOL ; then + destroy_pool $TESTPOOL + fi + [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL + log_must zpool create -f $TESTPOOL $disklist + else + reexport_pool + fi + + rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR + mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR + + log_must zfs create $TESTPOOL/$TESTFS + if [[ -z $no_mountpoint ]]; then + log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS + fi + + if [[ -n $container ]]; then + rm -rf $TESTDIR1 || \ + log_unresolved Could not remove $TESTDIR1 + mkdir -p $TESTDIR1 || \ + log_unresolved Could not create $TESTDIR1 + + log_must zfs create $TESTPOOL/$TESTCTR + log_must zfs set canmount=off $TESTPOOL/$TESTCTR + log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1 + if [[ -z $no_mountpoint ]]; then + log_must zfs set mountpoint=$TESTDIR1 \ + $TESTPOOL/$TESTCTR/$TESTFS1 + fi + fi + + if [[ -n $volume ]]; then + if is_global_zone ; then + log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL + block_device_wait + else + log_must zfs create $TESTPOOL/$TESTVOL + fi + fi +} + +# +# Given a list of disks, setup a storage pool, file system and +# a container. +# +function default_container_setup +{ + typeset disklist=$1 + + default_setup "$disklist" "true" +} + +# +# Given a list of disks, setup a storage pool,file system +# and a volume. +# +function default_volume_setup +{ + typeset disklist=$1 + + default_setup "$disklist" "" "true" +} + +# +# Given a list of disks, setup a storage pool,file system, +# a container and a volume. +# +function default_container_volume_setup +{ + typeset disklist=$1 + + default_setup "$disklist" "true" "true" +} + +# +# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on +# filesystem +# +# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS +# $2 snapshot name. Default, $TESTSNAP +# +function create_snapshot +{ + typeset fs_vol=${1:-$TESTPOOL/$TESTFS} + typeset snap=${2:-$TESTSNAP} + + [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined." + [[ -z $snap ]] && log_fail "Snapshot's name is undefined." + + if snapexists $fs_vol@$snap; then + log_fail "$fs_vol@$snap already exists." + fi + datasetexists $fs_vol || \ + log_fail "$fs_vol must exist." + + log_must zfs snapshot $fs_vol@$snap +} + +# +# Create a clone from a snapshot, default clone name is $TESTCLONE. +# +# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default. +# $2 Clone name, $TESTPOOL/$TESTCLONE is default. +# +function create_clone # snapshot clone +{ + typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} + typeset clone=${2:-$TESTPOOL/$TESTCLONE} + + [[ -z $snap ]] && \ + log_fail "Snapshot name is undefined." + [[ -z $clone ]] && \ + log_fail "Clone name is undefined." + + log_must zfs clone $snap $clone +} + +# +# Create a bookmark of the given snapshot. Defaultly create a bookmark on +# filesystem. +# +# $1 Existing filesystem or volume name. Default, $TESTFS +# $2 Existing snapshot name. Default, $TESTSNAP +# $3 bookmark name. Default, $TESTBKMARK +# +function create_bookmark +{ + typeset fs_vol=${1:-$TESTFS} + typeset snap=${2:-$TESTSNAP} + typeset bkmark=${3:-$TESTBKMARK} + + [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined." + [[ -z $snap ]] && log_fail "Snapshot's name is undefined." 
+ [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined." + + if bkmarkexists $fs_vol#$bkmark; then + log_fail "$fs_vol#$bkmark already exists." + fi + datasetexists $fs_vol || \ + log_fail "$fs_vol must exist." + snapexists $fs_vol@$snap || \ + log_fail "$fs_vol@$snap must exist." + + log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark +} + +# +# Create a temporary clone result of an interrupted resumable 'zfs receive' +# $1 Destination filesystem name. Must not exist, will be created as the result +# of this function along with its %recv temporary clone +# $2 Source filesystem name. Must not exist, will be created and destroyed +# +function create_recv_clone +{ + typeset recvfs="$1" + typeset sendfs="${2:-$TESTPOOL/create_recv_clone}" + typeset snap="$sendfs@snap1" + typeset incr="$sendfs@snap2" + typeset mountpoint="$TESTDIR/create_recv_clone" + typeset sendfile="$TESTDIR/create_recv_clone.zsnap" + + [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined." + + datasetexists $recvfs && log_fail "Recv filesystem must not exist." + datasetexists $sendfs && log_fail "Send filesystem must not exist." + + log_must zfs create -o mountpoint="$mountpoint" $sendfs + log_must zfs snapshot $snap + log_must eval "zfs send $snap | zfs recv -u $recvfs" + log_must mkfile 1m "$mountpoint/data" + log_must zfs snapshot $incr + log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \ + iflag=fullblock > $sendfile" + log_mustnot eval "zfs recv -su $recvfs < $sendfile" + destroy_dataset "$sendfs" "-r" + log_must rm -f "$sendfile" + + if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then + log_fail "Error creating temporary $recvfs/%recv clone" + fi +} + +function default_mirror_setup +{ + default_mirror_setup_noexit $1 $2 $3 + + log_pass +} + +# +# Given a pair of disks, set up a storage pool and dataset for the mirror +# @parameters: $1 the primary side of the mirror +# $2 the secondary side of the mirror +# @uses: ZPOOL ZFS TESTPOOL TESTFS +function default_mirror_setup_noexit +{ + readonly func="default_mirror_setup_noexit" + typeset primary=$1 + typeset secondary=$2 + + [[ -z $primary ]] && \ + log_fail "$func: No parameters passed" + [[ -z $secondary ]] && \ + log_fail "$func: No secondary partition passed" + [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL + log_must zpool create -f $TESTPOOL mirror $@ + log_must zfs create $TESTPOOL/$TESTFS + log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS +} + +# +# create a number of mirrors. +# We create a number($1) of 2 way mirrors using the pairs of disks named +# on the command line. These mirrors are *not* mounted +# @parameters: $1 the number of mirrors to create +# $... the devices to use to create the mirrors on +# @uses: ZPOOL ZFS TESTPOOL +function setup_mirrors +{ + typeset -i nmirrors=$1 + + shift + while ((nmirrors > 0)); do + log_must test -n "$1" -a -n "$2" + [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors + log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2 + shift 2 + ((nmirrors = nmirrors - 1)) + done +} + +# +# create a number of raidz pools. +# We create a number($1) of 2 raidz pools using the pairs of disks named +# on the command line. These pools are *not* mounted +# @parameters: $1 the number of pools to create +# $... 
the devices to use to create the pools on +# @uses: ZPOOL ZFS TESTPOOL +function setup_raidzs +{ + typeset -i nraidzs=$1 + + shift + while ((nraidzs > 0)); do + log_must test -n "$1" -a -n "$2" + [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs + log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2 + shift 2 + ((nraidzs = nraidzs - 1)) + done +} + +# +# Destroy the configured testpool mirrors. +# the mirrors are of the form ${TESTPOOL}{number} +# @uses: ZPOOL ZFS TESTPOOL +function destroy_mirrors +{ + default_cleanup_noexit + + log_pass +} + +# +# Given a minimum of two disks, set up a storage pool and dataset for the raid-z +# $1 the list of disks +# +function default_raidz_setup +{ + typeset disklist="$*" + disks=(${disklist[*]}) + + if [[ ${#disks[*]} -lt 2 ]]; then + log_fail "A raid-z requires a minimum of two disks." + fi + + [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL + log_must zpool create -f $TESTPOOL raidz $disklist + log_must zfs create $TESTPOOL/$TESTFS + log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS + + log_pass +} + +# +# Common function used to cleanup storage pools and datasets. +# +# Invoked at the start of the test suite to ensure the system +# is in a known state, and also at the end of each set of +# sub-tests to ensure errors from one set of tests doesn't +# impact the execution of the next set. + +function default_cleanup +{ + default_cleanup_noexit + + log_pass +} + +# +# Utility function used to list all available pool names. +# +# NOTE: $KEEP is a variable containing pool names, separated by a newline +# character, that must be excluded from the returned list. +# +function get_all_pools +{ + zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS" +} + +function default_cleanup_noexit +{ + typeset pool="" + # + # Destroying the pool will also destroy any + # filesystems it contains. + # + if is_global_zone; then + zfs unmount -a > /dev/null 2>&1 + ALL_POOLS=$(get_all_pools) + # Here, we loop through the pools we're allowed to + # destroy, only destroying them if it's safe to do + # so. + while [ ! -z ${ALL_POOLS} ] + do + for pool in ${ALL_POOLS} + do + if safe_to_destroy_pool $pool ; + then + destroy_pool $pool + fi + done + ALL_POOLS=$(get_all_pools) + done + + zfs mount -a + else + typeset fs="" + for fs in $(zfs list -H -o name \ + | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do + destroy_dataset "$fs" "-Rf" + done + + # Need cleanup here to avoid garbage dir left. + for fs in $(zfs list -H -o name); do + [[ $fs == /$ZONE_POOL ]] && continue + [[ -d $fs ]] && log_must rm -rf $fs/* + done + + # + # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to + # the default value + # + for fs in $(zfs list -H -o name); do + if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then + log_must zfs set reservation=none $fs + log_must zfs set recordsize=128K $fs + log_must zfs set mountpoint=/$fs $fs + typeset enc="" + enc=$(get_prop encryption $fs) + if [[ $? 
-ne 0 ]] || [[ -z "$enc" ]] || \ + [[ "$enc" == "off" ]]; then + log_must zfs set checksum=on $fs + fi + log_must zfs set compression=off $fs + log_must zfs set atime=on $fs + log_must zfs set devices=off $fs + log_must zfs set exec=on $fs + log_must zfs set setuid=on $fs + log_must zfs set readonly=off $fs + log_must zfs set snapdir=hidden $fs + log_must zfs set aclmode=groupmask $fs + log_must zfs set aclinherit=secure $fs + fi + done + fi + + [[ -d $TESTDIR ]] && \ + log_must rm -rf $TESTDIR + + disk1=${DISKS%% *} + if is_mpath_device $disk1; then + delete_partitions + fi + + rm -f $TEST_BASE_DIR/{err,out} +} + + +# +# Common function used to cleanup storage pools, file systems +# and containers. +# +function default_container_cleanup +{ + if ! is_global_zone; then + reexport_pool + fi + + ismounted $TESTPOOL/$TESTCTR/$TESTFS1 + [[ $? -eq 0 ]] && \ + log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1 + + destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R" + destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf" + + [[ -e $TESTDIR1 ]] && \ + log_must rm -rf $TESTDIR1 > /dev/null 2>&1 + + default_cleanup +} + +# +# Common function used to cleanup snapshot of file system or volume. Default to +# delete the file system's snapshot +# +# $1 snapshot name +# +function destroy_snapshot +{ + typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} + + if ! snapexists $snap; then + log_fail "'$snap' does not exist." + fi + + # + # For the sake of the value which come from 'get_prop' is not equal + # to the really mountpoint when the snapshot is unmounted. So, firstly + # check and make sure this snapshot's been mounted in current system. + # + typeset mtpt="" + if ismounted $snap; then + mtpt=$(get_prop mountpoint $snap) + (($? != 0)) && \ + log_fail "get_prop mountpoint $snap failed." + fi + + destroy_dataset "$snap" + [[ $mtpt != "" && -d $mtpt ]] && \ + log_must rm -rf $mtpt +} + +# +# Common function used to cleanup clone. +# +# $1 clone name +# +function destroy_clone +{ + typeset clone=${1:-$TESTPOOL/$TESTCLONE} + + if ! datasetexists $clone; then + log_fail "'$clone' does not existed." + fi + + # With the same reason in destroy_snapshot + typeset mtpt="" + if ismounted $clone; then + mtpt=$(get_prop mountpoint $clone) + (($? != 0)) && \ + log_fail "get_prop mountpoint $clone failed." + fi + + destroy_dataset "$clone" + [[ $mtpt != "" && -d $mtpt ]] && \ + log_must rm -rf $mtpt +} + +# +# Common function used to cleanup bookmark of file system or volume. Default +# to delete the file system's bookmark. +# +# $1 bookmark name +# +function destroy_bookmark +{ + typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK} + + if ! bkmarkexists $bkmark; then + log_fail "'$bkmarkp' does not existed." + fi + + destroy_dataset "$bkmark" +} + +# Return 0 if a snapshot exists; $? otherwise +# +# $1 - snapshot name + +function snapexists +{ + zfs list -H -t snapshot "$1" > /dev/null 2>&1 + return $? +} + +# +# Return 0 if a bookmark exists; $? otherwise +# +# $1 - bookmark name +# +function bkmarkexists +{ + zfs list -H -t bookmark "$1" > /dev/null 2>&1 + return $? +} + +# +# Return 0 if a hold exists; $? otherwise +# +# $1 - hold tag +# $2 - snapshot name +# +function holdexists +{ + zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1 + return $? +} + +# +# Set a property to a certain value on a dataset. +# Sets a property of the dataset to the value as passed in. +# @param: +# $1 dataset who's property is being set +# $2 property to set +# $3 value to set property to +# @return: +# 0 if the property could be set. 
+# non-zero otherwise. +# @use: ZFS +# +function dataset_setprop +{ + typeset fn=dataset_setprop + + if (($# < 3)); then + log_note "$fn: Insufficient parameters (need 3, had $#)" + return 1 + fi + typeset output= + output=$(zfs set $2=$3 $1 2>&1) + typeset rv=$? + if ((rv != 0)); then + log_note "Setting property on $1 failed." + log_note "property $2=$3" + log_note "Return Code: $rv" + log_note "Output: $output" + return $rv + fi + return 0 +} + +# +# Assign suite defined dataset properties. +# This function is used to apply the suite's defined default set of +# properties to a dataset. +# @parameters: $1 dataset to use +# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP +# @returns: +# 0 if the dataset has been altered. +# 1 if no pool name was passed in. +# 2 if the dataset could not be found. +# 3 if the dataset could not have it's properties set. +# +function dataset_set_defaultproperties +{ + typeset dataset="$1" + + [[ -z $dataset ]] && return 1 + + typeset confset= + typeset -i found=0 + for confset in $(zfs list); do + if [[ $dataset = $confset ]]; then + found=1 + break + fi + done + [[ $found -eq 0 ]] && return 2 + if [[ -n $COMPRESSION_PROP ]]; then + dataset_setprop $dataset compression $COMPRESSION_PROP || \ + return 3 + log_note "Compression set to '$COMPRESSION_PROP' on $dataset" + fi + if [[ -n $CHECKSUM_PROP ]]; then + dataset_setprop $dataset checksum $CHECKSUM_PROP || \ + return 3 + log_note "Checksum set to '$CHECKSUM_PROP' on $dataset" + fi + return 0 +} + +# +# Check a numeric assertion +# @parameter: $@ the assertion to check +# @output: big loud notice if assertion failed +# @use: log_fail +# +function assert +{ + (($@)) || log_fail "$@" +} + +# +# Function to format partition size of a disk +# Given a disk cxtxdx reduces all partitions +# to 0 size +# +function zero_partitions #<whole_disk_name> +{ + typeset diskname=$1 + typeset i + + if is_freebsd; then + gpart destroy -F $diskname + elif is_linux; then + DSK=$DEV_DSKDIR/$diskname + DSK=$(echo $DSK | sed -e "s|//|/|g") + log_must parted $DSK -s -- mklabel gpt + blockdev --rereadpt $DSK 2>/dev/null + block_device_wait + else + for i in 0 1 3 4 5 6 7 + do + log_must set_partition $i "" 0mb $diskname + done + fi + + return 0 +} + +# +# Given a slice, size and disk, this function +# formats the slice to the specified size. +# Size should be specified with units as per +# the `format` command requirements eg. 100mb 3gb +# +# NOTE: This entire interface is problematic for the Linux parted utility +# which requires the end of the partition to be specified. It would be +# best to retire this interface and replace it with something more flexible. +# At the moment a best effort is made. +# +# arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name> +function set_partition +{ + typeset -i slicenum=$1 + typeset start=$2 + typeset size=$3 + typeset disk=${4#$DEV_DSKDIR/} + disk=${disk#$DEV_RDSKDIR/} + + case "$(uname)" in + Linux) + if [[ -z $size || -z $disk ]]; then + log_fail "The size or disk name is unspecified." + fi + disk=$DEV_DSKDIR/$disk + typeset size_mb=${size%%[mMgG]} + + size_mb=${size_mb%%[mMgG][bB]} + if [[ ${size:1:1} == 'g' ]]; then + ((size_mb = size_mb * 1024)) + fi + + # Create GPT partition table when setting slice 0 or + # when the device doesn't already contain a GPT label. + parted $disk -s -- print 1 >/dev/null + typeset ret_val=$? + if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then + parted $disk -s -- mklabel gpt + if [[ $? 
-ne 0 ]]; then + log_note "Failed to create GPT partition table on $disk" + return 1 + fi + fi + + # When no start is given align on the first cylinder. + if [[ -z "$start" ]]; then + start=1 + fi + + # Determine the cylinder size for the device and using + # that calculate the end offset in cylinders. + typeset -i cly_size_kb=0 + cly_size_kb=$(parted -m $disk -s -- \ + unit cyl print | head -3 | tail -1 | \ + awk -F '[:k.]' '{print $4}') + ((end = (size_mb * 1024 / cly_size_kb) + start)) + + parted $disk -s -- \ + mkpart part$slicenum ${start}cyl ${end}cyl + typeset ret_val=$? + if [[ $ret_val -ne 0 ]]; then + log_note "Failed to create partition $slicenum on $disk" + return 1 + fi + + blockdev --rereadpt $disk 2>/dev/null + block_device_wait $disk + ;; + FreeBSD) + if [[ -z $size || -z $disk ]]; then + log_fail "The size or disk name is unspecified." + fi + disk=$DEV_DSKDIR/$disk + + if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then + gpart destroy -F $disk >/dev/null 2>&1 + gpart create -s GPT $disk + if [[ $? -ne 0 ]]; then + log_note "Failed to create GPT partition table on $disk" + return 1 + fi + fi + + typeset index=$((slicenum + 1)) + + if [[ -n $start ]]; then + start="-b $start" + fi + gpart add -t freebsd-zfs $start -s $size -i $index $disk + if [[ $ret_val -ne 0 ]]; then + log_note "Failed to create partition $slicenum on $disk" + return 1 + fi + + block_device_wait $disk + ;; + *) + if [[ -z $slicenum || -z $size || -z $disk ]]; then + log_fail "The slice, size or disk name is unspecified." + fi + + typeset format_file=/var/tmp/format_in.$$ + + echo "partition" >$format_file + echo "$slicenum" >> $format_file + echo "" >> $format_file + echo "" >> $format_file + echo "$start" >> $format_file + echo "$size" >> $format_file + echo "label" >> $format_file + echo "" >> $format_file + echo "q" >> $format_file + echo "q" >> $format_file + + format -e -s -d $disk -f $format_file + typeset ret_val=$? + rm -f $format_file + ;; + esac + + if [[ $ret_val -ne 0 ]]; then + log_note "Unable to format $disk slice $slicenum to $size" + return 1 + fi + return 0 +} + +# +# Delete all partitions on all disks - this is specifically for the use of multipath +# devices which currently can only be used in the test suite as raw/un-partitioned +# devices (ie a zpool cannot be created on a whole mpath device that has partitions) +# +function delete_partitions +{ + typeset disk + + if [[ -z $DISKSARRAY ]]; then + DISKSARRAY=$DISKS + fi + + if is_linux; then + typeset -i part + for disk in $DISKSARRAY; do + for (( part = 1; part < MAX_PARTITIONS; part++ )); do + typeset partition=${disk}${SLICE_PREFIX}${part} + parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1 + if lsblk | grep -qF ${partition}; then + log_fail "Partition ${partition} not deleted" + else + log_note "Partition ${partition} deleted" + fi + done + done + elif is_freebsd; then + for disk in $DISKSARRAY; do + if gpart destroy -F $disk; then + log_note "Partitions for ${disk} deleted" + else + log_fail "Partitions for ${disk} not deleted" + fi + done + fi +} + +# +# Get the end cyl of the given slice +# +function get_endslice #<disk> <slice> +{ + typeset disk=$1 + typeset slice=$2 + if [[ -z $disk || -z $slice ]] ; then + log_fail "The disk name or slice number is unspecified." 
+ fi + + case "$(uname)" in + Linux) + endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \ + grep "part${slice}" | \ + awk '{print $3}' | \ + sed 's,cyl,,') + ((endcyl = (endcyl + 1))) + ;; + FreeBSD) + disk=${disk#/dev/zvol/} + disk=${disk%p*} + slice=$((slice + 1)) + endcyl=$(gpart show $disk | \ + awk -v slice=$slice '$3 == slice { print $1 + $2 }') + ;; + *) + disk=${disk#/dev/dsk/} + disk=${disk#/dev/rdsk/} + disk=${disk%s*} + + typeset -i ratio=0 + ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \ + grep "sectors\/cylinder" | \ + awk '{print $2}') + + if ((ratio == 0)); then + return + fi + + typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 | + nawk -v token="$slice" '{if ($1==token) print $6}') + + ((endcyl = (endcyl + 1) / ratio)) + ;; + esac + + echo $endcyl +} + + +# +# Given a size,disk and total slice number, this function formats the +# disk slices from 0 to the total slice number with the same specified +# size. +# +function partition_disk #<slice_size> <whole_disk_name> <total_slices> +{ + typeset -i i=0 + typeset slice_size=$1 + typeset disk_name=$2 + typeset total_slices=$3 + typeset cyl + + zero_partitions $disk_name + while ((i < $total_slices)); do + if ! is_linux; then + if ((i == 2)); then + ((i = i + 1)) + continue + fi + fi + log_must set_partition $i "$cyl" $slice_size $disk_name + cyl=$(get_endslice $disk_name $i) + ((i = i+1)) + done +} + +# +# This function continues to write to a filenum number of files into dirnum +# number of directories until either file_write returns an error or the +# maximum number of files per directory have been written. +# +# Usage: +# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data] +# +# Return value: 0 on success +# non 0 on error +# +# Where : +# destdir: is the directory where everything is to be created under +# dirnum: the maximum number of subdirectories to use, -1 no limit +# filenum: the maximum number of files per subdirectory +# bytes: number of bytes to write +# num_writes: number of types to write out bytes +# data: the data that will be written +# +# E.g. +# fill_fs /testdir 20 25 1024 256 0 +# +# Note: bytes * num_writes equals the size of the testfile +# +function fill_fs # destdir dirnum filenum bytes num_writes data +{ + typeset destdir=${1:-$TESTDIR} + typeset -i dirnum=${2:-50} + typeset -i filenum=${3:-50} + typeset -i bytes=${4:-8192} + typeset -i num_writes=${5:-10240} + typeset data=${6:-0} + + mkdir -p $destdir/{1..$dirnum} + for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do + file_write -o create -f $f -b $bytes -c $num_writes -d $data \ + || return $? + done + return 0 +} + +# +# Simple function to get the specified property. If unable to +# get the property then exits. +# +# Note property is in 'parsable' format (-p) +# +function get_prop # property dataset +{ + typeset prop_val + typeset prop=$1 + typeset dataset=$2 + + prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null) + if [[ $? -ne 0 ]]; then + log_note "Unable to get $prop property for dataset " \ + "$dataset" + return 1 + fi + + echo "$prop_val" + return 0 +} + +# +# Simple function to get the specified property of pool. If unable to +# get the property then exits. +# +# Note property is in 'parsable' format (-p) +# +function get_pool_prop # property pool +{ + typeset prop_val + typeset prop=$1 + typeset pool=$2 + + if poolexists $pool ; then + prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \ + awk '{print $3}') + if [[ $? 
-ne 0 ]]; then + log_note "Unable to get $prop property for pool " \ + "$pool" + return 1 + fi + else + log_note "Pool $pool not exists." + return 1 + fi + + echo "$prop_val" + return 0 +} + +# Return 0 if a pool exists; $? otherwise +# +# $1 - pool name + +function poolexists +{ + typeset pool=$1 + + if [[ -z $pool ]]; then + log_note "No pool name given." + return 1 + fi + + zpool get name "$pool" > /dev/null 2>&1 + return $? +} + +# Return 0 if all the specified datasets exist; $? otherwise +# +# $1-n dataset name +function datasetexists +{ + if (($# == 0)); then + log_note "No dataset name given." + return 1 + fi + + while (($# > 0)); do + zfs get name $1 > /dev/null 2>&1 || \ + return $? + shift + done + + return 0 +} + +# return 0 if none of the specified datasets exists, otherwise return 1. +# +# $1-n dataset name +function datasetnonexists +{ + if (($# == 0)); then + log_note "No dataset name given." + return 1 + fi + + while (($# > 0)); do + zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \ + && return 1 + shift + done + + return 0 +} + +function is_shared_freebsd +{ + typeset fs=$1 + + pgrep -q mountd && showmount -E | grep -qx $fs +} + +function is_shared_illumos +{ + typeset fs=$1 + typeset mtpt + + for mtpt in `share | awk '{print $2}'` ; do + if [[ $mtpt == $fs ]] ; then + return 0 + fi + done + + typeset stat=$(svcs -H -o STA nfs/server:default) + if [[ $stat != "ON" ]]; then + log_note "Current nfs/server status: $stat" + fi + + return 1 +} + +function is_shared_linux +{ + typeset fs=$1 + typeset mtpt + + for mtpt in `share | awk '{print $1}'` ; do + if [[ $mtpt == $fs ]] ; then + return 0 + fi + done + return 1 +} + +# +# Given a mountpoint, or a dataset name, determine if it is shared via NFS. +# +# Returns 0 if shared, 1 otherwise. +# +function is_shared +{ + typeset fs=$1 + typeset mtpt + + if [[ $fs != "/"* ]] ; then + if datasetnonexists "$fs" ; then + return 1 + else + mtpt=$(get_prop mountpoint "$fs") + case $mtpt in + none|legacy|-) return 1 + ;; + *) fs=$mtpt + ;; + esac + fi + fi + + case $(uname) in + FreeBSD) is_shared_freebsd "$fs" ;; + Linux) is_shared_linux "$fs" ;; + *) is_shared_illumos "$fs" ;; + esac +} + +function is_exported_illumos +{ + typeset fs=$1 + typeset mtpt + + for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do + if [[ $mtpt == $fs ]] ; then + return 0 + fi + done + + return 1 +} + +function is_exported_freebsd +{ + typeset fs=$1 + typeset mtpt + + for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do + if [[ $mtpt == $fs ]] ; then + return 0 + fi + done + + return 1 +} + +function is_exported_linux +{ + typeset fs=$1 + typeset mtpt + + for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do + if [[ $mtpt == $fs ]] ; then + return 0 + fi + done + + return 1 +} + +# +# Given a mountpoint, or a dataset name, determine if it is exported via +# the os-specific NFS exports file. +# +# Returns 0 if exported, 1 otherwise. +# +function is_exported +{ + typeset fs=$1 + typeset mtpt + + if [[ $fs != "/"* ]] ; then + if datasetnonexists "$fs" ; then + return 1 + else + mtpt=$(get_prop mountpoint "$fs") + case $mtpt in + none|legacy|-) return 1 + ;; + *) fs=$mtpt + ;; + esac + fi + fi + + case $(uname) in + FreeBSD) is_exported_freebsd "$fs" ;; + Linux) is_exported_linux "$fs" ;; + *) is_exported_illumos "$fs" ;; + esac +} + +# +# Given a dataset name determine if it is shared via SMB. +# +# Returns 0 if shared, 1 otherwise. 
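+# A hypothetical example (dataset name assumed):
+#
+#	if is_shared_smb $TESTPOOL/$TESTFS; then
+#		log_note "$TESTPOOL/$TESTFS is shared over SMB"
+#	fi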
+# +function is_shared_smb +{ + typeset fs=$1 + typeset mtpt + + if datasetnonexists "$fs" ; then + return 1 + else + fs=$(echo $fs | sed 's@/@_@g') + fi + + if is_linux; then + for mtpt in `net usershare list | awk '{print $1}'` ; do + if [[ $mtpt == $fs ]] ; then + return 0 + fi + done + return 1 + else + log_note "Currently unsupported by the test framework" + return 1 + fi +} + +# +# Given a mountpoint, determine if it is not shared via NFS. +# +# Returns 0 if not shared, 1 otherwise. +# +function not_shared +{ + typeset fs=$1 + + is_shared $fs + if (($? == 0)); then + return 1 + fi + + return 0 +} + +# +# Given a dataset determine if it is not shared via SMB. +# +# Returns 0 if not shared, 1 otherwise. +# +function not_shared_smb +{ + typeset fs=$1 + + is_shared_smb $fs + if (($? == 0)); then + return 1 + fi + + return 0 +} + +# +# Helper function to unshare a mountpoint. +# +function unshare_fs #fs +{ + typeset fs=$1 + + is_shared $fs || is_shared_smb $fs + if (($? == 0)); then + zfs unshare $fs || log_fail "zfs unshare $fs failed" + fi + + return 0 +} + +# +# Helper function to share a NFS mountpoint. +# +function share_nfs #fs +{ + typeset fs=$1 + + if is_linux; then + is_shared $fs + if (($? != 0)); then + log_must share "*:$fs" + fi + else + is_shared $fs + if (($? != 0)); then + log_must share -F nfs $fs + fi + fi + + return 0 +} + +# +# Helper function to unshare a NFS mountpoint. +# +function unshare_nfs #fs +{ + typeset fs=$1 + + if is_linux; then + is_shared $fs + if (($? == 0)); then + log_must unshare -u "*:$fs" + fi + else + is_shared $fs + if (($? == 0)); then + log_must unshare -F nfs $fs + fi + fi + + return 0 +} + +# +# Helper function to show NFS shares. +# +function showshares_nfs +{ + if is_linux; then + share -v + else + share -F nfs + fi + + return 0 +} + +# +# Helper function to show SMB shares. +# +function showshares_smb +{ + if is_linux; then + net usershare list + else + share -F smb + fi + + return 0 +} + +function check_nfs +{ + if is_linux; then + share -s + elif is_freebsd; then + showmount -e + else + log_unsupported "Unknown platform" + fi + + if [[ $? -ne 0 ]]; then + log_unsupported "The NFS utilities are not installed" + fi +} + +# +# Check NFS server status and trigger it online. +# +function setup_nfs_server +{ + # Cannot share directory in non-global zone. + # + if ! is_global_zone; then + log_note "Cannot trigger NFS server by sharing in LZ." + return + fi + + if is_linux; then + # + # Re-synchronize /var/lib/nfs/etab with /etc/exports and + # /etc/exports.d./* to provide a clean test environment. + # + log_must share -r + + log_note "NFS server must be started prior to running ZTS." + return + elif is_freebsd; then + kill -s HUP $(cat /var/run/mountd.pid) + + log_note "NFS server must be started prior to running ZTS." + return + fi + + typeset nfs_fmri="svc:/network/nfs/server:default" + if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then + # + # Only really sharing operation can enable NFS server + # to online permanently. + # + typeset dummy=/tmp/dummy + + if [[ -d $dummy ]]; then + log_must rm -rf $dummy + fi + + log_must mkdir $dummy + log_must share $dummy + + # + # Waiting for fmri's status to be the final status. + # Otherwise, in transition, an asterisk (*) is appended for + # instances, unshare will reverse status to 'DIS' again. + # + # Waiting for 1's at least. 
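+	# (that is, always sleep at least one second between polls of
+	# the service status below)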
+ # + log_must sleep 1 + timeout=10 + while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]] + do + log_must sleep 1 + + ((timeout -= 1)) + done + + log_must unshare $dummy + log_must rm -rf $dummy + fi + + log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'" +} + +# +# To verify whether calling process is in global zone +# +# Return 0 if in global zone, 1 in non-global zone +# +function is_global_zone +{ + if is_linux || is_freebsd; then + return 0 + else + typeset cur_zone=$(zonename 2>/dev/null) + if [[ $cur_zone != "global" ]]; then + return 1 + fi + return 0 + fi +} + +# +# Verify whether test is permitted to run from +# global zone, local zone, or both +# +# $1 zone limit, could be "global", "local", or "both"(no limit) +# +# Return 0 if permitted, otherwise exit with log_unsupported +# +function verify_runnable # zone limit +{ + typeset limit=$1 + + [[ -z $limit ]] && return 0 + + if is_global_zone ; then + case $limit in + global|both) + ;; + local) log_unsupported "Test is unable to run from "\ + "global zone." + ;; + *) log_note "Warning: unknown limit $limit - " \ + "use both." + ;; + esac + else + case $limit in + local|both) + ;; + global) log_unsupported "Test is unable to run from "\ + "local zone." + ;; + *) log_note "Warning: unknown limit $limit - " \ + "use both." + ;; + esac + + reexport_pool + fi + + return 0 +} + +# Return 0 if create successfully or the pool exists; $? otherwise +# Note: In local zones, this function should return 0 silently. +# +# $1 - pool name +# $2-n - [keyword] devs_list + +function create_pool #pool devs_list +{ + typeset pool=${1%%/*} + + shift + + if [[ -z $pool ]]; then + log_note "Missing pool name." + return 1 + fi + + if poolexists $pool ; then + destroy_pool $pool + fi + + if is_global_zone ; then + [[ -d /$pool ]] && rm -rf /$pool + log_must zpool create -f $pool $@ + fi + + return 0 +} + +# Return 0 if destroy successfully or the pool exists; $? otherwise +# Note: In local zones, this function should return 0 silently. +# +# $1 - pool name +# Destroy pool with the given parameters. + +function destroy_pool #pool +{ + typeset pool=${1%%/*} + typeset mtpt + + if [[ -z $pool ]]; then + log_note "No pool name given." + return 1 + fi + + if is_global_zone ; then + if poolexists "$pool" ; then + mtpt=$(get_prop mountpoint "$pool") + + # At times, syseventd/udev activity can cause attempts + # to destroy a pool to fail with EBUSY. We retry a few + # times allowing failures before requiring the destroy + # to succeed. + log_must_busy zpool destroy -f $pool + + [[ -d $mtpt ]] && \ + log_must rm -rf $mtpt + else + log_note "Pool does not exist. ($pool)" + return 1 + fi + fi + + return 0 +} + +# Return 0 if created successfully; $? otherwise +# +# $1 - dataset name +# $2-n - dataset options + +function create_dataset #dataset dataset_options +{ + typeset dataset=$1 + + shift + + if [[ -z $dataset ]]; then + log_note "Missing dataset name." + return 1 + fi + + if datasetexists $dataset ; then + destroy_dataset $dataset + fi + + log_must zfs create $@ $dataset + + return 0 +} + +# Return 0 if destroy successfully or the dataset exists; $? otherwise +# Note: In local zones, this function should return 0 silently. +# +# $1 - dataset name +# $2 - custom arguments for zfs destroy +# Destroy dataset with the given parameters. + +function destroy_dataset #dataset #args +{ + typeset dataset=$1 + typeset mtpt + typeset args=${2:-""} + + if [[ -z $dataset ]]; then + log_note "No dataset name given." 
+ return 1 + fi + + if is_global_zone ; then + if datasetexists "$dataset" ; then + mtpt=$(get_prop mountpoint "$dataset") + log_must_busy zfs destroy $args $dataset + + [[ -d $mtpt ]] && \ + log_must rm -rf $mtpt + else + log_note "Dataset does not exist. ($dataset)" + return 1 + fi + fi + + return 0 +} + +# +# Firstly, create a pool with 5 datasets. Then, create a single zone and +# export the 5 datasets to it. In addition, we also add a ZFS filesystem +# and a zvol device to the zone. +# +# $1 zone name +# $2 zone root directory prefix +# $3 zone ip +# +function zfs_zones_setup #zone_name zone_root zone_ip +{ + typeset zone_name=${1:-$(hostname)-z} + typeset zone_root=${2:-"/zone_root"} + typeset zone_ip=${3:-"10.1.1.10"} + typeset prefix_ctr=$ZONE_CTR + typeset pool_name=$ZONE_POOL + typeset -i cntctr=5 + typeset -i i=0 + + # Create pool and 5 container within it + # + [[ -d /$pool_name ]] && rm -rf /$pool_name + log_must zpool create -f $pool_name $DISKS + while ((i < cntctr)); do + log_must zfs create $pool_name/$prefix_ctr$i + ((i += 1)) + done + + # create a zvol + log_must zfs create -V 1g $pool_name/zone_zvol + block_device_wait + + # + # If current system support slog, add slog device for pool + # + if verify_slog_support ; then + typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2" + log_must mkfile $MINVDEVSIZE $sdevs + log_must zpool add $pool_name log mirror $sdevs + fi + + # this isn't supported just yet. + # Create a filesystem. In order to add this to + # the zone, it must have it's mountpoint set to 'legacy' + # log_must zfs create $pool_name/zfs_filesystem + # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem + + [[ -d $zone_root ]] && \ + log_must rm -rf $zone_root/$zone_name + [[ ! -d $zone_root ]] && \ + log_must mkdir -p -m 0700 $zone_root/$zone_name + + # Create zone configure file and configure the zone + # + typeset zone_conf=/tmp/zone_conf.$$ + echo "create" > $zone_conf + echo "set zonepath=$zone_root/$zone_name" >> $zone_conf + echo "set autoboot=true" >> $zone_conf + i=0 + while ((i < cntctr)); do + echo "add dataset" >> $zone_conf + echo "set name=$pool_name/$prefix_ctr$i" >> \ + $zone_conf + echo "end" >> $zone_conf + ((i += 1)) + done + + # add our zvol to the zone + echo "add device" >> $zone_conf + echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf + echo "end" >> $zone_conf + + # add a corresponding zvol rdsk to the zone + echo "add device" >> $zone_conf + echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf + echo "end" >> $zone_conf + + # once it's supported, we'll add our filesystem to the zone + # echo "add fs" >> $zone_conf + # echo "set type=zfs" >> $zone_conf + # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf + # echo "set dir=/export/zfs_filesystem" >> $zone_conf + # echo "end" >> $zone_conf + + echo "verify" >> $zone_conf + echo "commit" >> $zone_conf + log_must zonecfg -z $zone_name -f $zone_conf + log_must rm -f $zone_conf + + # Install the zone + zoneadm -z $zone_name install + if (($? 
== 0)); then + log_note "SUCCESS: zoneadm -z $zone_name install" + else + log_fail "FAIL: zoneadm -z $zone_name install" + fi + + # Install sysidcfg file + # + typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg + echo "system_locale=C" > $sysidcfg + echo "terminal=dtterm" >> $sysidcfg + echo "network_interface=primary {" >> $sysidcfg + echo "hostname=$zone_name" >> $sysidcfg + echo "}" >> $sysidcfg + echo "name_service=NONE" >> $sysidcfg + echo "root_password=mo791xfZ/SFiw" >> $sysidcfg + echo "security_policy=NONE" >> $sysidcfg + echo "timezone=US/Eastern" >> $sysidcfg + + # Boot this zone + log_must zoneadm -z $zone_name boot +} + +# +# Reexport TESTPOOL & TESTPOOL(1-4) +# +function reexport_pool +{ + typeset -i cntctr=5 + typeset -i i=0 + + while ((i < cntctr)); do + if ((i == 0)); then + TESTPOOL=$ZONE_POOL/$ZONE_CTR$i + if ! ismounted $TESTPOOL; then + log_must zfs mount $TESTPOOL + fi + else + eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i + if eval ! ismounted \$TESTPOOL$i; then + log_must eval zfs mount \$TESTPOOL$i + fi + fi + ((i += 1)) + done +} + +# +# Verify a given disk or pool state +# +# Return 0 is pool/disk matches expected state, 1 otherwise +# +function check_state # pool disk state{online,offline,degraded} +{ + typeset pool=$1 + typeset disk=${2#$DEV_DSKDIR/} + typeset state=$3 + + [[ -z $pool ]] || [[ -z $state ]] \ + && log_fail "Arguments invalid or missing" + + if [[ -z $disk ]]; then + #check pool state only + zpool get -H -o value health $pool \ + | grep -i "$state" > /dev/null 2>&1 + else + zpool status -v $pool | grep "$disk" \ + | grep -i "$state" > /dev/null 2>&1 + fi + + return $? +} + +# +# Get the mountpoint of snapshot +# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap> +# as its mountpoint +# +function snapshot_mountpoint +{ + typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} + + if [[ $dataset != *@* ]]; then + log_fail "Error name of snapshot '$dataset'." + fi + + typeset fs=${dataset%@*} + typeset snap=${dataset#*@} + + if [[ -z $fs || -z $snap ]]; then + log_fail "Error name of snapshot '$dataset'." + fi + + echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap +} + +# +# Given a device and 'ashift' value verify it's correctly set on every label +# +function verify_ashift # device ashift +{ + typeset device="$1" + typeset ashift="$2" + + zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / { + if (ashift != $2) + exit 1; + else + count++; + } END { + if (count != 4) + exit 1; + else + exit 0; + }' + + return $? +} + +# +# Given a pool and file system, this function will verify the file system +# using the zdb internal tool. Note that the pool is exported and imported +# to ensure it has consistent state. +# +function verify_filesys # pool filesystem dir +{ + typeset pool="$1" + typeset filesys="$2" + typeset zdbout="/tmp/zdbout.$$" + + shift + shift + typeset dirs=$@ + typeset search_path="" + + log_note "Calling zdb to verify filesystem '$filesys'" + zfs unmount -a > /dev/null 2>&1 + log_must zpool export $pool + + if [[ -n $dirs ]] ; then + for dir in $dirs ; do + search_path="$search_path -d $dir" + done + fi + + log_must zpool import $search_path $pool + + zdb -cudi $filesys > $zdbout 2>&1 + if [[ $? != 0 ]]; then + log_note "Output: zdb -cudi $filesys" + cat $zdbout + log_fail "zdb detected errors with: '$filesys'" + fi + + log_must zfs mount -a + log_must rm -rf $zdbout +} + +# +# Given a pool issue a scrub and verify that no checksum errors are reported. 
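+# For example, a hypothetical check after fault injection (the pool name
+# defaults to $TESTPOOL when omitted):
+#
+#	verify_pool $TESTPOOL	# scrub, wait, fail on any CKSUM errors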
+# +function verify_pool +{ + typeset pool=${1:-$TESTPOOL} + + log_must zpool scrub $pool + log_must wait_scrubbed $pool + + typeset -i cksum=$(zpool status $pool | awk ' + !NF { isvdev = 0 } + isvdev { errors += $NF } + /CKSUM$/ { isvdev = 1 } + END { print errors } + ') + if [[ $cksum != 0 ]]; then + log_must zpool status -v + log_fail "Unexpected CKSUM errors found on $pool ($cksum)" + fi +} + +# +# Given a pool, and this function list all disks in the pool +# +function get_disklist # pool +{ + typeset disklist="" + + disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \ + grep -v "\-\-\-\-\-" | \ + egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$") + + echo $disklist +} + +# +# Given a pool, and this function list all disks in the pool with their full +# path (like "/dev/sda" instead of "sda"). +# +function get_disklist_fullpath # pool +{ + args="-P $1" + get_disklist $args +} + + + +# /** +# This function kills a given list of processes after a time period. We use +# this in the stress tests instead of STF_TIMEOUT so that we can have processes +# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT +# would be listed as FAIL, which we don't want : we're happy with stress tests +# running for a certain amount of time, then finishing. +# +# @param $1 the time in seconds after which we should terminate these processes +# @param $2..$n the processes we wish to terminate. +# */ +function stress_timeout +{ + typeset -i TIMEOUT=$1 + shift + typeset cpids="$@" + + log_note "Waiting for child processes($cpids). " \ + "It could last dozens of minutes, please be patient ..." + log_must sleep $TIMEOUT + + log_note "Killing child processes after ${TIMEOUT} stress timeout." + typeset pid + for pid in $cpids; do + ps -p $pid > /dev/null 2>&1 + if (($? == 0)); then + log_must kill -USR1 $pid + fi + done +} + +# +# Verify a given hotspare disk is inuse or avail +# +# Return 0 is pool/disk matches expected state, 1 otherwise +# +function check_hotspare_state # pool disk state{inuse,avail} +{ + typeset pool=$1 + typeset disk=${2#$DEV_DSKDIR/} + typeset state=$3 + + cur_state=$(get_device_state $pool $disk "spares") + + if [[ $state != ${cur_state} ]]; then + return 1 + fi + return 0 +} + +# +# Wait until a hotspare transitions to a given state or times out. +# +# Return 0 when pool/disk matches expected state, 1 on timeout. 
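+# A hypothetical call, polling for up to the default 60 seconds for a
+# spare ($SPARE_DISK is an assumed name) to become "INUSE":
+#
+#	log_must wait_hotspare_state $TESTPOOL $SPARE_DISK "INUSE"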
+# +function wait_hotspare_state # pool disk state timeout +{ + typeset pool=$1 + typeset disk=${2#*$DEV_DSKDIR/} + typeset state=$3 + typeset timeout=${4:-60} + typeset -i i=0 + + while [[ $i -lt $timeout ]]; do + if check_hotspare_state $pool $disk $state; then + return 0 + fi + + i=$((i+1)) + sleep 1 + done + + return 1 +} + +# +# Verify a given slog disk is inuse or avail +# +# Return 0 is pool/disk matches expected state, 1 otherwise +# +function check_slog_state # pool disk state{online,offline,unavail} +{ + typeset pool=$1 + typeset disk=${2#$DEV_DSKDIR/} + typeset state=$3 + + cur_state=$(get_device_state $pool $disk "logs") + + if [[ $state != ${cur_state} ]]; then + return 1 + fi + return 0 +} + +# +# Verify a given vdev disk is inuse or avail +# +# Return 0 is pool/disk matches expected state, 1 otherwise +# +function check_vdev_state # pool disk state{online,offline,unavail} +{ + typeset pool=$1 + typeset disk=${2#*$DEV_DSKDIR/} + typeset state=$3 + + cur_state=$(get_device_state $pool $disk) + + if [[ $state != ${cur_state} ]]; then + return 1 + fi + return 0 +} + +# +# Wait until a vdev transitions to a given state or times out. +# +# Return 0 when pool/disk matches expected state, 1 on timeout. +# +function wait_vdev_state # pool disk state timeout +{ + typeset pool=$1 + typeset disk=${2#*$DEV_DSKDIR/} + typeset state=$3 + typeset timeout=${4:-60} + typeset -i i=0 + + while [[ $i -lt $timeout ]]; do + if check_vdev_state $pool $disk $state; then + return 0 + fi + + i=$((i+1)) + sleep 1 + done + + return 1 +} + +# +# Check the output of 'zpool status -v <pool>', +# and to see if the content of <token> contain the <keyword> specified. +# +# Return 0 is contain, 1 otherwise +# +function check_pool_status # pool token keyword <verbose> +{ + typeset pool=$1 + typeset token=$2 + typeset keyword=$3 + typeset verbose=${4:-false} + + scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" ' + ($1==token) {print $0}') + if [[ $verbose == true ]]; then + log_note $scan + fi + echo $scan | egrep -i "$keyword" > /dev/null 2>&1 + + return $? +} + +# +# The following functions are instance of check_pool_status() +# is_pool_resilvering - to check if the pool resilver is in progress +# is_pool_resilvered - to check if the pool resilver is completed +# is_pool_scrubbing - to check if the pool scrub is in progress +# is_pool_scrubbed - to check if the pool scrub is completed +# is_pool_scrub_stopped - to check if the pool scrub is stopped +# is_pool_scrub_paused - to check if the pool scrub has paused +# is_pool_removing - to check if the pool removing is a vdev +# is_pool_removed - to check if the pool remove is completed +# is_pool_discarding - to check if the pool checkpoint is being discarded +# +function is_pool_resilvering #pool <verbose> +{ + check_pool_status "$1" "scan" \ + "resilver[ ()0-9A-Za-z_-]* in progress since" $2 + return $? +} + +function is_pool_resilvered #pool <verbose> +{ + check_pool_status "$1" "scan" "resilvered " $2 + return $? +} + +function is_pool_scrubbing #pool <verbose> +{ + check_pool_status "$1" "scan" "scrub in progress since " $2 + return $? +} + +function is_pool_scrubbed #pool <verbose> +{ + check_pool_status "$1" "scan" "scrub repaired" $2 + return $? +} + +function is_pool_scrub_stopped #pool <verbose> +{ + check_pool_status "$1" "scan" "scrub canceled" $2 + return $? +} + +function is_pool_scrub_paused #pool <verbose> +{ + check_pool_status "$1" "scan" "scrub paused since " $2 + return $? 
+} + +function is_pool_removing #pool +{ + check_pool_status "$1" "remove" "in progress since " + return $? +} + +function is_pool_removed #pool +{ + check_pool_status "$1" "remove" "completed on" + return $? +} + +function is_pool_discarding #pool +{ + check_pool_status "$1" "checkpoint" "discarding" + return $? +} + +function wait_for_degraded +{ + typeset pool=$1 + typeset timeout=${2:-30} + typeset t0=$SECONDS + + while :; do + [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break + log_note "$pool is not yet degraded." + sleep 1 + if ((SECONDS - t0 > $timeout)); then + log_note "$pool not degraded after $timeout seconds." + return 1 + fi + done + + return 0 +} + +# +# Use create_pool()/destroy_pool() to clean up the information in +# in the given disk to avoid slice overlapping. +# +function cleanup_devices #vdevs +{ + typeset pool="foopool$$" + + for vdev in $@; do + zero_partitions $vdev + done + + poolexists $pool && destroy_pool $pool + create_pool $pool $@ + destroy_pool $pool + + return 0 +} + +#/** +# A function to find and locate free disks on a system or from given +# disks as the parameter. It works by locating disks that are in use +# as swap devices and dump devices, and also disks listed in /etc/vfstab +# +# $@ given disks to find which are free, default is all disks in +# the test system +# +# @return a string containing the list of available disks +#*/ +function find_disks +{ + # Trust provided list, no attempt is made to locate unused devices. + if is_linux || is_freebsd; then + echo "$@" + return + fi + + + sfi=/tmp/swaplist.$$ + dmpi=/tmp/dumpdev.$$ + max_finddisksnum=${MAX_FINDDISKSNUM:-6} + + swap -l > $sfi + dumpadm > $dmpi 2>/dev/null + +# write an awk script that can process the output of format +# to produce a list of disks we know about. Note that we have +# to escape "$2" so that the shell doesn't interpret it while +# we're creating the awk script. +# ------------------- + cat > /tmp/find_disks.awk <<EOF +#!/bin/nawk -f + BEGIN { FS="."; } + + /^Specify disk/{ + searchdisks=0; + } + + { + if (searchdisks && \$2 !~ "^$"){ + split(\$2,arr," "); + print arr[1]; + } + } + + /^AVAILABLE DISK SELECTIONS:/{ + searchdisks=1; + } +EOF +#--------------------- + + chmod 755 /tmp/find_disks.awk + disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)} + rm /tmp/find_disks.awk + + unused="" + for disk in $disks; do + # Check for mounted + grep "${disk}[sp]" /etc/mnttab >/dev/null + (($? == 0)) && continue + # Check for swap + grep "${disk}[sp]" $sfi >/dev/null + (($? == 0)) && continue + # check for dump device + grep "${disk}[sp]" $dmpi >/dev/null + (($? == 0)) && continue + # check to see if this disk hasn't been explicitly excluded + # by a user-set environment variable + echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null + (($? == 0)) && continue + unused_candidates="$unused_candidates $disk" + done + rm $sfi + rm $dmpi + +# now just check to see if those disks do actually exist +# by looking for a device pointing to the first slice in +# each case. 
limit the number to max_finddisksnum + count=0 + for disk in $unused_candidates; do + if is_disk_device $DEV_DSKDIR/${disk}s0 && \ + [ $count -lt $max_finddisksnum ]; then + unused="$unused $disk" + # do not impose limit if $@ is provided + [[ -z $@ ]] && ((count = count + 1)) + fi + done + +# finally, return our disk list + echo $unused +} + +function add_user_freebsd #<group_name> <user_name> <basedir> +{ + typeset group=$1 + typeset user=$2 + typeset basedir=$3 + + # Check to see if the user exists. + if id $user > /dev/null 2>&1; then + return 0 + fi + + # Assign 1000 as the base uid + typeset -i uid=1000 + while true; do + typeset -i ret + pw useradd -u $uid -g $group -d $basedir/$user -m -n $user + ret=$? + case $ret in + 0) break ;; + # The uid is not unique + 65) ((uid += 1)) ;; + *) return 1 ;; + esac + if [[ $uid == 65000 ]]; then + log_fail "No user id available under 65000 for $user" + fi + done + + # Silence MOTD + touch $basedir/$user/.hushlogin + + return 0 +} + +# +# Delete the specified user. +# +# $1 login name +# +function del_user_freebsd #<logname> +{ + typeset user=$1 + + if id $user > /dev/null 2>&1; then + log_must pw userdel $user + fi + + return 0 +} + +# +# Select valid gid and create specified group. +# +# $1 group name +# +function add_group_freebsd #<group_name> +{ + typeset group=$1 + + # See if the group already exists. + if pw groupshow $group >/dev/null 2>&1; then + return 0 + fi + + # Assign 1000 as the base gid + typeset -i gid=1000 + while true; do + pw groupadd -g $gid -n $group > /dev/null 2>&1 + typeset -i ret=$? + case $ret in + 0) return 0 ;; + # The gid is not unique + 65) ((gid += 1)) ;; + *) return 1 ;; + esac + if [[ $gid == 65000 ]]; then + log_fail "No user id available under 65000 for $group" + fi + done +} + +# +# Delete the specified group. +# +# $1 group name +# +function del_group_freebsd #<group_name> +{ + typeset group=$1 + + pw groupdel -n $group > /dev/null 2>&1 + typeset -i ret=$? + case $ret in + # Group does not exist, or was deleted successfully. + 0|6|65) return 0 ;; + # Name already exists as a group name + 9) log_must pw groupdel $group ;; + *) return 1 ;; + esac + + return 0 +} + +function add_user_illumos #<group_name> <user_name> <basedir> +{ + typeset group=$1 + typeset user=$2 + typeset basedir=$3 + + log_must useradd -g $group -d $basedir/$user -m $user + + return 0 +} + +function del_user_illumos #<user_name> +{ + typeset user=$1 + + if id $user > /dev/null 2>&1; then + log_must_retry "currently used" 6 userdel $user + fi + + return 0 +} + +function add_group_illumos #<group_name> +{ + typeset group=$1 + + typeset -i gid=100 + while true; do + groupadd -g $gid $group > /dev/null 2>&1 + typeset -i ret=$? + case $ret in + 0) return 0 ;; + # The gid is not unique + 4) ((gid += 1)) ;; + *) return 1 ;; + esac + done +} + +function del_group_illumos #<group_name> +{ + typeset group=$1 + + groupmod -n $grp $grp > /dev/null 2>&1 + typeset -i ret=$? + case $ret in + # Group does not exist. + 6) return 0 ;; + # Name already exists as a group name + 9) log_must groupdel $grp ;; + *) return 1 ;; + esac +} + +function add_user_linux #<group_name> <user_name> <basedir> +{ + typeset group=$1 + typeset user=$2 + typeset basedir=$3 + + log_must useradd -g $group -d $basedir/$user -m $user + + # Add new users to the same group and the command line utils. + # This allows them to be run out of the original users home + # directory as long as it permissioned to be group readable. 
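+	# stat(1) reports the group that owns the installed zfs binary;
+	# "usermod -a -G" appends the new user to that group.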
+
+function add_user_linux #<group_name> <user_name> <basedir>
+{
+	typeset group=$1
+	typeset user=$2
+	typeset basedir=$3
+
+	log_must useradd -g $group -d $basedir/$user -m $user
+
+	# Add new users to the same group as the command line utils.
+	# This allows them to be run out of the original user's home
+	# directory as long as it is permissioned to be group readable.
+	cmd_group=$(stat --format="%G" $(which zfs))
+	log_must usermod -a -G $cmd_group $user
+
+	return 0
+}
+
+function del_user_linux #<user_name>
+{
+	typeset user=$1
+
+	if id $user > /dev/null 2>&1; then
+		log_must_retry "currently used" 6 userdel $user
+	fi
+
+	return 0
+}
+
+function add_group_linux #<group_name>
+{
+	typeset group=$1
+
+	# Unlike illumos, there is no need to probe for a free gid here;
+	# groupadd allocates one itself from the configured system range.
+	groupadd $group > /dev/null 2>&1
+	typeset -i ret=$?
+	case $ret in
+	0) return 0 ;;
+	*) return 1 ;;
+	esac
+}
+
+function del_group_linux #<group_name>
+{
+	typeset group=$1
+
+	getent group $group > /dev/null 2>&1
+	typeset -i ret=$?
+	case $ret in
+	# Group does not exist.
+	2) return 0 ;;
+	# Group exists, remove it.
+	0) log_must groupdel $group ;;
+	*) return 1 ;;
+	esac
+
+	return 0
+}
+
+#
+# Add specified user to specified group
+#
+# $1 group name
+# $2 user name
+# $3 base of the homedir (optional)
+#
+function add_user #<group_name> <user_name> <basedir>
+{
+	typeset group=$1
+	typeset user=$2
+	typeset basedir=${3:-"/var/tmp"}
+
+	if ((${#group} == 0 || ${#user} == 0)); then
+		log_fail "group name or user name is not defined."
+	fi
+
+	case $(uname) in
+	FreeBSD)
+		add_user_freebsd "$group" "$user" "$basedir"
+		;;
+	Linux)
+		add_user_linux "$group" "$user" "$basedir"
+		;;
+	*)
+		add_user_illumos "$group" "$user" "$basedir"
+		;;
+	esac
+
+	echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.profile
+	echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.bash_profile
+	echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.login
+
+	return 0
+}
+
+#
+# Delete the specified user.
+#
+# $1 login name
+# $2 base of the homedir (optional)
+#
+function del_user #<logname> <basedir>
+{
+	typeset user=$1
+	typeset basedir=${2:-"/var/tmp"}
+
+	if ((${#user} == 0)); then
+		log_fail "login name is required."
+	fi
+
+	case $(uname) in
+	FreeBSD)
+		del_user_freebsd "$user"
+		;;
+	Linux)
+		del_user_linux "$user"
+		;;
+	*)
+		del_user_illumos "$user"
+		;;
+	esac
+
+	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
+
+	return 0
+}
+
+#
+# Select a valid gid and create the specified group.
+#
+# $1 group name
+#
+function add_group #<group_name>
+{
+	typeset group=$1
+
+	if ((${#group} == 0)); then
+		log_fail "group name is required."
+	fi
+
+	case $(uname) in
+	FreeBSD)
+		add_group_freebsd "$group"
+		;;
+	Linux)
+		add_group_linux "$group"
+		;;
+	*)
+		add_group_illumos "$group"
+		;;
+	esac
+
+	return 0
+}
+
+#
+# Delete the specified group.
+#
+# $1 group name
+#
+function del_group #<group_name>
+{
+	typeset group=$1
+
+	if ((${#group} == 0)); then
+		log_fail "group name is required."
+	fi
+
+	case $(uname) in
+	FreeBSD)
+		del_group_freebsd "$group"
+		;;
+	Linux)
+		del_group_linux "$group"
+		;;
+	*)
+		del_group_illumos "$group"
+		;;
+	esac
+
+	return 0
+}
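+
+# Typical lifecycle for the user/group wrappers above (a sketch; the names
+# "zfsgrp" and "zfsuser" are hypothetical):
+#
+#	log_must add_group zfsgrp
+#	log_must add_user zfsgrp zfsuser
+#	# ... run the unprivileged portion of the test as zfsuser ...
+#	log_must del_user zfsuser
+#	log_must del_group zfsgrp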
+
+#
+# This function will return true if it's safe to destroy the pool passed
+# as argument 1. It checks for pools based on zvols and files, and also
+# for files contained in a pool that may have a different mountpoint.
+#
+function safe_to_destroy_pool { # $1 the pool name
+
+	typeset pool=""
+	typeset DONT_DESTROY=""
+
+	# We check that by deleting the $1 pool, we're not
+	# going to pull the rug out from other pools. Do this
+	# by looking at all other pools, ensuring that they
+	# aren't built from files or zvols contained in this pool.
+
+	for pool in $(zpool list -H -o name)
+	do
+		ALTMOUNTPOOL=""
+
+		# this is a list of the file vdevs, located under /$1/,
+		# that $pool is built on
+		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
+		    awk '{print $1}')
+
+		# this is a list of the zvols that make up the pool
+		ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
+		    | awk '{print $1}')
+
+		# also want to determine if it's a file-based pool using an
+		# alternate mountpoint...
+		POOL_FILE_DIRS=$(zpool status -v $pool | \
+		    grep / | awk '{print $1}' | \
+		    awk -F/ '{print $2}' | grep -v "dev")
+
+		for pooldir in $POOL_FILE_DIRS
+		do
+			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
+			    grep "${pooldir}$" | awk '{print $1}')
+
+			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
+		done
+
+		if [ ! -z "$ZVOLPOOL" ]
+		then
+			DONT_DESTROY="true"
+			log_note "Pool $pool is built from $ZVOLPOOL on $1"
+		fi
+
+		if [ ! -z "$FILEPOOL" ]
+		then
+			DONT_DESTROY="true"
+			log_note "Pool $pool is built from $FILEPOOL on $1"
+		fi
+
+		if [ ! -z "$ALTMOUNTPOOL" ]
+		then
+			DONT_DESTROY="true"
+			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
+		fi
+	done
+
+	if [ -z "${DONT_DESTROY}" ]
+	then
+		return 0
+	else
+		log_note "Warning: it is not safe to destroy $1!"
+		return 1
+	fi
+}
+
+#
+# Verify that zfs operations with the -p option work as expected
+# $1 operation, value could be create, clone or rename
+# $2 dataset type, value could be fs or vol
+# $3 dataset name
+# $4 new dataset name
+#
+function verify_opt_p_ops
+{
+	typeset ops=$1
+	typeset datatype=$2
+	typeset dataset=$3
+	typeset newdataset=$4
+
+	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
+		log_fail "$datatype is not supported."
+	fi
+
+	# check parameters accordingly
+	case $ops in
+	create)
+		newdataset=$dataset
+		dataset=""
+		if [[ $datatype == "vol" ]]; then
+			ops="create -V $VOLSIZE"
+		fi
+		;;
+	clone)
+		if [[ -z $newdataset ]]; then
+			log_fail "newdataset should not be empty" \
+			    "when ops is $ops."
+		fi
+		log_must datasetexists $dataset
+		log_must snapexists $dataset
+		;;
+	rename)
+		if [[ -z $newdataset ]]; then
+			log_fail "newdataset should not be empty" \
+			    "when ops is $ops."
+		fi
+		log_must datasetexists $dataset
+		;;
+	*)
+		log_fail "$ops is not supported."
+		;;
+	esac
+
+	# make sure the upper level filesystem does not exist
+	destroy_dataset "${newdataset%/*}" "-rRf"
+
+	# without the -p option, the operation will fail
+	log_mustnot zfs $ops $dataset $newdataset
+	log_mustnot datasetexists $newdataset ${newdataset%/*}
+
+	# with the -p option, the operation should succeed
+	log_must zfs $ops -p $dataset $newdataset
+	block_device_wait
+
+	if ! datasetexists $newdataset ; then
+		log_fail "-p option does not work for $ops"
+	fi
+
+	# when $ops is create or clone, redoing the operation should
+	# still return zero
+	if [[ $ops != "rename" ]]; then
+		log_must zfs $ops -p $dataset $newdataset
+	fi
+
+	return 0
+}
+
+#
+# Get configuration of pool
+# $1 pool name
+# $2 config name
+#
+function get_config
+{
+	typeset pool=$1
+	typeset config=$2
+	typeset alt_root
+
+	if ! poolexists "$pool" ; then
+		return 1
+	fi
+	alt_root=$(zpool list -H $pool | awk '{print $NF}')
+	if [[ $alt_root == "-" ]]; then
+		value=$(zdb -C $pool | grep "$config:" | awk -F: \
+		    '{print $2}')
+	else
+		value=$(zdb -e $pool | grep "$config:" | awk -F: \
+		    '{print $2}')
+	fi
+	if [[ -n $value ]] ; then
+		# Strip the surrounding single quotes, if any.
+		value=${value#\'}
+		value=${value%\'}
+	fi
+	echo $value
+
+	return 0
+}
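+
+# Example usage of get_config() (a sketch; assumes $TESTPOOL exists and that
+# "pool_guid" appears as a "name: value" pair in the zdb output):
+#
+#	guid=$(get_config $TESTPOOL pool_guid)
+#	log_note "$TESTPOOL has guid $guid"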
+
+#
+# Private function. Randomly select one of the items from the arguments.
+#
+# $1 count
+# $2-n string
+#
+function _random_get
+{
+	typeset cnt=$1
+	shift
+
+	typeset str="$@"
+	typeset -i ind
+	((ind = RANDOM % cnt + 1))
+
+	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
+	echo $ret
+}
+
+#
+# Randomly select one of the items from the arguments, where the result
+# may also be the empty ("NONE") string.
+#
+function random_get_with_non
+{
+	typeset -i cnt=$#
+	((cnt += 1))
+
+	_random_get "$cnt" "$@"
+}
+
+#
+# Randomly select one of the items from the arguments, excluding the
+# "NONE" string.
+#
+function random_get
+{
+	_random_get "$#" "$@"
+}
+
+#
+# Detect if the current system supports slog
+#
+function verify_slog_support
+{
+	typeset dir=$TEST_BASE_DIR/disk.$$
+	typeset pool=foo.$$
+	typeset vdev=$dir/a
+	typeset sdev=$dir/b
+
+	mkdir -p $dir
+	mkfile $MINVDEVSIZE $vdev $sdev
+
+	typeset -i ret=0
+	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
+		ret=1
+	fi
+	rm -r $dir
+
+	return $ret
+}
+
+#
+# Generate a dataset name of the specified length. Note that the name is
+# built from whole copies of the base string, so its length is rounded up
+# to the next multiple of the base string length.
+# $1, the length of the name
+# $2, the base string to construct the name
+#
+function gen_dataset_name
+{
+	typeset -i len=$1
+	typeset basestr="$2"
+	typeset -i baselen=${#basestr}
+	typeset -i iter=0
+	typeset l_name=""
+
+	if ((len % baselen == 0)); then
+		((iter = len / baselen))
+	else
+		((iter = len / baselen + 1))
+	fi
+	while ((iter > 0)); do
+		l_name="${l_name}$basestr"
+
+		((iter -= 1))
+	done
+
+	echo $l_name
+}
+
+#
+# Get cksum tuple of dataset
+# $1 dataset name
+#
+# sample zdb output:
+# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
+# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
+# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
+# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
+function datasetcksum
+{
+	typeset cksum
+	sync
+	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
+	    | awk -F= '{print $7}')
+	echo $cksum
+}
+
+#
+# Get cksum of file
+# $1 file path
+#
+function checksum
+{
+	typeset cksum
+	cksum=$(cksum $1 | awk '{print $1}')
+	echo $cksum
+}
+
+#
+# Get the given disk/slice state from the specific field of the pool
+#
+function get_device_state #pool disk field("", "spares","logs")
+{
+	typeset pool=$1
+	typeset disk=${2#$DEV_DSKDIR/}
+	typeset field=${3:-$pool}
+
+	state=$(zpool status -v "$pool" 2>/dev/null | \
+	    nawk -v device=$disk -v pool=$pool -v field=$field \
+	    'BEGIN {startconfig=0; startfield=0; }
+	    /config:/ {startconfig=1}
+	    (startconfig==1) && ($1==field) {startfield=1; next;}
+	    (startfield==1) && ($1==device) {print $2; exit;}
+	    (startfield==1) &&
+	    ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
+	echo $state
+}
+
+#
+# print the given directory's filesystem type
+#
+# $1 directory name
+#
+function get_fstype
+{
+	typeset dir=$1
+
+	if [[ -z $dir ]]; then
+		log_fail "Usage: get_fstype <directory>"
+	fi
+
+	#
+	# $ df -n /
+	# /  : ufs
+	#
+	df -n $dir | awk '{print $3}'
+}
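+
+# Example usage of the random helpers above (a sketch):
+#
+#	compress=$(random_get on off)		# either "on" or "off"
+#	copies=$(random_get_with_non 1 2 3)	# "1", "2", "3", or ""
+#	name=$(gen_dataset_name 255 abcd)	# 256-character name (rounded up)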
+
+#
+# Given a disk, label it to VTOC regardless of what label was on the disk
+# $1 disk
+#
+function labelvtoc
+{
+	typeset disk=$1
+	if [[ -z $disk ]]; then
+		log_fail "The disk name is unspecified."
+	fi
+	typeset label_file=/var/tmp/labelvtoc.$$
+	typeset arch=$(uname -p)
+
+	if is_linux || is_freebsd; then
+		log_note "Currently unsupported by the test framework"
+		return 1
+	fi
+
+	if [[ $arch == "i386" ]]; then
+		echo "label" > $label_file
+		echo "0" >> $label_file
+		echo "" >> $label_file
+		echo "q" >> $label_file
+		echo "q" >> $label_file
+
+		fdisk -B $disk >/dev/null 2>&1
+		# wait a while for fdisk to finish
+		sleep 60
+	elif [[ $arch == "sparc" ]]; then
+		echo "label" > $label_file
+		echo "0" >> $label_file
+		echo "" >> $label_file
+		echo "" >> $label_file
+		echo "" >> $label_file
+		echo "q" >> $label_file
+	else
+		log_fail "unknown arch type"
+	fi
+
+	format -e -s -d $disk -f $label_file
+	typeset -i ret_val=$?
+	rm -f $label_file
+	#
+	# wait for format to finish
+	#
+	sleep 60
+	if ((ret_val != 0)); then
+		log_fail "unable to label $disk as VTOC."
+	fi
+
+	return 0
+}
+
+#
+# check if the system was installed as zfsroot or not
+# return: 0 if zfsroot, non-zero if not
+#
+function is_zfsroot
+{
+	df -n / | grep zfs > /dev/null 2>&1
+	return $?
+}
+
+#
+# get the root filesystem name if it's a zfsroot system.
+#
+# return: root filesystem name
+function get_rootfs
+{
+	typeset rootfs=""
+
+	if is_freebsd; then
+		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
+	elif ! is_linux; then
+		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
+		    /etc/mnttab)
+	fi
+	if [[ -z "$rootfs" ]]; then
+		log_fail "Cannot get rootfs"
+	fi
+	zfs list $rootfs > /dev/null 2>&1
+	if (($? == 0)); then
+		echo $rootfs
+	else
+		log_fail "This is not a zfsroot system."
+	fi
+}
+
+#
+# get the rootfs's pool name
+# return:
+# rootpool name
+#
+function get_rootpool
+{
+	typeset rootfs=""
+	typeset rootpool=""
+
+	if is_freebsd; then
+		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
+	elif ! is_linux; then
+		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
+		    /etc/mnttab)
+	fi
+	if [[ -z "$rootfs" ]]; then
+		log_fail "Cannot get rootpool"
+	fi
+	zfs list $rootfs > /dev/null 2>&1
+	if (($? == 0)); then
+		echo ${rootfs%%/*}
+	else
+		log_fail "This is not a zfsroot system."
+	fi
+}
+
+#
+# Get the number of words in a string, separated by whitespace
+#
+function get_word_count
+{
+	echo $1 | wc -w
+}
+
+#
+# Verify that the required number of disks is given
+#
+function verify_disk_count
+{
+	typeset -i min=${2:-1}
+
+	typeset -i count=$(get_word_count "$1")
+
+	if ((count < min)); then
+		log_untested "A minimum of $min disks is required to run." \
+		    " You specified $count disk(s)"
+	fi
+}
+
+function ds_is_volume
+{
+	typeset type=$(get_prop type $1)
+	[[ $type = "volume" ]] && return 0
+	return 1
+}
+
+function ds_is_filesystem
+{
+	typeset type=$(get_prop type $1)
+	[[ $type = "filesystem" ]] && return 0
+	return 1
+}
+
+function ds_is_snapshot
+{
+	typeset type=$(get_prop type $1)
+	[[ $type = "snapshot" ]] && return 0
+	return 1
+}
+
+#
+# Check if Trusted Extensions are installed and enabled
+#
+function is_te_enabled
+{
+	svcs -H -o state labeld 2>/dev/null | grep "enabled" > /dev/null
+	if (($? != 0)); then
+		return 1
+	else
+		return 0
+	fi
+}
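+
+# Example usage of verify_disk_count() (a sketch; $DISKS is supplied by the
+# test harness environment):
+#
+#	verify_disk_count "$DISKS" 3	# log_untested unless >= 3 disks given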
+
+# Utility function to determine if a system has multiple cpus.
+function is_mp
+{
+	if is_linux; then
+		(($(nproc) > 1))
+	elif is_freebsd; then
+		(($(sysctl -n kern.smp.cpus) > 1))
+	else
+		(($(psrinfo | wc -l) > 1))
+	fi
+
+	return $?
+}
+
+function get_cpu_freq
+{
+	if is_linux; then
+		lscpu | awk '/CPU MHz/ { print $3 }'
+	elif is_freebsd; then
+		sysctl -n hw.clockrate
+	else
+		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
+	fi
+}
+
+# Run the given command as the user provided.
+function user_run
+{
+	typeset user=$1
+	shift
+
+	log_note "user:$user $@"
+	eval su - \$user -c \"$@\" > $TEST_BASE_DIR/out 2>$TEST_BASE_DIR/err
+}
+
+#
+# Check if the pool contains the specified vdevs
+#
+# $1 pool
+# $2..n <vdev> ...
+#
+# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
+# vdevs is not in the pool, and 2 if pool name is missing.
+#
+function vdevs_in_pool
+{
+	typeset pool=$1
+	typeset vdev
+
+	if [[ -z $pool ]]; then
+		log_note "Missing pool name."
+		return 2
+	fi
+
+	shift
+
+	# We could use 'zpool list' to only get the vdevs of the pool but we
+	# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
+	# therefore we use the 'zpool status' output.
+	typeset tmpfile=$(mktemp)
+	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
+	for vdev in $@; do
+		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
+		if (($? != 0)); then
+			rm -f $tmpfile
+			return 1
+		fi
+	done
+
+	rm -f $tmpfile
+
+	return 0
+}
+
+function get_max
+{
+	typeset -i i max=$1
+	shift
+
+	for i in "$@"; do
+		max=$((max > i ? max : i))
+	done
+
+	echo $max
+}
+
+function get_min
+{
+	typeset -i i min=$1
+	shift
+
+	for i in "$@"; do
+		min=$((min < i ? min : i))
+	done
+
+	echo $min
+}
+
+# Write data that can be compressed into a directory
+function write_compressible
+{
+	typeset dir=$1
+	typeset megs=$2
+	typeset nfiles=${3:-1}
+	typeset bs=${4:-1024k}
+	typeset fname=${5:-file}
+
+	[[ -d $dir ]] || log_fail "No directory: $dir"
+
+	# Under Linux fio is not currently used since its behavior can
+	# differ significantly across versions. This includes missing
+	# command line options and cases where the --buffer_compress_*
+	# options fail to behave as expected.
+	if is_linux; then
+		typeset file_bytes=$(to_bytes $megs)
+		typeset bs_bytes=4096
+		typeset blocks=$(($file_bytes / $bs_bytes))
+
+		for (( i = 0; i < $nfiles; i++ )); do
+			truncate -s $file_bytes $dir/$fname.$i
+
+			# Write every third block to get 66% compression.
+			for (( j = 0; j < $blocks; j += 3 )); do
+				dd if=/dev/urandom of=$dir/$fname.$i \
+				    seek=$j bs=$bs_bytes count=1 \
+				    conv=notrunc >/dev/null 2>&1
+			done
+		done
+	else
+		log_must eval "fio \
+		    --name=job \
+		    --fallocate=0 \
+		    --minimal \
+		    --randrepeat=0 \
+		    --buffer_compress_percentage=66 \
+		    --buffer_compress_chunk=4096 \
+		    --directory=$dir \
+		    --numjobs=$nfiles \
+		    --nrfiles=$nfiles \
+		    --rw=write \
+		    --bs=$bs \
+		    --filesize=$megs \
+		    --filename_format='$fname.\$jobnum' >/dev/null"
+	fi
+}
+
+function get_objnum
+{
+	typeset pathname=$1
+	typeset objnum
+
+	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
+	if is_freebsd; then
+		objnum=$(stat -f "%i" $pathname)
+	else
+		objnum=$(stat -c %i $pathname)
+	fi
+	echo $objnum
+}
+
+#
+# Sync data to the pool
+#
+# $1 pool name
+# $2 boolean to force uberblock (and config including zpool cache file) update
+#
+function sync_pool #pool <force>
+{
+	typeset pool=${1:-$TESTPOOL}
+	typeset force=${2:-false}
+
+	if [[ $force == true ]]; then
+		log_must zpool sync -f $pool
+	else
+		log_must zpool sync $pool
+	fi
+
+	return 0
+}
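+
+# Example usage of write_compressible() and sync_pool() (a sketch; the
+# dataset mountpoint and the "10m" size argument are hypothetical):
+#
+#	write_compressible /$TESTPOOL/$TESTFS 10m 2	# two ~66% compressible files
+#	sync_pool $TESTPOOL				# push the writes to disk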
+
+#
+# Wait for the zpool 'freeing' property to drop to zero.
+#
+# $1 pool name
+#
+function wait_freeing #pool
+{
+	typeset pool=${1:-$TESTPOOL}
+	while true; do
+		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
+		log_must sleep 1
+	done
+}
+
+#
+# Wait for every device replace operation to complete
+#
+# $1 pool name
+#
+function wait_replacing #pool
+{
+	typeset pool=${1:-$TESTPOOL}
+	while true; do
+		[[ "" == "$(zpool status $pool |
+		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
+		log_must sleep 1
+	done
+}
+
+#
+# Wait for a pool to be scrubbed
+#
+# $1 pool name
+#
+# Blocks until the scrub of the pool has completed; there is no timeout.
+#
+function wait_scrubbed
+{
+	typeset pool=${1:-$TESTPOOL}
+	while true ; do
+		is_pool_scrubbed $pool && break
+		sleep 1
+	done
+}
+
+# Backup the zed.rc in our test directory so that we can edit it for our test.
+#
+# Returns: Backup file name. You will need to pass this to zed_rc_restore().
+function zed_rc_backup
+{
+	zedrc_backup="$(mktemp)"
+	cp $ZEDLET_DIR/zed.rc $zedrc_backup
+	echo $zedrc_backup
+}
+
+function zed_rc_restore
+{
+	mv $1 $ZEDLET_DIR/zed.rc
+}
+
+#
+# Setup custom environment for the ZED.
+#
+# $@ Optional list of zedlets to run under zed.
+function zed_setup
+{
+	if ! is_linux; then
+		log_unsupported "No zed on $(uname)"
+	fi
+
+	if [[ ! -d $ZEDLET_DIR ]]; then
+		log_must mkdir $ZEDLET_DIR
+	fi
+
+	if [[ ! -e $VDEVID_CONF ]]; then
+		log_must touch $VDEVID_CONF
+	fi
+
+	if [[ -e $VDEVID_CONF_ETC ]]; then
+		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
+	fi
+	EXTRA_ZEDLETS=$@
+
+	# Create a symlink for /etc/zfs/vdev_id.conf file.
+	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
+
+	# Setup minimal ZED configuration. Individual test cases should
+	# add additional ZEDLETs as needed for their specific test.
+	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
+	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
+
+	# Scripts must only be user writable.
+	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
+		saved_umask=$(umask)
+		log_must umask 0022
+		for i in $EXTRA_ZEDLETS ; do
+			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
+		done
+		log_must umask $saved_umask
+	fi
+
+	# Customize the zed.rc file to enable the full debug log.
+	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
+	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
+}
+
+#
+# Cleanup custom ZED environment.
+#
+# $@ Optional list of zedlets to remove from our test zed.d directory.
+function zed_cleanup
+{
+	if ! is_linux; then
+		return
+	fi
+	EXTRA_ZEDLETS=$@
+
+	log_must rm -f ${ZEDLET_DIR}/zed.rc
+	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
+	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
+	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
+	log_must rm -f ${ZEDLET_DIR}/state
+
+	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
+		for i in $EXTRA_ZEDLETS ; do
+			log_must rm -f ${ZEDLET_DIR}/$i
+		done
+	fi
+	log_must rm -f $ZED_LOG
+	log_must rm -f $ZED_DEBUG_LOG
+	log_must rm -f $VDEVID_CONF_ETC
+	log_must rm -f $VDEVID_CONF
+	rmdir $ZEDLET_DIR
+}
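+
+# Typical ZED lifecycle using the helpers above and below (a sketch;
+# Linux-only, and "all-debug.sh" is one of the stock zedlets):
+#
+#	zed_events_drain
+#	zed_setup all-debug.sh
+#	zed_start
+#	# ... inject events and check $ZED_DEBUG_LOG ...
+#	zed_stop
+#	zed_cleanup all-debug.sh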
+
+#
+# Check if ZED is currently running; if not, start ZED.
+#
+function zed_start
+{
+	if ! is_linux; then
+		return
+	fi
+
+	# ZEDLET_DIR=/var/tmp/zed
+	if [[ ! -d $ZEDLET_DIR ]]; then
+		log_must mkdir $ZEDLET_DIR
+	fi
+
+	# Verify the ZED is not already running.
+	pgrep -x zed > /dev/null
+	if (($? == 0)); then
+		log_note "ZED already running"
+	else
+		log_note "Starting ZED"
+		# run ZED in the background and redirect foreground logging
+		# output to $ZED_LOG.
+		log_must truncate -s 0 $ZED_DEBUG_LOG
+		log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
+		    "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
+	fi
+
+	return 0
+}
+
+#
+# Kill the ZED process
+#
+function zed_stop
+{
+	if ! is_linux; then
+		return
+	fi
+
+	log_note "Stopping ZED"
+	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
+		zedpid=$(<${ZEDLET_DIR}/zed.pid)
+		kill $zedpid
+		while ps -p $zedpid > /dev/null; do
+			sleep 1
+		done
+		rm -f ${ZEDLET_DIR}/zed.pid
+	fi
+	return 0
+}
+
+#
+# Drain all zevents
+#
+function zed_events_drain
+{
+	while [ $(zpool events -H | wc -l) -ne 0 ]; do
+		sleep 1
+		zpool events -c >/dev/null
+	done
+}
+
+# Set a variable in zed.rc to something, un-commenting it in the process.
+#
+# $1 variable
+# $2 value
+function zed_rc_set
+{
+	var="$1"
+	val="$2"
+	# Remove any existing line for the variable
+	cmd="'/$var/d'"
+	eval sed -i $cmd $ZEDLET_DIR/zed.rc
+
+	# Add the new setting at the end
+	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
+}
+
+#
+# Check if the provided device is actively being used as a swap device.
+#
+function is_swap_inuse
+{
+	typeset device=$1
+
+	if [[ -z $device ]] ; then
+		log_note "No device specified."
+		return 1
+	fi
+
+	if is_linux; then
+		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
+	elif is_freebsd; then
+		swapctl -l | grep -w $device > /dev/null 2>&1
+	else
+		swap -l | grep -w $device > /dev/null 2>&1
+	fi
+
+	return $?
+}
+
+#
+# Setup a swap device using the provided device.
+#
+function swap_setup
+{
+	typeset swapdev=$1
+
+	if is_linux; then
+		log_must eval "mkswap $swapdev > /dev/null 2>&1"
+		log_must swapon $swapdev
+	elif is_freebsd; then
+		log_must swapctl -a $swapdev
+	else
+		log_must swap -a $swapdev
+	fi
+
+	return 0
+}
+
+#
+# Cleanup a swap device on the provided device.
+#
+function swap_cleanup
+{
+	typeset swapdev=$1
+
+	if is_swap_inuse $swapdev; then
+		if is_linux || is_freebsd; then
+			log_must swapoff $swapdev
+		else
+			log_must swap -d $swapdev
+		fi
+	fi
+
+	return 0
+}
+
+#
+# Set a global system tunable (64-bit value)
+#
+# $1 tunable name (use a NAME defined in tunables.cfg)
+# $2 tunable value
+#
+function set_tunable64
+{
+	set_tunable_impl "$1" "$2" Z
+}
+
+#
+# Set a global system tunable (32-bit value)
+#
+# $1 tunable name (use a NAME defined in tunables.cfg)
+# $2 tunable value
+#
+function set_tunable32
+{
+	set_tunable_impl "$1" "$2" W
+}
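+
+# Example usage of the tunable setters (a sketch; assumes TXG_TIMEOUT is one
+# of the NAMEs defined in tunables.cfg on this platform):
+#
+#	saved=$(get_tunable TXG_TIMEOUT)
+#	log_must set_tunable64 TXG_TIMEOUT 1	# force frequent txg syncs
+#	# ... exercise the pool ...
+#	log_must set_tunable64 TXG_TIMEOUT $saved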
+
+function set_tunable_impl
+{
+	typeset name="$1"
+	typeset value="$2"
+	typeset mdb_cmd="$3"
+	typeset module="${4:-zfs}"
+
+	eval "typeset tunable=\$$name"
+	case "$tunable" in
+	UNSUPPORTED)
+		log_unsupported "Tunable '$name' is unsupported on $(uname)"
+		;;
+	"")
+		log_fail "Tunable '$name' must be added to tunables.cfg"
+		;;
+	*)
+		;;
+	esac
+
+	[[ -z "$value" ]] && return 1
+	[[ -z "$mdb_cmd" ]] && return 1
+
+	case "$(uname)" in
+	Linux)
+		typeset zfs_tunables="/sys/module/$module/parameters"
+		[[ -w "$zfs_tunables/$tunable" ]] || return 1
+		cat >"$zfs_tunables/$tunable" <<<"$value"
+		return $?
+		;;
+	FreeBSD)
+		sysctl vfs.zfs.$tunable=$value
+		return $?
+		;;
+	SunOS)
+		[[ "$module" == "zfs" ]] || return 1
+		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
+		return $?
+		;;
+	esac
+}
+
+#
+# Get a global system tunable
+#
+# $1 tunable name (use a NAME defined in tunables.cfg)
+#
+function get_tunable
+{
+	get_tunable_impl "$1"
+}
+
+function get_tunable_impl
+{
+	typeset name="$1"
+	typeset module="${2:-zfs}"
+
+	eval "typeset tunable=\$$name"
+	case "$tunable" in
+	UNSUPPORTED)
+		log_unsupported "Tunable '$name' is unsupported on $(uname)"
+		;;
+	"")
+		log_fail "Tunable '$name' must be added to tunables.cfg"
+		;;
+	*)
+		;;
+	esac
+
+	case "$(uname)" in
+	Linux)
+		typeset zfs_tunables="/sys/module/$module/parameters"
+		[[ -f "$zfs_tunables/$tunable" ]] || return 1
+		cat $zfs_tunables/$tunable
+		return $?
+		;;
+	FreeBSD)
+		sysctl -n vfs.zfs.$tunable
+		return $?
+		;;
+	SunOS)
+		# Reading tunables with mdb is not implemented here.
+		[[ "$module" == "zfs" ]] || return 1
+		;;
+	esac
+
+	return 1
+}
+
+#
+# Prints the current time in seconds since UNIX Epoch.
+#
+function current_epoch
+{
+	printf '%(%s)T'
+}
+
+#
+# Get decimal value of global uint32_t variable using mdb.
+#
+function mdb_get_uint32
+{
+	typeset variable=$1
+	typeset value
+
+	value=$(mdb -k -e "$variable/X | ::eval .=U")
+	if [[ $? -ne 0 ]]; then
+		log_fail "Failed to get value of '$variable' from mdb."
+		return 1
+	fi
+
+	echo $value
+	return 0
+}
+
+#
+# Set global uint32_t variable to a decimal value using mdb.
+#
+function mdb_set_uint32
+{
+	typeset variable=$1
+	typeset value=$2
+
+	mdb -kw -e "$variable/W 0t$value" > /dev/null
+	if [[ $? -ne 0 ]]; then
+		echo "Failed to set '$variable' to '$value' in mdb."
+		return 1
+	fi
+
+	return 0
+}
+
+#
+# Set global scalar integer variable to a hex value using mdb.
+# Note: Target should have CTF data loaded.
+#
+function mdb_ctf_set_int
+{
+	typeset variable=$1
+	typeset value=$2
+
+	mdb -kw -e "$variable/z $value" > /dev/null
+	if [[ $? -ne 0 ]]; then
+		echo "Failed to set '$variable' to '$value' in mdb."
+		return 1
+	fi
+
+	return 0
+}
+
+#
+# Compute MD5 digest for given file or stdin if no file given.
+# Note: file path must not contain spaces
+#
+function md5digest
+{
+	typeset file=$1
+
+	case $(uname) in
+	FreeBSD)
+		md5 -q $file
+		;;
+	*)
+		md5sum -b $file | awk '{ print $1 }'
+		;;
+	esac
+}
+
+#
+# Compute SHA256 digest for given file or stdin if no file given.
+# Note: file path must not contain spaces
+#
+function sha256digest
+{
+	typeset file=$1
+
+	case $(uname) in
+	FreeBSD)
+		sha256 -q $file
+		;;
+	*)
+		sha256sum -b $file | awk '{ print $1 }'
+		;;
+	esac
+}
+
+function new_fs #<args>
+{
+	case $(uname) in
+	FreeBSD)
+		newfs "$@"
+		;;
+	*)
+		echo y | newfs -v "$@"
+		;;
+	esac
+}
+
+function stat_size #<path>
+{
+	typeset path=$1
+
+	case $(uname) in
+	FreeBSD)
+		stat -f %z "$path"
+		;;
+	*)
+		stat -c %s "$path"
+		;;
+	esac
+}
+
+# Run a command as if it was being run in a TTY.
+#
+# Usage:
+#
+#	faketty command
+#
+function faketty
+{
+	if is_freebsd; then
+		script -q /dev/null env "$@"
+	else
+		script --return --quiet -c "$*" /dev/null
+	fi
+}
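+
+# Example usage of the digest helpers above (a sketch; the file path is
+# hypothetical):
+#
+#	before=$(md5digest /$TESTPOOL/$TESTFS/file.bin)
+#	# ... snapshot, send/receive, export/import, etc. ...
+#	after=$(md5digest /$TESTPOOL/$TESTFS/file.bin)
+#	[[ $before == $after ]] || log_fail "checksum mismatch"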
+
+#
+# Produce a random permutation of the integers in a given range (inclusive).
+#
+function range_shuffle # begin end
+{
+	typeset -i begin=$1
+	typeset -i end=$2
+
+	seq ${begin} ${end} | sort -R
+}
+
+#
+# Cross-platform xattr helpers
+#
+
+function get_xattr # name path
+{
+	typeset name=$1
+	typeset path=$2
+
+	case $(uname) in
+	FreeBSD)
+		getextattr -qq user "${name}" "${path}"
+		;;
+	*)
+		attr -qg "${name}" "${path}"
+		;;
+	esac
+}
+
+function set_xattr # name value path
+{
+	typeset name=$1
+	typeset value=$2
+	typeset path=$3
+
+	case $(uname) in
+	FreeBSD)
+		setextattr user "${name}" "${value}" "${path}"
+		;;
+	*)
+		attr -qs "${name}" -V "${value}" "${path}"
+		;;
+	esac
+}
+
+function set_xattr_stdin # name path
+{
+	typeset name=$1
+	typeset path=$2
+
+	case $(uname) in
+	FreeBSD)
+		setextattr -i user "${name}" "${path}"
+		;;
+	*)
+		attr -qs "${name}" "${path}"
+		;;
+	esac
+}
+
+function rm_xattr # name path
+{
+	typeset name=$1
+	typeset path=$2
+
+	case $(uname) in
+	FreeBSD)
+		rmextattr -q user "${name}" "${path}"
+		;;
+	*)
+		attr -qr "${name}" "${path}"
+		;;
+	esac
+}
+
+function ls_xattr # path
+{
+	typeset path=$1
+
+	case $(uname) in
+	FreeBSD)
+		lsextattr -qq user "${path}"
+		;;
+	*)
+		attr -ql "${path}"
+		;;
+	esac
+}
+
+function get_arcstat # stat
+{
+	typeset stat=$1
+
+	case $(uname) in
+	FreeBSD)
+		sysctl -n kstat.zfs.misc.arcstats.$stat
+		;;
+	Linux)
+		typeset zfs_arcstats="/proc/spl/kstat/zfs/arcstats"
+		[[ -f "$zfs_arcstats" ]] || return 1
+		# Match the stat name exactly so that, e.g., "hits" does
+		# not also pick up "demand_data_hits".
+		awk -v stat="$stat" '$1 == stat { print $3 }' $zfs_arcstats
+		;;
+	*)
+		false
+		;;
+	esac
+}
+
+#
+# Given an array of pids, wait until all processes
+# have completed and check their return status.
+#
+function wait_for_children #children
+{
+	rv=0
+	children=("$@")
+	for child in "${children[@]}"
+	do
+		child_exit=0
+		wait ${child} || child_exit=$?
+		if [ $child_exit -ne 0 ]; then
+			echo "child ${child} failed with ${child_exit}"
+			rv=1
+		fi
+	done
+	return $rv
+}
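+
+# Example usage of wait_for_children() (a sketch; "worker" stands in for any
+# test helper run in the background):
+#
+#	typeset pids=""
+#	for i in 1 2 3; do
+#		worker $i &
+#		pids="$pids $!"
+#	done
+#	log_must wait_for_children $pids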