author    | Andrea Gelmini <andrea.gelmini@gelma.net> | 2020-06-10 04:24:09 +0000
committer | GitHub <noreply@github.com> | 2020-06-10 04:24:09 +0000
commit    | dd4bc569b9f73e8c2445bed90d82c92d98aada03 (patch)
tree      | 64bacc1d16199ed350783f0b9a6490fe79ed6a70 /tests
parent    | 7bcb7f0840d1857370dd1f9ee0ad48f9b7939dfd (diff)
Diffstat (limited to 'tests')
15 files changed, 18 insertions, 18 deletions
diff --git a/tests/zfs-tests/cmd/mkbusy/mkbusy.c b/tests/zfs-tests/cmd/mkbusy/mkbusy.c
index c26822bb5778..a03076ffc003 100644
--- a/tests/zfs-tests/cmd/mkbusy/mkbusy.c
+++ b/tests/zfs-tests/cmd/mkbusy/mkbusy.c
@@ -102,7 +102,7 @@ main(int argc, char *argv[])
 
 	/*
 	 * The argument supplied doesn't exist. Copy the path, and
-	 * remove the trailing slash if presnt.
+	 * remove the trailing slash if present.
 	 */
 	if ((arg = strdup(argv[0])) == NULL)
 		fail("strdup", 1);
diff --git a/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.set_props.zcp b/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.set_props.zcp
index eade17aa89f0..756263a9d082 100644
--- a/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.set_props.zcp
+++ b/tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.set_props.zcp
@@ -64,7 +64,7 @@ neg_props["guid"] = { "12345" }
 set_fail = {}
 val_fail = {}
 
--- Test properies that should work
+-- Test properties that should work
 for prop, values in pairs(pos_props) do
 	for i, val in ipairs(values) do
 		old_val, src = zfs.get_prop(fs, prop)
@@ -94,7 +94,7 @@ for prop, values in pairs(pos_props) do
 	end
 end
 
--- Test properies that should fail
+-- Test properties that should fail
 for prop, expected in pairs(neg_props) do
 	exp_val = expected[1]
 	exp_err = expected[2]
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh
index 4968e15073cc..563b3e00dd89 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh
@@ -31,7 +31,7 @@
 # filesystem, and verify that zfs_ids_to_path behaves correctly with them.
 #
 # STRATEGY:
-# 1. Create a dateset
+# 1. Create a dataset
 # 2. Makes files in the dataset
 # 3. Verify that zfs_ids_to_path outputs the correct format for each one
 #
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh
index 4a3b61c52637..a8107b94eb3b 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh
@@ -19,7 +19,7 @@
 #
 # DESCRIPTION:
-# 'zpool wait' works when waiting for mulitple activities.
+# 'zpool wait' works when waiting for multiple activities.
 #
 # STRATEGY:
 # 1. Create a pool with some data.
@@ -80,4 +80,4 @@
 proc_must_exist $pid
 log_must zpool initialize -s $TESTPOOL $DISK1
 bkgrnd_proc_succeeded $pid
-log_pass "'zpool wait' works when waiting for mutliple activities."
+log_pass "'zpool wait' works when waiting for multiple activities."
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh
index ebe38b45d39c..f4819f37adb7 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh
@@ -23,7 +23,7 @@
 #
 # STRATEGY:
 # 1. Create an empty pool with no activity
-# 2. Run zpool wait with various acitivies, make sure it always returns
+# 2. Run zpool wait with various activities, make sure it always returns
 #    promptly
 #
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh
index 0604180b5120..f047050ea0d2 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh
@@ -38,7 +38,7 @@ function cleanup
 	[[ -d "$TESTDIR" ]] && log_must rm -r "$TESTDIR"
 }
 
-# Check wether any vdevs in given pool are being trimmed
+# Check whether any vdevs in given pool are being trimmed
 function trim_in_progress
 {
 	typeset pool="$1"
diff --git a/tests/zfs-tests/tests/functional/history/history_common.kshlib b/tests/zfs-tests/tests/functional/history/history_common.kshlib
index 40daaa4d663f..8ac34b2de909 100644
--- a/tests/zfs-tests/tests/functional/history/history_common.kshlib
+++ b/tests/zfs-tests/tests/functional/history/history_common.kshlib
@@ -365,7 +365,7 @@ function verify_destroy
 	typeset cmd=$1
 	typeset flags=$3
 
-	# This function doesn't currently verifiy the zpool command.
+	# This function doesn't currently verify the zpool command.
 	[[ ${cmd%% *} == "zfs" ]] || return 1
 	[[ $flags =~ "i" ]] || return 1
diff --git a/tests/zfs-tests/tests/functional/largest_pool/largest_pool_001_pos.ksh b/tests/zfs-tests/tests/functional/largest_pool/largest_pool_001_pos.ksh
index 1bc8f72d6ab4..a8934159b244 100755
--- a/tests/zfs-tests/tests/functional/largest_pool/largest_pool_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/largest_pool/largest_pool_001_pos.ksh
@@ -158,4 +158,4 @@ for volsize in $VOLSIZES; do
 	destroy_pool $TESTPOOL2
 done
 
-log_pass "Dateset can be created, mounted & destroy in largest pool succeeded."
+log_pass "Dataset can be created, mounted & destroy in largest pool succeeded."
diff --git a/tests/zfs-tests/tests/functional/mmap/mmap_read_001_pos.ksh b/tests/zfs-tests/tests/functional/mmap/mmap_read_001_pos.ksh
index 42e1f73202c4..470f10d937bc 100755
--- a/tests/zfs-tests/tests/functional/mmap/mmap_read_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/mmap/mmap_read_001_pos.ksh
@@ -40,7 +40,7 @@
 # 1. Create a pool & dataset
 # 2. Call readmmap binary
 # 3. unmount this file system
-# 4. Verify the integrity of this pool & dateset
+# 4. Verify the integrity of this pool & dataset
 #
 
 verify_runnable "global"
diff --git a/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_002_pos.ksh b/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_002_pos.ksh
index ae219e01a4e8..0184f06efa50 100755
--- a/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_002_pos.ksh
+++ b/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_002_pos.ksh
@@ -29,13 +29,13 @@
 # STRATEGY:
 #	1. Create pool with a cache device.
 #	2. Create a an encrypted ZFS file system.
-#	3. Create a random file in the encyrpted file system and random
+#	3. Create a random file in the encrypted file system and random
 #	   read for 30 sec.
 #	4. Export pool.
 #	5. Read the amount of log blocks written from the header of the
 #	   L2ARC device.
 #	5. Import pool.
-#	6. Mount the encypted ZFS file system.
+#	6. Mount the encrypted ZFS file system.
 #	7. Read the amount of log blocks rebuilt in arcstats and compare to
 #	   (5).
 #	8. Check if the labels of the L2ARC device are intact.
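
The persist_l2arc strategies above boil down to comparing the L2ARC log blocks written before an export against the log blocks rebuilt after the import. A minimal ksh sketch of that comparison on Linux follows; the arcstats field names (l2_log_blk_writes, l2_rebuild_log_blks) are assumptions inferred from the test descriptions, not taken from these tests, and may differ by OpenZFS version.

    # Sketch only: verify the stat names against your OpenZFS version.
    ARCSTATS=/proc/spl/kstat/zfs/arcstats

    # kstat rows are "name  type  value"; field 3 is the counter.
    written=$(awk '$1 == "l2_log_blk_writes" {print $3}' "$ARCSTATS")
    zpool export $TESTPOOL
    zpool import $TESTPOOL
    rebuilt=$(awk '$1 == "l2_rebuild_log_blks" {print $3}' "$ARCSTATS")

    # A persistent L2ARC should rebuild roughly what was written.
    echo "log blocks: written=$written rebuilt=$rebuilt"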
diff --git a/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_005_pos.ksh b/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_005_pos.ksh
index b2cad9d1fc16..9fc6a5923864 100755
--- a/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_005_pos.ksh
+++ b/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_005_pos.ksh
@@ -34,7 +34,7 @@
 #	4. Export pool.
 #	5. Read amount of log blocks written.
 #	6. Import pool.
-#	7. Mount the encypted ZFS file system.
+#	7. Mount the encrypted ZFS file system.
 #	8. Read amount of log blocks built.
 #	9. Compare the two amounts
 #	10. Read the file written in (3) and check if l2_hits in
diff --git a/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh b/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh
index 0a51f1ef2428..9af1c972faa9 100755
--- a/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh
+++ b/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh
@@ -26,7 +26,7 @@
 # DESCRIPTION:
 # Testing resilver restart logic both with and without the deferred resilver
 # feature enabled, verifying that resilver is not restarted when it is
-# unecessary.
+# unnecessary.
 #
 # STRATEGY:
 # 1. Create a pool
diff --git a/tests/zfs-tests/tests/functional/write_dirs/write_dirs_001_pos.ksh b/tests/zfs-tests/tests/functional/write_dirs/write_dirs_001_pos.ksh
index 881f78d5829a..f7519eb6dd3b 100755
--- a/tests/zfs-tests/tests/functional/write_dirs/write_dirs_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/write_dirs/write_dirs_001_pos.ksh
@@ -37,7 +37,7 @@
 # is full. The zfs file system should be stable and works well.
 #
 # STRATEGY:
-# 1. Create a pool & dateset
+# 1. Create a pool & dataset
 # 2. Make directories in the zfs file system
 # 3. Create 50 big files in each directories
 # 4. Test case exit when the disk is full.
diff --git a/tests/zfs-tests/tests/functional/write_dirs/write_dirs_002_pos.ksh b/tests/zfs-tests/tests/functional/write_dirs/write_dirs_002_pos.ksh
index a0bc4ce3262a..07387017fa08 100755
--- a/tests/zfs-tests/tests/functional/write_dirs/write_dirs_002_pos.ksh
+++ b/tests/zfs-tests/tests/functional/write_dirs/write_dirs_002_pos.ksh
@@ -37,7 +37,7 @@
 # is full. The zfs file system should be work well and stable.
 #
 # STRATEGY:
-# 1. Create a pool & dateset
+# 1. Create a pool & dataset
 # 2. Make directories in the zfs file system
 # 3. Create 5000 files in each directories
 # 4. Test case exit when the disk is full
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
index 944a3d13c143..888136fec93c 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
@@ -25,7 +25,7 @@
 # for all fio runs. The ARC is not cleared to ensure that all data is cached.
 #
 # This is basically a copy of the sequential_reads_cached test case, but with
-# a smaller dateset so that we can fit everything into the decompressed, linear
+# a smaller dataset so that we can fit everything into the decompressed, linear
 # space in the dbuf cache.
 #
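
The mkbusy.c hunk at the top corrects a comment describing a small normalization step: duplicate the argument, then drop a trailing slash if present. In the ksh used throughout these tests, the same normalization is a single suffix-pattern expansion; a minimal sketch with a hypothetical path value:

    arg="/tank/fs/file/"     # hypothetical argument; may or may not end in '/'
    arg="${arg%/}"           # strip one trailing slash if present, else a no-op
    print "$arg"             # -> /tank/fs/file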
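
The zpool_wait_multiple hunks touch a test verifying that 'zpool wait' can block on several background activities at once and returns only after all of them have stopped. A condensed sketch of that pattern, reusing $TESTPOOL and $DISK1 from the test but omitting the harness helpers (proc_must_exist, bkgrnd_proc_succeeded):

    zpool scrub $TESTPOOL                      # activity 1: scrub
    zpool initialize $TESTPOOL $DISK1          # activity 2: initialize
    zpool wait -t scrub,initialize $TESTPOOL & # block on both activities
    pid=$!

    zpool scrub -s $TESTPOOL                   # stop the scrub; wait still blocks
    kill -0 $pid && echo "still waiting on initialize"

    zpool initialize -s $TESTPOOL $DISK1       # suspend initialize; wait returns
    wait $pid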