Diffstat (limited to 'scripts')
-rw-r--r--  scripts/.gitignore                2
-rw-r--r--  scripts/Makefile.am              97
-rwxr-xr-x  scripts/commitcheck.sh          123
-rw-r--r--  scripts/common.sh.in             22
-rwxr-xr-x  scripts/convert_wycheproof.pl   244
-rwxr-xr-x  scripts/cstyle.pl               988
-rwxr-xr-x  scripts/dkms.mkconf              87
-rwxr-xr-x  scripts/dkms.postbuild           25
-rwxr-xr-x  scripts/kmodtool                569
-rwxr-xr-x  scripts/make_gitrev.sh           81
-rwxr-xr-x  scripts/man-dates.sh             12
-rwxr-xr-x  scripts/mancheck.sh              60
-rw-r--r--  scripts/objtool-wrapper.in       36
-rwxr-xr-x  scripts/paxcheck.sh              43
-rwxr-xr-x  scripts/spdxcheck.pl            433
-rwxr-xr-x  scripts/update_authors.pl       378
-rwxr-xr-x  scripts/zfs-helpers.sh          197
m---------  scripts/zfs-images                0
-rwxr-xr-x  scripts/zfs-tests-color.sh       27
-rwxr-xr-x  scripts/zfs-tests.sh            847
-rwxr-xr-x  scripts/zfs.sh                  245
-rwxr-xr-x  scripts/zfs2zol-patch.sed        32
-rwxr-xr-x  scripts/zfs_prepare_disk         17
-rwxr-xr-x  scripts/zimport.sh              512
-rwxr-xr-x  scripts/zloop.sh                358
-rwxr-xr-x  scripts/zol2zfs-patch.sed        20
26 files changed, 0 insertions, 5455 deletions
diff --git a/scripts/.gitignore b/scripts/.gitignore
deleted file mode 100644
index 443cb7b8484e..000000000000
--- a/scripts/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-common.sh
-objtool-wrapper
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
deleted file mode 100644
index f623526307b4..000000000000
--- a/scripts/Makefile.am
+++ /dev/null
@@ -1,97 +0,0 @@
-scriptsdir = $(datadir)/$(PACKAGE)
-dist_noinst_SCRIPTS = \
- %D%/commitcheck.sh \
- %D%/common.sh.in \
- %D%/dkms.mkconf \
- %D%/dkms.postbuild \
- %D%/kmodtool \
- %D%/make_gitrev.sh \
- %D%/man-dates.sh \
- %D%/mancheck.sh \
- %D%/paxcheck.sh \
- %D%/zfs-tests-color.sh
-
-scripts_scripts = \
- %D%/zfs-helpers.sh \
- %D%/zfs-tests.sh \
- %D%/zfs.sh \
- %D%/zimport.sh \
- %D%/zloop.sh
-
-if CONFIG_USER
-dist_scripts_SCRIPTS = $(scripts_scripts)
-dist_zfsexec_SCRIPTS = \
- %D%/zfs_prepare_disk
-else
-dist_noinst_SCRIPTS += $(scripts_scripts)
-endif
-
-dist_noinst_DATA += \
- %D%/cstyle.pl \
- %D%/update_authors.pl \
- %D%/zfs2zol-patch.sed \
- %D%/zol2zfs-patch.sed
-
-SHELLCHECKSCRIPTS += $(dist_scripts_SCRIPTS) $(dist_noinst_SCRIPTS)
-
-define SCRIPTS_EXTRA_ENVIRONMENT
-
-# Only required for in-tree use
-export INTREE="yes"
-export GDB="libtool --mode=execute gdb"
-export LDMOD=/sbin/insmod
-
-export CMD_DIR=$(abs_top_builddir)
-export UDEV_SCRIPT_DIR=$(abs_top_srcdir)/udev
-export UDEV_CMD_DIR=$(abs_top_builddir)/udev
-export UDEV_RULE_DIR=$(abs_top_builddir)/udev/rules.d
-export ZEDLET_ETC_DIR=$$CMD_DIR/cmd/zed/zed.d
-export ZEDLET_LIBEXEC_DIR=$$CMD_DIR/cmd/zed/zed.d
-export ZPOOL_SCRIPT_DIR=$$CMD_DIR/cmd/zpool/zpool.d
-export ZPOOL_SCRIPTS_PATH=$$CMD_DIR/cmd/zpool/zpool.d
-export ZPOOL_COMPAT_DIR=$$CMD_DIR/cmd/zpool/compatibility.d
-export CONTRIB_DIR=$(abs_top_builddir)/contrib
-export LIB_DIR=$(abs_top_builddir)/.libs
-export SYSCONF_DIR=$(abs_top_builddir)/etc
-
-export INSTALL_UDEV_DIR=$(udevdir)
-export INSTALL_UDEV_RULE_DIR=$(udevruledir)
-export INSTALL_MOUNT_HELPER_DIR=$(mounthelperdir)
-export INSTALL_SYSCONF_DIR=$(sysconfdir)
-export INSTALL_PYTHON_DIR=$(pythonsitedir)
-export INSTALL_PKGDATA_DIR=$(pkgdatadir)
-
-export KMOD_SPL=$(abs_top_builddir)/module/spl.ko
-export KMOD_ZFS=$(abs_top_builddir)/module/zfs.ko
-export KMOD_FREEBSD=$(abs_top_builddir)/module/openzfs.ko
-endef
-
-export SCRIPTS_EXTRA_ENVIRONMENT
-
-CLEANFILES += %D%/common.sh
-%D%/common.sh: %D%/common.sh.in Makefile
- -$(AM_V_at)$(MKDIR_P) $(@D)
- -$(AM_V_GEN)$(SED) -e '/^export BIN_DIR=/s|$$|$(abs_top_builddir)/tests/zfs-tests/bin|' \
- -e '/^export SBIN_DIR=/s|$$|$(abs_top_builddir)|' \
- -e '/^export LIBEXEC_DIR=/s|$$|$(abs_top_builddir)|' \
- -e '/^export ZTS_DIR=/s|$$|$(abs_top_srcdir)/tests|' \
- -e '/^export SCRIPT_DIR=/s|$$|$(abs_top_srcdir)/scripts|' \
- $< >$@
- -$(AM_V_at)echo "$$SCRIPTS_EXTRA_ENVIRONMENT" >>$@
-
-ALL_LOCAL += scripts-all-local
-scripts-all-local: %D%/common.sh $(PROGRAMS) $(SCRIPTS) $(DATA)
- -SCRIPT_COMMON=$< $(srcdir)/%D%/zfs-tests.sh -c
-
-CLEAN_LOCAL += scripts-clean-local
-scripts-clean-local:
- -$(RM) -r tests/zfs-tests/bin/
-
-INSTALL_DATA_HOOKS += scripts-install-data-hook
-scripts-install-data-hook: %D%/common.sh.in Makefile
- -$(SED) -e '/^export BIN_DIR=/s|$$|$(bindir)|' \
- -e '/^export SBIN_DIR=/s|$$|$(sbindir)|' \
- -e '/^export LIBEXEC_DIR=/s|$$|$(zfsexecdir)|' \
- -e '/^export ZTS_DIR=/s|$$|$(datadir)/$(PACKAGE)|' \
- -e '/^export SCRIPT_DIR=/s|$$|$(datadir)/$(PACKAGE)|' \
- $< >$(DESTDIR)$(datadir)/$(PACKAGE)/common.sh
diff --git a/scripts/commitcheck.sh b/scripts/commitcheck.sh
deleted file mode 100755
index 1b1d097501db..000000000000
--- a/scripts/commitcheck.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/sh
-
-REF="HEAD"
-
-# test commit body for length
-# lines containing urls are exempt from the length limit.
-test_commit_bodylength()
-{
- length="72"
- body=$(git log --no-show-signature -n 1 --pretty=%b "$REF" | grep -Ev "http(s)*://" | grep -E -m 1 ".{$((length + 1))}")
- if [ -n "$body" ]; then
- echo "error: commit message body contains line over ${length} characters"
- return 1
- fi
-
- return 0
-}
-
-# check for a tagged line
-check_tagged_line()
-{
- regex='^[[:space:]]*'"$1"':[[:space:]][[:print:]]+[[:space:]]<[[:graph:]]+>$'
- foundline=$(git log --no-show-signature -n 1 "$REF" | grep -E -m 1 "$regex")
- if [ -z "$foundline" ]; then
- echo "error: missing \"$1\""
- return 1
- fi
-
- return 0
-}
-
-# check commit message for a normal commit
-new_change_commit()
-{
- error=0
-
- # subject is not longer than 72 characters
- long_subject=$(git log --no-show-signature -n 1 --pretty=%s "$REF" | grep -E -m 1 '.{73}')
- if [ -n "$long_subject" ]; then
- echo "error: commit subject over 72 characters"
- error=1
- fi
-
- # need a signed off by
- if ! check_tagged_line "Signed-off-by" ; then
- error=1
- fi
-
- # ensure that no lines in the body of the commit are over 72 characters
- if ! test_commit_bodylength ; then
- error=1
- fi
-
- return "$error"
-}
-
-is_coverity_fix()
-{
- # subject starts with Fix coverity defects means it's a coverity fix
- subject=$(git log --no-show-signature -n 1 --pretty=%s "$REF" | grep -E -m 1 '^Fix coverity defects')
- if [ -n "$subject" ]; then
- return 0
- fi
-
- return 1
-}
-
-coverity_fix_commit()
-{
- error=0
-
- # subject starts with Fix coverity defects: CID dddd, dddd...
- subject=$(git log --no-show-signature -n 1 --pretty=%s "$REF" |
- grep -E -m 1 'Fix coverity defects: CID [[:digit:]]+(, [[:digit:]]+)*')
- if [ -z "$subject" ]; then
- echo "error: Coverity defect fixes must have a subject line that starts with \"Fix coverity defects: CID dddd\""
- error=1
- fi
-
- # need a signed off by
- if ! check_tagged_line "Signed-off-by" ; then
- error=1
- fi
-
- # test each summary line for the proper format
- OLDIFS=$IFS
- IFS='
-'
- for line in $(git log --no-show-signature -n 1 --pretty=%b "$REF" | grep -E '^CID'); do
- if ! echo "$line" | grep -qE '^CID [[:digit:]]+: ([[:graph:]]+|[[:space:]])+ \(([[:upper:]]|\_)+\)'; then
- echo "error: commit message has an improperly formatted CID defect line"
- error=1
- fi
- done
- IFS=$OLDIFS
-
- # ensure that no lines in the body of the commit are over 72 characters
- if ! test_commit_bodylength; then
- error=1
- fi
-
- return "$error"
-}
-
-if [ -n "$1" ]; then
- REF="$1"
-fi
-
-# if coverity fix, test against that
-if is_coverity_fix; then
- if ! coverity_fix_commit; then
- exit 1
- else
- exit 0
- fi
-fi
-
-# have a normal commit
-if ! new_change_commit ; then
- exit 1
-fi
-
-exit 0
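
For reference, a minimal invocation sketch of the removed commitcheck.sh; the ref argument is optional and the example ref is illustrative (the script defaults to HEAD):

    # check the most recent commit
    ./scripts/commitcheck.sh
    # check a specific ref instead of HEAD
    ./scripts/commitcheck.sh HEAD~1
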
diff --git a/scripts/common.sh.in b/scripts/common.sh.in
deleted file mode 100644
index 33669457f415..000000000000
--- a/scripts/common.sh.in
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-# Directories
-export BIN_DIR=
-export SBIN_DIR=
-export LIBEXEC_DIR=
-export ZTS_DIR=
-export SCRIPT_DIR=
-
-# General commands
-export ZDB="${ZDB:-$SBIN_DIR/zdb}"
-export ZFS="${ZFS:-$SBIN_DIR/zfs}"
-export ZPOOL="${ZPOOL:-$SBIN_DIR/zpool}"
-export ZTEST="${ZTEST:-$SBIN_DIR/ztest}"
-export ZFS_SH="${ZFS_SH:-$SCRIPT_DIR/zfs.sh}"
-
-# Test Suite
-export RUNFILE_DIR="${RUNFILE_DIR:-$ZTS_DIR/runfiles}"
-export TEST_RUNNER="${TEST_RUNNER:-$ZTS_DIR/test-runner/bin/test-runner.py}"
-export ZTS_REPORT="${ZTS_REPORT:-$ZTS_DIR/test-runner/bin/zts-report.py}"
-export STF_TOOLS="${STF_TOOLS:-$ZTS_DIR/test-runner}"
-export STF_SUITE="${STF_SUITE:-$ZTS_DIR/zfs-tests}"
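
A sketch of how the sed rules in Makefile.am above would fill in these empty exports when generating common.sh for an in-tree build; the absolute build-tree path is illustrative and assumes builddir and srcdir are the same:

    export BIN_DIR=/home/build/zfs/tests/zfs-tests/bin
    export SBIN_DIR=/home/build/zfs
    export LIBEXEC_DIR=/home/build/zfs
    export ZTS_DIR=/home/build/zfs/tests
    export SCRIPT_DIR=/home/build/zfs/scripts
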
diff --git a/scripts/convert_wycheproof.pl b/scripts/convert_wycheproof.pl
deleted file mode 100755
index d6b1b773c0d9..000000000000
--- a/scripts/convert_wycheproof.pl
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/env perl
-
-# SPDX-License-Identifier: MIT
-#
-# Copyright (c) 2025, Rob Norris <robn@despairlabs.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to
-# deal in the Software without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-# sell copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-#
-# This program converts AEAD test vectors from Project Wycheproof into a
-# format that can be consumed more easily by tests/zfs-tests/cmd/crypto_test.
-# See tests/zfs-tests/tests/functional/crypto/README for more info.
-#
-
-use 5.010;
-use warnings;
-use strict;
-use JSON qw(decode_json);
-
-sub usage {
- say "usage: $0 <infile> [<outfile>]";
- exit 1;
-}
-
-my ($infile, $outfile) = @ARGV;
-
-usage() if !defined $infile;
-
-open my $infh, '<', $infile or die "E: $infile: $!\n";
-my $json = do { local $/; <$infh> };
-close $infh;
-
-my $data = decode_json $json;
-
-select STDERR;
-
-# 0.8 had a slightly different format. 0.9* is current, stabilising for 1.0
-my $version = $data->{generatorVersion} // "[unknown]";
-if ("$version" !~ m/^0\.9[^0-9]/) {
- warn
- "W: this converter was written for Wycheproof 0.9 test vectors\n".
- " input file has version: $version\n".
- " bravely continuing, but expect crashes or garbled output\n";
-}
-
-# we only support AEAD tests
-my $schema = $data->{schema} // "[unknown]";
-if ("$schema" ne 'aead_test_schema.json') {
- warn
- "W: this converter is expecting AEAD test vectors\n".
- " input file has schema: $schema\n".
- " bravely continuing, but expect crashes or garbled output\n";
-}
-
-# sanity check; algorithm is provided
-my $algorithm = $data->{algorithm};
-if (!defined $algorithm) {
- die "E: $infile: required field 'algorithm' not found\n";
-}
-
-# sanity check; test count is present and correct
-my $ntests = 0;
-$ntests += $_ for map { scalar @{$_->{tests}} } @{$data->{testGroups}};
-if (!exists $data->{numberOfTests}) {
- warn "W: input file has no test count, using mine: $ntests\n";
-} elsif ($data->{numberOfTests} != $ntests) {
- warn
- "W: input file has incorrect test count: $data->{numberOfTests}\n".
- " using my own count: $ntests\n";
-}
-
-say " version: $version";
-say " schema: $schema";
-say "algorithm: $algorithm";
-say " ntests: $ntests";
-
-my $skipped = 0;
-
-my @tests;
-
-# tests are grouped into "test groups". groups have the same type and IV, key
-# and tag sizes. we can infer this info from the tests themselves, but it's
-# useful for sanity checks
-#
-# "testGroups" : [
-# {
-# "ivSize" : 96,
-# "keySize" : 128,
-# "tagSize" : 128,
-# "type" : "AeadTest",
-# "tests" : [ ... ]
-#
-for my $group (@{$data->{testGroups}}) {
- # skip non-AEAD test groups
- my $type = $group->{type} // "[unknown]";
- if ($type ne 'AeadTest') {
- warn "W: group has unexpected type '$type', skipping it\n";
- $skipped += @{$data->{tests}};
- next;
- }
-
- my ($iv_size, $key_size, $tag_size) =
- @$group{qw(ivSize keySize tagSize)};
-
- # a typical test:
- #
- # {
- # "tcId" : 48,
- # "comment" : "Flipped bit 63 in tag",
- # "flags" : [
- # "ModifiedTag"
- # ],
- # "key" : "000102030405060708090a0b0c0d0e0f",
- # "iv" : "505152535455565758595a5b",
- # "aad" : "",
- # "msg" : "202122232425262728292a2b2c2d2e2f",
- # "ct" : "eb156d081ed6b6b55f4612f021d87b39",
- # "tag" : "d8847dbc326a066988c77ad3863e6083",
- # "result" : "invalid"
- # },
- #
- # we include everything in the output. the id is useful output so the
- # user can go back to the original test. comment and flags are useful
- # for output in a failing test
- #
- for my $test (@{$group->{tests}}) {
- my ($id, $comment, $iv, $key, $msg, $ct, $aad, $tag, $result) =
- @$test{qw(tcId comment iv key msg ct aad tag result)};
-
- # sanity check; iv and key must have the length declared by the
- # group params.
- unless (
- length_check($id, 'iv', $iv, $iv_size) &&
- length_check($id, 'key', $key, $key_size)) {
- $skipped++;
- next;
- }
-
- # sanity check; tag must have the length declared by the group
- # param, but only for valid tests (invalid tests should be
- # rejected, and so can't produce a tag anyway)
- unless (
- $result eq 'invalid' ||
- length_check($id, 'tag', $tag, $tag_size)) {
- $skipped++;
- next;
- }
-
- # flatten and sort the flags into a single string
- my $flags;
- if ($test->{flags}) {
- $flags = join(' ', sort @{$test->{flags}});
- }
-
- # the completed test record. we'll emit this later once we're
- # finished with the input; the output file is not open yet.
- push @tests, [
- [ id => $id ],
- [ comment => $comment ],
- (defined $flags ? [ flags => $flags ] : ()),
- [ iv => $iv ],
- [ key => $key ],
- [ msg => $msg ],
- [ ct => $ct ],
- [ aad => $aad ],
- [ tag => $tag ],
- [ result => $result ],
- ];
- }
-}
-
-if ($skipped) {
- $ntests -= $skipped;
- warn "W: skipped $skipped tests; new test count: $ntests\n";
-}
-if ($ntests == 0) {
- die "E: no tests extracted, sorry!\n";
-}
-
-my $outfh;
-if ($outfile) {
- open $outfh, '>', $outfile or die "E: $outfile: $!\n";
-} else {
- $outfh = *STDOUT;
-}
-
-# the "header" record has the algorithm and count of tests
-say $outfh "algorithm: $algorithm";
-say $outfh "tests: $ntests";
-
-#
-for my $test (@tests) {
- # blank line is a record separator
- say $outfh "";
-
- # output the test data in a simple record of 'key: value' lines
- #
- # id: 48
- # comment: Flipped bit 63 in tag
- # flags: ModifiedTag
- # iv: 505152535455565758595a5b
- # key: 000102030405060708090a0b0c0d0e0f
- # msg: 202122232425262728292a2b2c2d2e2f
- # ct: eb156d081ed6b6b55f4612f021d87b39
- # aad:
- # tag: d8847dbc326a066988c77ad3863e6083
- # result: invalid
- for my $row (@$test) {
- my ($k, $v) = @$row;
- say $outfh "$k: $v";
- }
-}
-
-close $outfh;
-
-# check that the length of hex string matches the wanted number of bits
-sub length_check {
- my ($id, $name, $hexstr, $wantbits) = @_;
- my $got = length($hexstr)/2;
- my $want = $wantbits/8;
- return 1 if $got == $want;
- my $gotbits = $got*8;
- say
- "W: $id: '$name' has incorrect len, skipping test:\n".
- " got $got bytes ($gotbits bits)\n".
- " want $want bytes ($wantbits bits)\n";
- return;
-}
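
A hedged usage sketch for the removed converter, following its own usage message; the input and output file names are illustrative:

    # convert a Wycheproof AEAD vector file; the version/schema/algorithm/
    # test-count summary is printed to stderr
    ./scripts/convert_wycheproof.pl aes_gcm_test.json aes_gcm.txt
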
diff --git a/scripts/cstyle.pl b/scripts/cstyle.pl
deleted file mode 100755
index 5a32ccc4e988..000000000000
--- a/scripts/cstyle.pl
+++ /dev/null
@@ -1,988 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: CDDL-1.0
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or https://opensource.org/licenses/CDDL-1.0.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-# Copyright 2016 Nexenta Systems, Inc.
-#
-# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
-# Use is subject to license terms.
-#
-# @(#)cstyle 1.58 98/09/09 (from shannon)
-#ident "%Z%%M% %I% %E% SMI"
-#
-# cstyle - check for some common stylistic errors.
-#
-# cstyle is a sort of "lint" for C coding style.
-# It attempts to check for the style used in the
-# kernel, sometimes known as "Bill Joy Normal Form".
-#
-# There's a lot this can't check for, like proper indentation
-# of code blocks. There's also a lot more this could check for.
-#
-# A note to the non perl literate:
-#
-# perl regular expressions are pretty much like egrep
-# regular expressions, with the following special symbols
-#
-# \s any space character
-# \S any non-space character
-# \w any "word" character [a-zA-Z0-9_]
-# \W any non-word character
-# \d a digit [0-9]
-# \D a non-digit
-# \b word boundary (between \w and \W)
-# \B non-word boundary
-#
-
-require 5.0;
-use warnings;
-use IO::File;
-use Getopt::Std;
-use strict;
-
-my $usage =
-"usage: cstyle [-cgpvP] file...
- -c check continuation indentation inside functions
- -g print github actions' workflow commands
- -p perform some of the more picky checks
- -v verbose
- -P check for use of non-POSIX types
-";
-
-my %opts;
-
-if (!getopts("cghpvCP", \%opts)) {
- print $usage;
- exit 2;
-}
-
-my $check_continuation = $opts{'c'};
-my $github_workflow = $opts{'g'} || $ENV{'CI'};
-my $picky = $opts{'p'};
-my $verbose = $opts{'v'};
-my $check_posix_types = $opts{'P'};
-
-my ($filename, $line, $prev); # shared globals
-
-my $fmt;
-my $hdr_comment_start;
-
-if ($verbose) {
- $fmt = "%s: %d: %s\n%s\n";
-} else {
- $fmt = "%s: %d: %s\n";
-}
-
-$hdr_comment_start = qr/^\s*\/\*$/;
-
-# Note, following must be in single quotes so that \s and \w work right.
-my $typename = '(int|char|short|long|unsigned|float|double' .
- '|\w+_t|struct\s+\w+|union\s+\w+|FILE)';
-
-# mapping of old types to POSIX compatible types
-my %old2posix = (
- 'unchar' => 'uchar_t',
- 'ushort' => 'ushort_t',
- 'uint' => 'uint_t',
- 'ulong' => 'ulong_t',
- 'u_int' => 'uint_t',
- 'u_short' => 'ushort_t',
- 'u_long' => 'ulong_t',
- 'u_char' => 'uchar_t',
- 'quad' => 'quad_t'
-);
-
-my $lint_re = qr/\/\*(?:
- NOTREACHED|LINTLIBRARY|VARARGS[0-9]*|
- CONSTCOND|CONSTANTCOND|CONSTANTCONDITION|EMPTY|
- FALLTHRU|FALLTHROUGH|LINTED.*?|PRINTFLIKE[0-9]*|
- PROTOLIB[0-9]*|SCANFLIKE[0-9]*|CSTYLED.*?
- )\*\//x;
-
-my $warlock_re = qr/\/\*\s*(?:
- VARIABLES\ PROTECTED\ BY|
- MEMBERS\ PROTECTED\ BY|
- ALL\ MEMBERS\ PROTECTED\ BY|
- READ-ONLY\ VARIABLES:|
- READ-ONLY\ MEMBERS:|
- VARIABLES\ READABLE\ WITHOUT\ LOCK:|
- MEMBERS\ READABLE\ WITHOUT\ LOCK:|
- LOCKS\ COVERED\ BY|
- LOCK\ UNNEEDED\ BECAUSE|
- LOCK\ NEEDED:|
- LOCK\ HELD\ ON\ ENTRY:|
- READ\ LOCK\ HELD\ ON\ ENTRY:|
- WRITE\ LOCK\ HELD\ ON\ ENTRY:|
- LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:|
- READ\ LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:|
- WRITE\ LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:|
- LOCK\ RELEASED\ AS\ SIDE\ EFFECT:|
- LOCK\ UPGRADED\ AS\ SIDE\ EFFECT:|
- LOCK\ DOWNGRADED\ AS\ SIDE\ EFFECT:|
- FUNCTIONS\ CALLED\ THROUGH\ POINTER|
- FUNCTIONS\ CALLED\ THROUGH\ MEMBER|
- LOCK\ ORDER:
- )/x;
-
-my $err_stat = 0; # exit status
-
-if ($#ARGV >= 0) {
- foreach my $arg (@ARGV) {
- my $fh = new IO::File $arg, "r";
- if (!defined($fh)) {
- printf "%s: can not open\n", $arg;
- } else {
- &cstyle($arg, $fh);
- close $fh;
- }
- }
-} else {
- &cstyle("<stdin>", *STDIN);
-}
-exit $err_stat;
-
-my $no_errs = 0; # set for CSTYLED-protected lines
-
-sub err($) {
- my ($error) = @_;
- unless ($no_errs) {
- if ($verbose) {
- printf $fmt, $filename, $., $error, $line;
- } else {
- printf $fmt, $filename, $., $error;
- }
- if ($github_workflow) {
- printf "::error file=%s,line=%s::%s\n", $filename, $., $error;
- }
- $err_stat = 1;
- }
-}
-
-sub err_prefix($$) {
- my ($prevline, $error) = @_;
- my $out = $prevline."\n".$line;
- unless ($no_errs) {
- if ($verbose) {
- printf $fmt, $filename, $., $error, $out;
- } else {
- printf $fmt, $filename, $., $error;
- }
- $err_stat = 1;
- }
-}
-
-sub err_prev($) {
- my ($error) = @_;
- unless ($no_errs) {
- if ($verbose) {
- printf $fmt, $filename, $. - 1, $error, $prev;
- } else {
- printf $fmt, $filename, $. - 1, $error;
- }
- $err_stat = 1;
- }
-}
-
-sub cstyle($$) {
-
-my ($fn, $filehandle) = @_;
-$filename = $fn; # share it globally
-
-my $in_cpp = 0;
-my $next_in_cpp = 0;
-
-my $in_comment = 0;
-my $comment_done = 0;
-my $in_warlock_comment = 0;
-my $in_macro_call = 0;
-my $in_function = 0;
-my $in_function_header = 0;
-my $function_header_full_indent = 0;
-my $in_declaration = 0;
-my $note_level = 0;
-my $nextok = 0;
-my $nocheck = 0;
-
-my $in_string = 0;
-
-my ($okmsg, $comment_prefix);
-
-$line = '';
-$prev = '';
-reset_indent();
-
-line: while (<$filehandle>) {
- s/\r?\n$//; # strip return and newline
-
- # save the original line, then remove all text from within
- # double or single quotes, we do not want to check such text.
-
- $line = $_;
-
- #
- # C allows strings to be continued with a backslash at the end of
- # the line. We translate that into a quoted string on the previous
- # line followed by an initial quote on the next line.
- #
- # (we assume that no-one will use backslash-continuation with character
- # constants)
- #
- $_ = '"' . $_ if ($in_string && !$nocheck && !$in_comment);
-
- #
- # normal strings and characters
- #
- s/'([^\\']|\\[^xX0]|\\0[0-9]*|\\[xX][0-9a-fA-F]*)'/''/g;
- s/"([^\\"]|\\.)*"/\"\"/g;
-
- #
- # detect string continuation
- #
- if ($nocheck || $in_comment) {
- $in_string = 0;
- } else {
- #
- # Now that all full strings are replaced with "", we check
- # for unfinished strings continuing onto the next line.
- #
- $in_string =
- (s/([^"](?:"")*)"([^\\"]|\\.)*\\$/$1""/ ||
- s/^("")*"([^\\"]|\\.)*\\$/""/);
- }
-
- #
- # figure out if we are in a cpp directive
- #
- $in_cpp = $next_in_cpp || /^\s*#/; # continued or started
- $next_in_cpp = $in_cpp && /\\$/; # only if continued
-
- # strip off trailing backslashes, which appear in long macros
- s/\s*\\$//;
-
- # an /* END CSTYLED */ comment ends a no-check block.
- if ($nocheck) {
- if (/\/\* *END *CSTYLED *\*\//) {
- $nocheck = 0;
- } else {
- reset_indent();
- next line;
- }
- }
-
- # a /*CSTYLED*/ comment indicates that the next line is ok.
- if ($nextok) {
- if ($okmsg) {
- err($okmsg);
- }
- $nextok = 0;
- $okmsg = 0;
- if (/\/\* *CSTYLED.*\*\//) {
- /^.*\/\* *CSTYLED *(.*) *\*\/.*$/;
- $okmsg = $1;
- $nextok = 1;
- }
- $no_errs = 1;
- } elsif ($no_errs) {
- $no_errs = 0;
- }
-
- # check length of line.
- # first, a quick check to see if there is any chance of being too long.
- if (($line =~ tr/\t/\t/) * 7 + length($line) > 80) {
- # yes, there is a chance.
- # replace tabs with spaces and check again.
- my $eline = $line;
- 1 while $eline =~
- s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e;
- if (length($eline) > 80) {
- err("line > 80 characters");
- }
- }
-
- # ignore NOTE(...) annotations (assumes NOTE is on lines by itself).
- if ($note_level || /\b_?NOTE\s*\(/) { # if in NOTE or this is NOTE
- s/[^()]//g; # eliminate all non-parens
- $note_level += s/\(//g - length; # update paren nest level
- next;
- }
-
- # a /* BEGIN CSTYLED */ comment starts a no-check block.
- if (/\/\* *BEGIN *CSTYLED *\*\//) {
- $nocheck = 1;
- }
-
- # a /*CSTYLED*/ comment indicates that the next line is ok.
- if (/\/\* *CSTYLED.*\*\//) {
- /^.*\/\* *CSTYLED *(.*) *\*\/.*$/;
- $okmsg = $1;
- $nextok = 1;
- }
- if (/\/\/ *CSTYLED/) {
- /^.*\/\/ *CSTYLED *(.*)$/;
- $okmsg = $1;
- $nextok = 1;
- }
-
- # universal checks; apply to everything
- if (/\t +\t/) {
- err("spaces between tabs");
- }
- if (/ \t+ /) {
- err("tabs between spaces");
- }
- if (/\s$/) {
- err("space or tab at end of line");
- }
- if (/[^ \t(]\/\*/ && !/\w\(\/\*.*\*\/\);/) {
- err("comment preceded by non-blank");
- }
- if (/ARGSUSED/) {
- err("ARGSUSED directive");
- }
-
- # is this the beginning or ending of a function?
- # (not if "struct foo\n{\n")
- if (/^\{$/ && $prev =~ /\)\s*(const\s*)?(\/\*.*\*\/\s*)?\\?$/) {
- $in_function = 1;
- $in_declaration = 1;
- $in_function_header = 0;
- $function_header_full_indent = 0;
- $prev = $line;
- next line;
- }
- if (/^\}\s*(\/\*.*\*\/\s*)*$/) {
- if ($prev =~ /^\s*return\s*;/) {
- err_prev("unneeded return at end of function");
- }
- $in_function = 0;
- reset_indent(); # we don't check between functions
- $prev = $line;
- next line;
- }
- if ($in_function_header && ! /^ (\w|\.)/ ) {
- if (/^\{\}$/ # empty functions
- || /;/ #run function with multiline arguments
- || /#/ #preprocessor commands
- || /^[^\s\\]*\(.*\)$/ #functions without ; at the end
- || /^$/ #function declaration can't have empty line
- ) {
- $in_function_header = 0;
- $function_header_full_indent = 0;
- } elsif ($prev =~ /^__attribute__/) { #__attribute__((*))
- $in_function_header = 0;
- $function_header_full_indent = 0;
- $prev = $line;
- next line;
- } elsif ($picky && ! (/^\t/ && $function_header_full_indent != 0)) {
-
- err("continuation line should be indented by 4 spaces");
- }
- }
-
- # If this looks like a top-level macro invocation, remember it so we
- # don't mistake it for a function declaration below.
- if (/^[A-Za-z_][A-Za-z_0-9]*\(/) {
- $in_macro_call = 1;
- }
-
- #
- # If this matches something of form "foo(", it's probably a function
- # definition, unless it ends with ") bar;", in which case it's a declaration
- # that uses a macro to generate the type.
- #
- if (!$in_macro_call && /^\w+\(/ && !/\) \w+;/) {
- $in_function_header = 1;
- if (/\($/) {
- $function_header_full_indent = 1;
- }
- }
- if ($in_function_header && /^\{$/) {
- $in_function_header = 0;
- $function_header_full_indent = 0;
- $in_function = 1;
- }
- if ($in_function_header && /\);$/) {
- $in_function_header = 0;
- $function_header_full_indent = 0;
- }
- if ($in_function_header && /\{$/ ) {
- if ($picky) {
- err("opening brace on same line as function header");
- }
- $in_function_header = 0;
- $function_header_full_indent = 0;
- $in_function = 1;
- next line;
- }
-
- if ($in_warlock_comment && /\*\//) {
- $in_warlock_comment = 0;
- $prev = $line;
- next line;
- }
-
- # a blank line terminates the declarations within a function.
- # XXX - but still a problem in sub-blocks.
- if ($in_declaration && /^$/) {
- $in_declaration = 0;
- }
-
- if ($comment_done) {
- $in_comment = 0;
- $comment_done = 0;
- }
-	# does this look like the start of a block comment?
- if (/$hdr_comment_start/) {
- if (!/^\t*\/\*/) {
- err("block comment not indented by tabs");
- }
- $in_comment = 1;
- /^(\s*)\//;
- $comment_prefix = $1;
- $prev = $line;
- next line;
- }
- # are we still in the block comment?
- if ($in_comment) {
- if (/^$comment_prefix \*\/$/) {
- $comment_done = 1;
- } elsif (/\*\//) {
- $comment_done = 1;
- err("improper block comment close");
- } elsif (!/^$comment_prefix \*[ \t]/ &&
- !/^$comment_prefix \*$/) {
- err("improper block comment");
- }
- }
-
- # check for errors that might occur in comments and in code.
-
- # allow spaces to be used to draw pictures in all comments.
- if (/[^ ] / && !/".* .*"/ && !$in_comment) {
- err("spaces instead of tabs");
- }
- if (/^ / && !/^ \*[ \t\/]/ && !/^ \*$/ &&
- (!/^ (\w|\.)/ || $in_function != 0)) {
- err("indent by spaces instead of tabs");
- }
- if (/^\t+ [^ \t\*]/ || /^\t+ \S/ || /^\t+ \S/) {
- err("continuation line not indented by 4 spaces");
- }
- if (/$warlock_re/ && !/\*\//) {
- $in_warlock_comment = 1;
- $prev = $line;
- next line;
- }
- if (/^\s*\/\*./ && !/^\s*\/\*.*\*\// && !/$hdr_comment_start/) {
- err("improper first line of block comment");
- }
-
- if ($in_comment) { # still in comment, don't do further checks
- $prev = $line;
- next line;
- }
-
- if ((/[^(]\/\*\S/ || /^\/\*\S/) && !/$lint_re/) {
- err("missing blank after open comment");
- }
- if (/\S\*\/[^)]|\S\*\/$/ && !/$lint_re/) {
- err("missing blank before close comment");
- }
- # check for unterminated single line comments, but allow them when
- # they are used to comment out the argument list of a function
- # declaration.
- if (/\S.*\/\*/ && !/\S.*\/\*.*\*\// && !/\(\/\*/) {
- err("unterminated single line comment");
- }
-
- if (/^(#else|#endif|#include)(.*)$/) {
- $prev = $line;
- if ($picky) {
- my $directive = $1;
- my $clause = $2;
- # Enforce ANSI rules for #else and #endif: no noncomment
- # identifiers are allowed after #endif or #else. Allow
- # C++ comments since they seem to be a fact of life.
- if ((($1 eq "#endif") || ($1 eq "#else")) &&
- ($clause ne "") &&
- (!($clause =~ /^\s+\/\*.*\*\/$/)) &&
- (!($clause =~ /^\s+\/\/.*$/))) {
- err("non-comment text following " .
- "$directive (or malformed $directive " .
- "directive)");
- }
- }
- next line;
- }
-
- #
- # delete any comments and check everything else. Note that
- # ".*?" is a non-greedy match, so that we don't get confused by
- # multiple comments on the same line.
- #
- s/\/\*.*?\*\///g;
- s/\/\/(?:\s.*)?$//; # Valid C++ comments
-
- # After stripping correctly spaced comments, check for (and strip) comments
- # without a blank. By checking this after clearing out C++ comments that
- # correctly have a blank, we guarantee URIs in a C++ comment will not cause
- # an error.
- if (s!//.*$!!) { # C++ comments
- err("missing blank after start comment");
- }
-
- # delete any trailing whitespace; we have already checked for that.
- s/\s*$//;
-
- # following checks do not apply to text in comments.
-
- if (/[^<>\s][!<>=]=/ || /[^<>][!<>=]=[^\s,]/ ||
- (/[^->]>[^,=>\s]/ && !/[^->]>$/) ||
- (/[^<]<[^,=<\s]/ && !/[^<]<$/) ||
- /[^<\s]<[^<]/ || /[^->\s]>[^>]/) {
- err("missing space around relational operator");
- }
- if (/\S>>=/ || /\S<<=/ || />>=\S/ || /<<=\S/ || /\S[-+*\/&|^%]=/ ||
- (/[^-+*\/&|^%!<>=\s]=[^=]/ && !/[^-+*\/&|^%!<>=\s]=$/) ||
- (/[^!<>=]=[^=\s]/ && !/[^!<>=]=$/)) {
- # XXX - should only check this for C++ code
- # XXX - there are probably other forms that should be allowed
- if (!/\soperator=/) {
- err("missing space around assignment operator");
- }
- }
- if (/[,;]\S/ && !/\bfor \(;;\)/) {
- err("comma or semicolon followed by non-blank");
- }
- # allow "for" statements to have empty "while" clauses
- # allow macro invocations to have empty parameters
- if (/\s[,;]/ && !/^[\t]+;$/ &&
- !($in_macro_call || /^\s*for \([^;]*; ;[^;]*\)/)) {
- err("comma or semicolon preceded by blank");
- }
- if (/^\s*(&&|\|\|)/) {
- err("improper boolean continuation");
- }
- if (/\S *(&&|\|\|)/ || /(&&|\|\|) *\S/) {
- err("more than one space around boolean operator");
- }
- if (/\b(for|if|while|switch|sizeof|return|case)\(/) {
- err("missing space between keyword and paren");
- }
- if (/(\b(for|if|while|switch|return)\b.*){2,}/ && !/^#define/) {
- # multiple "case" and "sizeof" allowed
- err("more than one keyword on line");
- }
- if (/\b(for|if|while|switch|sizeof|return|case)\s\s+\(/ &&
- !/^#if\s+\(/) {
- err("extra space between keyword and paren");
- }
- # try to detect "func (x)" but not "if (x)" or
- # "#define foo (x)" or "int (*func)();"
- if (/\w\s\(/) {
- my $s = $_;
- # strip off all keywords on the line
- s/\b(for|if|while|switch|return|case|sizeof)\s\(/XXX(/g;
- s/#elif\s\(/XXX(/g;
- s/^#define\s+\w+\s+\(/XXX(/;
- # do not match things like "void (*f)();"
- # or "typedef void (func_t)();"
- s/\w\s\(+\*/XXX(*/g;
- s/\b($typename|void)\s+\(+/XXX(/og;
- if (/\w\s\(/) {
- err("extra space between function name and left paren");
- }
- $_ = $s;
- }
- # try to detect "int foo(x)", but not "extern int foo(x);"
- # XXX - this still trips over too many legitimate things,
- # like "int foo(x,\n\ty);"
-# if (/^(\w+(\s|\*)+)+\w+\(/ && !/\)[;,](\s|)*$/ &&
-# !/^(extern|static)\b/) {
-# err("return type of function not on separate line");
-# }
- # this is a close approximation
- if (/^(\w+(\s|\*)+)+\w+\(.*\)(\s|)*$/ &&
- !/^(extern|static)\b/) {
- err("return type of function not on separate line");
- }
- if (/^#define /) {
- err("#define followed by space instead of tab");
- }
- if (/^\s*return\W[^;]*;/ && !/^\s*return\s*\(.*\);/) {
- err("unparenthesized return expression");
- }
- if (/\bsizeof\b/ && !/\bsizeof\s*\(.*\)/) {
- err("unparenthesized sizeof expression");
- }
- if (/\(\s/) {
- err("whitespace after left paren");
- }
- # Allow "for" statements to have empty "continue" clauses.
- # Allow right paren on its own line unless we're being picky (-p).
- if (/\s\)/ && !/^\s*for \([^;]*;[^;]*; \)/ && ($picky || !/^\s*\)/)) {
- err("whitespace before right paren");
- }
- if (/^\s*\(void\)[^ ]/) {
- err("missing space after (void) cast");
- }
- if (/\S\{/ && !/\{\{/) {
- err("missing space before left brace");
- }
- if ($in_function && /^\s+\{/ &&
- ($prev =~ /\)\s*$/ || $prev =~ /\bstruct\s+\w+$/)) {
- err("left brace starting a line");
- }
- if (/\}(else|while)/) {
- err("missing space after right brace");
- }
- if (/\}\s\s+(else|while)/) {
- err("extra space after right brace");
- }
- if (/\b_VOID\b|\bVOID\b|\bSTATIC\b/) {
- err("obsolete use of VOID or STATIC");
- }
- if (/\b$typename\*/o) {
- err("missing space between type name and *");
- }
- if (/^\s+#/) {
- err("preprocessor statement not in column 1");
- }
- if (/^#\s/) {
- err("blank after preprocessor #");
- }
- if (/!\s*(strcmp|strncmp|bcmp)\s*\(/) {
- err("don't use boolean ! with comparison functions");
- }
-
- #
- # We completely ignore, for purposes of indentation:
- # * lines outside of functions
- # * preprocessor lines
- #
- if ($check_continuation && $in_function && !$in_cpp) {
- process_indent($_);
- }
- if ($picky) {
- # try to detect spaces after casts, but allow (e.g.)
- # "sizeof (int) + 1", "void (*funcptr)(int) = foo;", and
- # "int foo(int) __NORETURN;"
- if ((/^\($typename( \*+)?\)\s/o ||
- /\W\($typename( \*+)?\)\s/o) &&
- !/sizeof\s*\($typename( \*)?\)\s/o &&
- !/\($typename( \*+)?\)\s+=[^=]/o) {
- err("space after cast");
- }
- if (/\b$typename\s*\*\s/o &&
- !/\b$typename\s*\*\s+const\b/o) {
- err("unary * followed by space");
- }
- }
- if ($check_posix_types && !$in_macro_call) {
- # try to detect old non-POSIX types.
- # POSIX requires all non-standard typedefs to end in _t,
- # but historically these have been used.
- #
- # We don't check inside macro invocations because macros have
-		# legitimate uses for these names in function generators.
- if (/\b(unchar|ushort|uint|ulong|u_int|u_short|u_long|u_char|quad)\b/) {
- err("non-POSIX typedef $1 used: use $old2posix{$1} instead");
- }
- }
- if (/^\s*else\W/) {
- if ($prev =~ /^\s*\}$/) {
- err_prefix($prev,
- "else and right brace should be on same line");
- }
- }
-
- # Macro invocations end with a closing paren, and possibly a semicolon.
- # We do this check down here to make sure all the regular checks are
- # applied to calls that appear entirely on a single line.
- if ($in_macro_call && /\);?$/) {
- $in_macro_call = 0;
- }
-
- $prev = $line;
-}
-
-if ($prev eq "") {
- err("last line in file is blank");
-}
-
-}
-
-#
-# Continuation-line checking
-#
-# The rest of this file contains the code for the continuation checking
-# engine. It's a pretty simple state machine which tracks the expression
-# depth (unmatched '('s and '['s).
-#
-# Keep in mind that the argument to process_indent() has already been heavily
-# processed; all comments have been replaced by control-A, and the contents of
-# strings and character constants have been elided.
-#
-
-my $cont_in; # currently inside of a continuation
-my $cont_off; # skipping an initializer or definition
-my $cont_noerr; # suppress cascading errors
-my $cont_start; # the line being continued
-my $cont_base; # the base indentation
-my $cont_first; # this is the first line of a statement
-my $cont_multiseg; # this continuation has multiple segments
-
-my $cont_special; # this is a C statement (if, for, etc.)
-my $cont_macro; # this is a macro
-my $cont_case; # this is a multi-line case
-
-my @cont_paren; # the stack of unmatched ( and [s we've seen
-
-sub
-reset_indent()
-{
- $cont_in = 0;
- $cont_off = 0;
-}
-
-sub
-delabel($)
-{
- #
- # replace labels with tabs. Note that there may be multiple
- # labels on a line.
- #
- local $_ = $_[0];
-
- while (/^(\t*)( *(?:(?:\w+\s*)|(?:case\b[^:]*)): *)(.*)$/) {
- my ($pre_tabs, $label, $rest) = ($1, $2, $3);
- $_ = $pre_tabs;
- while ($label =~ s/^([^\t]*)(\t+)//) {
- $_ .= "\t" x (length($2) + length($1) / 8);
- }
- $_ .= ("\t" x (length($label) / 8)).$rest;
- }
-
- return ($_);
-}
-
-sub
-process_indent($)
-{
- require strict;
- local $_ = $_[0]; # preserve the global $_
-
- s///g; # No comments
- s/\s+$//; # Strip trailing whitespace
-
- return if (/^$/); # skip empty lines
-
- # regexps used below; keywords taking (), macros, and continued cases
- my $special = '(?:(?:\}\s*)?else\s+)?(?:if|for|while|switch)\b';
- my $macro = '[A-Z_][A-Z_0-9]*\(';
- my $case = 'case\b[^:]*$';
-
- # skip over enumerations, array definitions, initializers, etc.
- if ($cont_off <= 0 && !/^\s*$special/ &&
- (/(?:(?:\b(?:enum|struct|union)\s*[^\{]*)|(?:\s+=\s*))\{/ ||
- (/^\s*\{/ && $prev =~ /=\s*(?:\/\*.*\*\/\s*)*$/))) {
- $cont_in = 0;
- $cont_off = tr/{/{/ - tr/}/}/;
- return;
- }
- if ($cont_off) {
- $cont_off += tr/{/{/ - tr/}/}/;
- return;
- }
-
- if (!$cont_in) {
- $cont_start = $line;
-
- if (/^\t* /) {
- err("non-continuation indented 4 spaces");
- $cont_noerr = 1; # stop reporting
- }
- $_ = delabel($_); # replace labels with tabs
-
- # check if the statement is complete
- return if (/^\s*\}?$/);
- return if (/^\s*\}?\s*else\s*\{?$/);
- return if (/^\s*do\s*\{?$/);
- return if (/\{$/);
- return if (/\}[,;]?$/);
-
- # Allow macros on their own lines
- return if (/^\s*[A-Z_][A-Z_0-9]*$/);
-
- # cases we don't deal with, generally non-kosher
- if (/\{/) {
- err("stuff after {");
- return;
- }
-
- # Get the base line, and set up the state machine
- /^(\t*)/;
- $cont_base = $1;
- $cont_in = 1;
- @cont_paren = ();
- $cont_first = 1;
- $cont_multiseg = 0;
-
- # certain things need special processing
- $cont_special = /^\s*$special/? 1 : 0;
- $cont_macro = /^\s*$macro/? 1 : 0;
- $cont_case = /^\s*$case/? 1 : 0;
- } else {
- $cont_first = 0;
-
- # Strings may be pulled back to an earlier (half-)tabstop
- unless ($cont_noerr || /^$cont_base / ||
- (/^\t*(?: )?(?:gettext\()?\"/ && !/^$cont_base\t/)) {
- err_prefix($cont_start,
- "continuation should be indented 4 spaces");
- }
- }
-
- my $rest = $_; # keeps the remainder of the line
-
- #
- # The split matches 0 characters, so that each 'special' character
- # is processed separately. Parens and brackets are pushed and
- # popped off the @cont_paren stack. For normal processing, we wait
- # until a ; or { terminates the statement. "special" processing
- # (if/for/while/switch) is allowed to stop when the stack empties,
- # as is macro processing. Case statements are terminated with a :
- # and an empty paren stack.
- #
- foreach $_ (split /[^\(\)\[\]\{\}\;\:]*/) {
- next if (length($_) == 0);
-
- # rest contains the remainder of the line
- my $rxp = "[^\Q$_\E]*\Q$_\E";
- $rest =~ s/^$rxp//;
-
- if (/\(/ || /\[/) {
- push @cont_paren, $_;
- } elsif (/\)/ || /\]/) {
- my $cur = $_;
- tr/\)\]/\(\[/;
-
- my $old = (pop @cont_paren);
- if (!defined($old)) {
- err("unexpected '$cur'");
- $cont_in = 0;
- last;
- } elsif ($old ne $_) {
- err("'$cur' mismatched with '$old'");
- $cont_in = 0;
- last;
- }
-
- #
- # If the stack is now empty, do special processing
- # for if/for/while/switch and macro statements.
- #
- next if (@cont_paren != 0);
- if ($cont_special) {
- if ($rest =~ /^\s*\{?$/) {
- $cont_in = 0;
- last;
- }
- if ($rest =~ /^\s*;$/) {
- err("empty if/for/while body ".
- "not on its own line");
- $cont_in = 0;
- last;
- }
- if (!$cont_first && $cont_multiseg == 1) {
- err_prefix($cont_start,
- "multiple statements continued ".
- "over multiple lines");
- $cont_multiseg = 2;
- } elsif ($cont_multiseg == 0) {
- $cont_multiseg = 1;
- }
- # We've finished this section, start
- # processing the next.
- goto section_ended;
- }
- if ($cont_macro) {
- if ($rest =~ /^$/) {
- $cont_in = 0;
- last;
- }
- }
- } elsif (/\;/) {
- if ($cont_case) {
- err("unexpected ;");
- } elsif (!$cont_special) {
- err("unexpected ;") if (@cont_paren != 0);
- if (!$cont_first && $cont_multiseg == 1) {
- err_prefix($cont_start,
- "multiple statements continued ".
- "over multiple lines");
- $cont_multiseg = 2;
- } elsif ($cont_multiseg == 0) {
- $cont_multiseg = 1;
- }
- if ($rest =~ /^$/) {
- $cont_in = 0;
- last;
- }
- if ($rest =~ /^\s*special/) {
- err("if/for/while/switch not started ".
- "on its own line");
- }
- goto section_ended;
- }
- } elsif (/\{/) {
- err("{ while in parens/brackets") if (@cont_paren != 0);
- err("stuff after {") if ($rest =~ /[^\s}]/);
- $cont_in = 0;
- last;
- } elsif (/\}/) {
- err("} while in parens/brackets") if (@cont_paren != 0);
- if (!$cont_special && $rest !~ /^\s*(while|else)\b/) {
- if ($rest =~ /^$/) {
- err("unexpected }");
- } else {
- err("stuff after }");
- }
- $cont_in = 0;
- last;
- }
- } elsif (/\:/ && $cont_case && @cont_paren == 0) {
- err("stuff after multi-line case") if ($rest !~ /$^/);
- $cont_in = 0;
- last;
- }
- next;
-section_ended:
- # End of a statement or if/while/for loop. Reset
- # cont_special and cont_macro based on the rest of the
- # line.
- $cont_special = ($rest =~ /^\s*$special/)? 1 : 0;
- $cont_macro = ($rest =~ /^\s*$macro/)? 1 : 0;
- $cont_case = 0;
- next;
- }
- $cont_noerr = 0 if (!$cont_in);
-}
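
A usage sketch based on the usage text above; the source file name is illustrative:

    # picky mode plus continuation-indent and non-POSIX-type checks
    ./scripts/cstyle.pl -c -p -P some_file.c
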
diff --git a/scripts/dkms.mkconf b/scripts/dkms.mkconf
deleted file mode 100755
index 046ce9edcefe..000000000000
--- a/scripts/dkms.mkconf
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/sh
-
-PROG=$0
-
-pkgcfg=/etc/sysconfig/zfs
-
-while getopts "n:v:c:f:" opt; do
- case $opt in
- n) pkgname=$OPTARG ;;
- v) pkgver=$OPTARG ;;
- c) pkgcfg=$OPTARG ;;
- f) filename=$OPTARG ;;
- *) err=1 ;;
- esac
-done
-
-if [ -z "${pkgname}" ] || [ -z "${pkgver}" ] || [ -z "${filename}" ] ||
- [ -n "${err}" ]; then
- echo "Usage: $PROG -n <pkgname> -v <pkgver> -c <pkgcfg> -f <filename>"
- exit 1
-fi
-
-exec cat >"${filename}" <<EOF
-PACKAGE_NAME="${pkgname}"
-PACKAGE_VERSION="${pkgver}"
-PACKAGE_CONFIG="${pkgcfg}"
-NO_WEAK_MODULES="yes"
-PRE_BUILD="configure
- --disable-dependency-tracking
- --prefix=/usr
- --with-config=kernel
- --with-linux=\$(
- if [ -e "\${kernel_source_dir/%build/source}" ]
- then
- echo "\${kernel_source_dir/%build/source}"
- else
- echo "\${kernel_source_dir}"
- fi
- )
- --with-linux-obj="\${kernel_source_dir}"
- \$(
- [[ -n \"\${ICP_ROOT}\" ]] && \\
- {
- echo --with-qat=\"\${ICP_ROOT}\"
- }
- )
- \$(
- [[ -r \${PACKAGE_CONFIG} ]] \\
- && source \${PACKAGE_CONFIG} \\
- && shopt -q -s extglob \\
- && \\
- {
- if [[ \${ZFS_DKMS_ENABLE_DEBUG,,} == @(y|yes) ]]
- then
- echo --enable-debug
- fi
- if [[ \${ZFS_DKMS_ENABLE_DEBUGINFO,,} == @(y|yes) ]]
- then
- echo --enable-debuginfo
- fi
- }
- )
-"
-POST_BUILD="scripts/dkms.postbuild
- -n \${PACKAGE_NAME}
- -v \${PACKAGE_VERSION}
- -a \${arch}
- -k \${kernelver}
- -t \${dkms_tree}
-"
-AUTOINSTALL="yes"
-MAKE[0]="make"
-STRIP[0]="\$(
- [[ -r \${PACKAGE_CONFIG} ]] \\
- && source \${PACKAGE_CONFIG} \\
- && shopt -q -s extglob \\
- && [[ \${ZFS_DKMS_DISABLE_STRIP,,} == @(y|yes) ]] \\
- && echo -n no
-)"
-STRIP[1]="\${STRIP[0]}"
-BUILT_MODULE_NAME[0]="zfs"
-BUILT_MODULE_LOCATION[0]="module/"
-DEST_MODULE_LOCATION[0]="/extra"
-BUILT_MODULE_NAME[1]="spl"
-BUILT_MODULE_LOCATION[1]="module/"
-DEST_MODULE_LOCATION[1]="/extra"
-EOF
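
A usage sketch for the removed dkms.conf generator; the package version and output path are hypothetical:

    ./scripts/dkms.mkconf -n zfs -v 2.2.2 -c /etc/sysconfig/zfs -f dkms.conf
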
diff --git a/scripts/dkms.postbuild b/scripts/dkms.postbuild
deleted file mode 100755
index a2ceff64a40b..000000000000
--- a/scripts/dkms.postbuild
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh
-
-PROG=$0
-
-while getopts "a:k:n:t:v:" opt; do
- case $opt in
- a) arch=$OPTARG ;;
- k) kver=$OPTARG ;;
- n) pkgname=$OPTARG ;;
- t) tree=$OPTARG ;;
- v) pkgver=$OPTARG ;;
- *) err=1 ;;
- esac
-done
-
-if [ -z "${arch}" ] || [ -z "${kver}" ] || [ -z "${pkgname}" ] || \
- [ -z "${tree}" ] || [ -z "${pkgver}" ] || [ -n "${err}" ]; then
- echo "Usage: $PROG -a <arch> -k <kver> -n <pkgname>" \
- "-t <tree> -v <pkgver>"
- exit 1
-fi
-
-exec cp "${tree}/${pkgname}/${pkgver}/build/zfs_config.h" \
- "${tree}/${pkgname}/${pkgver}/build/module/Module.symvers" \
- "${tree}/${pkgname}/${pkgver}/${kver}/${arch}/"
diff --git a/scripts/kmodtool b/scripts/kmodtool
deleted file mode 100755
index e24f206e698a..000000000000
--- a/scripts/kmodtool
+++ /dev/null
@@ -1,569 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: MIT
-# shellcheck disable=SC2086,SC2295
-
-# kmodtool - Helper script for building kernel module RPMs
-# Copyright (c) 2003-2012 Ville Skyttä <ville.skytta@iki.fi>,
-# Thorsten Leemhuis <fedora@leemhuis.info>
-# Nicolas Chauvet <kwizart@gmail.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-shopt -s extglob
-
-myprog="kmodtool-${repo}"
-myver="0.12.1"
-
-kmodname=
-build_kernels="current"
-kernels_known_variants=
-kernel_versions=
-kernel_versions_to_build_for=
-prefix=
-filterfile=
-target=
-buildroot=
-dashvariant=
-
-error_out()
-{
- local errorlevel=${1}
- shift
- echo "Error: $*" >&2
- # the next line is not multi-line safe -- not needed *yet*
- echo "%global kmodtool_check echo \"kmodtool error: $*\"; exit ${errorlevel};"
- exit "${errorlevel}"
-}
-
-print_rpmtemplate_header()
-{
- echo
- echo "%global kmodinstdir_prefix ${prefix}/lib/modules/"
- echo "%global kmodinstdir_postfix /extra/${kmodname}/"
- echo "%global kernel_versions ${kernel_versions}"
- echo
-}
-
-print_akmodtemplate ()
-{
- echo
- cat <<EOF
-
-%global akmod_install mkdir -p \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/; \\\
-LANG=C rpmbuild --define "_sourcedir %{_sourcedir}" \\\
---define "_srcrpmdir \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/" \\\
--bs --nodeps %{_specdir}/%{name}.spec ; \\\
-ln -s \$(ls \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/) \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/${kmodname}-kmod.latest
-
-%package -n akmod-${kmodname}
-Summary: Akmod package for ${kmodname} kernel module(s)
-Group: System Environment/Kernel
-Requires: kmodtool
-Requires: akmods
-%{?AkmodsBuildRequires:Requires: %{AkmodsBuildRequires}}
-# same requires and provides as a kmods package would have
-Requires: ${kmodname}-kmod-common >= %{?epoch:%{epoch}:}%{version}
-Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
-EOF
-
- cat <<EOF
-
-%description -n akmod-${kmodname}
-This package provides the akmod package for the ${kmodname} kernel modules.
-
-%posttrans -n akmod-${kmodname}
-nohup ${prefix}/sbin/akmods --from-akmod-posttrans --akmod ${kmodname} &> /dev/null &
-
-%files -n akmod-${kmodname}
-%defattr(-,root,root,-)
-%{_usrsrc}/akmods/*
-
-EOF
-}
-
-print_akmodmeta ()
-{
- cat <<EOF
-%package -n kmod-${kmodname}
-Summary: Metapackage which tracks in ${kmodname} kernel module for newest kernel${dashvariant}
-Group: System Environment/Kernel
-
-Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
-Provides: kmod-${kmodname}-xen = %{?epoch:%{epoch}:}%{version}-%{release}
-Provides: kmod-${kmodname}-smp = %{?epoch:%{epoch}:}%{version}-%{release}
-Provides: kmod-${kmodname}-PAE = %{?epoch:%{epoch}:}%{version}-%{release}
-Requires: akmod-${kmodname} = %{?epoch:%{epoch}:}%{version}-%{release}
-EOF
-
-cat <<EOF
-
-%description -n kmod-${kmodname}${dashvariant}
-This is a meta-package without payload which sole purpose is to require the
-${kmodname} kernel module(s) for the newest kernel${dashvariant},
-to make sure you get it together with a new kernel.
-
-%files -n kmod-${kmodname}${dashvariant}
-%defattr(644,root,root,755)
-EOF
-}
-
-print_rpmtemplate_per_kmodpkg ()
-{
- if [[ "${1}" = "--custom" ]]; then
- shift
- local customkernel=true
- elif [[ "${1}" = "--redhat" ]]; then
- # this is needed for akmods
- shift
- local redhatkernel=true
- fi
-
- local kernel_uname_r=${1}
- local kernel_variant="${2:+-${2}}"
-
- # Detect depmod install location
- local depmod_path=/sbin/depmod
- if [[ ! -f "${depmod_path}" ]]; then
- depmod_path=/usr/sbin/depmod
- fi
-
- # first part
- cat <<EOF
-%package -n kmod-${kmodname}-${kernel_uname_r}
-Summary: ${kmodname} kernel module(s) for ${kernel_uname_r}
-Group: System Environment/Kernel
-Provides: kernel-modules-for-kernel = ${kernel_uname_r}
-Provides: kmod-${kmodname}-uname-r = ${kernel_uname_r}
-Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
-Requires: ${kmodname}-kmod-common >= %{?epoch:%{epoch}:}%{version}
-
-%if 0%{?rhel} == 6 || 0%{?centos} == 6
-Requires(post): module-init-tools
-Requires(postun): module-init-tools
-%else
-Requires(post): kmod
-Requires(postun): kmod
-%endif
-EOF
-
- # second part
- if [[ -z "${customkernel}" ]]; then
- cat <<EOF
-Requires: kernel-uname-r = ${kernel_uname_r}
-BuildRequires: kernel-devel-uname-r = ${kernel_uname_r}
-%{?KmodsRequires:Requires: %{KmodsRequires}-uname-r = ${kernel_uname_r}}
-%{?KmodsRequires:BuildRequires: %{KmodsRequires}-uname-r = ${kernel_uname_r}}
-%post -n kmod-${kmodname}-${kernel_uname_r}
-if [[ -f "/boot/System.map-${kernel_uname_r}" ]]; then
- ${prefix}${depmod_path} -aeF /boot/System.map-${kernel_uname_r} ${kernel_uname_r} > /dev/null || :
-elif [[ -f "/lib/modules/${kernel_uname_r}/System.map" ]]; then
- ${prefix}${depmod_path} -aeF /lib/modules/${kernel_uname_r}/System.map ${kernel_uname_r} > /dev/null || :
-else
- ${prefix}${depmod_path} -ae ${kernel_uname_r} &> /dev/null || :
-fi
-%postun -n kmod-${kmodname}-${kernel_uname_r}
-if [[ -f "/boot/System.map-${kernel_uname_r}" ]]; then
- ${prefix}${depmod_path} -aF /boot/System.map-${kernel_uname_r} ${kernel_uname_r} &> /dev/null || :
-elif [[ -f "/lib/modules/${kernel_uname_r}/System.map" ]]; then
- ${prefix}${depmod_path} -aF /lib/modules/${kernel_uname_r}/System.map ${kernel_uname_r} &> /dev/null || :
-else
- ${prefix}${depmod_path} -a ${kernel_uname_r} &> /dev/null || :
-fi
-
-EOF
- else
- cat <<EOF
-%post -n kmod-${kmodname}-${kernel_uname_r}
-[ "\$(uname -r)" = "${kernel_uname_r}" ] && ${prefix}${depmod_path} -a > /dev/null || :
-%postun -n kmod-${kmodname}-${kernel_uname_r}
-[ "\$(uname -r)" = "${kernel_uname_r}" ] && ${prefix}${depmod_path} -a > /dev/null || :
-
-EOF
- fi
-
- # third part
- cat <<EOF
-%description -n kmod-${kmodname}-${kernel_uname_r}
-This package provides the ${kmodname} kernel modules built for the Linux
-kernel ${kernel_uname_r} for the %{_target_cpu} family of processors.
-%files -n kmod-${kmodname}-${kernel_uname_r}
-%defattr(644,root,root,755)
-%dir $prefix/lib/modules/${kernel_uname_r}/extra
-${prefix}/lib/modules/${kernel_uname_r}/extra/${kmodname}/
-
-
-EOF
-}
-
-print_rpmtemplate_kmoddevelpkg ()
-{
- if [[ "${1}" = "--custom" ]]; then
- shift
- local customkernel=true
- elif [[ "${1}" = "--redhat" ]]; then
- shift
- local redhatkernel=true
- fi
-
- local kernel_uname_r=${1}
-
- cat <<EOF
-%package -n kmod-${kmodname}-devel
-Summary: ${kmodname} kernel module(s) devel common
-Group: System Environment/Kernel
-Provides: ${kmodname}-devel-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
-EOF
-
- if [[ -z "${customkernel}" ]] && [[ -z "${redhatkernel}" ]]; then
- echo "Requires: kmod-${kmodname}-devel-${kernel_uname_r} >= %{?epoch:%{epoch}:}%{version}-%{release}"
- fi
-
- cat <<EOF
-%description -n kmod-${kmodname}-devel
-This package provides the common header files to build kernel modules
-which depend on the ${kmodname} kernel module. It may optionally require
-the ${kmodname}-devel-<kernel> objects for the newest kernel.
-
-%files -n kmod-${kmodname}-devel
-%defattr(644,root,root,755)
-%{_usrsrc}/${kmodname}-%{version}
-EOF
-
- for kernel in ${1}; do
- local kernel_uname_r=${kernel}
- echo "%exclude %{_usrsrc}/${kmodname}-%{version}/${kernel_uname_r}"
- done
-
- echo
- echo
-}
-
-print_rpmtemplate_per_kmoddevelpkg ()
-{
- if [[ "${1}" = "--custom" ]]; then
- shift
- local customkernel=true
- elif [[ "${1}" = "--redhat" ]]; then
- # this is needed for akmods
- shift
- local redhatkernel=true
- fi
-
- local kernel_uname_r=${1}
- local kernel_variant="${2:+-${2}}"
-
- # first part
- cat <<EOF
-%package -n kmod-${kmodname}-devel-${kernel_uname_r}
-Summary: ${kmodname} kernel module(s) devel for ${kernel_uname_r}
-Group: System Environment/Kernel
-Provides: kernel-objects-for-kernel = ${kernel_uname_r}
-Provides: ${kmodname}-devel-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
-Provides: kmod-${kmodname}-devel-uname-r = ${kernel_uname_r}
-EOF
-
- # second part
- if [[ -z "${customkernel}" ]]; then
- cat <<EOF
-Requires: kernel-devel-uname-r = ${kernel_uname_r}
-BuildRequires: kernel-devel-uname-r = ${kernel_uname_r}
-%{?KmodsDevelRequires:Requires: %{KmodsDevelRequires}-uname-r = ${kernel_uname_r}}
-%{?KmodsDevelRequires:BuildRequires: %{KmodsDevelRequires}-uname-r = ${kernel_uname_r}}
-EOF
- fi
-
- # third part
- cat <<EOF
-%description -n kmod-${kmodname}-devel-${kernel_uname_r}
-This package provides objects and symbols required to build kernel modules
-which depend on the ${kmodname} kernel modules built for the Linux
-kernel ${kernel_uname_r} for the %{_target_cpu} family of processors.
-%files -n kmod-${kmodname}-devel-${kernel_uname_r}
-%defattr(644,root,root,755)
-%{_usrsrc}/${kmodname}-%{version}/${kernel_uname_r}
-EOF
-}
-
-print_rpmtemplate_kmodmetapkg ()
-{
- local kernel_uname_r=${1}
- local kernel_variant="${2:+-${2}}"
-
- cat <<EOF
-%package -n kmod-${kmodname}${kernel_variant}
-Summary: Metapackage which tracks in ${kmodname} kernel module for newest kernel${kernel_variant}
-Group: System Environment/Kernel
-
-Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
-Requires: kmod-${kmodname}-${kernel_uname_r} >= %{?epoch:%{epoch}:}%{version}-%{release}
-%{?KmodsMetaRequires:Requires: %{?KmodsMetaRequires}}
-EOF
-
- cat <<EOF
-
-%description -n kmod-${kmodname}${kernel_variant}
-This is a meta-package without payload which sole purpose is to require the
-${kmodname} kernel module(s) for the newest kernel${kernel_variant}.
-to make sure you get it together with a new kernel.
-
-%files -n kmod-${kmodname}${kernel_variant}
-%defattr(644,root,root,755)
-
-
-EOF
-}
-
-print_customrpmtemplate ()
-{
- for kernel in ${1}
- do
- if [[ -e "${prefix}/lib/modules/${kernel}/build/Makefile" ]]; then
- # likely a user-build-kernel with available buildfiles
- # fixme: we should check if uname from Makefile is the same as ${kernel}
-
- kernel_versions="${kernel_versions}${kernel}___${prefix}/lib/modules/${kernel}/build/ "
- print_rpmtemplate_per_kmodpkg --custom "${kernel}"
-
- # create development package
- if [[ -n "${devel}" ]]; then
- # create devel package including common headers
- print_rpmtemplate_kmoddevelpkg --custom "${kernel}"
-
- # create devel package
- print_rpmtemplate_per_kmoddevelpkg --custom "${kernel}"
- fi
- elif [[ -e "${buildroot}/usr/src/kernels/${kernel}" ]]; then
- # this looks like a Fedora/RH kernel -- print a normal template (which includes the proper BR) and be happy :)
- kernel_versions="${kernel_versions}${kernel}___${buildroot}%{_usrsrc}/kernels/${kernel} "
-
- # parse kernel versions string and print template
- local kernel_verrelarch=${kernel%%${kernels_known_variants}}
- print_rpmtemplate_per_kmodpkg --redhat ${kernel} ${kernel##${kernel_verrelarch}}
-
- # create development package
- if [[ -n "${devel}" ]]; then
- # create devel package including common headers
- print_rpmtemplate_kmoddevelpkg --redhat ${kernel} ${kernel##${kernel_verrelarch}}
-
- # create devel package
- print_rpmtemplate_per_kmoddevelpkg --redhat ${kernel} ${kernel##${kernel_verrelarch}}
- fi
- else
- error_out 2 "Don't know how to handle ${kernel} -- ${prefix}/lib/modules/${kernel}/build/Makefile not found"
- fi
- done
-
- # well, it's no header anymore, but who cares ;-)
- print_rpmtemplate_header
-}
-
-
-print_rpmtemplate ()
-{
- # create kernel_versions var
- for kernel_version in ${kernel_versions_to_build_for}
- do
- kernel_versions="${kernel_versions}${kernel_version}___%{_usrsrc}/kernels/${kernel_version} "
- done
-
- # and print it and some other required stuff as macro
- print_rpmtemplate_header
-
- # now print the packages
- for kernel in ${kernel_versions_to_build_for} ; do
-
- local kernel_verrelarch=${kernel%%${kernels_known_variants}}
-
- # create metapackage
- print_rpmtemplate_kmodmetapkg "${kernel}" "${kernel##${kernel_verrelarch}}"
-
- # create package
- print_rpmtemplate_per_kmodpkg "${kernel}" "${kernel##${kernel_verrelarch}}"
-
- if [[ -n "${devel}" ]]; then
- # create devel package including common headers
- print_rpmtemplate_kmoddevelpkg "${kernel}" "${kernel##${kernel_verrelarch}}"
-
- # create devel package
- print_rpmtemplate_per_kmoddevelpkg "${kernel}" "${kernel##${kernel_verrelarch}}"
- fi
- done
-}
-
-myprog_help ()
-{
- echo "Usage: ${0##*/} [OPTIONS]"
- echo
- echo "Creates a template to be used during kmod building"
- echo
- echo "Available options:"
- echo " --filterfile <file> -- filter the results with grep --file <file>"
-	echo "  --for-kernels <list>   -- create templates only for these kernels"
- echo " --kmodname <file> -- name of the kmod (required)"
- echo " --devel -- make kmod-devel package"
- echo " --noakmod -- no akmod package"
- echo " --repo <name> -- use buildsys-build-<name>-kerneldevpkgs"
- echo " --target <arch> -- target-arch (required)"
- echo " --buildroot <dir> -- Build root (place to look for build files)"
-}
-
-while [[ -n "${1}" ]] ; do
- case "${1}" in
- --filterfile)
- shift
- if [[ -z "${1}" ]] ; then
- error_out 2 "Please provide path to a filter-file together with --filterfile" >&2
- elif [[ ! -e "${1}" ]]; then
- error_out 2 "Filterfile ${1} not found" >&2
- fi
- filterfile="${1}"
- shift
- ;;
- --kmodname)
- shift
- if [[ -z "${1}" ]] ; then
- error_out 2 "Please provide the name of the kmod together with --kmodname" >&2
- fi
- # strip pending -kmod
- kmodname="${1%%-kmod}"
- shift
- ;;
- --devel)
- shift
- devel="true"
- ;;
- --prefix)
- shift
- if [[ -z "${1}" ]] ; then
- error_out 2 "Please provide a prefix with --prefix" >&2
- fi
- prefix="${1}"
- shift
- ;;
- --repo)
- shift
- if [[ -z "${1}" ]] ; then
- error_out 2 "Please provide the name of the repo together with --repo" >&2
- fi
- repo=${1}
- shift
- ;;
- --for-kernels)
- shift
- if [[ -z "${1}" ]] ; then
-			error_out 2 "Please provide a list of kernels together with --for-kernels" >&2
- fi
- for_kernels="${1}"
- shift
- ;;
- --noakmod)
- shift
- noakmod="true"
- ;;
- --target)
- shift
- target="${1}"
- shift
- ;;
- --akmod)
- shift
- build_kernels="akmod"
- ;;
- --newest)
- shift
- build_kernels="newest"
- ;;
- --current)
- shift
- build_kernels="current"
- ;;
- --buildroot)
- shift
- buildroot="${1}"
- shift
- ;;
- --help)
- myprog_help
- exit 0
- ;;
- --version)
- echo "${myprog} ${myver}"
- exit 0
- ;;
- *)
- echo "Error: Unknown option '${1}'." >&2
-		myprog_help >&2
- exit 2
- ;;
- esac
-done
-
-if [[ -e ./kmodtool-kernel-variants ]]; then
- kernels_known_variants="$(cat ./kmodtool-kernel-variants)"
-elif [[ -e /usr/share/kmodtool/kernel-variants ]] ; then
- kernels_known_variants="$(cat /usr/share/kmodtool/kernel-variants)"
-else
- kernels_known_variants="@(smp?(-debug)|PAE?(-debug)|debug|kdump|xen|kirkwood|highbank|imx|omap|tegra)"
-fi
-
-# general sanity checks
-if [[ -z "${target}" ]]; then
- error_out 2 "please pass target arch with --target"
-elif [[ -z "${kmodname}" ]]; then
- error_out 2 "please pass kmodname with --kmodname"
-elif [[ -z "${kernels_known_variants}" ]] ; then
- error_out 2 "could not determine known variants"
-fi
-
-# go
-if [[ -n "${for_kernels}" ]]; then
- # this is easy:
- print_customrpmtemplate "${for_kernels}"
-elif [[ "${build_kernels}" = "akmod" ]]; then
-	# only do an akmod package
- print_akmodtemplate
- print_akmodmeta
-else
-	# seems we are on our own to decide which kernels to build for
-
- # we need more sanity checks in this case
- if [[ -z "${repo}" ]]; then
- error_out 2 "please provide repo name with --repo"
- elif ! command -v "buildsys-build-${repo}-kerneldevpkgs" > /dev/null 2>&1; then
- error_out 2 "buildsys-build-${repo}-kerneldevpkgs not found"
- fi
-
- # call buildsys-build-${repo}-kerneldevpkgs to get the list of kernels
- cmdoptions="--target ${target}"
-
- # filterfile to filter list of kernels?
- if [[ -n "${filterfile}" ]] ; then
- cmdoptions="${cmdoptions} --filterfile ${filterfile}"
- fi
-
- kernel_versions_to_build_for=$(buildsys-build-${repo}-kerneldevpkgs "--${build_kernels}" ${cmdoptions}) ||
- error_out 2 "buildsys-build-${repo}-kerneldevpkgs failed: ${kernel_versions_to_build_for}"
-
- if [[ "${build_kernels}" = "current" ]] && [[ -z "${noakmod}" ]]; then
- print_akmodtemplate
- fi
-
- print_rpmtemplate
-fi
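
A hedged usage sketch for the script above (the kernel version and output file
are hypothetical, and it assumes that kernel's build tree is installed under
/lib/modules/<version>/build): when the packaging already knows which kernel to
build for, the --for-kernels path sidesteps the
buildsys-build-<repo>-kerneldevpkgs lookup entirely:

    ./scripts/kmodtool --kmodname zfs --target x86_64 --devel \
        --for-kernels "6.1.0-18-amd64" > kmod-zfs.spec.inc

The output is an RPM spec fragment containing %package/%description/%files
sections for kmod-zfs-<kernel> (plus the -devel subpackages when --devel is
given), which the calling spec file can then pull in.
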
diff --git a/scripts/make_gitrev.sh b/scripts/make_gitrev.sh
deleted file mode 100755
index f85c9db097f0..000000000000
--- a/scripts/make_gitrev.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: CDDL-1.0
-
-#
-# CDDL HEADER START
-#
-# This file and its contents are supplied under the terms of the
-# Common Development and Distribution License ("CDDL"), version 1.0.
-# You may only use this file in accordance with the terms of version
-# 1.0 of the CDDL.
-#
-# A full copy of the text of the CDDL should have accompanied this
-# source. A copy of the CDDL is also available via the Internet at
-# http://www.illumos.org/license/CDDL.
-#
-# CDDL HEADER END
-#
-
-# Copyright (c) 2018 by Delphix. All rights reserved.
-# Copyright (c) 2018 by Matthew Thode. All rights reserved.
-
-#
-# Generate zfs_gitrev.h. Note that we need to do this for every
-# invocation of `make`, including for incremental builds. Therefore we
-# can't use a zfs_gitrev.h.in file which would be processed only when
-# `configure` is run.
-#
-
-set -eu
-
-dist=no
-distdir=.
-while getopts D: flag
-do
- case $flag in
- \?) echo "Usage: $0 [-D distdir] [file]" >&2; exit 1;;
- D) dist=yes; distdir=${OPTARG};;
- *) ;;
- esac
-done
-shift $((OPTIND - 1))
-
-top_srcdir="$(dirname "$0")/.."
-GITREV="${1:-include/zfs_gitrev.h}"
-
-# GITREV should be a relative path (relative to top_builddir or distdir)
-case "${GITREV}" in
- /*) echo "Error: ${GITREV} should be a relative path" >&2
- exit 1;;
- *) ;;
-esac
-
-ZFS_GITREV=$({ cd "${top_srcdir}" &&
- git describe --always --long --dirty 2>/dev/null; } || :)
-
-if [ -z "${ZFS_GITREV}" ]
-then
- # If the source directory is not a git repository, check if the file
- # already exists (in the source)
- if [ -f "${top_srcdir}/${GITREV}" ]
- then
- ZFS_GITREV=$(sed -n \
- '1s/^#define[[:blank:]]ZFS_META_GITREV "\([^"]*\)"$/\1/p' \
- "${top_srcdir}/${GITREV}")
- fi
-elif [ "${dist}" = yes ]
-then
- # Append -dist when creating distributed sources from a git repository
- ZFS_GITREV="${ZFS_GITREV}-dist"
-fi
-ZFS_GITREV=${ZFS_GITREV:-unknown}
-
-GITREVTMP="${GITREV}~"
-printf '#define\tZFS_META_GITREV "%s"\n' "${ZFS_GITREV}" >"${GITREVTMP}"
-GITREV="${distdir}/${GITREV}"
-if cmp -s "${GITREV}" "${GITREVTMP}"
-then
- rm -f "${GITREVTMP}"
-else
- mv -f "${GITREVTMP}" "${GITREV}"
-fi
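
A quick sketch of what this produces (the revision string is illustrative; it
is whatever `git describe --always --long --dirty` reports, or "unknown"
outside a git checkout):

    $ scripts/make_gitrev.sh
    $ cat include/zfs_gitrev.h
    #define ZFS_META_GITREV "zfs-2.2.99-123-gdeadbee-dirty"

Because the freshly generated content is compared with cmp(1) before being
moved into place, the header is only rewritten when the revision actually
changes, so files that include it are not needlessly rebuilt.
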
diff --git a/scripts/man-dates.sh b/scripts/man-dates.sh
deleted file mode 100755
index 39f1b5fb1324..000000000000
--- a/scripts/man-dates.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-# This script updates the date lines in the man pages to the date of the last
-# commit to that file.
-
-set -eu
-
-find man -type f | while read -r i ; do
- git_date=$(git log -1 --date=short --format="%ad" -- "$i")
- [ -z "$git_date" ] && continue
- sed -i "s|^\.Dd.*|.Dd $(date -d "$git_date" "+%B %-d, %Y")|" "$i"
-done
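
For example (dates illustrative), a page whose most recent commit touching it
was on 2024-03-05 would have its header line rewritten from

    .Dd January 10, 2023

to

    .Dd March 5, 2024

matching the "%B %-d, %Y" format used in the sed expression above.
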
diff --git a/scripts/mancheck.sh b/scripts/mancheck.sh
deleted file mode 100755
index 33d7d3c7155f..000000000000
--- a/scripts/mancheck.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/sh
-#
-# Permission to use, copy, modify, and/or distribute this software for
-# any purpose with or without fee is hereby granted.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
-# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# shellcheck disable=SC2068,SC2086
-
-trap 'rm -f "$stdout_file" "$stderr_file" "$result_file"' EXIT
-
-if [ "$#" -eq 0 ]; then
- echo "Usage: $0 <manpage-directory|manpage-file>..."
- exit 1
-fi
-
-if ! command -v mandoc > /dev/null; then
- echo "skipping mancheck because mandoc is not installed"
- exit 0
-fi
-
-IFS="
-"
-files="$(
- for path in $@ ; do
- find -L $path -type f -name '*[1-9]*' -not -name '.*'
- done | sort | uniq
-)"
-
-if [ "$files" = "" ] ; then
- echo no files to process! 1>&2
- exit 1
-fi
-
-add_excl="$(awk '
- /^.\\" lint-ok:/ {
- print "-e"
- $1 = "mandoc:"
- $2 = FILENAME ":[[:digit:]]+:[[:digit:]]+:"
- print
- }' $files)"
-
-# Redirect to file instead of 2>&1ing because mandoc flushes inconsistently(?) which tears lines
-# https://github.com/openzfs/zfs/pull/12129/checks?check_run_id=2701608671#step:5:3
-stdout_file="$(mktemp)"
-stderr_file="$(mktemp)"
-mandoc -Tlint $files 1>"$stdout_file" 2>"$stderr_file"
-result_file="$(mktemp)"
-grep -vhE -e 'mandoc: outdated mandoc.db' -e 'STYLE: referenced manual not found' $add_excl "$stdout_file" "$stderr_file" > "$result_file"
-
-if [ -s "$result_file" ]; then
- cat "$result_file"
- exit 1
-fi
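
The lint-ok mechanism above lets an individual man page waive one specific
mandoc diagnostic. As a hypothetical example, adding the roff comment

    .\" lint-ok: WARNING: sections out of conventional order

to a page is turned by the awk snippet into an extra grep exclusion of the
form "mandoc: <that file>:[[:digit:]]+:[[:digit:]]+: WARNING: sections out of
conventional order", so only that message, in that file, is suppressed.
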
diff --git a/scripts/objtool-wrapper.in b/scripts/objtool-wrapper.in
deleted file mode 100644
index 0451f8718233..000000000000
--- a/scripts/objtool-wrapper.in
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-
-# SPDX-License-Identifier: MIT
-#
-# Copyright (c) 2025 Attila Fülöp <attila@fueloep.org>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to
-# deal in the Software without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-# sell copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-# Filter out objtool's '--Werror' flag.
-
-objtool="@abs_objtool_binary@"
-args=$(echo "$*" | sed s/--Werror//)
-
-if [ -z "$objtool" ]; then
- echo "$(basename "$0"): No objtool binary configured" 1>&2
- exit 1;
-fi
-
-# shellcheck disable=SC2086
-exec "$objtool" $args
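
A hedged illustration (the other objtool arguments are hypothetical): if the
kernel build invokes the wrapper as

    objtool-wrapper --hacks=jump_label --Werror vmlinux.o

it re-execs whatever binary configure substituted for @abs_objtool_binary@
with the same arguments minus --Werror, i.e. effectively

    "$objtool" --hacks=jump_label vmlinux.o
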
diff --git a/scripts/paxcheck.sh b/scripts/paxcheck.sh
deleted file mode 100755
index aba770e9e6f3..000000000000
--- a/scripts/paxcheck.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-if ! command -v scanelf > /dev/null; then
- echo "scanelf (from pax-utils) is required for these checks." >&2
- exit 3
-fi
-
-RET=0
-
-# check for exec stacks
-OUT=$(scanelf -qyRAF '%e %p' "$1")
-
-if [ x"${OUT}" != x ]; then
- RET=2
- echo "The following files contain writable and executable sections"
- echo " Files with such sections will not work properly (or at all!) on some"
- echo " architectures/operating systems."
- echo " For more information, see:"
- echo " https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart"
- echo
- echo "${OUT}"
- echo
-fi
-
-
-# check for TEXTRELS
-OUT=$(scanelf -qyRAF '%T %p' "$1")
-
-if [ x"${OUT}" != x ]; then
- RET=2
- echo "The following files contain runtime text relocations"
- echo " Text relocations force the dynamic linker to perform extra"
- echo " work at startup, waste system resources, and may pose a security"
- echo " risk. On some architectures, the code may not even function"
- echo " properly, if at all."
- echo " For more information, see:"
- echo " https://wiki.gentoo.org/wiki/Hardened/HOWTO_locate_and_fix_textrels"
- echo
- echo "${OUT}"
- echo
-fi
-
-exit "$RET"
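
Typical usage is to point the script at a directory of built objects, for
example (path illustrative):

    scripts/paxcheck.sh .libs

It exits 0 when scanelf reports neither writable+executable sections nor text
relocations, 2 when either check flags a file, and 3 when scanelf itself is
not installed.
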
diff --git a/scripts/spdxcheck.pl b/scripts/spdxcheck.pl
deleted file mode 100755
index 4d4e14368beb..000000000000
--- a/scripts/spdxcheck.pl
+++ /dev/null
@@ -1,433 +0,0 @@
-#!/usr/bin/env perl
-
-# SPDX-License-Identifier: MIT
-#
-# Copyright (c) 2025, Rob Norris <robn@despairlabs.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to
-# deal in the Software without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-# sell copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-use 5.010;
-use warnings;
-use strict;
-
-# All files known to git are either "tagged" or "untagged". Tagged files are
-# expected to have a license tag, while untagged files are expected to _not_
-# have a license tag. There is no "optional" tag; all files are either "tagged"
-# or "untagged".
-#
-# Whether or not a file is tagged or untagged is determined using the patterns
-# in $tagged_patterns and $untagged_patterns and the following sequence:
-#
-# - if the file's full path is explicitly listed in $tagged_patterns, then the
-# file is tagged.
-#
-# - if the file's full path is explicitly listed in $untagged_patterns, then
-#   the file is untagged.
-#
-# - if the filename matches a pattern in $tagged_patterns, and does not match a
-#   pattern in $untagged_patterns, then the file is tagged.
-#
-# - otherwise, the file is untagged.
-#
-# The patterns do a simple glob-like match over the entire path relative to the
-# root of the git repo (no leading /). '*' matches anything at that point,
-# across path fragments. '?' matches a single character.
-
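
To make the conversion concrete (the regex shown is approximate): a pattern
with metacharacters, such as man/man?/*.? from the tagged list below, is
turned by setup_patterns() into the anchored regex

    ^man/man./.*\..$

with '?' becoming '.', '*' becoming '.*' and literal dots escaped, while an
entry with no metacharacters at all (configure.ac, for instance) is kept as an
exact-path match instead.
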
-my $tagged_patterns = q(
- # Compiled source files
- *.c
- *.h
- *.S
-
- # Python files, eg test suite drivers, libzfs bindings
- *.py
- *.py.in
-
- # Various support scripts
- *.sh
- *.pl
-
- # Test suite
- *.ksh
- *.ksh.in
- *.kshlib
- *.kshlib.in
- *.shlib
-
- # Test suite data files
- *.run
- *.cfg
- *.cfg.in
- *.fio
- *.lua
- *.zcp
-
- # Manpages
- man/man?/*.?
- man/man?/*.?.in
-
- # Unsuffixed programs (or generated of same)
- cmd/zarcstat.in
- cmd/zarcsummary
- cmd/dbufstat.in
- cmd/zilstat.in
- cmd/zpool/zpool.d/*
- etc/init.d/zfs-import.in
- etc/init.d/zfs-load-key.in
- etc/init.d/zfs-mount.in
- etc/init.d/zfs-share.in
- etc/init.d/zfs-zed.in
- etc/zfs/zfs-functions.in
- scripts/objtool-wrapper.in
-
- # Misc items that have clear licensing info but aren't easily matched,
- # or are the first of a class that we aren't ready to match yet.
- config/ax_code_coverage.m4
- configure.ac
- module/lua/README.zfs
- scripts/kmodtool
- tests/zfs-tests/tests/functional/inheritance/README.config
- tests/zfs-tests/tests/functional/inheritance/README.state
- cmd/zed/zed.d/statechange-notify.sh
-);
-
-my $untagged_patterns = q(
- # Exclude CI tooling as it's not interesting for overall project
- # licensing.
- .github/*
-
- # Everything below this has unclear licensing. Work is happening to
- # identify and update them. Once one gains a tag it should be removed
- # from this list.
-
- cmd/zed/zed.d/*.sh
- cmd/zpool/zpool.d/*
-
- contrib/coverity/model.c
- include/libzdb.h
- include/os/freebsd/spl/sys/inttypes.h
- include/os/freebsd/spl/sys/mode.h
- include/os/freebsd/spl/sys/trace.h
- include/os/freebsd/spl/sys/trace_zfs.h
- include/os/freebsd/zfs/sys/zpl.h
- include/os/linux/kernel/linux/page_compat.h
- lib/libspl/include/os/freebsd/sys/sysmacros.h
- lib/libspl/include/sys/string.h
- lib/libspl/include/sys/trace_spl.h
- lib/libspl/include/sys/trace_zfs.h
- lib/libzdb/libzdb.c
- module/lua/setjmp/setjmp.S
- module/lua/setjmp/setjmp_ppc.S
- module/zstd/include/sparc_compat.h
- module/zstd/zstd_sparc.c
- tests/zfs-tests/cmd/cp_files.c
- tests/zfs-tests/cmd/zed_fd_spill-zedlet.c
- tests/zfs-tests/tests/functional/tmpfile/tmpfile_001_pos.c
- tests/zfs-tests/tests/functional/tmpfile/tmpfile_002_pos.c
- tests/zfs-tests/tests/functional/tmpfile/tmpfile_003_pos.c
- tests/zfs-tests/tests/functional/tmpfile/tmpfile_test.c
-
- autogen.sh
- contrib/bpftrace/zfs-trace.sh
- contrib/pyzfs/docs/source/conf.py
- contrib/pyzfs/libzfs_core/test/__init__.py
- contrib/pyzfs/setup.py.in
- contrib/zcp/autosnap.lua
- scripts/commitcheck.sh
- scripts/man-dates.sh
- scripts/mancheck.sh
- scripts/paxcheck.sh
- scripts/zfs-helpers.sh
- scripts/zfs-tests-color.sh
- scripts/zfs.sh
- scripts/zimport.sh
- tests/zfs-tests/callbacks/zfs_failsafe.ksh
- tests/zfs-tests/include/commands.cfg
- tests/zfs-tests/include/tunables.cfg
- tests/zfs-tests/include/zpool_script.shlib
- tests/zfs-tests/tests/functional/mv_files/random_creation.ksh
-);
-
-# For files expected to have a license tags, these are the acceptable tags by
-# path. A file in one of these paths with a tag not listed here must be in the
-# override list below. If the file is not in any of these paths, then
-# $default_license_tags is used.
-my $default_license_tags = [
- 'CDDL-1.0', '0BSD', 'BSD-2-Clause', 'BSD-3-Clause', 'MIT'
-];
-
-my @path_license_tags = (
- # Conventional wisdom is that the Linux SPL must be GPL2+ for
- # kernel compatibility.
- 'module/os/linux/spl' => ['GPL-2.0-or-later'],
- 'include/os/linux/spl' => ['GPL-2.0-or-later'],
-
-	# Third-party code should keep its original license
- 'module/zstd/lib' => ['BSD-3-Clause OR GPL-2.0-only'],
- 'module/lua' => ['MIT'],
-
- # lua/setjmp is platform-specific code sourced from various places
- 'module/lua/setjmp' => $default_license_tags,
-
- # Some of the fletcher modules are dual-licensed
- 'module/zcommon/zfs_fletcher' =>
- ['BSD-2-Clause OR GPL-2.0-only', 'CDDL-1.0'],
-
- 'module/icp' => ['Apache-2.0', 'CDDL-1.0'],
- 'contrib/icp' => ['Apache-2.0', 'CDDL-1.0'],
-
- # Python bindings are always Apache-2.0
- 'contrib/pyzfs' => ['Apache-2.0'],
-);
-
-# This is a list of "special case" license tags that are in use in the tree,
-# and the files where they occur. These exist for a variety of reasons, and
-# generally should not be used for new code. If you need to bring in code that
-# has a different license from the acceptable ones listed above, then you will
-# also need to add it here, with rationale provided and approval given in your
-# PR.
-my %override_file_license_tags = (
-
- # SPDX have repeatedly rejected the creation of a tag for a public
-	# domain dedication, as not all dedications are clear and unambiguous
- # in their meaning and not all jurisdictions permit relinquishing a
- # copyright anyway.
- #
- # A reasonably common workaround appears to be to create a local
- # (project-specific) identifier to convey whatever meaning the project
- # wishes it to. To cover OpenZFS' use of third-party code with a
- # public domain dedication, we use this custom tag.
- #
- # Further reading:
- # https://github.com/spdx/old-wiki/blob/main/Pages/Legal%20Team/Decisions/Dealing%20with%20Public%20Domain%20within%20SPDX%20Files.md
- # https://spdx.github.io/spdx-spec/v2.3/other-licensing-information-detected/
- # https://cr.yp.to/spdx.html
- #
- 'LicenseRef-OpenZFS-ThirdParty-PublicDomain' => [qw(
- include/sys/skein.h
- module/icp/algs/skein/skein_block.c
- module/icp/algs/skein/skein.c
- module/icp/algs/skein/skein_impl.h
- module/icp/algs/skein/skein_iv.c
- module/icp/algs/skein/skein_port.h
- module/zfs/vdev_draid_rand.c
- )],
-
- # Legacy inclusions
- 'Brian-Gladman-3-Clause' => [qw(
- module/icp/asm-x86_64/aes/aestab.h
- module/icp/asm-x86_64/aes/aesopt.h
- module/icp/asm-x86_64/aes/aeskey.c
- module/icp/asm-x86_64/aes/aes_amd64.S
- )],
- 'OpenSSL-standalone' => [qw(
- module/icp/asm-x86_64/aes/aes_aesni.S
- )],
- 'LGPL-2.1-or-later' => [qw(
- config/ax_code_coverage.m4
- )],
-
- # Legacy inclusions of BSD-2-Clause files in Linux SPL.
- 'BSD-2-Clause' => [qw(
- include/os/linux/spl/sys/debug.h
- module/os/linux/spl/spl-zone.c
- )],
-
- # Temporary overrides for things that have the wrong license for
- # their path. Work is underway to understand and resolve these.
- 'GPL-2.0-or-later' => [qw(
- include/os/freebsd/spl/sys/kstat.h
- include/os/freebsd/spl/sys/sunddi.h
- )],
- 'CDDL-1.0' => [qw(
- include/os/linux/spl/sys/errno.h
- include/os/linux/spl/sys/ia32/asm_linkage.h
- include/os/linux/spl/sys/misc.h
- include/os/linux/spl/sys/procfs_list.h
- include/os/linux/spl/sys/trace.h
- include/os/linux/spl/sys/trace_spl.h
- include/os/linux/spl/sys/trace_taskq.h
- include/os/linux/spl/sys/wmsum.h
- module/os/linux/spl/spl-procfs-list.c
- module/os/linux/spl/spl-trace.c
- module/lua/README.zfs
- )],
-);
-
-##########
-
-sub setup_patterns {
- my ($patterns) = @_;
-
- my @re;
- my @files;
-
- for my $pat (split "\n", $patterns) {
- # remove leading/trailing whitespace and comments
-		$pat =~ s/(?:^\s*|\s*(?:#.*)?$)//g;
- # skip (now-)empty lines
- next if $pat eq '';
-
- # if the "pattern" has no metachars, then it's a literal file
- # path and gets matched a bit more strongly
- unless ($pat =~ m/[?*]/) {
- push @files, $pat;
- next;
- }
-
- # naive pattern to regex conversion
-
- # escape simple metachars
- $pat =~ s/([\.\(\[])/\Q$1\E/g;
-
- $pat =~ s/\?/./g; # glob ? -> regex .
- $pat =~ s/\*/.*/g; # glob * -> regex .*
-
- push @re, $pat;
- }
-
- my $re = join '|', @re;
- return (qr/^(?:$re)$/, { map { $_ => 1 } @files });
-};
-
-my ($tagged_re, $tagged_files) = setup_patterns($tagged_patterns);
-my ($untagged_re, $untagged_files) = setup_patterns($untagged_patterns);
-
-sub file_is_tagged {
- my ($file) = @_;
-
- # explicitly tagged
- if ($tagged_files->{$file}) {
- delete $tagged_files->{$file};
- return 1;
- }
-
- # explicitly untagged
- if ($untagged_files->{$file}) {
- delete $untagged_files->{$file};
- return 0;
- }
-
- # must match tagged patterns and not match untagged patterns
- return ($file =~ $tagged_re) && !($file =~ $untagged_re);
-}
-
-my %override_tags = map {
- my $tag = $_;
- map { $_ => $tag } @{$override_file_license_tags{$_}};
-} keys %override_file_license_tags;
-
-##########
-
-my $rc = 0;
-
-# Get a list of all files known to git. This is a crude way of avoiding any
-# build artifacts that have tags embedded in them.
-my @git_files = sort grep { chomp } qx(git ls-tree --name-only -r HEAD);
-
-# Scan all files and work out if their tags are correct.
-for my $file (@git_files) {
- # Ignore non-files. git can store other types of objects (submodule
- # dirs, symlinks, etc) that aren't interesting for licensing.
- next unless -f $file && ! -l $file;
-
- # Open the file, and extract its license tag. We only check the first
- # 4K of each file because many of these files are large, binary, or
- # both. For a typical source file that means the tag should be found
- # within the first ~50 lines.
- open my $fh, '<', $file or die "$0: couldn't open $file: $!\n";
- my $nbytes = read $fh, my $buf, 4096;
- die "$0: couldn't read $file: $!\n" if !defined $nbytes;
-
- my ($tag) =
- $buf =~ m/\bSPDX-License-Identifier: ([A-Za-z0-9_\-\. ]+)$/smg;
-
- close $fh;
-
- # Decide if the file should have a tag at all
- my $tagged = file_is_tagged($file);
-
- # If no license tag is wanted, there's not much left to do
- if (!$tagged) {
- if (defined $tag) {
- # untagged file has a tag, pattern change required
- say "unexpected license tag: $file";
- $rc = 1;
- }
- next;
- }
-
- # If a tag is required, but doesn't have one, warn and loop.
- if (!defined $tag) {
- say "missing license tag: $file";
- $rc = 1;
- next;
- }
-
- # Determine the set of valid license tags for this file. Start with
- # the defaults.
- my $tags = $default_license_tags;
-
- if ($override_tags{$file}) {
- # File has an explicit override, use it.
- $tags = [delete $override_tags{$file}];
- } else {
- # Work through the path tag sets, taking the set with the
- # most precise match. If no sets match, we fall through and
- # are left with the default set.
- my $matchlen = 0;
- for (my $n = 0; $n < @path_license_tags; $n += 2) {
- my ($path, $t) = @path_license_tags[$n,$n+1];
- if (substr($file, 0, length($path)) eq $path &&
- length($path) > $matchlen) {
- $tags = $t;
- $matchlen = length($path);
- }
- }
- }
-
- # Confirm the file's tag is in the set, and warn if not.
- my %tags = map { $_ => 1 } @$tags;
- unless ($tags{$tag}) {
- say "invalid license tag: $file";
- say " (got $tag; expected: @$tags)";
- $rc = 1;
- next;
- }
-}
-
-##########
-
-# List any files explicitly listed as tagged or untagged that we didn't see.
-# Likely the file was removed from the repo but not from our lists.
-
-for my $file (sort keys %$tagged_files) {
- say "explicitly tagged file not on disk: $file";
- $rc = 1;
-}
-for my $file (sort keys %$untagged_files) {
- say "explicitly untagged file not on disk: $file";
- $rc = 1;
-}
-for my $file (sort keys %override_tags) {
- say "explicitly overridden file not on disk: $file";
- $rc = 1;
-}
-
-exit $rc;
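
For a file this checker expects to be tagged, the tag is a plain SPDX line
anywhere in the first 4 KiB of the file, for example (license chosen purely
for illustration):

    # SPDX-License-Identifier: CDDL-1.0

in a shell script, or the equivalent C or Perl comment style.
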
diff --git a/scripts/update_authors.pl b/scripts/update_authors.pl
deleted file mode 100755
index c634d185869a..000000000000
--- a/scripts/update_authors.pl
+++ /dev/null
@@ -1,378 +0,0 @@
-#!/usr/bin/env perl
-
-# SPDX-License-Identifier: MIT
-#
-# Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to
-# deal in the Software without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-# sell copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-
-# This program will update the AUTHORS file to include commit authors that are
-# in the git history but are not yet credited.
-#
-# The CONTRIBUTORS section of the AUTHORS file attempts to be a list of
-# individual contributors to OpenZFS, with one name, address and line per
-# person. This is good for readability, but does not really leave room for the
-# that names and emails on commits from the same individual can be different,
-# for all kinds of reasons, not limited to:
-#
-# - a person might change organisations, and so their email address changes
-#
-# - a person might be paid to work on OpenZFS for their employer, and then hack
-# on personal projects in the evening, so commits legitimately come from
-# different addresses
-#
-# - names change for all kinds of reasons
-#
-# To try and account for this, this program will try to find all the possible
-# names and emails for a single contributor, and then select the "best" one to
-# add to the AUTHORS file.
-#
-# The CONTRIBUTORS section of the AUTHORS file is considered the source of
-# truth. Once an individual committer is listed in there, that line will not be
-# removed regardless of what is discovered in the commit history. However, it
-# can't just be _anything_. The name or email still has to match something seen
-# in the commit history, so that we're able to understand that it's the same
-# contributor.
-#
-# The bulk of the work is in running `git log` to fetch commit author names and
-# emails. For each value, we generate a "slug" to use as an internal id for
-# that value, which is mostly just the lowercase of the value with whitespace
-# and punctuation removed. Two values with subtle differences can produce the
-# same slug, so at this point we also try to keep the "best" pre-slug value as
-# the display version. We use this slug to update two maps, one of email->name,
-# the other of name->email.
-#
-# Where possible, we also consider Signed-off-by: trailers in the commit
-# message, and if they match the commit author, enter them into the maps also.
-# Because a commit can contain multiple signoffs, we only track one if either
-# the name or the email address match the commit author (by slug). This is
-# mostly aimed at letting an explicit signoff override a generated name or
-# email on the same commit (usually a Github noreply), while avoiding every
-# signoff ever being treated as a possible canonical ident for some other
-# committer. (Also note that this behaviour only works for signoffs that can be
-# extracted with git-interpret-trailers, which misses many seen in the OpenZFS
-# git history, for various reasons).
-#
-# Once collected, we then walk all the emails we've seen and get all the names
-# associated with every instance. Then for each of those names, we get all the
-# emails associated, and so on until we've seen all the connected names and
-# emails. This collection is every possible name and email for an individual
-# contributor.
-#
-# Finally, we consider these groups, and select the "best" name and email for
-# the contributor, and add them to the author tables if they aren't there
-# already. Once we've done everyone, we write out a new AUTHORS file, and
-# that's the whole job.
-#
-# This is imperfect! It's necessary for the user to examine the diff and make
-# sure it's sensible. If it hasn't hooked up right, it may be necessary to adjust
-# the input data (via .mailmap) or improve the heuristics in this program. It
-# took a long time to get into good shape when first written (355 new names
-# added to AUTHORS!) but hopefully in the future we'll be running this
-# regularly so it doesn't fall so far behind.
-
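
When the code below decides a commit-only address should map to a canonical
one, it prints a suggested .mailmap line to stdout for the user to paste in.
The format (name and addresses invented for illustration) is:

    Jane Developer <jane@example.org> <12345678+janedev@users.noreply.github.com>
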
-
-use 5.010;
-use warnings;
-use strict;
-
-# Storage for the "best looking" version of name or email, keyed on slug.
-my %display_name;
-my %display_email;
-
-# First, we load the existing AUTHORS file. We save everything before
-# CONTRIBUTORS: line as-is so we can write it back out to the new file. Then
-# we extract name,email pairs from the remainder and store them in a pair of
-# hashtables, keyed on slug.
-my %authors_name;
-my %authors_email;
-
-my @authors_header;
-
-for my $line (do { local (@ARGV) = ('AUTHORS'); <> }) {
- chomp $line;
- state $in_header = 1;
- if ($in_header) {
- push @authors_header, $line;
- $in_header = 0 if $line =~ m/^CONTRIBUTORS:/;
- } else {
- my ($name, $email) = $line =~ m/^\s+(.+)(?= <) <([^>]+)/;
- next unless $name;
-
- my $semail = email_slug($email);
- my $sname = name_slug($name);
-
- $authors_name{$semail} = $sname;
- $authors_email{$sname} = $semail;
-
- # The name/email in AUTHORS is already the "best looking"
- # version, by definition.
- $display_name{$sname} = $name;
- $display_email{$semail} = $email;
- }
-}
-
-# Next, we load all the commit authors and signoff pairs, and form name<->email
-# mappings, keyed on slug. Note that this format is getting the
-# .mailmap-converted form. This lets us control the input to some extent by
-# making changes there.
-my %seen_names;
-my %seen_emails;
-
-# The true email address from commits, by slug. We do this so we can generate
-# mailmap entries, which will only match the exact address from the commit,
-# not anything "prettified". This lets us remember the prefix part of Github
-# noreply addresses, while not including it in AUTHORS if that is truly the
-# best option we have.
-my %commit_email;
-
-for my $line (reverse qx(git log --pretty=tformat:'%aN:::%aE:::%(trailers:key=signed-off-by,valueonly,separator=:::)')) {
- chomp $line;
- my ($name, $email, @signoffs) = split ':::', $line;
- next unless $name && $email;
-
- my $semail = email_slug($email);
- my $sname = name_slug($name);
-
- # Track the committer name and email.
- $seen_names{$semail}{$sname} = 1;
- $seen_emails{$sname}{$semail} = 1;
-
- # Keep the original commit address.
- $commit_email{$semail} = $email;
-
- # Consider if these are the best we've ever seen.
- update_display_name($name);
- update_display_email($email);
-
-	# Check signoffs. Any that have a name or email matching the
-	# committer (by slug) are also tracked.
- for my $signoff (@signoffs) {
- my ($soname, $soemail) = $signoff =~ m/^([^<]+)\s+<(.+)>$/;
- next unless $soname && $soemail;
- my $ssoname = name_slug($soname);
- my $ssoemail = email_slug($soemail);
- if (($semail eq $ssoemail) ^ ($sname eq $ssoname)) {
- $seen_names{$ssoemail}{$ssoname} = 1;
- $seen_emails{$ssoname}{$ssoemail} = 1;
- update_display_name($soname);
- update_display_email($soemail);
- }
- }
-}
-
-# Now collect unique committers by all names+emails we've ever seen for them.
-# We start with emails and resolve all possible names, then we resolve the
-# emails for those names, and round and round until there's nothing left.
-my @committers;
-for my $start_email (sort keys %seen_names) {
- # it might have been deleted already through a cross-reference
- next unless $seen_names{$start_email};
-
- my %emails;
- my %names;
-
- my @check_emails = ($start_email);
- my @check_names;
- while (@check_emails || @check_names) {
- while (my $email = shift @check_emails) {
- next if $emails{$email}++;
- push @check_names,
- sort keys %{delete $seen_names{$email}};
- }
- while (my $name = shift @check_names) {
- next if $names{$name}++;
- push @check_emails,
- sort keys %{delete $seen_emails{$name}};
- }
- }
-
- # A "committer" is the collection of connected names and emails.
- push @committers, [[sort keys %emails], [sort keys %names]];
-}
-
-# Now we have our committers, we can work out what to add to AUTHORS.
-for my $committer (@committers) {
- my ($emails, $names) = @$committer;
-
-	# If this committer is already in AUTHORS, we must not touch it.
- next if grep { $authors_name{$_} } @$emails;
- next if grep { $authors_email{$_} } @$names;
-
- # Decide on the "best" name and email to use
- my $email = best_email(@$emails);
- my $name = best_name(@$names);
-
- $authors_email{$name} = $email;
- $authors_name{$email} = $name;
-
- # We've now selected our canonical name going forward. If there
- # were other options from commit authors only (not signoffs),
-	# emit mailmap lines for the user to paste into .mailmap
- my $cemail = $display_email{email_slug($authors_email{$name})};
- for my $alias (@$emails) {
- next if $alias eq $email;
-
- my $calias = $commit_email{$alias};
- next unless $calias;
-
- my $cname = $display_name{$name};
- say "$cname <$cemail> <$calias>";
- }
-}
-
-# Now output the new AUTHORS file
-open my $fh, '>', 'AUTHORS' or die "E: couldn't open AUTHORS for write: $!\n";
-say $fh join("\n", @authors_header, "");
-for my $name (sort keys %authors_email) {
- my $cname = $display_name{$name};
- my $cemail = $display_email{email_slug($authors_email{$name})};
- say $fh " $cname <$cemail>";
-}
-
-exit 0;
-
-# "Slugs" are used as the hashtable key for names and emails. They are used to
-# make two variants of a value count as the "same" for matching. Mostly this is
-# to make upper and lower-case versions of a name or email compare the same,
-# but we do a little bit of munging to handle some common cases.
-#
-# Note that these are only used for matching internally; for display, the
-# slug will be used to look up the display form.
-sub name_slug {
- my ($name) = @_;
-
- # Remove spaces and dots, to handle differences in initials.
- $name =~ s/[\s\.]//g;
-
- return lc $name;
-}
-sub email_slug {
- my ($email) = @_;
-
- # Remove everything up to and including the first space, and the last
- # space and everything after it.
- $email =~ s/^(.*\s+)|(\s+.*)$//g;
-
- # Remove the leading userid+ on Github noreply addresses. They're
- # optional and we want to treat them as the same thing.
- $email =~ s/^[^\+]*\+//g if $email =~ m/\.noreply\.github\.com$/;
-
- return lc $email;
-}
-
-# As we accumulate new names and addresses, record the "best looking" version
-# of each. Once we decide to add a committer to AUTHORS, we'll take the best
-# version of their name and address from here.
-#
-# Note that we don't record them if they're already in AUTHORS (that is, in
-# %authors_name or %authors_email) because that file already contains the
-# "best" version, by definition. So we return immediately if we've seen it
-# there already.
-sub update_display_name {
- my ($name) = @_;
- my $sname = name_slug($name);
- return if $authors_email{$sname};
-
- # For names, "more specific" means "has more non-lower-case characters"
- # (in ASCII), guessing that if a person has gone to some effort to
- # specialise their name in a later commit, they presumably care more
- # about it. If this is wrong, its probably better to add a .mailmap
-	# about it. If this is wrong, it's probably better to add a .mailmap
-
- my $cname = $display_name{$sname};
- if (!$cname ||
- ($name =~ tr/a-z //) < ($cname =~ tr/a-z //)) {
- $display_name{$sname} = $name;
- }
-}
-sub update_display_email {
- my ($email) = @_;
- my $semail = email_slug($email);
- return if $authors_name{$semail};
-
- # Like names, we prefer uppercase when possible. We also remove any
- # leading "plus address" for Github noreply addresses.
-
- $email =~ s/^[^\+]*\+//g if $email =~ m/\.noreply\.github\.com$/;
-
- my $cemail = $display_email{$semail};
- if (!$cemail ||
- ($email =~ tr/a-z //) < ($cemail =~ tr/a-z //)) {
- $display_email{$semail} = $email;
- }
-}
-
-sub best_name {
- my @names = sort {
- my $cmp;
- my ($aa) = $display_name{$a};
- my ($bb) = $display_name{$b};
-
- # The "best" name is very subjective, and a simple sort
- # produced good-enough results, so I didn't try harder. Use of
- # accented characters, punctuation and caps are probably an
- # indicator of "better", but possibly we should also take into
- # account the most recent name we saw, in case the committer
- # has changed their name or nickname or similar.
- #
- # Really, .mailmap is the place to control this.
-
- return ($aa cmp $bb);
- } @_;
-
- return shift @names;
-}
-sub best_email {
- state $internal_re = qr/\.(?:internal|local|\(none\))$/;
- state $noreply_re = qr/\.noreply\.github\.com$/;
- state $freemail_re = qr/\@(?:gmail|hotmail)\.com$/;
-
- my @emails = sort {
- my $cmp;
-
- # prefer address with a single @ over those without
- $cmp = (($b =~ tr/@//) == 1) <=> (($a =~ tr/@//) == 1);
- return $cmp unless $cmp == 0;
-
- # prefer any address over internal/local addresses
- $cmp = (($a =~ $internal_re) <=> ($b =~ $internal_re));
- return $cmp unless $cmp == 0;
-
- # prefer any address over github noreply aliases
- $cmp = (($a =~ $noreply_re) <=> ($b =~ $noreply_re));
- return $cmp unless $cmp == 0;
-
- # prefer any address over freemail providers
- $cmp = (($a =~ $freemail_re) <=> ($b =~ $freemail_re));
- return $cmp unless $cmp == 0;
-
- # alphabetical by domain
- my ($alocal, $adom) = split /\@/, $a;
- my ($blocal, $bdom) = split /\@/, $b;
- $cmp = ($adom cmp $bdom);
- return $cmp unless $cmp == 0;
-
- # alphabetical by local part
- return ($alocal cmp $blocal);
- } @_;
-
- return shift @emails;
-}
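
A minimal usage sketch (run from the top of a git checkout, since AUTHORS is
opened by relative path):

    perl scripts/update_authors.pl
    git diff AUTHORS

Any suggested .mailmap lines land on stdout; the rewritten AUTHORS file should
then be reviewed by hand, as the header comment above stresses.
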
diff --git a/scripts/zfs-helpers.sh b/scripts/zfs-helpers.sh
deleted file mode 100755
index 2e97d40db1c1..000000000000
--- a/scripts/zfs-helpers.sh
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC2154
-#
-# This script is designed to facilitate in-tree development and testing
-# by installing symlinks on your system which refer to in-tree helper
-# utilities. These helper utilities must be installed in order to
-# exercise all ZFS functionality. By using symbolic links and keeping
-# the scripts in-tree during development they can be easily modified
-# and those changes tracked.
-#
-# Use the following configuration option to override the installation
-# paths for these scripts. The correct path is automatically set for
-# most distributions but you can optionally set it for your environment.
-#
-# --with-mounthelperdir=DIR install mount.zfs in dir [/sbin]
-# --with-udevdir=DIR install udev helpers [default=check]
-# --with-udevruledir=DIR install udev rules [default=UDEVDIR/rules.d]
-# --sysconfdir=DIR install zfs configuration files [PREFIX/etc]
-#
-
-BASE_DIR=${0%/*}
-SCRIPT_COMMON=common.sh
-if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
- . "${BASE_DIR}/${SCRIPT_COMMON}"
-else
- echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
-fi
-
-PROG=zfs-helpers.sh
-DRYRUN="no"
-INSTALL="no"
-REMOVE="no"
-VERBOSE="no"
-
-fail() {
- echo "${PROG}: $1" >&2
- exit 1
-}
-
-msg() {
- if [ "$VERBOSE" = "yes" ]; then
- echo "$@"
- fi
-}
-
-usage() {
-cat << EOF
-USAGE:
-$0 [-dhirv]
-
-DESCRIPTION:
- Install/remove the ZFS helper utilities.
-
-OPTIONS:
- -d Dry run
- -h Show this message
- -i Install the helper utilities
- -r Remove the helper utilities
- -v Verbose
-
-$0 -iv
-$0 -r
-
-EOF
-}
-
-while getopts 'hdirv' OPTION; do
- case $OPTION in
- h)
- usage
- exit 1
- ;;
- d)
- DRYRUN="yes"
- ;;
- i)
- INSTALL="yes"
- ;;
- r)
- REMOVE="yes"
- ;;
- v)
- VERBOSE="yes"
- ;;
- ?)
- usage
- exit
- ;;
- *)
- ;;
- esac
-done
-
-if [ "$INSTALL" = "yes" ] && [ "$REMOVE" = "yes" ]; then
- fail "Specify -i or -r but not both"
-fi
-
-if [ "$INSTALL" = "no" ] && [ "$REMOVE" = "no" ]; then
- fail "Either -i or -r must be specified"
-fi
-
-if [ "$(id -u)" != "0" ] && [ "$DRYRUN" = "no" ]; then
- fail "Must run as root"
-fi
-
-if [ "$INTREE" != "yes" ]; then
- fail "Must be run in-tree"
-fi
-
-if [ "$VERBOSE" = "yes" ]; then
- echo "--- Configuration ---"
- echo "udevdir: $INSTALL_UDEV_DIR"
- echo "udevruledir: $INSTALL_UDEV_RULE_DIR"
- echo "mounthelperdir: $INSTALL_MOUNT_HELPER_DIR"
- echo "sysconfdir: $INSTALL_SYSCONF_DIR"
- echo "pythonsitedir: $INSTALL_PYTHON_DIR"
- echo "dryrun: $DRYRUN"
- echo
-fi
-
-install() {
- src=$1
- dst=$2
-
-	# We may have an old symlink pointing to a different ZFS workspace.
- # Remove the old symlink if it doesn't point to our workspace.
- if [ -h "$dst" ] && [ "$(readlink -f """$dst""")" != "$src" ] ; then
- echo "Removing old symlink: $dst -> $(readlink """$dst""")"
- rm "$dst"
- fi
-
- if [ -h "$dst" ]; then
- echo "Symlink exists: $dst"
- elif [ -e "$dst" ]; then
- echo "File exists: $dst"
- elif ! [ -e "$src" ]; then
- echo "Source missing: $src"
- else
- msg "ln -s $src $dst"
-
- if [ "$DRYRUN" = "no" ]; then
- DIR=${dst%/*}
- mkdir -p "$DIR" >/dev/null 2>&1
- ln -s "$src" "$dst"
- fi
- fi
-}
-
-remove() {
- dst=$1
-
- if [ -h "$dst" ]; then
- msg "rm $dst"
- rm "$dst"
- DIR=${dst%/*}
- rmdir "$DIR" >/dev/null 2>&1
- elif [ -e "$dst" ]; then
- echo "Expected symlink: $dst"
- fi
-}
-
-if [ "${INSTALL}" = "yes" ]; then
- for cmd in "mount.zfs" "fsck.zfs"; do
- install "$CMD_DIR/$cmd" "$INSTALL_MOUNT_HELPER_DIR/$cmd"
- done
- for udev in "$UDEV_CMD_DIR/zvol_id" "$UDEV_SCRIPT_DIR/vdev_id"; do
- install "$udev" "$INSTALL_UDEV_DIR/${udev##*/}"
- done
- for rule in "60-zvol.rules" "69-vdev.rules" "90-zfs.rules"; do
- install "$UDEV_RULE_DIR/$rule" "$INSTALL_UDEV_RULE_DIR/$rule"
- done
- install "$ZPOOL_SCRIPT_DIR" "$INSTALL_SYSCONF_DIR/zfs/zpool.d"
- install "$ZPOOL_COMPAT_DIR" "$INSTALL_PKGDATA_DIR/compatibility.d"
- install "$CONTRIB_DIR/pyzfs/libzfs_core" "$INSTALL_PYTHON_DIR/libzfs_core"
- # Ideally we would install these in the configured ${libdir}, which is
-	# by default "/usr/local/lib", and unfortunately not included in the
- # dynamic linker search path.
- install "$LIB_DIR"/libzfs_core.so.?.?.? "/lib/libzfs_core.so"
- install "$LIB_DIR"/libnvpair.so.?.?.? "/lib/libnvpair.so"
- [ "$DRYRUN" = "no" ] && ldconfig
-else
- remove "$INSTALL_MOUNT_HELPER_DIR/mount.zfs"
- remove "$INSTALL_MOUNT_HELPER_DIR/fsck.zfs"
- remove "$INSTALL_UDEV_DIR/zvol_id"
- remove "$INSTALL_UDEV_DIR/vdev_id"
- remove "$INSTALL_UDEV_RULE_DIR/60-zvol.rules"
- remove "$INSTALL_UDEV_RULE_DIR/69-vdev.rules"
- remove "$INSTALL_UDEV_RULE_DIR/90-zfs.rules"
- remove "$INSTALL_SYSCONF_DIR/zfs/zpool.d"
- remove "$INSTALL_PKGDATA_DIR/compatibility.d"
- remove "$INSTALL_PYTHON_DIR/libzfs_core"
- remove "/lib/libzfs_core.so"
- remove "/lib/libnvpair.so"
- ldconfig
-fi
-
-exit 0
diff --git a/scripts/zfs-images b/scripts/zfs-images
deleted file mode 160000
-Subproject 3331601f6dc50ef2c9779c1656218701b48b276
diff --git a/scripts/zfs-tests-color.sh b/scripts/zfs-tests-color.sh
deleted file mode 100755
index 9098abb62d74..000000000000
--- a/scripts/zfs-tests-color.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-# A large mass of sed for coloring zfs-tests.sh output
-# Version 2, thanks to наб.
-# Just pipe zfs-tests.sh output into this, and watch.
-
-exec "$(command -v gsed || echo sed)" \
- -e 's/\] \[PASS\]$/] [\x1b[92mPASS\x1b[0m]/' \
- -e 's/\] \[FAIL\]$/] [\x1b[1;91mFAIL\x1b[0m]/' \
- -e 's/\] \[KILLED\]$/] [\x1b[1;101mKILLED\x1b[0m]/' \
- -e 's/\] \[SKIP\]$/] [\x1b[1mSKIP\x1b[0m]/' \
- -e 's/\] \[RERAN\]$/] [\x1b[1;93mRERAN\x1b[0m]/' \
- -e 's/^\(PASS\W\)/\x1b[92m\1\x1b[0m/' \
- -e 's/^\(FAIL\W\)/\x1b[1;91m\1\x1b[0m/' \
- -e 's/^\(KILLED\W\)/\x1b[1;101m\1\x1b[0m/' \
- -e 's/^\(SKIP\W\)/\x1b[1m\1\x1b[0m/' \
- -e 's/^\(RERAN\W\)/\x1b[1;93m\1\x1b[0m/' \
- -e 's/^Tests with result\(.\+\)PASS\(.\+\)$/Tests with result\1\x1b[92mPASS\x1b[0m\2/' \
- -e 's/^\(\W\+\)\(KILLED\)\(\W\)/\1\x1b[1;101m\2\x1b[0m\3/g' \
- -e 's/^\(\W\+\)\(FAIL\)\(\W\)/\1\x1b[1;91m\2\x1b[0m\3/g' \
- -e 's/^\(\W\+\)\(RERUN\)\(\W\)/\1\x1b[1;93m\2\x1b[0m\3/g' \
- -e 's/^\(\W\+\)\(SKIP\)\(\W\)/\1\x1b[1m\2\x1b[0m\3/g' \
- -e 's/expected \(PASS\))$/expected \x1b[92m\1\x1b[0m)/' \
- -e 's/expected \(KILLED\))$/expected \x1b[1;101m\1\x1b[0m)/' \
- -e 's/expected \(FAIL\))$/expected \x1b[1;91m\1\x1b[0m)/' \
- -e 's/expected \(RERUN\))$/expected \x1b[1;93m\1\x1b[0m)/' \
- -e 's/expected \(SKIP\))$/expected \x1b[1m\1\x1b[0m)/' \
- -e 's/^Test\( ([[:alnum:] ]\+)\)\?: \(.\+\) (run as \(.\+\)) \[\([0-9]\+:[0-9]\+\)\] \[\(.\+\)\]$/\x1b[1mTest\1: \x1b[0m\2 (run as \x1b[1m\3\x1b[0m) [\x1b[1m\4\x1b[0m\] [\5\]/'
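
Intended usage is a straight pipe, per the header comment:

    ./scripts/zfs-tests.sh -v | ./scripts/zfs-tests-color.sh
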
diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh
deleted file mode 100755
index 5a0a1a609448..000000000000
--- a/scripts/zfs-tests.sh
+++ /dev/null
@@ -1,847 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: CDDL-1.0
-# shellcheck disable=SC2154
-# shellcheck disable=SC2292
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License, Version 1.0 only
-# (the "License"). You may not use this file except in compliance
-# with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or https://opensource.org/licenses/CDDL-1.0.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
-#
-
-SCRIPT_COMMON=${SCRIPT_COMMON:-${0%/*}/common.sh}
-. "${SCRIPT_COMMON}" || exit
-
-PROG=zfs-tests.sh
-VERBOSE="no"
-QUIET=""
-DEBUG=""
-CLEANUP="yes"
-CLEANUPALL="no"
-KMSG=""
-TIMEOUT_DEBUG=""
-LOOPBACK="yes"
-STACK_TRACER="no"
-FILESIZE="4G"
-DEFAULT_RUNFILES="common.run,$(uname | tr '[:upper:]' '[:lower:]').run"
-RUNFILES=${RUNFILES:-$DEFAULT_RUNFILES}
-FILEDIR=${FILEDIR:-/var/tmp}
-DISKS=${DISKS:-""}
-SINGLETEST=""
-SINGLETESTUSER="root"
-TAGS=""
-ITERATIONS=1
-ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
-ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
-UNAME=$(uname)
-RERUN=""
-KMEMLEAK=""
-
-# Override some defaults if on FreeBSD
-if [ "$UNAME" = "FreeBSD" ] ; then
- TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DMESG"}
- LOSETUP=/sbin/mdconfig
- DMSETUP=/sbin/gpart
-else
- ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh"
- TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"}
- LOSETUP=${LOSETUP:-/sbin/losetup}
- DMSETUP=${DMSETUP:-/sbin/dmsetup}
-fi
-
-#
-# Log an informational message when additional verbosity is enabled.
-#
-msg() {
- if [ "$VERBOSE" = "yes" ]; then
- echo "$@"
- fi
-}
-
-#
-# Log a failure message, cleanup, and return an error.
-#
-fail() {
- echo "$PROG: $1" >&2
- cleanup
- exit 1
-}
-
-cleanup_freebsd_loopback() {
- for TEST_LOOPBACK in ${LOOPBACKS}; do
- if [ -c "/dev/${TEST_LOOPBACK}" ]; then
- sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" ||
- echo "Failed to destroy: ${TEST_LOOPBACK}"
- fi
- done
-}
-
-cleanup_linux_loopback() {
- for TEST_LOOPBACK in ${LOOPBACKS}; do
- LOOP_DEV="${TEST_LOOPBACK##*/}"
- DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \
- awk -v l="${LOOP_DEV}" '$0 ~ l {print $1}')
-
- if [ -n "$DM_DEV" ]; then
- sudo "${DMSETUP}" remove "${DM_DEV}" ||
- echo "Failed to remove: ${DM_DEV}"
- fi
-
- if [ -n "${TEST_LOOPBACK}" ]; then
- sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" ||
- echo "Failed to remove: ${TEST_LOOPBACK}"
- fi
- done
-}
-
-#
-# Attempt to remove loopback devices and files which were created earlier
-# by this script to run the test framework. The '-k' option may be passed
-# to the script to suppress cleanup for debugging purposes.
-#
-cleanup() {
- if [ "$CLEANUP" = "no" ]; then
- return 0
- fi
-
-
- if [ "$LOOPBACK" = "yes" ]; then
- if [ "$UNAME" = "FreeBSD" ] ; then
- cleanup_freebsd_loopback
- else
- cleanup_linux_loopback
- fi
- fi
-
- # shellcheck disable=SC2086
- rm -f ${FILES} >/dev/null 2>&1
-
- if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then
- rm -Rf "$STF_PATH"
- fi
-}
-trap cleanup EXIT
-
-#
-# Attempt to remove all testpools (testpool.XXX), unopened dm devices,
-# loopback devices, and files. This is a useful way to cleanup a previous
-# test run failure which has left the system in an unknown state. This can
-# be dangerous and should only be used in a dedicated test environment.
-#
-cleanup_all() {
- TEST_POOLS=$(ASAN_OPTIONS=detect_leaks=false "$ZPOOL" list -Ho name | grep testpool)
- if [ "$UNAME" = "FreeBSD" ] ; then
- TEST_LOOPBACKS=$(sudo "${LOSETUP}" -l)
- else
- TEST_LOOPBACKS=$("${LOSETUP}" -a | awk -F: '/file-vdev/ {print $1}')
- fi
- TEST_FILES=$(ls "${FILEDIR}"/file-vdev* 2>/dev/null)
-
- msg
- msg "--- Cleanup ---"
- # shellcheck disable=2116,2086
- msg "Removing pool(s): $(echo ${TEST_POOLS})"
- for TEST_POOL in $TEST_POOLS; do
- sudo env ASAN_OPTIONS=detect_leaks=false "$ZPOOL" destroy "${TEST_POOL}"
- done
-
- if [ "$UNAME" != "FreeBSD" ] ; then
- msg "Removing all dm(s): $(sudo "${DMSETUP}" ls |
- grep loop | tr '\n' ' ')"
- sudo "${DMSETUP}" remove_all
- fi
-
- # shellcheck disable=2116,2086
- msg "Removing loopback(s): $(echo ${TEST_LOOPBACKS})"
- for TEST_LOOPBACK in $TEST_LOOPBACKS; do
- if [ "$UNAME" = "FreeBSD" ] ; then
- sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}"
- else
- sudo "${LOSETUP}" -d "${TEST_LOOPBACK}"
- fi
- done
-
- # shellcheck disable=2116,2086
- msg "Removing files(s): $(echo ${TEST_FILES})"
- # shellcheck disable=2086
- sudo rm -f ${TEST_FILES}
-}
-
-#
-# Takes a name as its only argument and looks for the following variations
-# on that name. If one is found it is returned.
-#
-# $RUNFILE_DIR/<name>
-# $RUNFILE_DIR/<name>.run
-# <name>
-# <name>.run
-#
-find_runfile() {
- NAME=$1
-
- if [ -f "$RUNFILE_DIR/$NAME" ]; then
- echo "$RUNFILE_DIR/$NAME"
- elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then
- echo "$RUNFILE_DIR/$NAME.run"
- elif [ -f "$NAME" ]; then
- echo "$NAME"
- elif [ -f "$NAME.run" ]; then
- echo "$NAME.run"
- else
- return 1
- fi
-}
-
-# Given TAGS in a format like "1/3" or "2/3", divide up the test list
-# into portions and print that portion.  So "1/3" means "the first third of
-# the test tags".
-#
-#
-split_tags() {
- # Get numerator and denominator
- NUM=$(echo "$TAGS" | cut -d/ -f1)
- DEN=$(echo "$TAGS" | cut -d/ -f2)
- # At the point this is called, RUNFILES will contain a comma separated
- # list of full paths to the runfiles, like:
- #
- # "/home/hutter/qemu/tests/runfiles/common.run,/home/hutter/qemu/tests/runfiles/linux.run"
- #
- # So to get tags for our selected tests we do:
- #
- # 1. Remove unneeded chars: [],\
- # 2. Print out the last field of each tag line. This will be the tag
- # for the test (like 'zpool_add').
- # 3. Remove duplicates between the runfiles. If the same tag is defined
- # in multiple runfiles, then when you do '-T <tag>' ZTS is smart
- # enough to know to run the tag in each runfile. So '-T zpool_add'
- # will run the zpool_add from common.run and linux.run.
- # 4. Ignore the 'functional' tag since we only want individual tests
-	# 5. Print out the tests in our fraction of all tests. This uses modulus
- # so "1/3" will run tests 1,3,6,9 etc. That way the tests are
- # interleaved so, say, "3/4" isn't running all the zpool_* tests that
- # appear alphabetically at the end.
- # 6. Remove trailing comma from list
- #
- # TAGS will then look like:
- #
- # "append,atime,bootfs,cachefile,checksum,cp_files,deadman,dos_attributes, ..."
-
- # Change the comma to a space for easy processing
- _RUNFILES=${RUNFILES//","/" "}
- # shellcheck disable=SC2002,SC2086
- cat $_RUNFILES | tr -d "[],\'" | awk '/tags = /{print $NF}' | sort | \
- uniq | grep -v functional | \
- awk -v num="$NUM" -v den="$DEN" '{ if(NR % den == (num - 1)) {printf "%s,",$0}}' | \
- sed -E 's/,$//'
-}
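
A sketch of how this fraction syntax (the -T option described further down) is
typically consumed, with the three-way split chosen purely for illustration;
together the invocations cover every test tag exactly once:

    ./scripts/zfs-tests.sh -T 1/3    # first runner
    ./scripts/zfs-tests.sh -T 2/3    # second runner
    ./scripts/zfs-tests.sh -T 3/3    # third runner
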
-
-#
-# Symlink file if it appears under any of the given paths.
-#
-create_links() {
- dir_list="$1"
- file_list="$2"
-
- [ -n "$STF_PATH" ] || fail "STF_PATH wasn't correctly set"
-
- for i in $file_list; do
- for j in $dir_list; do
- [ ! -e "$STF_PATH/$i" ] || continue
-
- if [ ! -d "$j/$i" ] && [ -e "$j/$i" ]; then
- ln -sf "$j/$i" "$STF_PATH/$i" || \
- fail "Couldn't link $i"
- break
- fi
- done
-
- [ ! -e "$STF_PATH/$i" ] && \
- STF_MISSING_BIN="$STF_MISSING_BIN $i"
- done
- STF_MISSING_BIN=${STF_MISSING_BIN# }
-}
-
-#
-# Constrain the path to limit the available binaries to a known set.
-# When running in-tree a top level ./bin/ directory is created for
-# convenience, otherwise a temporary directory is used.
-#
-constrain_path() {
- . "$STF_SUITE/include/commands.cfg"
-
- # On FreeBSD, base system zfs utils are in /sbin and OpenZFS utils
- # install to /usr/local/sbin. To avoid testing the wrong utils we
- # need /usr/local to come before / in the path search order.
- SYSTEM_DIRS="/usr/local/bin /usr/local/sbin"
- SYSTEM_DIRS="$SYSTEM_DIRS /usr/bin /usr/sbin /bin /sbin $LIBEXEC_DIR"
-
- if [ "$INTREE" = "yes" ]; then
- # Constrained path set to $(top_builddir)/tests/zfs-tests/bin
- STF_PATH="$BIN_DIR"
- STF_PATH_REMOVE="no"
- STF_MISSING_BIN=""
- if [ ! -d "$STF_PATH" ]; then
- mkdir "$STF_PATH"
- chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
- fi
-
- # Special case links for standard zfs utilities
- create_links "$CMD_DIR" "$ZFS_FILES"
-
- # Special case links for zfs test suite utilities
- create_links "$CMD_DIR/tests/zfs-tests/cmd" "$ZFSTEST_FILES"
- else
- # Constrained path set to $FILEDIR/constrained_path.*
- SYSTEMDIR=${SYSTEMDIR:-$FILEDIR/constrained_path.XXXXXX}
- STF_PATH=$(mktemp -d "$SYSTEMDIR")
- STF_PATH_REMOVE="yes"
- STF_MISSING_BIN=""
-
- chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
-
- # Special case links for standard zfs utilities
- create_links "$SYSTEM_DIRS" "$ZFS_FILES"
-
- # Special case links for zfs test suite utilities
- create_links "$STF_SUITE/bin" "$ZFSTEST_FILES"
- fi
-
- # Standard system utilities
- SYSTEM_FILES="$SYSTEM_FILES_COMMON"
- if [ "$UNAME" = "FreeBSD" ] ; then
- SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_FREEBSD"
- else
- SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_LINUX"
- fi
- create_links "$SYSTEM_DIRS" "$SYSTEM_FILES"
-
- # Exceptions
- if [ "$UNAME" = "Linux" ] ; then
- ln -fs /sbin/fsck.ext4 "$STF_PATH/fsck"
- ln -fs /sbin/mkfs.ext4 "$STF_PATH/newfs"
- ln -fs "$STF_PATH/gzip" "$STF_PATH/compress"
- ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress"
- elif [ "$UNAME" = "FreeBSD" ] ; then
- ln -fs /usr/local/bin/ksh93 "$STF_PATH/ksh"
- fi
-}
-
-#
-# Output a useful usage message.
-#
-usage() {
-cat << EOF
-USAGE:
-$0 [-hvqxkKfScRmDO] [-n NFSFILE] [-d DIR] [-s SIZE] [-r RUNFILES] [-t PATH|NAME] [-T TAGS] [-u USER] [-I NUM]
-
-DESCRIPTION:
- ZFS Test Suite launch script
-
-OPTIONS:
- -h Show this message
- -v Verbose zfs-tests.sh output
- -q Quiet test-runner output
- -D Debug; show all test output immediately (noisy)
- -x Remove all testpools, dm, lo, and files (unsafe)
- -k Disable cleanup after test failure
- -K Log test names to /dev/kmsg
- -f Use files only, disables block device tests
- -O Dump debugging info to /dev/kmsg on test timeout
- -S Enable stack tracer (negative performance impact)
- -c Only create and populate constrained path
- -R Automatically rerun failing tests
- -m Enable kmemleak reporting (Linux only)
- -n NFSFILE Use the nfsfile to determine the NFS configuration
- -I NUM Number of iterations
- -d DIR Use world-writable DIR for files and loopback devices
- -s SIZE Use vdevs of SIZE (default: 4G)
- -r RUNFILES Run tests in RUNFILES (default: ${DEFAULT_RUNFILES})
- -t PATH|NAME Run single test at PATH relative to test suite,
- or search for test by NAME
-	-T TAGS      Comma-separated list of tags (default: 'functional')
-	             Alternatively, specify a fraction like "1/3" or "2/3" to
-	             run the first or second third of the tests. This is
-	             useful for splitting the test run across different runners.
- -u USER Run single test as USER (default: root)
-
-EXAMPLES:
-# Run the default ${DEFAULT_RUNFILES//\.run/} suite of tests and output the configuration used.
-$0 -v
-
-# Run a smaller suite of tests designed to run more quickly.
-$0 -r linux-fast
-
-# Run a single test
-$0 -t tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh
-
-# Run a single test by name
-$0 -t zfs_bookmark_cliargs
-
-# Cleanup a previous run of the test suite prior to testing, run the
-# default ${DEFAULT_RUNFILES//\.run/} suite of tests and perform no cleanup on exit.
-$0 -x
-
-EOF
-}
-
-while getopts 'hvqxkKfScRmOn:d:Ds:r:?t:T:u:I:' OPTION; do
- case $OPTION in
- h)
- usage
- exit 1
- ;;
- v)
- VERBOSE="yes"
- ;;
- q)
- QUIET="yes"
- ;;
- x)
- CLEANUPALL="yes"
- ;;
- k)
- CLEANUP="no"
- ;;
- K)
- KMSG="yes"
- ;;
- f)
- LOOPBACK="no"
- ;;
- S)
- STACK_TRACER="yes"
- ;;
- c)
- constrain_path
- exit
- ;;
- R)
- RERUN="yes"
- ;;
- m)
- KMEMLEAK="yes"
- ;;
- n)
- nfsfile=$OPTARG
- [ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile"
- export NFS=1
- . "$nfsfile"
- ;;
- O)
- TIMEOUT_DEBUG="yes"
- ;;
- d)
- FILEDIR="$OPTARG"
- ;;
- D)
- DEBUG="yes"
- ;;
- I)
- ITERATIONS="$OPTARG"
- if [ "$ITERATIONS" -le 0 ]; then
- fail "Iterations must be greater than 0."
- fi
- ;;
- s)
- FILESIZE="$OPTARG"
- ;;
- r)
- RUNFILES="$OPTARG"
- ;;
- t)
- if [ -n "$SINGLETEST" ]; then
- fail "-t can only be provided once."
- fi
- SINGLETEST="$OPTARG"
- ;;
- T)
- TAGS="$OPTARG"
- ;;
- u)
- SINGLETESTUSER="$OPTARG"
- ;;
- ?)
- usage
- exit
- ;;
- *)
- ;;
- esac
-done
-
-shift $((OPTIND-1))
-
-FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
-LOOPBACKS=${LOOPBACKS:-""}
-
-if [ -n "$SINGLETEST" ]; then
- if [ -n "$TAGS" ]; then
- fail "-t and -T are mutually exclusive."
- fi
- RUNFILE_DIR="$FILEDIR"
- RUNFILES="zfs-tests.$$.run"
- [ -n "$QUIET" ] && SINGLEQUIET="True" || SINGLEQUIET="False"
-
- cat >"${RUNFILE_DIR}/${RUNFILES}" << EOF
-[DEFAULT]
-pre =
-quiet = $SINGLEQUIET
-pre_user = root
-user = $SINGLETESTUSER
-timeout = 600
-post_user = root
-post =
-EOF
- if [ "$SINGLETEST" = "${SINGLETEST%/*}" ] ; then
- NEWSINGLETEST=$(find "$STF_SUITE" -name "$SINGLETEST*" -print -quit)
- if [ -z "$NEWSINGLETEST" ] ; then
- fail "couldn't find test matching '$SINGLETEST'"
- fi
- SINGLETEST=$NEWSINGLETEST
- fi
-
- SINGLETESTDIR="${SINGLETEST%/*}"
- SETUPDIR="$SINGLETESTDIR"
- [ "${SETUPDIR#/}" = "$SETUPDIR" ] && SETUPDIR="$STF_SUITE/$SINGLETESTDIR"
- [ -x "$SETUPDIR/setup.ksh" ] && SETUPSCRIPT="setup" || SETUPSCRIPT=
- [ -x "$SETUPDIR/cleanup.ksh" ] && CLEANUPSCRIPT="cleanup" || CLEANUPSCRIPT=
-
- SINGLETESTFILE="${SINGLETEST##*/}"
- cat >>"${RUNFILE_DIR}/${RUNFILES}" << EOF
-
-[$SINGLETESTDIR]
-tests = ['$SINGLETESTFILE']
-pre = $SETUPSCRIPT
-post = $CLEANUPSCRIPT
-tags = ['functional']
-EOF
-fi
-
-#
-# Use default tag if none was specified
-#
-TAGS=${TAGS:='functional'}
-
-
-
-#
-# Attempt to locate the runfiles describing the test workload.
-#
-R=""
-IFS=,
-for RUNFILE in $RUNFILES; do
- if [ -n "$RUNFILE" ]; then
- SAVED_RUNFILE="$RUNFILE"
- RUNFILE=$(find_runfile "$RUNFILE") ||
- fail "Cannot find runfile: $SAVED_RUNFILE"
- R="$R,$RUNFILE"
- fi
-
- if [ ! -r "$RUNFILE" ]; then
- fail "Cannot read runfile: $RUNFILE"
- fi
-done
-unset IFS
-RUNFILES=${R#,}
-
-# The tag can be a fraction to indicate which portion of ZTS to run, like:
-#
-# "1/3":  Run the first third of all tests in the runfiles
-# "2/3":  Run the second third of all tests in the runfiles
-# "6/10": Run the sixth tenth of all tests in the runfiles
-#
-# This is useful for splitting the test run across multiple runners.
-#
-# After this code block, TAGS will be transformed from something like
-# "1/3" to a comma-separated tag list, like:
-#
-# "append,atime,bootfs,cachefile,checksum,cp_files,deadman,dos_attributes, ..."
-#
-if echo "$TAGS" | grep -Eq '^[0-9]+/[0-9]+$' ; then
- TAGS=$(split_tags)
-fi
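As a side note, the modulus slicing that split_tags() performs can be sketched in isolation. This is a hypothetical, standalone illustration (the test01..test10 names are made up, not part of the suite); selecting the "2/3" slice of a ten-item list keeps items 1, 4, 7 and 10, mirroring the awk expression used above:

    # Hypothetical illustration of the "num/den" selection used by split_tags():
    # with num=2 and den=3 this prints test01, test04, test07, and test10.
    printf '%s\n' test01 test02 test03 test04 test05 \
        test06 test07 test08 test09 test10 | \
        awk -v num=2 -v den=3 '{ if (NR % den == (num - 1)) print }'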
-
-#
-# This script should not be run as root. Instead the test user, which may
-# be a normal user account, needs to be configured such that it can
-# run commands via sudo passwordlessly.
-#
-if [ "$(id -u)" = "0" ]; then
- fail "This script must not be run as root."
-fi
-
-if [ "$(sudo id -un)" != "root" ]; then
- fail "Passwordless sudo access required."
-fi
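Since the suite drives all privileged operations through sudo, the test account needs a passwordless sudo policy to pass the check above. A minimal sketch of such an entry follows; the account name 'ztest' and the drop-in path are assumptions, not something the script creates:

    # /etc/sudoers.d/zfs-tests (hypothetical): grant the 'ztest' account
    # passwordless sudo, as required by the check above.
    ztest ALL=(ALL) NOPASSWD: ALL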
-
-#
-# Constrain the available binaries to a known set.
-#
-constrain_path
-
-#
-# Check if ksh exists
-#
-if [ "$UNAME" = "FreeBSD" ]; then
- sudo ln -fs /usr/local/bin/ksh93 /bin/ksh
-fi
-[ -e "$STF_PATH/ksh" ] || fail "This test suite requires ksh."
-[ -e "$STF_SUITE/include/default.cfg" ] || fail \
- "Missing $STF_SUITE/include/default.cfg file."
-
-#
-# Verify the ZFS module stack is loaded.
-#
-if [ "$STACK_TRACER" = "yes" ]; then
- sudo "${ZFS_SH}" -S >/dev/null 2>&1
-else
- sudo "${ZFS_SH}" >/dev/null 2>&1
-fi
-
-#
-# Attempt to cleanup all previous state for a new test run.
-#
-if [ "$CLEANUPALL" = "yes" ]; then
- cleanup_all
-fi
-
-#
-# By default preserve any existing pools
-#
-if [ -z "${KEEP}" ]; then
- KEEP="$(ASAN_OPTIONS=detect_leaks=false "$ZPOOL" list -Ho name | tr -s '[:space:]' ' ')"
- if [ -z "${KEEP}" ]; then
- KEEP="rpool"
- fi
-else
- KEEP="$(echo "$KEEP" | tr -s '[:space:]' ' ')"
-fi
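For reference, a pool can be protected explicitly by setting KEEP before launching the script; a minimal sketch, assuming a pool named 'tank' exists:

    # Hypothetical invocation: preserve only the pool named 'tank'
    # (by default every pool that exists at start-up is preserved).
    KEEP="tank" ./scripts/zfs-tests.sh -v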
-
-#
-# NOTE: The following environment variables are undocumented
-# and should be used for testing purposes only:
-#
-# __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists
-# __ZFS_POOL_RESTRICT - iterate only over the pools it lists
-#
-# See libzfs/libzfs_config.c for more information.
-#
-__ZFS_POOL_EXCLUDE="$KEEP"
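As a loosely held example of the counterpart variable mentioned in the comment above (hedged, since both variables are undocumented), restricting iteration to specific pools would look like the line below; the pool names are placeholders:

    # Hypothetical: limit libzfs pool iteration to the listed pools only.
    export __ZFS_POOL_RESTRICT="testpool1 testpool2"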
-
-. "$STF_SUITE/include/default.cfg"
-
-#
-# No DISKS have been provided, so basic file or loopback based devices
-# must be created for the test suite to use.
-#
-if [ -z "${DISKS}" ]; then
- #
- # If this is a performance run, prevent accidental use of
- # loopback devices.
- #
- [ "$TAGS" = "perf" ] && fail "Running perf tests without disks."
-
- #
- # Create sparse files for the test suite. These may be used
-	# directly or have loopback devices layered on them.
- #
- for TEST_FILE in ${FILES}; do
- [ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}"
- truncate -s "${FILESIZE}" "${TEST_FILE}" ||
- fail "Failed creating: ${TEST_FILE} ($?)"
- done
-
- #
-	# If requested, set up loopback devices backed by the sparse files.
- #
- if [ "$LOOPBACK" = "yes" ]; then
- test -x "$LOSETUP" || fail "$LOSETUP utility must be installed"
-
- for TEST_FILE in ${FILES}; do
- if [ "$UNAME" = "FreeBSD" ] ; then
- MDDEVICE=$(sudo "${LOSETUP}" -a -t vnode -f "${TEST_FILE}")
- if [ -z "$MDDEVICE" ] ; then
- fail "Failed: ${TEST_FILE} -> loopback"
- fi
- DISKS="$DISKS $MDDEVICE"
- LOOPBACKS="$LOOPBACKS $MDDEVICE"
- else
- TEST_LOOPBACK=$(sudo "${LOSETUP}" --show -f "${TEST_FILE}") ||
- fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}"
- BASELOOPBACK="${TEST_LOOPBACK##*/}"
- DISKS="$DISKS $BASELOOPBACK"
- LOOPBACKS="$LOOPBACKS $TEST_LOOPBACK"
- fi
- done
- DISKS=${DISKS# }
- LOOPBACKS=${LOOPBACKS# }
- else
- DISKS="$FILES"
- fi
-fi
-
-#
-# It may be desirable to test with fewer disks than the default when running
-# the performance tests, but the functional tests require at least three.
-#
-NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}')
-if [ "$TAGS" != "perf" ]; then
- [ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)"
-fi
-
-#
-# Disable SELinux until the ZFS Test Suite has been updated accordingly.
-#
-if command -v setenforce >/dev/null; then
- sudo setenforce permissive >/dev/null 2>&1
-fi
-
-#
-# Enable internal ZFS debug log and clear it.
-#
-if [ -e /sys/module/zfs/parameters/zfs_dbgmsg_enable ]; then
- sudo sh -c "echo 1 >/sys/module/zfs/parameters/zfs_dbgmsg_enable"
- sudo sh -c "echo 0 >/proc/spl/kstat/zfs/dbgmsg"
-fi
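For reference, the debug log enabled above can be inspected after a run; a minimal sketch (Linux only, assuming the zfs module is still loaded):

    # Hypothetical: dump the internal ZFS debug log collected during the run.
    sudo cat /proc/spl/kstat/zfs/dbgmsg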
-
-#
-# Set TMPDIR. Some tests run mktemp, and we want those files confined to
-# the work dir just like any others.
-#
-export TMPDIR="$FILEDIR"
-
-msg
-msg "--- Configuration ---"
-msg "Runfiles: $RUNFILES"
-msg "STF_TOOLS: $STF_TOOLS"
-msg "STF_SUITE: $STF_SUITE"
-msg "STF_PATH: $STF_PATH"
-msg "FILEDIR: $FILEDIR"
-msg "TMPDIR: $TMPDIR"
-msg "FILES: $FILES"
-msg "LOOPBACKS: $LOOPBACKS"
-msg "DISKS: $DISKS"
-msg "NUM_DISKS: $NUM_DISKS"
-msg "FILESIZE: $FILESIZE"
-msg "ITERATIONS: $ITERATIONS"
-msg "TAGS: $TAGS"
-msg "STACK_TRACER: $STACK_TRACER"
-msg "Keep pool(s): $KEEP"
-msg "Missing util(s): $STF_MISSING_BIN"
-msg ""
-
-export STF_TOOLS
-export STF_SUITE
-export STF_PATH
-export DISKS
-export FILEDIR
-export KEEP
-export __ZFS_POOL_EXCLUDE
-export TESTFAIL_CALLBACKS
-
-mktemp_file() {
- if [ "$UNAME" = "FreeBSD" ]; then
- mktemp -u "${FILEDIR}/$1.XXXXXX"
- else
- mktemp -ut "$1.XXXXXX" -p "$FILEDIR"
- fi
-}
-mkdir -p "$FILEDIR" || :
-RESULTS_FILE=$(mktemp_file zts-results)
-REPORT_FILE=$(mktemp_file zts-report)
-
-#
-# Run all the tests as specified.
-#
-msg "${TEST_RUNNER}" \
- "${QUIET:+-q}" \
- "${DEBUG:+-D}" \
- "${KMEMLEAK:+-m}" \
- "${KMSG:+-K}" \
- "${TIMEOUT_DEBUG:+-O}" \
- "-c \"${RUNFILES}\"" \
- "-T \"${TAGS}\"" \
- "-i \"${STF_SUITE}\"" \
- "-I \"${ITERATIONS}\""
-{ PATH=$STF_PATH \
- ${TEST_RUNNER} \
- ${QUIET:+-q} \
- ${DEBUG:+-D} \
- ${KMEMLEAK:+-m} \
- ${KMSG:+-K} \
- ${TIMEOUT_DEBUG:+-O} \
- -c "${RUNFILES}" \
- -T "${TAGS}" \
- -i "${STF_SUITE}" \
- -I "${ITERATIONS}" \
- 2>&1; echo $? >"$REPORT_FILE"; } | tee "$RESULTS_FILE"
-read -r RUNRESULT <"$REPORT_FILE"
-
-#
-# Analyze the results.
-#
-${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE"
-RESULT=$?
-
-if [ "$RESULT" -eq "2" ] && [ -n "$RERUN" ]; then
- MAYBES="$($ZTS_REPORT --list-maybes)"
- TEMP_RESULTS_FILE=$(mktemp_file zts-results-tmp)
- TEST_LIST=$(mktemp_file test-list)
- grep "^Test:.*\[FAIL\]" "$RESULTS_FILE" >"$TEMP_RESULTS_FILE"
- for test_name in $MAYBES; do
- grep "$test_name " "$TEMP_RESULTS_FILE" >>"$TEST_LIST"
- done
- { PATH=$STF_PATH \
- ${TEST_RUNNER} \
- ${QUIET:+-q} \
- ${DEBUG:+-D} \
- ${KMEMLEAK:+-m} \
- -c "${RUNFILES}" \
- -T "${TAGS}" \
- -i "${STF_SUITE}" \
- -I "${ITERATIONS}" \
- -l "${TEST_LIST}" \
- 2>&1; echo $? >"$REPORT_FILE"; } | tee "$RESULTS_FILE"
- read -r RUNRESULT <"$REPORT_FILE"
- #
- # Analyze the results.
- #
- ${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE"
- RESULT=$?
-fi
-
-
-cat "$REPORT_FILE"
-
-RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
-if [ -d "$RESULTS_DIR" ]; then
- cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results"
-fi
-
-rm -f "$RESULTS_FILE" "$REPORT_FILE" "$TEST_LIST" "$TEMP_RESULTS_FILE"
-
-if [ -n "$SINGLETEST" ]; then
- rm -f "$RUNFILES" >/dev/null 2>&1
-fi
-
-[ "$RUNRESULT" -gt 3 ] && exit "$RUNRESULT" || exit "$RESULT"
diff --git a/scripts/zfs.sh b/scripts/zfs.sh
deleted file mode 100755
index 502c5430ab05..000000000000
--- a/scripts/zfs.sh
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/bin/sh
-#
-# A simple script to load/unload the ZFS module stack.
-#
-
-BASE_DIR=${0%/*}
-SCRIPT_COMMON=common.sh
-if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
- . "${BASE_DIR}/${SCRIPT_COMMON}"
-else
- echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
-fi
-
-VERBOSE="no"
-UNLOAD="no"
-LOAD="yes"
-STACK_TRACER="no"
-
-ZED_PIDFILE=${ZED_PIDFILE:-/var/run/zed.pid}
-LDMOD=${LDMOD:-/sbin/modprobe}
-DELMOD=${DELMOD:-/sbin/rmmod}
-
-KMOD_ZLIB_DEFLATE=${KMOD_ZLIB_DEFLATE:-zlib_deflate}
-KMOD_ZLIB_INFLATE=${KMOD_ZLIB_INFLATE:-zlib_inflate}
-KMOD_SPL=${KMOD_SPL:-spl}
-KMOD_ZFS=${KMOD_ZFS:-zfs}
-KMOD_FREEBSD=${KMOD_FREEBSD:-openzfs}
-
-
-usage() {
- cat << EOF
-USAGE:
-$0 [-hvruS]
-
-DESCRIPTION:
- Load/unload the ZFS module stack.
-
-OPTIONS:
- -h Show this message
- -v Verbose
- -r Reload modules
- -u Unload modules
- -S Enable kernel stack tracer
-EOF
- exit 1
-}
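A couple of hedged example invocations, assuming the script is run as root from the top of a built source tree:

    sudo ./scripts/zfs.sh        # load the ZFS module stack
    sudo ./scripts/zfs.sh -u     # unload it again
    sudo ./scripts/zfs.sh -r -v  # reload it verbosely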
-
-while getopts 'hvruS' OPTION; do
- case $OPTION in
- v)
- VERBOSE="yes"
- ;;
- r)
- UNLOAD="yes"
- LOAD="yes"
- ;;
- u)
- UNLOAD="yes"
- LOAD="no"
- ;;
- S)
- STACK_TRACER="yes"
- ;;
- *)
- usage
- ;;
- esac
-done
-shift $(( OPTIND - 1 ))
-[ $# -eq 0 ] || usage
-
-kill_zed() {
- if [ -f "$ZED_PIDFILE" ]; then
- read -r PID <"$ZED_PIDFILE"
- kill "$PID"
- fi
-}
-
-check_modules_linux() {
- LOADED_MODULES=""
- MISSING_MODULES=""
-
- for KMOD in $KMOD_SPL $KMOD_ZFS; do
- NAME="${KMOD##*/}"
- NAME="${NAME%.ko}"
-
- if lsmod | grep -E -q "^${NAME}"; then
- LOADED_MODULES="$LOADED_MODULES\t$NAME\n"
- fi
-
- if ! modinfo "$KMOD" >/dev/null 2>&1; then
- MISSING_MODULES="$MISSING_MODULES\t${KMOD}\n"
- fi
- done
-
- if [ -n "$LOADED_MODULES" ]; then
- printf "Unload the kernel modules by running '%s -u':\n" "$0"
- printf "%b" "$LOADED_MODULES"
- exit 1
- fi
-
- if [ -n "$MISSING_MODULES" ]; then
- printf "The following kernel modules can not be found:\n"
- printf "%b" "$MISSING_MODULES"
- exit 1
- fi
-
- return 0
-}
-
-load_module_linux() {
- KMOD=$1
-
- FILE=$(modinfo "$KMOD" 2>&1 | awk 'NR == 1 && /zlib/ && /not found/ {print "(builtin)"; exit} /^filename:/ {print $2}')
- [ "$FILE" = "(builtin)" ] && return
-
- if [ "$VERBOSE" = "yes" ]; then
- VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}')
- echo "Loading: $FILE ($VERSION)"
- fi
-
- if ! $LDMOD "$KMOD" >/dev/null 2>&1; then
- echo "Failed to load $KMOD"
- return 1
- fi
-
- return 0
-}
-
-load_modules_freebsd() {
- kldload "$KMOD_FREEBSD" || return 1
-
- if [ "$VERBOSE" = "yes" ]; then
- echo "Successfully loaded ZFS module stack"
- fi
-
- return 0
-}
-
-load_modules_linux() {
- mkdir -p /etc/zfs
-
- for KMOD in "$KMOD_ZLIB_DEFLATE" "$KMOD_ZLIB_INFLATE" $KMOD_SPL $KMOD_ZFS; do
- load_module_linux "$KMOD" || return 1
- done
-
- if [ "$VERBOSE" = "yes" ]; then
- echo "Successfully loaded ZFS module stack"
- fi
-
- return 0
-}
-
-unload_modules_freebsd() {
- kldunload "$KMOD_FREEBSD" || echo "Failed to unload $KMOD_FREEBSD"
-
- if [ "$VERBOSE" = "yes" ]; then
- echo "Successfully unloaded ZFS module stack"
- fi
-
- return 0
-}
-
-unload_modules_linux() {
- legacy_kmods="icp zzstd zlua zcommon zunicode znvpair zavl"
- for KMOD in "$KMOD_ZFS" $legacy_kmods "$KMOD_SPL"; do
- NAME="${KMOD##*/}"
- NAME="${NAME%.ko}"
- ! [ -d "/sys/module/$NAME" ] || $DELMOD "$NAME" || return
- done
-
- if [ "$VERBOSE" = "yes" ]; then
- echo "Successfully unloaded ZFS module stack"
- fi
-}
-
-stack_clear_linux() {
- STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
- STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled
-
- if [ "$STACK_TRACER" = "yes" ] && [ -e "$STACK_MAX_SIZE" ]; then
- echo 1 >"$STACK_TRACER_ENABLED"
- echo 0 >"$STACK_MAX_SIZE"
- fi
-}
-
-stack_check_linux() {
- STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
- STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
- STACK_LIMIT=15362
-
- if [ -e "$STACK_MAX_SIZE" ]; then
- read -r STACK_SIZE <"$STACK_MAX_SIZE"
- if [ "$STACK_SIZE" -ge "$STACK_LIMIT" ]; then
- echo
- echo "Warning: max stack size $STACK_SIZE bytes"
- cat "$STACK_TRACE"
- fi
- fi
-}
-
-if [ "$(id -u)" != 0 ]; then
- echo "Must run as root"
- exit 1
-fi
-
-UNAME=$(uname)
-
-if [ "$UNLOAD" = "yes" ]; then
- kill_zed
- umount -t zfs -a
- case $UNAME in
- FreeBSD)
- unload_modules_freebsd
- ;;
- Linux)
- stack_check_linux
- unload_modules_linux
- ;;
- *)
- echo "unknown system: $UNAME" >&2
- exit 1
- ;;
- esac
-fi
-if [ "$LOAD" = "yes" ]; then
- case $UNAME in
- FreeBSD)
- load_modules_freebsd
- ;;
- Linux)
- stack_clear_linux
- check_modules_linux
- load_modules_linux
- udevadm trigger
- udevadm settle
- ;;
- *)
- echo "unknown system: $UNAME" >&2
- exit 1
- ;;
- esac
-fi
-
-exit 0
diff --git a/scripts/zfs2zol-patch.sed b/scripts/zfs2zol-patch.sed
deleted file mode 100755
index 2d744cd5de52..000000000000
--- a/scripts/zfs2zol-patch.sed
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sed -f
-
-s:usr/src/uts/common/fs/zfs/sys:include/sys:g
-s:usr/src/uts/common/fs/zfs:module/zfs:g
-s:usr/src/lib/libzpool:lib/libzpool:g
-s:usr/src/cmd:cmd:g
-s:usr/src/common/nvpair:module/nvpair:g
-s:usr/src/lib/libzfs/common/libzfs.h:include/libzfs.h:g
-s:usr/src/man/man1m/zfs.1m:man/man8/zfs.8:g
-s:usr/src/uts/common/sys:include/sys:g
-s:usr/src/lib/libzfs_core/common/libzfs_core.h:include/libzfs_core.h:g
-s:usr/src/lib/libzfs/common:lib/libzfs:g
-s:usr/src/lib/libzfs_core/common:lib/libzfs_core:g
-s:lib/libzpool/common/sys:include/sys:g
-s:lib/libzpool/common:lib/libzpool:g
-
-s:usr/src/test/zfs-tests/include:tests/zfs-tests/include:g
-s:usr/src/test/zfs-tests/runfiles:tests/runfiles:g
-s:usr/src/test/zfs-tests/tests/functional:tests/zfs-tests/tests/functional:g
-s:usr/src/test/zfs-tests/tests/perf:tests/zfs-tests/tests/perf:g
-s:usr/src/test/test-runner/cmd/run.py:tests/test-runner/cmd/test-runner.py:g
-s:usr/src/common/zfs/\(.*\)\.c:module/zcommon/\1.c:g
-
-# crypto framework
-s:usr/src/common/crypto:module/icp/algs:g
-s:usr/src/uts/common/crypto/io:module/icp/io:g
-
-# Headers
-s:usr/src/common/zfs/\(.*\)\.h:include/\1.h:g
-
-# Man pages
-s:usr/src/man:man:g
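Because the file carries a '#!/bin/sed -f' shebang, it can be applied directly to a patch to rewrite illumos-style paths into this repository's layout; a minimal sketch, with the patch file names being placeholders:

    # Hypothetical usage: translate paths in an upstream patch.
    ./scripts/zfs2zol-patch.sed illumos-change.patch > zol-change.patch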
diff --git a/scripts/zfs_prepare_disk b/scripts/zfs_prepare_disk
deleted file mode 100755
index 02aa9f8a7728..000000000000
--- a/scripts/zfs_prepare_disk
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-#
-# This is an optional helper script that is automatically called by libzfs
-# before a disk is about to be added into the pool. It can be modified by
-# the user to run whatever commands are necessary to prepare a disk for
-# inclusion into the pool. For example, users can add lines to this
-# script to do things like update the drive's firmware or check the drive's
-# health. The script is optional and can be removed if it is not needed.
-#
-# See the zfs_prepare_disk(8) man page for details.
-#
-# Example:
-#
-# echo "Prepare disk $VDEV_PATH ($VDEV_UPATH) for $VDEV_PREPARE in $POOL_NAME"
-#
-
-exit 0
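As a concrete but hypothetical illustration of the kind of customization the comment describes, a SMART health check could be added just above the final 'exit 0'. The sketch assumes smartmontools is installed and only warns rather than failing the disk:

    # Hypothetical health check: warn if SMART reports the underlying
    # disk ($VDEV_UPATH) as unhealthy before it joins $POOL_NAME.
    if command -v smartctl >/dev/null 2>&1; then
        if ! smartctl -H "$VDEV_UPATH" >/dev/null 2>&1; then
            echo "warning: $VDEV_UPATH failed SMART health check" >&2
        fi
    fi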
diff --git a/scripts/zimport.sh b/scripts/zimport.sh
deleted file mode 100755
index 2549a483b148..000000000000
--- a/scripts/zimport.sh
+++ /dev/null
@@ -1,512 +0,0 @@
-#!/usr/bin/env bash
-#
-# Verify that an assortment of known good reference pools can be imported
-# using different versions of OpenZFS code.
-#
-# By default, reference pools for the major ZFS implementations will be
-# checked against the most recent OpenZFS tags and the master development branch.
-# Alternate tags or branches may be verified with the '-s <src-tag>' option.
-# Passing the keyword "installed" will instruct the script to test whatever
-# version is installed.
-#
-# Preferentially a reference pool is used for all tests. However, if one
-# does not exist and the pool-tag matches one of the src-tags then a new
-# reference pool will be created using binaries from that source build.
-# This is particularly useful when you need to test your changes before
-# opening a pull request. The keyword 'all' can be used as shorthand
-# to refer to all available reference pools.
-#
-# New reference pools may be added by placing a bzip2 compressed tarball
-# of the pool in the scripts/zfs-images directory and then passing
-# the -p <pool-tag> option. To increase the test coverage reference pools
-# should be collected for all the major ZFS implementations. Having these
-# pools easily available is also helpful to the developers.
-#
-# Care should be taken to run these tests with a kernel supported by all
-# the listed tags. Otherwise, build failures will cause false positives.
-#
-#
-# EXAMPLES:
-#
-# The following example will verify the zfs-0.6.2 tag, the master branch,
-# and the installed zfs version can correctly import the listed pools.
-# Note there is no reference pool available for master or installed, but
-# because binaries are available one is automatically constructed. The
-# working directory is also preserved between runs (-k) preventing the
-# need to rebuild from source for multiple runs.
-#
-# zimport.sh -k -f /var/tmp/zimport \
-# -s "zfs-0.6.2 master installed" \
-# -p "zevo-1.1.1 zol-0.6.2 zol-0.6.2-173 master installed"
-#
-# ------------------------ OpenZFS Source Versions ----------------
-# zfs-0.6.2 master 0.6.2-175_g36eb554
-# -----------------------------------------------------------------
-# Clone ZFS Local Local Skip
-# Build ZFS Pass Pass Skip
-# -----------------------------------------------------------------
-# zevo-1.1.1 Pass Pass Pass
-# zol-0.6.2 Pass Pass Pass
-# zol-0.6.2-173 Fail Pass Pass
-# master Pass Pass Pass
-# installed Pass Pass Pass
-#
-
-BASE_DIR=$(dirname "$0")
-SCRIPT_COMMON=common.sh
-if [[ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]]; then
- . "${BASE_DIR}/${SCRIPT_COMMON}"
-else
- echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
-fi
-
-PROG=zimport.sh
-SRC_TAGS="zfs-0.6.5.11 master"
-POOL_TAGS="all master"
-POOL_CREATE_OPTIONS=
-TEST_DIR=$(mktemp -u -d -p /var/tmp zimport.XXXXXXXX)
-KEEP="no"
-VERBOSE="no"
-COLOR="yes"
-REPO="https://github.com/openzfs"
-IMAGES_DIR="${BASE_DIR}/zfs-images/"
-IMAGES_TAR="https://github.com/openzfs/zfs-images/tarball/master"
-ERROR=0
-
-CONFIG_LOG="configure.log"
-CONFIG_OPTIONS=${CONFIG_OPTIONS:-""}
-MAKE_LOG="make.log"
-MAKE_OPTIONS=${MAKE_OPTIONS:-"-s -j$(nproc)"}
-
-COLOR_GREEN="\033[0;32m"
-COLOR_RED="\033[0;31m"
-COLOR_BROWN="\033[0;33m"
-COLOR_RESET="\033[0m"
-
-usage() {
-cat << EOF
-USAGE:
-zimport.sh [hvl] [-r repo] [-s src-tag] [-i pool-dir] [-p pool-tag]
- [-f path] [-o options]
-
-DESCRIPTION:
- ZPOOL import verification tests
-
-OPTIONS:
- -h Show this message
- -v Verbose
- -c No color
- -k Keep temporary directory
- -r <repo> Source repository ($REPO)
- -s <src-tag>... Verify OpenZFS versions with the listed tags
- -i <pool-dir> Pool image directory
- -p <pool-tag>... Verify pools created with the listed tags
- -f <path> Temporary directory to use
- -o <options> Additional options to pass to 'zpool create'
-
-EOF
-}
-
-while getopts 'hvckr:s:i:p:f:o:?' OPTION; do
- case $OPTION in
- h)
- usage
- exit 1
- ;;
- v)
- VERBOSE="yes"
- ;;
- c)
- COLOR="no"
- ;;
- k)
- KEEP="yes"
- ;;
- r)
- REPO="$OPTARG"
- ;;
- s)
- SRC_TAGS="$OPTARG"
- ;;
- i)
- IMAGES_DIR="$OPTARG"
- ;;
- p)
- POOL_TAGS="$OPTARG"
- ;;
- f)
- TEST_DIR="$OPTARG"
- ;;
- o)
- POOL_CREATE_OPTIONS="$OPTARG"
- ;;
- *)
- usage
- exit 1
- ;;
- esac
-done
-
-#
-# Verify the ZFS module stack is not loaded
-#
-if lsmod | grep zfs >/dev/null; then
- echo "ZFS modules must be unloaded"
- exit 1
-fi
-
-#
-# Create a random directory tree of files and sub-directories to
-# act as a copy source for the various regression tests.
-#
-populate() {
- local ROOT=$1
- local MAX_DIR_SIZE=$2
- local MAX_FILE_SIZE=$3
-
- mkdir -p "$ROOT"/{a,b,c,d,e,f,g}/{h,i}
- DIRS=$(find "$ROOT")
-
- for DIR in $DIRS; do
- COUNT=$((RANDOM % MAX_DIR_SIZE))
-
- for _ in $(seq "$COUNT"); do
- FILE=$(mktemp -p "$DIR")
- SIZE=$((RANDOM % MAX_FILE_SIZE))
- dd if=/dev/urandom of="$FILE" bs=1k \
- count="$SIZE" &>/dev/null
- done
- done
-
- return 0
-}
-
-SRC_DIR=$(mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX)
-trap 'rm -Rf "$SRC_DIR"' INT TERM EXIT
-populate "$SRC_DIR" 10 100
-
-SRC_DIR="$TEST_DIR/src"
-SRC_DIR_ZFS="$SRC_DIR/zfs"
-
-if [[ "$COLOR" = "no" ]]; then
- COLOR_GREEN=""
- COLOR_BROWN=""
- COLOR_RED=""
- COLOR_RESET=""
-fi
-
-pass_nonewline() {
- echo -n -e "${COLOR_GREEN}Pass${COLOR_RESET}\t\t"
-}
-
-skip_nonewline() {
- echo -n -e "${COLOR_BROWN}Skip${COLOR_RESET}\t\t"
-}
-
-fail_nonewline() {
- echo -n -e "${COLOR_RED}Fail${COLOR_RESET}\t\t"
-}
-
-#
-# Log a failure message, cleanup, and return an error.
-#
-fail() {
- echo -e "$PROG: $1" >&2
- $ZFS_SH -u >/dev/null 2>&1
- exit 1
-}
-
-#
-# Set several helper variables which are derived from a source tag.
-#
-# ZFS_TAG - The passed zfs-x.y.z tag
-# ZFS_DIR - The zfs directory name
-# ZFS_URL - The zfs github URL to fetch the tarball
-#
-src_set_vars() {
- local TAG=$1
-
- ZFS_TAG="$TAG"
- ZFS_DIR="$SRC_DIR_ZFS/$ZFS_TAG"
- ZFS_URL="$REPO/zfs/tarball/$ZFS_TAG"
-
- if [[ "$TAG" = "installed" ]]; then
- ZPOOL_CMD=$(command -v zpool)
- ZFS_CMD=$(command -v zfs)
- ZFS_SH="/usr/share/zfs/zfs.sh"
- else
- ZPOOL_CMD="./zpool"
- ZFS_CMD="./zfs"
- ZFS_SH="./scripts/zfs.sh"
- fi
-}
-
-#
-# Set several helper variables which are derived from a pool name such
-# as zol-0.6.x, zevo-1.1.1, etc. These refer to example pools from various
-# ZFS implementations which are used to verify compatibility.
-#
-# POOL_TAG - The example pools name in scripts/zfs-images/.
-# POOL_BZIP - The full path to the example bzip2 compressed pool.
-# POOL_DIR - The top level test path for this pool.
-# POOL_DIR_PRISTINE - The directory containing a pristine version of the pool.
-# POOL_DIR_COPY - The directory containing a working copy of the pool.
-# POOL_DIR_SRC - Location of a source build if it exists for this pool.
-#
-pool_set_vars() {
- local TAG=$1
-
- POOL_TAG=$TAG
- POOL_BZIP=$IMAGES_DIR/$POOL_TAG.tar.bz2
- POOL_DIR=$TEST_DIR/pools/$POOL_TAG
- POOL_DIR_PRISTINE=$POOL_DIR/pristine
- POOL_DIR_COPY=$POOL_DIR/copy
- POOL_DIR_SRC="$SRC_DIR_ZFS/${POOL_TAG//zol/zfs}"
-}
-
-#
-# Construct a non-trivial pool given a specific version of the source. More
-# interesting pools provide better test coverage, so this function should
-# be extended as needed to create more realistic pools.
-#
-pool_create() {
- pool_set_vars "$1"
- src_set_vars "$1"
-
- if [[ "$POOL_TAG" != "installed" ]]; then
- cd "$POOL_DIR_SRC" || fail "Failed 'cd $POOL_DIR_SRC'"
- fi
-
- $ZFS_SH zfs="spa_config_path=$POOL_DIR_PRISTINE" || \
- fail "Failed to load kmods"
-
- # Create a file vdev RAIDZ pool.
- truncate -s 1G \
- "$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \
- "$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4" || \
- fail "Failed 'truncate -s 1G ...'"
- # shellcheck disable=SC2086
- $ZPOOL_CMD create $POOL_CREATE_OPTIONS "$POOL_TAG" raidz \
- "$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \
- "$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4" || \
- fail "Failed '$ZPOOL_CMD create $POOL_CREATE_OPTIONS $POOL_TAG ...'"
-
- # Create a pool/fs filesystem with some random contents.
- $ZFS_CMD create "$POOL_TAG/fs" || \
- fail "Failed '$ZFS_CMD create $POOL_TAG/fs'"
- populate "/$POOL_TAG/fs/" 10 100
-
- # Snapshot that filesystem, clone it, remove the files/dirs,
- # replace them with new files/dirs.
- $ZFS_CMD snap "$POOL_TAG/fs@snap" || \
- fail "Failed '$ZFS_CMD snap $POOL_TAG/fs@snap'"
- $ZFS_CMD clone "$POOL_TAG/fs@snap" "$POOL_TAG/clone" || \
- fail "Failed '$ZFS_CMD clone $POOL_TAG/fs@snap $POOL_TAG/clone'"
- # shellcheck disable=SC2086
- rm -Rf /$POOL_TAG/clone/*
- populate "/$POOL_TAG/clone/" 10 100
-
- # Scrub the pool, delay slightly, then export it. It is now
- # somewhat interesting for testing purposes.
- $ZPOOL_CMD scrub "$POOL_TAG" || \
- fail "Failed '$ZPOOL_CMD scrub $POOL_TAG'"
- sleep 10
- $ZPOOL_CMD export "$POOL_TAG" || \
- fail "Failed '$ZPOOL_CMD export $POOL_TAG'"
-
- $ZFS_SH -u || fail "Failed to unload kmods"
-}
-
-# If the zfs-images directory doesn't exist, fetch a copy from GitHub, then
-# cache it in the $TEST_DIR and update $IMAGES_DIR.
-if [[ ! -d "$IMAGES_DIR" ]]; then
- IMAGES_DIR="$TEST_DIR/zfs-images"
- mkdir -p "$IMAGES_DIR"
- curl -sL "$IMAGES_TAR" | \
- tar -xz -C "$IMAGES_DIR" --strip-components=1 || \
- fail "Failed to download pool images"
-fi
-
-# Given the available images in the zfs-images directory substitute the
-# list of available images for the reserved keyword 'all'.
-for TAG in $POOL_TAGS; do
-
- if [[ "$TAG" = "all" ]]; then
- ALL_TAGS=$(echo "$IMAGES_DIR"/*.tar.bz2 | \
- sed "s|$IMAGES_DIR/||g;s|.tar.bz2||g")
- NEW_TAGS="$NEW_TAGS $ALL_TAGS"
- else
- NEW_TAGS="$NEW_TAGS $TAG"
- fi
-done
-POOL_TAGS="$NEW_TAGS"
-
-if [[ "$VERBOSE" = "yes" ]]; then
- echo "---------------------------- Options ----------------------------"
- echo "VERBOSE=$VERBOSE"
- echo "KEEP=$KEEP"
- echo "REPO=$REPO"
- echo "SRC_TAGS=$SRC_TAGS"
- echo "POOL_TAGS=$POOL_TAGS"
- echo "PATH=$TEST_DIR"
- echo "POOL_CREATE_OPTIONS=$POOL_CREATE_OPTIONS"
- echo
-fi
-
-if [[ ! -d "$TEST_DIR" ]]; then
- mkdir -p "$TEST_DIR"
-fi
-
-if [[ ! -d "$SRC_DIR" ]]; then
- mkdir -p "$SRC_DIR"
-fi
-
-# Print a header for all tags which are being tested.
-echo "------------------------ OpenZFS Source Versions ----------------"
-printf "%-16s" " "
-for TAG in $SRC_TAGS; do
- src_set_vars "$TAG"
-
- if [[ "$TAG" = "installed" ]]; then
- ZFS_VERSION=$(modinfo zfs | awk '/version:/ { print $2; exit }')
- if [[ -n "$ZFS_VERSION" ]]; then
- printf "%-16s" "$ZFS_VERSION"
- else
- fail "ZFS is not installed"
- fi
- else
- printf "%-16s" "$TAG"
- fi
-done
-echo -e "\n-----------------------------------------------------------------"
-
-#
-# Attempt to generate the tarball from your local git repository; if that
-# fails, download the tarball from GitHub.
-#
-printf "%-16s" "Clone ZFS"
-for TAG in $SRC_TAGS; do
- src_set_vars "$TAG"
-
- if [[ -d "$ZFS_DIR" ]]; then
- skip_nonewline
- elif [[ "$ZFS_TAG" = "installed" ]]; then
- skip_nonewline
- else
- cd "$SRC_DIR" || fail "Failed 'cd $SRC_DIR'"
-
- if [[ ! -d "$SRC_DIR_ZFS" ]]; then
- mkdir -p "$SRC_DIR_ZFS"
- fi
-
-		git archive --format=tar --prefix="$ZFS_TAG/" "$ZFS_TAG" \
- -o "$SRC_DIR_ZFS/$ZFS_TAG.tar" &>/dev/null || \
- rm "$SRC_DIR_ZFS/$ZFS_TAG.tar"
- if [[ -s "$SRC_DIR_ZFS/$ZFS_TAG.tar" ]]; then
- tar -xf "$SRC_DIR_ZFS/$ZFS_TAG.tar" -C "$SRC_DIR_ZFS"
- rm "$SRC_DIR_ZFS/$ZFS_TAG.tar"
- echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
- else
- mkdir -p "$ZFS_DIR" || fail "Failed to create $ZFS_DIR"
- curl -sL "$ZFS_URL" | tar -xz -C "$ZFS_DIR" \
- --strip-components=1 || \
- fail "Failed to download $ZFS_URL"
- echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
- fi
- fi
-done
-printf "\n"
-
-# Build the listed tags
-printf "%-16s" "Build ZFS"
-for TAG in $SRC_TAGS; do
- src_set_vars "$TAG"
-
- if [[ -f "$ZFS_DIR/module/zfs/zfs.ko" ]]; then
- skip_nonewline
- elif [[ "$ZFS_TAG" = "installed" ]]; then
- skip_nonewline
- else
- cd "$ZFS_DIR" || fail "Failed 'cd $ZFS_DIR'"
- make distclean &>/dev/null
- ./autogen.sh >>"$CONFIG_LOG" 2>&1 || \
- fail "Failed ZFS 'autogen.sh'"
- # shellcheck disable=SC2086
- ./configure $CONFIG_OPTIONS >>"$CONFIG_LOG" 2>&1 || \
- fail "Failed ZFS 'configure $CONFIG_OPTIONS'"
- # shellcheck disable=SC2086
- make $MAKE_OPTIONS >>"$MAKE_LOG" 2>&1 || \
- fail "Failed ZFS 'make $MAKE_OPTIONS'"
- pass_nonewline
- fi
-done
-printf "\n"
-echo "-----------------------------------------------------------------"
-
-# Either create a new pool using 'zpool create', or alternately restore an
-# existing pool from another ZFS implementation for compatibility testing.
-for TAG in $POOL_TAGS; do
- pool_set_vars "$TAG"
- SKIP=0
-
- printf "%-16s" "$POOL_TAG"
- rm -Rf "$POOL_DIR"
- mkdir -p "$POOL_DIR_PRISTINE"
-
- # Use the existing compressed image if available.
- if [[ -f "$POOL_BZIP" ]]; then
- tar -xjf "$POOL_BZIP" -C "$POOL_DIR_PRISTINE" \
- --strip-components=1 || \
- fail "Failed 'tar -xjf $POOL_BZIP"
- # Use the installed version to create the pool.
- elif [[ "$TAG" = "installed" ]]; then
- pool_create "$TAG"
- # A source build is available to create the pool.
- elif [[ -d "$POOL_DIR_SRC" ]]; then
- pool_create "$TAG"
- else
- SKIP=1
- fi
-
- # Verify 'zpool import' works for all listed source versions.
- for SRC_TAG in $SRC_TAGS; do
-
- if [[ "$SKIP" -eq 1 ]]; then
- skip_nonewline
- continue
- fi
-
- src_set_vars "$SRC_TAG"
- if [[ "$SRC_TAG" != "installed" ]]; then
- cd "$ZFS_DIR" || fail "Failed 'cd $ZFS_DIR'"
- fi
- $ZFS_SH zfs="spa_config_path=$POOL_DIR_COPY"
-
- cp -a --sparse=always "$POOL_DIR_PRISTINE" \
- "$POOL_DIR_COPY" || \
- fail "Failed to copy $POOL_DIR_PRISTINE to $POOL_DIR_COPY"
- POOL_NAME=$($ZPOOL_CMD import -d "$POOL_DIR_COPY" | \
- awk '/pool:/ { print $2; exit }')
-
-		if ! $ZPOOL_CMD import -N -d "$POOL_DIR_COPY" \
- "$POOL_NAME" &>/dev/null; then
- fail_nonewline
- ERROR=1
- else
- $ZPOOL_CMD export "$POOL_NAME" || \
- fail "Failed to export pool"
- pass_nonewline
- fi
-
- rm -Rf "$POOL_DIR_COPY"
-
- $ZFS_SH -u || fail "Failed to unload kmods"
- done
- printf "\n"
-done
-
-if [[ "$KEEP" = "no" ]]; then
- rm -Rf "$TEST_DIR"
-fi
-
-exit "$ERROR"
diff --git a/scripts/zloop.sh b/scripts/zloop.sh
deleted file mode 100755
index 04640396ce75..000000000000
--- a/scripts/zloop.sh
+++ /dev/null
@@ -1,358 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: CDDL-1.0
-
-#
-# CDDL HEADER START
-#
-# This file and its contents are supplied under the terms of the
-# Common Development and Distribution License ("CDDL"), version 1.0.
-# You may only use this file in accordance with the terms of version
-# 1.0 of the CDDL.
-#
-# A full copy of the text of the CDDL should have accompanied this
-# source. A copy of the CDDL is also available via the Internet at
-# http://www.illumos.org/license/CDDL.
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright (c) 2015 by Delphix. All rights reserved.
-# Copyright (C) 2016 Lawrence Livermore National Security, LLC.
-# Copyright (c) 2017, Intel Corporation.
-#
-
-BASE_DIR=${0%/*}
-SCRIPT_COMMON=common.sh
-if [[ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]]; then
- . "${BASE_DIR}/${SCRIPT_COMMON}"
-else
- echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
-fi
-
-# shellcheck disable=SC2034
-PROG=zloop.sh
-GDB=${GDB:-gdb}
-
-DEFAULTWORKDIR=/var/tmp
-DEFAULTCOREDIR=/var/tmp/zloop
-
-function usage
-{
- cat >&2 <<EOF
-
-$0 [-hl] [-c <dump directory>] [-f <vdev directory>]
- [-m <max core dumps>] [-s <vdev size>] [-t <timeout>]
- [-I <max iterations>] [-- [extra ztest parameters]]
-
- This script runs ztest repeatedly with randomized arguments.
- If a crash is encountered, the ztest logs, any associated
- vdev files, and core file (if one exists) are moved to the
- output directory ($DEFAULTCOREDIR by default). Any options
- after the -- end-of-options marker will be passed to ztest.
-
- Options:
- -c Specify a core dump directory to use.
- -f Specify working directory for ztest vdev files.
- -h Print this help message.
- -l Create 'ztest.core.N' symlink to core directory.
- -m Max number of core dumps to allow before exiting.
- -s Size of vdev devices.
- -t Total time to loop for, in seconds. If not provided,
- zloop runs forever.
- -I Max number of iterations to loop before exiting.
-
-EOF
-}
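A hedged example invocation, assuming a built source tree and enough space under /var/tmp for the vdev files:

    # Hypothetical run: loop ztest for one hour with 256m vdevs, keeping
    # any crash artifacts under /var/tmp/zloop-cores.
    sudo ./scripts/zloop.sh -t 3600 -s 256m -c /var/tmp/zloop-cores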
-
-function or_die
-{
- if ! "$@"; then
- echo "Command failed: $*"
- exit 1
- fi
-}
-
-case $(uname) in
-FreeBSD)
- coreglob="z*.core"
- ;;
-Linux)
- # core file helpers
- read -r origcorepattern </proc/sys/kernel/core_pattern
- coreglob="$(grep -E -o '^([^|%[:space:]]*)' /proc/sys/kernel/core_pattern)*"
-
- if [[ $coreglob = "*" ]]; then
- echo "Setting core file pattern..."
- echo "core" > /proc/sys/kernel/core_pattern
- coreglob="$(grep -E -o '^([^|%[:space:]]*)' \
- /proc/sys/kernel/core_pattern)*"
- fi
- ;;
-*)
- exit 1
- ;;
-esac
-
-function core_file
-{
- # shellcheck disable=SC2012,SC2086
- ls -tr1 $coreglob 2>/dev/null | head -1
-}
-
-function core_prog
-{
- # shellcheck disable=SC2154
- prog=$ZTEST
- core_id=$($GDB --batch -c "$1" | grep "Core was generated by" | \
- tr \' ' ')
- if [[ "$core_id" == *"zdb "* ]]; then
- # shellcheck disable=SC2154
- prog=$ZDB
- fi
- printf "%s" "$prog"
-}
-
-function store_core
-{
- core="$(core_file)"
- if [[ $ztrc -ne 0 ]] || [[ -f "$core" ]]; then
- df -h "$workdir" >>ztest.out
- coreid=$(date "+zloop-%y%m%d-%H%M%S")
- foundcrashes=$((foundcrashes + 1))
-
- # zdb debugging
- zdbcmd="$ZDB -U "$workdir/zpool.cache" -dddMmDDG ztest"
- zdbdebug=$($zdbcmd 2>&1)
- echo -e "$zdbcmd\n" >>ztest.zdb
- echo "$zdbdebug" >>ztest.zdb
-
- dest=$coredir/$coreid
- or_die mkdir -p "$dest/vdev"
-
- if [[ $symlink -ne 0 ]]; then
- or_die ln -sf "$dest" "ztest.core.${foundcrashes}"
- fi
-
- echo "*** ztest crash found - moving logs to $dest"
-
- or_die mv ztest.history ztest.zdb ztest.out "$dest/"
- or_die mv "$workdir/"ztest* "$dest/vdev/"
-
- if [[ -e "$workdir/zpool.cache" ]]; then
- or_die mv "$workdir/zpool.cache" "$dest/vdev/"
- fi
-
- # check for core
- if [[ -f "$core" ]]; then
- coreprog=$(core_prog "$core")
- coredebug=$($GDB --batch --quiet \
- -ex "set print thread-events off" \
- -ex "printf \"*\n* Backtrace \n*\n\"" \
- -ex "bt" \
- -ex "printf \"*\n* Libraries \n*\n\"" \
- -ex "info sharedlib" \
- -ex "printf \"*\n* Threads (full) \n*\n\"" \
- -ex "info threads" \
- -ex "printf \"*\n* Backtraces \n*\n\"" \
- -ex "thread apply all bt" \
- -ex "printf \"*\n* Backtraces (full) \n*\n\"" \
- -ex "thread apply all bt full" \
- -ex "quit" "$coreprog" "$core" 2>&1 | \
- grep -v "New LWP")
-
- # Dump core + logs to stored directory
- echo "$coredebug" >>"$dest/ztest.gdb"
- or_die mv "$core" "$dest/"
-
- # Record info in cores logfile
- echo "*** core @ $coredir/$coreid/$core:" | \
- tee -a ztest.cores
- fi
-
- if [[ $coremax -gt 0 ]] &&
- [[ $foundcrashes -ge $coremax ]]; then
- echo "exiting... max $coremax allowed cores"
- exit 1
- else
- echo "continuing..."
- fi
- fi
-}
-
-# parse arguments
-# expected format: zloop [-t timeout] [-c coredir] [-- extra ztest args]
-coredir=$DEFAULTCOREDIR
-basedir=$DEFAULTWORKDIR
-rundir="zloop-run"
-timeout=0
-size="512m"
-coremax=0
-symlink=0
-iterations=0
-while getopts ":ht:m:I:s:c:f:l" opt; do
- case $opt in
- t ) [[ $OPTARG -gt 0 ]] && timeout=$OPTARG ;;
- m ) [[ $OPTARG -gt 0 ]] && coremax=$OPTARG ;;
- I ) [[ -n $OPTARG ]] && iterations=$OPTARG ;;
- s ) [[ -n $OPTARG ]] && size=$OPTARG ;;
- c ) [[ -n $OPTARG ]] && coredir=$OPTARG ;;
- f ) [[ -n $OPTARG ]] && basedir=$(readlink -f "$OPTARG") ;;
- l ) symlink=1 ;;
- h ) usage
- exit 2
- ;;
- * ) echo "Invalid argument: -$OPTARG";
- usage
- exit 1
- esac
-done
-# pass remaining arguments on to ztest
-shift $((OPTIND - 1))
-
-# enable core dumps
-ulimit -c unlimited
-export ASAN_OPTIONS=abort_on_error=true:halt_on_error=true:allocator_may_return_null=true:disable_coredump=false:detect_stack_use_after_return=true
-export UBSAN_OPTIONS=abort_on_error=true:halt_on_error=true:print_stacktrace=true
-
-if [[ -f "$(core_file)" ]]; then
- echo -n "There's a core dump here you might want to look at first... "
- core_file
- echo
- exit 1
-fi
-
-if [[ ! -d $coredir ]]; then
- echo "core dump directory ($coredir) does not exist, creating it."
- or_die mkdir -p "$coredir"
-fi
-
-if [[ ! -w $coredir ]]; then
- echo "core dump directory ($coredir) is not writable."
- exit 1
-fi
-
-or_die rm -f ztest.history ztest.zdb ztest.cores
-
-ztrc=0 # ztest return value
-foundcrashes=0 # number of crashes found so far
-starttime=$(date +%s)
-curtime=$starttime
-iteration=0
-
-# if no timeout was specified, loop forever.
-while (( timeout == 0 )) || (( curtime <= (starttime + timeout) )); do
- if (( iterations > 0 )) && (( iteration++ == iterations )); then
- break
- fi
-
- zopt="-G -VVVVV"
-
- # start each run with an empty directory
- workdir="$basedir/$rundir"
- or_die rm -rf "$workdir"
- or_die mkdir "$workdir"
-
-	# ashift is randomly 9 or 12
- align=$(((RANDOM % 2) * 3 + 9))
-
- # choose parity value
- parity=$(((RANDOM % 3) + 1))
-
- draid_data=0
- draid_spares=0
-
- # randomly use special classes
- class="special=random"
-
- # choose between four types of configs
- # (basic, raidz mix, raidz expansion, and draid mix)
- case $((RANDOM % 4)) in
-
- # basic mirror configuration
- 0) parity=1
- mirrors=2
- raid_children=0
- vdevs=2
- raid_type="raidz"
- ;;
-
- # fully randomized mirror/raidz (sans dRAID)
- 1) mirrors=$(((RANDOM % 3) * 1))
- raid_children=$((((RANDOM % 9) + parity + 1) * (RANDOM % 2)))
- vdevs=$(((RANDOM % 3) + 3))
- raid_type="raidz"
- ;;
-
- # randomized raidz expansion (one top-level raidz vdev)
- 2) mirrors=0
- vdevs=1
- # derive initial raidz disk count based on parity choice
- # P1: 3 - 7 disks
- # P2: 5 - 9 disks
- # P3: 7 - 11 disks
- raid_children=$(((RANDOM % 5) + (parity * 2) + 1))
-
- # 1/3 of the time use a dedicated '-X' raidz expansion test
- if [[ $((RANDOM % 3)) -eq 0 ]]; then
- zopt="$zopt -X -t 16"
- raid_type="raidz"
- else
- raid_type="eraidz"
- fi
- ;;
-
- # fully randomized dRAID (sans mirror/raidz)
- 3) mirrors=0
- draid_data=$(((RANDOM % 8) + 3))
- draid_spares=$(((RANDOM % 2) + parity))
- stripe=$((draid_data + parity))
- extra=$((draid_spares + (RANDOM % 4)))
- raid_children=$(((((RANDOM % 4) + 1) * stripe) + extra))
- vdevs=$((RANDOM % 3))
- raid_type="draid"
- ;;
- *)
- # avoid shellcheck SC2249
- ;;
- esac
-
- zopt="$zopt -K $raid_type"
- zopt="$zopt -m $mirrors"
- zopt="$zopt -r $raid_children"
- zopt="$zopt -D $draid_data"
- zopt="$zopt -S $draid_spares"
- zopt="$zopt -R $parity"
- zopt="$zopt -v $vdevs"
- zopt="$zopt -a $align"
- zopt="$zopt -C $class"
- zopt="$zopt -s $size"
- zopt="$zopt -f $workdir"
-
- cmd="$ZTEST $zopt $*"
- echo "$(date '+%m/%d %T') $cmd" | tee -a ztest.history ztest.out
- $cmd >>ztest.out 2>&1
- ztrc=$?
- grep -E '===|WARNING' ztest.out >>ztest.history
-
- store_core
-
- curtime=$(date +%s)
-done
-
-echo "zloop finished, $foundcrashes crashes found"
-
-# restore core pattern.
-case $(uname) in
-Linux)
- echo "$origcorepattern" > /proc/sys/kernel/core_pattern
- ;;
-*)
- ;;
-esac
-
-uptime >>ztest.out
-
-if [[ $foundcrashes -gt 0 ]]; then
- exit 1
-fi
diff --git a/scripts/zol2zfs-patch.sed b/scripts/zol2zfs-patch.sed
deleted file mode 100755
index 0ca4b6cd6b7e..000000000000
--- a/scripts/zol2zfs-patch.sed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sed -f
-
-s:cmd:usr/src/cmd:g
-s:include/libzfs.h:usr/src/lib/libzfs/common/libzfs.h:g
-s:include/libzfs_core.h:usr/src/lib/libzfs_core/common/libzfs_core.h:g
-s:include/sys:lib/libzpool/common/sys:g
-s:include/sys:usr/src/uts/common/fs/zfs/sys:g
-s:include/sys:usr/src/uts/common/sys:g
-s:include/zfs_fletcher.h:usr/src/common/zfs/zfs_fletcher.h:g
-s:include:usr/src/common/zfs:g
-s:lib/libzfs:usr/src/lib/libzfs/common:g
-s:lib/libzfs_core:usr/src/lib/libzfs_core/common:g
-s:lib/libzpool:lib/libzpool/common:g
-s:lib/libzpool:usr/src/lib/libzpool:g
-s:man/man7/zpool-features.7:usr/src/man/man5/zpool-features.5:g
-s:man/man8/zfs.8:usr/src/man/man1m/zfs.1m:g
-s:module/nvpair:usr/src/common/nvpair:g
-s:module/zcommon:usr/src/common/zfs/:g
-s:module/zfs:usr/src/uts/common/fs/zfs:g
-s:tests/zfs-tests:test/zfs-tests:g