aboutsummaryrefslogtreecommitdiff
path: root/sys/contrib/openzfs/.github/workflows/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'sys/contrib/openzfs/.github/workflows/scripts')
-rw-r--r--sys/contrib/openzfs/.github/workflows/scripts/README.md14
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py108
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/merge_summary.awk109
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh77
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh303
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh262
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps.sh28
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh396
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-4-build.sh11
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh137
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh119
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-7-prepare.sh124
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-8-summary.sh71
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-9-summary-page.sh57
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-prepare-for-build.sh8
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-test-repo-vm.sh90
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-wait-for-vm.sh10
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/replace-dupes-with-symlinks.sh32
18 files changed, 1956 insertions, 0 deletions
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/README.md b/sys/contrib/openzfs/.github/workflows/scripts/README.md
new file mode 100644
index 000000000000..811d23d93875
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/README.md
@@ -0,0 +1,14 @@
+
+Workflow for each operating system:
+- install qemu on the github runner
+- download current cloud image of operating system
+- start and init that image via cloud-init
+- install dependencies and poweroff system
+- start system and build openzfs and then poweroff again
+- clone build system and start 2 instances of it
+- run functional tests and complete in around 3h
+- when tests are done, do some logfile preparing
+- show detailed results for each system
+- in the end, generate the job summary
+
+/TR 14.09.2024
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py b/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py
new file mode 100755
index 000000000000..08021aabcb61
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+
+"""
+Determine the CI type based on the change list and commit message.
+
+Prints "quick" if (explicitly required by user):
+- the *last* commit message contains 'ZFS-CI-Type: quick'
+or if (heuristics):
+- the files changed are not in the list of specified directories, and
+- all commit messages do not contain 'ZFS-CI-Type: full'
+
+Otherwise prints "full".
+"""
+
+import sys
+import subprocess
+import re
+
+"""
+Patterns of files that are not considered to trigger full CI.
+Note: not using pathlib.Path.match() because it does not support '**'
+"""
+FULL_RUN_IGNORE_REGEX = list(map(re.compile, [
+ r'.*\.md',
+ r'.*\.gitignore'
+]))
+
+"""
+Patterns of files that are considered to trigger full CI.
+"""
+FULL_RUN_REGEX = list(map(re.compile, [
+ r'\.github/workflows/scripts/.*',
+ r'cmd.*',
+ r'configs/.*',
+ r'META',
+ r'.*\.am',
+ r'.*\.m4',
+ r'autogen\.sh',
+ r'configure\.ac',
+ r'copy-builtin',
+ r'contrib',
+ r'etc',
+ r'include',
+ r'lib/.*',
+ r'module/.*',
+ r'scripts/.*',
+ r'tests/.*',
+ r'udev/.*'
+]))
+
+if __name__ == '__main__':
+
+ prog = sys.argv[0]
+
+ if len(sys.argv) != 3:
+ print(f'Usage: {prog} <head_ref> <base_ref>')
+ sys.exit(1)
+
+ head, base = sys.argv[1:3]
+
+ def output_type(type, reason):
+ print(f'{prog}: will run {type} CI: {reason}', file=sys.stderr)
+ print(type)
+ sys.exit(0)
+
+ # check last (HEAD) commit message
+ last_commit_message_raw = subprocess.run([
+ 'git', 'show', '-s', '--format=%B', head
+ ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ for line in last_commit_message_raw.stdout.decode().splitlines():
+ if line.strip().lower() == 'zfs-ci-type: quick':
+ output_type('quick', f'explicitly requested by HEAD commit {head}')
+
+ # check all commit messages
+ all_commit_message_raw = subprocess.run([
+ 'git', 'show', '-s',
+ '--format=ZFS-CI-Commit: %H%n%B', f'{head}...{base}'
+ ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ all_commit_message = all_commit_message_raw.stdout.decode().splitlines()
+
+ commit_ref = head
+ for line in all_commit_message:
+ if line.startswith('ZFS-CI-Commit:'):
+ commit_ref = line.lstrip('ZFS-CI-Commit:').rstrip()
+ if line.strip().lower() == 'zfs-ci-type: full':
+ output_type('full', f'explicitly requested by commit {commit_ref}')
+
+ # check changed files
+ changed_files_raw = subprocess.run([
+ 'git', 'diff', '--name-only', head, base
+ ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ changed_files = changed_files_raw.stdout.decode().splitlines()
+
+ for f in changed_files:
+ for r in FULL_RUN_IGNORE_REGEX:
+ if r.match(f):
+ break
+ else:
+ for r in FULL_RUN_REGEX:
+ if r.match(f):
+ output_type(
+ 'full',
+ f'changed file "{f}" matches pattern "{r.pattern}"'
+ )
+
+ # catch-all
+ output_type('quick', 'no changed file matches full CI patterns')
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/merge_summary.awk b/sys/contrib/openzfs/.github/workflows/scripts/merge_summary.awk
new file mode 100755
index 000000000000..2b00d00226c9
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/merge_summary.awk
@@ -0,0 +1,109 @@
+#!/bin/awk -f
+#
+# Merge multiple ZTS tests results summaries into a single summary. This is
+# needed when you're running different parts of ZTS on different test
+# runners or VMs.
+#
+# Usage:
+#
+# ./merge_summary.awk summary1.txt [summary2.txt] [summary3.txt] ...
+#
+# or:
+#
+# cat summary*.txt | ./merge_summary.awk
+#
+BEGIN {
+ i=-1
+ pass=0
+ fail=0
+ skip=0
+ state=""
+ cl=0
+ el=0
+ upl=0
+ ul=0
+
+ # Total seconds of tests runtime
+ total=0;
+}
+
+# Skip empty lines
+/^\s*$/{next}
+
+# Skip Configuration and Test lines
+/^Test:/{state=""; next}
+/Configuration/{state="";next}
+
+# When we see "test-runner.py" stop saving config lines, and
+# save test runner lines
+/test-runner.py/{state="testrunner"; runner=runner$0"\n"; next}
+
+# We need to differentiate the PASS counts from test result lines that start
+# with PASS, like:
+#
+# PASS mv_files/setup
+#
+# Use state="pass_count" to differentiate
+#
+/Results Summary/{state="pass_count"; next}
+/PASS/{ if (state=="pass_count") {pass += $2}}
+/FAIL/{ if (state=="pass_count") {fail += $2}}
+/SKIP/{ if (state=="pass_count") {skip += $2}}
+/Running Time/{
+ state="";
+ running[i]=$3;
+ split($3, arr, ":")
+ total += arr[1] * 60 * 60;
+ total += arr[2] * 60;
+ total += arr[3]
+ next;
+}
+
+/Tests with results other than PASS that are expected/{state="expected_lines"; next}
+/Tests with result of PASS that are unexpected/{state="unexpected_pass_lines"; next}
+/Tests with results other than PASS that are unexpected/{state="unexpected_lines"; next}
+{
+ if (state == "expected_lines") {
+ expected_lines[el] = $0
+ el++
+ }
+
+ if (state == "unexpected_pass_lines") {
+ unexpected_pass_lines[upl] = $0
+ upl++
+ }
+ if (state == "unexpected_lines") {
+ unexpected_lines[ul] = $0
+ ul++
+ }
+}
+
+# Reproduce summary
+END {
+ print runner;
+ print "\nResults Summary"
+ print "PASS\t"pass
+ print "FAIL\t"fail
+ print "SKIP\t"skip
+ print ""
+ print "Running Time:\t"strftime("%T", total, 1)
+ if (pass+fail+skip > 0) {
+ percent_passed=(pass/(pass+fail+skip) * 100)
+ }
+ printf "Percent passed:\t%3.2f%", percent_passed
+
+ print "\n\nTests with results other than PASS that are expected:"
+ asort(expected_lines, sorted)
+ for (j in sorted)
+ print sorted[j]
+
+ print "\n\nTests with result of PASS that are unexpected:"
+ asort(unexpected_pass_lines, sorted)
+ for (j in sorted)
+ print sorted[j]
+
+ print "\n\nTests with results other than PASS that are unexpected:"
+ asort(unexpected_lines, sorted)
+ for (j in sorted)
+ print sorted[j]
+}
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh
new file mode 100755
index 000000000000..0278264d9279
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 1) setup qemu instance on action runner
+######################################################################
+
+set -eu
+
+# We've been seeing this script take over 15min to run. This may or
+# may not be normal. Just to get a little more insight, print out
+# a message to stdout with the top running process, and do this every
+# 30 seconds. We can delete this watchdog later once we get a better
+# handle on what the timeout value should be.
+(while [ 1 ] ; do sleep 30 && echo "[watchdog: $(ps -eo cmd --sort=-pcpu | head -n 2 | tail -n 1)}')]"; done) &
+
+# install needed packages
+export DEBIAN_FRONTEND="noninteractive"
+sudo apt-get -y update
+sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
+ virt-manager linux-modules-extra-$(uname -r) zfsutils-linux
+
+# generate ssh keys
+rm -f ~/.ssh/id_ed25519
+ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""
+
+# not needed
+sudo systemctl stop docker.socket
+sudo systemctl stop multipathd.socket
+
+# remove default swapfile and /mnt
+sudo swapoff -a
+sudo umount -l /mnt
+DISK="/dev/disk/cloud/azure_resource-part1"
+sudo sed -e "s|^$DISK.*||g" -i /etc/fstab
+sudo wipefs -aq $DISK
+sudo systemctl daemon-reload
+
+sudo modprobe loop
+sudo modprobe zfs
+
+# partition the disk as needed
+DISK="/dev/disk/cloud/azure_resource"
+sudo sgdisk --zap-all $DISK
+sudo sgdisk -p \
+ -n 1:0:+16G -c 1:"swap" \
+ -n 2:0:0 -c 2:"tests" \
+$DISK
+sync
+sleep 1
+
+# swap with same size as RAM (16GiB)
+sudo mkswap $DISK-part1
+sudo swapon $DISK-part1
+
+# JBOD 2xdisk for OpenZFS storage (test vm's)
+SSD1="$DISK-part2"
+sudo fallocate -l 12G /test.ssd2
+SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show)
+
+# adjust zfs module parameter and create pool
+exec 1>/dev/null
+ARC_MIN=$((1024*1024*256))
+ARC_MAX=$((1024*1024*512))
+echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
+echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
+echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
+sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
+ -O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \
+ -O redundant_metadata=none -O mountpoint=/mnt/tests
+
+# no need for some scheduler
+for i in /sys/block/s*/queue/scheduler; do
+ echo "none" | sudo tee $i
+done
+
+# Kill off our watchdog
+kill $(jobs -p)
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
new file mode 100755
index 000000000000..1c608348ffcd
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
@@ -0,0 +1,303 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 2) start qemu with some operating system, init via cloud-init
+######################################################################
+
+set -eu
+
+# short name used in zfs-qemu.yml
+OS="$1"
+
+# OS variant (virt-install --os-variant list)
+OSv=$OS
+
+# FreeBSD URLs
+FREEBSD_REL="https://download.freebsd.org/releases/CI-IMAGES"
+FREEBSD_SNAP="https://download.freebsd.org/snapshots/CI-IMAGES"
+URLxz=""
+
+# Ubuntu mirrors
+UBMIRROR="https://cloud-images.ubuntu.com"
+#UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images"
+#UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"
+
+# default nic model for vm's
+NIC="virtio"
+
+# additional options for virt-install
+OPTS[0]=""
+OPTS[1]=""
+
+case "$OS" in
+ almalinux8)
+ OSNAME="AlmaLinux 8"
+ URL="https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/AlmaLinux-8-GenericCloud-latest.x86_64.qcow2"
+ ;;
+ almalinux9)
+ OSNAME="AlmaLinux 9"
+ URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2"
+ ;;
+ almalinux10)
+ OSNAME="AlmaLinux 10"
+ OSv="almalinux9"
+ URL="https://repo.almalinux.org/almalinux/10/cloud/x86_64/images/AlmaLinux-10-GenericCloud-latest.x86_64.qcow2"
+ ;;
+ archlinux)
+ OSNAME="Archlinux"
+ URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2"
+ ;;
+ centos-stream10)
+ OSNAME="CentOS Stream 10"
+ # TODO: #16903 Overwrite OSv to stream9 for virt-install until it's added to osinfo
+ OSv="centos-stream9"
+ URL="https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-10-latest.x86_64.qcow2"
+ ;;
+ centos-stream9)
+ OSNAME="CentOS Stream 9"
+ URL="https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2"
+ ;;
+ debian11)
+ OSNAME="Debian 11"
+ URL="https://cloud.debian.org/images/cloud/bullseye/latest/debian-11-generic-amd64.qcow2"
+ ;;
+ debian12)
+ OSNAME="Debian 12"
+ URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
+ ;;
+ debian13)
+ OSNAME="Debian 13"
+ # TODO: Overwrite OSv to debian13 for virt-install until it's added to osinfo
+ OSv="debian12"
+ URL="https://cloud.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2"
+ OPTS[0]="--boot"
+ OPTS[1]="uefi=on"
+ ;;
+ fedora41)
+ OSNAME="Fedora 41"
+ OSv="fedora-unknown"
+ URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
+ ;;
+ fedora42)
+ OSNAME="Fedora 42"
+ OSv="fedora-unknown"
+ URL="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2"
+ ;;
+ freebsd13-5r)
+ FreeBSD="13.5-RELEASE"
+ OSNAME="FreeBSD $FreeBSD"
+ OSv="freebsd13.0"
+ URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
+ KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
+ NIC="rtl8139"
+ ;;
+ freebsd14-2r)
+ FreeBSD="14.2-RELEASE"
+ OSNAME="FreeBSD $FreeBSD"
+ OSv="freebsd14.0"
+ KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
+ URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
+ ;;
+ freebsd14-3r)
+ FreeBSD="14.3-RELEASE"
+ OSNAME="FreeBSD $FreeBSD"
+ OSv="freebsd14.0"
+ URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
+ KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
+ ;;
+ freebsd13-5s)
+ FreeBSD="13.5-STABLE"
+ OSNAME="FreeBSD $FreeBSD"
+ OSv="freebsd13.0"
+ URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
+ KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
+ NIC="rtl8139"
+ ;;
+ freebsd14-3s)
+ FreeBSD="14.3-STABLE"
+ OSNAME="FreeBSD $FreeBSD"
+ OSv="freebsd14.0"
+ URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
+ KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
+ ;;
+ freebsd15-0c)
+ FreeBSD="15.0-ALPHA3"
+ OSNAME="FreeBSD $FreeBSD"
+ OSv="freebsd14.0"
+ URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
+ KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
+ ;;
+ tumbleweed)
+ OSNAME="openSUSE Tumbleweed"
+ OSv="opensusetumbleweed"
+ MIRROR="http://opensuse-mirror-gce-us.susecloud.net"
+ URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2"
+ ;;
+ ubuntu22)
+ OSNAME="Ubuntu 22.04"
+ OSv="ubuntu22.04"
+ URL="$UBMIRROR/jammy/current/jammy-server-cloudimg-amd64.img"
+ ;;
+ ubuntu24)
+ OSNAME="Ubuntu 24.04"
+ OSv="ubuntu24.04"
+ URL="$UBMIRROR/noble/current/noble-server-cloudimg-amd64.img"
+ ;;
+ *)
+ echo "Wrong value for OS variable!"
+ exit 111
+ ;;
+esac
+
+# environment file
+ENV="/var/tmp/env.txt"
+echo "ENV=$ENV" >> $ENV
+
+# result path
+echo 'RESPATH="/var/tmp/test_results"' >> $ENV
+
+# FreeBSD 13 has problems with: e1000 and virtio
+echo "NIC=$NIC" >> $ENV
+
+# freebsd15 -> used in zfs-qemu.yml
+echo "OS=$OS" >> $ENV
+
+# freebsd14.0 -> used for virt-install
+echo "OSv=\"$OSv\"" >> $ENV
+
+# FreeBSD 15 (Current) -> used for summary
+echo "OSNAME=\"$OSNAME\"" >> $ENV
+
+# default vm count for testing
+VMs=2
+echo "VMs=\"$VMs\"" >> $ENV
+
+# default cpu count for testing vm's
+CPU=2
+echo "CPU=\"$CPU\"" >> $ENV
+
+sudo mkdir -p "/mnt/tests"
+sudo chown -R $(whoami) /mnt/tests
+
+DISK="/dev/zvol/zpool/openzfs"
+sudo zfs create -ps -b 64k -V 80g zpool/openzfs
+while true; do test -b $DISK && break; sleep 1; done
+
+# we are downloading via axel, curl and wget are mostly slower and
+# require more return value checking
+IMG="/mnt/tests/cloud-image"
+if [ ! -z "$URLxz" ]; then
+ echo "Loading $URLxz ..."
+ time axel -q -o "$IMG" "$URLxz"
+ echo "Loading $KSRC ..."
+ time axel -q -o ~/src.txz $KSRC
+else
+ echo "Loading $URL ..."
+ time axel -q -o "$IMG" "$URL"
+fi
+
+echo "Importing VM image to zvol..."
+if [ ! -z "$URLxz" ]; then
+ xzcat -T0 $IMG | sudo dd of=$DISK bs=4M
+else
+ sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
+fi
+rm -f $IMG
+
+PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
+if [ ${OS:0:7} != "freebsd" ]; then
+ cat <<EOF > /tmp/user-data
+#cloud-config
+
+hostname: $OS
+
+users:
+- name: root
+ shell: $BASH
+- name: zfs
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: $BASH
+ ssh_authorized_keys:
+ - $PUBKEY
+
+growpart:
+ mode: auto
+ devices: ['/']
+ ignore_growroot_disabled: false
+EOF
+else
+ cat <<EOF > /tmp/user-data
+#cloud-config
+
+hostname: $OS
+
+# minimized config without sudo for nuageinit of FreeBSD
+growpart:
+ mode: auto
+ devices: ['/']
+ ignore_growroot_disabled: false
+EOF
+fi
+
+sudo virsh net-update default add ip-dhcp-host \
+ "<host mac='52:54:00:83:79:00' ip='192.168.122.10'/>" --live --config
+
+sudo virt-install \
+ --os-variant $OSv \
+ --name "openzfs" \
+ --cpu host-passthrough \
+ --virt-type=kvm --hvm \
+ --vcpus=4,sockets=1 \
+ --memory $((1024*12)) \
+ --memballoon model=virtio \
+ --graphics none \
+ --network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
+ --cloud-init user-data=/tmp/user-data \
+ --disk $DISK,bus=virtio,cache=none,format=raw,driver.discard=unmap \
+ --import --noautoconsole ${OPTS[0]} ${OPTS[1]} >/dev/null
+
+# Give the VMs hostnames so we don't have to refer to them with
+# hardcoded IP addresses.
+#
+# vm0: Initial VM we install dependencies and build ZFS on.
+# vm1..2 Testing VMs
+for ((i=0; i<=VMs; i++)); do
+ echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
+done
+
+# in case the directory isn't there already
+mkdir -p $HOME/.ssh
+
+cat <<EOF >> $HOME/.ssh/config
+# no questions please
+StrictHostKeyChecking no
+
+# small timeout, used in while loops later
+ConnectTimeout 1
+EOF
+
+if [ ${OS:0:7} != "freebsd" ]; then
+ # enable KSM on Linux
+ sudo virsh dommemstat --domain "openzfs" --period 5
+ sudo virsh node-memory-tune 100 50 1
+ echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null
+else
+ # on FreeBSD we need some more init stuff, because of nuageinit
+ BASH="/usr/local/bin/bash"
+ while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
+ ssh 2>/dev/null root@vm0 "uname -a" && break
+ done
+ ssh root@vm0 "pkg install -y bash ca_root_nss git qemu-guest-agent python3 py311-cloud-init"
+ ssh root@vm0 "chsh -s $BASH root"
+ ssh root@vm0 'sysrc qemu_guest_agent_enable="YES"'
+ ssh root@vm0 'sysrc cloudinit_enable="YES"'
+ ssh root@vm0 "pw add user zfs -w no -s $BASH"
+ ssh root@vm0 'mkdir -p ~zfs/.ssh'
+ ssh root@vm0 'echo "zfs ALL=(ALL:ALL) NOPASSWD: ALL" >> /usr/local/etc/sudoers'
+ ssh root@vm0 'echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config'
+ scp ~/.ssh/id_ed25519.pub "root@vm0:~zfs/.ssh/authorized_keys"
+ ssh root@vm0 'chown -R zfs ~zfs'
+ ssh root@vm0 'service sshd restart'
+ scp ~/src.txz "root@vm0:/tmp/src.txz"
+ ssh root@vm0 'tar -C / -zxf /tmp/src.txz'
+fi
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
new file mode 100755
index 000000000000..f67bb2f68e94
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
@@ -0,0 +1,262 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 3) install dependencies for compiling and loading
+#
+# $1: OS name (like 'fedora41')
+# $2: (optional) Experimental Fedora kernel version, like "6.14" to
+# install instead of Fedora defaults.
+######################################################################
+
+set -eu
+
+function archlinux() {
+ echo "##[group]Running pacman -Syu"
+ sudo btrfs filesystem resize max /
+ sudo pacman -Syu --noconfirm
+ echo "##[endgroup]"
+
+ echo "##[group]Install Development Tools"
+ sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
+ fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
+ parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
+ samba strace sysstat rng-tools rsync wget xxhash
+ echo "##[endgroup]"
+}
+
+function debian() {
+ export DEBIAN_FRONTEND="noninteractive"
+
+ echo "##[group]Running apt-get update+upgrade"
+ sudo sed -i '/[[:alpha:]]-backports/d' /etc/apt/sources.list
+ sudo apt-get update -y
+ sudo apt-get upgrade -y
+ echo "##[endgroup]"
+
+ echo "##[group]Install Development Tools"
+ sudo apt-get install -y \
+ acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
+ fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
+ libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
+ libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
+ libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
+ lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
+ python3-cffi python3-dev python3-distlib python3-packaging libtirpc-dev \
+ python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
+ rsync samba strace sysstat uuid-dev watchdog wget xfslibs-dev xxhash \
+ zlib1g-dev
+ echo "##[endgroup]"
+}
+
+function freebsd() {
+ export ASSUME_ALWAYS_YES="YES"
+
+ echo "##[group]Install Development Tools"
+ sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
+ gdb gettext gettext-runtime git gmake gsed jq ksh lcov libtool lscpu \
+ pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash
+ sudo pkg install -xy \
+ '^samba4[[:digit:]]+$' \
+ '^py3[[:digit:]]+-cffi$' \
+ '^py3[[:digit:]]+-sysctl$' \
+ '^py3[[:digit:]]+-setuptools$' \
+ '^py3[[:digit:]]+-packaging$'
+ echo "##[endgroup]"
+}
+
+# common packages for: almalinux, centos, redhat
+function rhel() {
+ echo "##[group]Running dnf update"
+ echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
+ sudo dnf clean all
+ sudo dnf update -y --setopt=fastestmirror=1 --refresh
+ echo "##[endgroup]"
+
+ echo "##[group]Install Development Tools"
+
+ # Alma wants "Development Tools", Fedora 41 wants "development-tools"
+ if ! sudo dnf group install -y "Development Tools" ; then
+ echo "Trying 'development-tools' instead of 'Development Tools'"
+ sudo dnf group install -y development-tools
+ fi
+
+ sudo dnf install -y \
+ acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
+ gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
+ libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
+ ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
+ libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
+ parted perf python3 python3-cffi python3-devel python3-packaging \
+ kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
+ rpm-build rsync samba strace sysstat systemd watchdog wget xfsprogs-devel \
+ xxhash zlib-devel
+ echo "##[endgroup]"
+}
+
+function tumbleweed() {
+ echo "##[group]Running zypper is TODO!"
+ sleep 23456
+ echo "##[endgroup]"
+}
+
+# $1: Kernel version to install (like '6.14rc7')
+function install_fedora_experimental_kernel {
+
+ our_version="$1"
+ sudo dnf -y copr enable @kernel-vanilla/stable
+ sudo dnf -y copr enable @kernel-vanilla/mainline
+ all="$(sudo dnf list --showduplicates kernel-* python3-perf* perf* bpftool*)"
+ echo "Available versions:"
+ echo "$all"
+
+ # You can have a bunch of minor variants of the version we want '6.14'.
+ # Pick the newest variant (sorted by version number).
+ specific_version=$(echo "$all" | grep $our_version | awk '{print $2}' | sort -V | tail -n 1)
+ list="$(echo "$all" | grep $specific_version | grep -Ev 'kernel-rt|kernel-selftests|kernel-debuginfo' | sed 's/.x86_64//g' | awk '{print $1"-"$2}')"
+ sudo dnf install -y $list
+ sudo dnf -y copr disable @kernel-vanilla/stable
+ sudo dnf -y copr disable @kernel-vanilla/mainline
+}
+
+# Install dependencies
+case "$1" in
+ almalinux8)
+ echo "##[group]Enable epel and powertools repositories"
+ sudo dnf config-manager -y --set-enabled powertools
+ sudo dnf install -y epel-release
+ echo "##[endgroup]"
+ rhel
+ echo "##[group]Install kernel-abi-whitelists"
+ sudo dnf install -y kernel-abi-whitelists
+ echo "##[endgroup]"
+ ;;
+ almalinux9|almalinux10|centos-stream9|centos-stream10)
+ echo "##[group]Enable epel and crb repositories"
+ sudo dnf config-manager -y --set-enabled crb
+ sudo dnf install -y epel-release
+ echo "##[endgroup]"
+ rhel
+ echo "##[group]Install kernel-abi-stablelists"
+ sudo dnf install -y kernel-abi-stablelists
+ echo "##[endgroup]"
+ ;;
+ archlinux)
+ archlinux
+ ;;
+ debian*)
+ echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
+ debian
+ echo "##[group]Install Debian specific"
+ sudo apt-get install -yq linux-perf dh-sequence-dkms
+ echo "##[endgroup]"
+ ;;
+ fedora*)
+ rhel
+ sudo dnf install -y libunwind-devel
+
+ # Fedora 42+ moves /usr/bin/script from 'util-linux' to 'util-linux-script'
+ sudo dnf install -y util-linux-script || true
+
+ # Optional: Install an experimental kernel ($2 = kernel version)
+ if [ -n "${2:-}" ] ; then
+ install_fedora_experimental_kernel "$2"
+ fi
+ ;;
+ freebsd*)
+ freebsd
+ ;;
+ tumbleweed)
+ tumbleweed
+ ;;
+ ubuntu*)
+ debian
+ echo "##[group]Install Ubuntu specific"
+ sudo apt-get install -yq linux-tools-common libtirpc-dev \
+ linux-modules-extra-$(uname -r)
+ sudo apt-get install -yq dh-sequence-dkms
+ echo "##[endgroup]"
+ echo "##[group]Delete Ubuntu OpenZFS modules"
+ for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
+ echo "##[endgroup]"
+ ;;
+esac
+
+# This script is used for checkstyle + zloop deps also.
+# Install only the needed packages and exit - when used this way.
+test -z "${ONLY_DEPS:-}" || exit 0
+
+# Start services
+echo "##[group]Enable services"
+case "$1" in
+ freebsd*)
+ # add virtio things
+ echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
+ for i in balloon blk console random scsi; do
+ echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
+ done
+ echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
+ sudo -E mount /dev/fd
+ sudo -E touch /etc/zfs/exports
+ sudo -E sysrc mountd_flags="/etc/zfs/exports"
+ echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
+ sudo -E service nfsd enable
+ sudo -E service qemu-guest-agent enable
+ sudo -E service samba_server enable
+ ;;
+ debian*|ubuntu*)
+ sudo -E systemctl enable nfs-kernel-server
+ sudo -E systemctl enable qemu-guest-agent
+ sudo -E systemctl enable smbd
+ ;;
+ *)
+ # All other linux distros
+ sudo -E systemctl enable nfs-server
+ sudo -E systemctl enable qemu-guest-agent
+ sudo -E systemctl enable smb
+ ;;
+esac
+echo "##[endgroup]"
+
+# Setup Kernel cmdline
+CMDLINE="console=tty0 console=ttyS0,115200n8"
+CMDLINE="$CMDLINE selinux=0"
+CMDLINE="$CMDLINE random.trust_cpu=on"
+CMDLINE="$CMDLINE no_timer_check"
+case "$1" in
+ almalinux*|centos*|fedora*)
+ GRUB_CFG="/boot/grub2/grub.cfg"
+ GRUB_MKCONFIG="grub2-mkconfig"
+ CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
+ echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
+ | sudo tee -a /etc/default/grub >/dev/null
+ ;;
+ ubuntu24)
+ GRUB_CFG="/boot/grub/grub.cfg"
+ GRUB_MKCONFIG="grub-mkconfig"
+ echo 'GRUB_DISABLE_OS_PROBER="false"' \
+ | sudo tee -a /etc/default/grub >/dev/null
+ ;;
+ *)
+ GRUB_CFG="/boot/grub/grub.cfg"
+ GRUB_MKCONFIG="grub-mkconfig"
+ ;;
+esac
+
+case "$1" in
+ archlinux|freebsd*)
+ true
+ ;;
+ *)
+ echo "##[group]Edit kernel cmdline"
+ sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
+ echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
+ | sudo tee -a /etc/default/grub >/dev/null
+ sudo $GRUB_MKCONFIG -o $GRUB_CFG
+ echo "##[endgroup]"
+ ;;
+esac
+
+# reset cloud-init configuration and poweroff
+sudo cloud-init clean --logs
+sleep 2 && sudo poweroff &
+exit 0
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps.sh
new file mode 100755
index 000000000000..267ae4ad3c7b
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps.sh
@@ -0,0 +1,28 @@
+######################################################################
+# 3) Wait for VM to boot from previous step and launch dependencies
+# script on it.
+#
+# $1: OS name (like 'fedora41')
+# $2: (optional) Experimental kernel version to install on fedora,
+# like "6.14".
+######################################################################
+
+.github/workflows/scripts/qemu-wait-for-vm.sh vm0
+
+# SPECIAL CASE:
+#
+# If the user passed in an experimental kernel version to test on Fedora,
+# we need to update the kernel version in zfs's META file to allow the
+# build to happen. We update our local copy of META here, since we know
+# it will be rsync'd up in the next step.
+if [ -n "${2:-}" ] ; then
+ sed -i -E 's/Linux-Maximum: .+/Linux-Maximum: 99.99/g' META
+fi
+
+scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh
+PID=`pidof /usr/bin/qemu-system-x86_64`
+ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' "$@"
+# wait for poweroff to succeed
+tail --pid=$PID -f /dev/null
+sleep 5 # avoid this: "error: Domain is already active"
+rm -f $HOME/.ssh/known_hosts
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
new file mode 100755
index 000000000000..2807d9e77127
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
@@ -0,0 +1,396 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 4) configure and build openzfs modules. This is run on the VMs.
+#
+# Usage:
+#
+# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--patch-level NUM]
+# [--poweroff][--release][--repo][--tarball]
+#
+# OS: OS name like 'fedora41'
+# --enable-debug: Build RPMs with '--enable-debug' (for testing)
+# --dkms: Build DKMS RPMs as well
+# --patch-level NUM: Use a custom patch level number for packages.
+# --poweroff: Power-off the VM after building
+# --release Build zfs-release*.rpm as well
+# --repo After building everything, copy RPMs into /tmp/repo
+# in the ZFS RPM repository file structure. Also
+# copy tarballs if they were built.
+# --tarball: Also build a tarball of ZFS source
+######################################################################
+
+# Flag variables double as globals consumed by the build functions below.
+ENABLE_DEBUG=""
+DKMS=""
+PATCH_LEVEL=""
+POWEROFF=""
+RELEASE=""
+REPO=""
+TARBALL=""
+# Parse options; any argument that is not a recognized flag is taken to
+# be the OS name (last such argument wins).
+while [[ $# -gt 0 ]]; do
+  case $1 in
+    --enable-debug)
+      ENABLE_DEBUG=1
+      shift
+      ;;
+    --dkms)
+      DKMS=1
+      shift
+      ;;
+    --patch-level)
+      PATCH_LEVEL=$2
+      shift
+      shift
+      ;;
+    --poweroff)
+      POWEROFF=1
+      shift
+      ;;
+    --release)
+      RELEASE=1
+      shift
+      ;;
+    --repo)
+      REPO=1
+      shift
+      ;;
+    --tarball)
+      TARBALL=1
+      shift
+      ;;
+    *)
+      OS=$1
+      shift
+      ;;
+  esac
+done
+
+# Strict mode is enabled only after option parsing so that peeking at
+# optional positional parameters above cannot trip 'set -u'.
+set -eu
+
+# run CMD...: run CMD, echoing a banner, and tee the command's *stderr*
+# into $LOG while keeping all output visible on the console.
+# The '3>&1 1>&2 2>&3' dance swaps the subshell's stdout and stderr, so
+# only stderr flows through the pipe into tee.  Because a pipeline's exit
+# status is that of its last command (tee), the real exit code is smuggled
+# out via /tmp/rv; on failure we record a build failure marker and exit
+# with the captured code.
+function run() {
+  LOG="/var/tmp/build-stderr.txt"
+  echo "****************************************************"
+  echo "$(date) ($*)"
+  echo "****************************************************"
+  ($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
+  if [ -f /tmp/rv ]; then
+    RV=$(cat /tmp/rv)
+    echo "****************************************************"
+    echo "exit with value=$RV ($*)"
+    echo "****************************************************"
+    # Marker consumed by qemu-7-prepare.sh to detect a failed build.
+    echo 1 > /var/tmp/build-exitcode.txt
+    exit $RV
+  fi
+}
+
+# Look at the RPMs in the current directory and copy/move them to
+# /tmp/repo, using the directory structure we use for the ZFS RPM repos.
+#
+# Globals read: $DKMS
+#
+# For example:
+# /tmp/repo/epel-testing/9.5
+# /tmp/repo/epel-testing/9.5/SRPMS
+# /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm
+# /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm
+# /tmp/repo/epel-testing/9.5/kmod
+# /tmp/repo/epel-testing/9.5/kmod/x86_64
+# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug
+# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm
+# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm
+# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm
+# ...
+function copy_rpms_to_repo {
+  # Pick a RPM to query. It doesn't matter which one - we just want to extract
+  # the 'Build Host' value from it.
+  rpm=$(ls zfs-*.rpm | head -n 1)
+
+  # Get zfs version '2.2.99'
+  zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}')
+
+  # Get "2.1" or "2.2"
+  # NOTE(review): the grep pattern is unquoted; it works because the shell
+  # leaves [0-9]+\.[0-9]+ alone when nothing matches as a glob, but quoting
+  # it would be more robust.
+  zfs_major=$(echo $zfs_ver | grep -Eo [0-9]+\.[0-9]+)
+
+  # Get 'almalinux9.5' or 'fedora41' type string
+  build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}')
+
+  # Get '9.5' or '41' OS version
+  os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$')
+
+  # Our ZFS version and OS name will determine which repo the RPMs
+  # will go in (regular or testing). Fedora always gets the newest
+  # releases, and Alma gets the older releases.
+  case $build_host in
+  almalinux*)
+    case $zfs_major in
+    2.2)
+      d="epel"
+      ;;
+    *)
+      d="epel-testing"
+      ;;
+    esac
+    ;;
+  fedora*)
+    d="fedora"
+    ;;
+  esac
+
+  prefix=/tmp/repo
+  dst="$prefix/$d/$os_ver"
+
+  # Special case: move zfs-release*.rpm out of the way first (if we built them).
+  # This will make filtering the other RPMs easier.
+  mkdir -p $dst
+  mv zfs-release*.rpm $dst || true
+
+  # Copy source RPMs
+  mkdir -p $dst/SRPMS
+  cp $(ls *.src.rpm) $dst/SRPMS/
+
+  if [[ "$build_host" =~ "almalinux" ]] ; then
+    # Copy kmods+userspace
+    mkdir -p $dst/kmod/x86_64/debug
+    cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64
+    cp *debuginfo*.rpm $dst/kmod/x86_64/debug
+  fi
+
+  if [ -n "$DKMS" ] ; then
+    # Copy dkms+userspace
+    mkdir -p $dst/x86_64
+    cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64
+  fi
+
+  # Copy debug
+  mkdir -p $dst/x86_64/debug
+  cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug
+}
+
+# Configure, build and install ZFS from source on FreeBSD.
+# $1: (optional) extra arguments passed through to ./configure
+# Each step goes through run() so failures are logged and abort the build.
+function freebsd() {
+  extra="${1:-}"
+
+  # The ZFS build requires GNU make on FreeBSD.
+  export MAKE="gmake"
+  echo "##[group]Autogen.sh"
+  run ./autogen.sh
+  echo "##[endgroup]"
+
+  echo "##[group]Configure"
+  run ./configure \
+    --prefix=/usr/local \
+    --with-libintl-prefix=/usr/local \
+    --enable-pyzfs \
+    --enable-debuginfo $extra
+  echo "##[endgroup]"
+
+  echo "##[group]Build"
+  run gmake -j$(sysctl -n hw.ncpu)
+  echo "##[endgroup]"
+
+  echo "##[group]Install"
+  run sudo gmake install
+  echo "##[endgroup]"
+}
+
+# Configure, build and install ZFS from source on a generic Linux distro
+# (the non-RPM, non-deb fallback path).
+# $1: (optional) extra arguments passed through to ./configure
+function linux() {
+  extra="${1:-}"
+
+  echo "##[group]Autogen.sh"
+  run ./autogen.sh
+  echo "##[endgroup]"
+
+  echo "##[group]Configure"
+  run ./configure \
+    --prefix=/usr \
+    --enable-pyzfs \
+    --enable-debuginfo $extra
+  echo "##[endgroup]"
+
+  echo "##[group]Build"
+  run make -j$(nproc)
+  echo "##[endgroup]"
+
+  echo "##[group]Install"
+  run sudo make install
+  echo "##[endgroup]"
+}
+
+# Build ZFS kmod+utils RPMs (and optionally DKMS and zfs-release RPMs),
+# then install them unless we are only populating a repo.
+# $1: (optional) extra arguments passed through to ./configure
+# Globals read: $PATCH_LEVEL $DKMS $REPO $RELEASE
+function rpm_build_and_install() {
+  extra="${1:-}"
+
+  # Build RPMs with XZ compression by default (since gzip decompression is slow)
+  echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros
+
+  echo "##[group]Autogen.sh"
+  run ./autogen.sh
+  echo "##[endgroup]"
+
+  # Stamp a custom patch level into META's Release field before configure
+  # reads it.
+  if [ -n "$PATCH_LEVEL" ] ; then
+    sed -i -E 's/(Release:\s+)1/\1'$PATCH_LEVEL'/g' META
+  fi
+
+  echo "##[group]Configure"
+  run ./configure --enable-debuginfo $extra
+  echo "##[endgroup]"
+
+  echo "##[group]Build"
+  run make pkg-kmod pkg-utils
+  echo "##[endgroup]"
+
+  if [ -n "$DKMS" ] ; then
+    echo "##[group]DKMS"
+    # NOTE(review): unlike the other steps this is not wrapped in run(),
+    # so its stderr is not captured in build-stderr.txt - confirm intended.
+    make rpm-dkms
+    echo "##[endgroup]"
+  fi
+
+  if [ -n "$REPO" ] ; then
+    echo "Skipping install since we're only building RPMs and nothing else"
+  else
+    echo "##[group]Install"
+    run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm')
+    echo "##[endgroup]"
+  fi
+
+  # Optionally build the zfs-release.*.rpm
+  if [ -n "$RELEASE" ] ; then
+    echo "##[group]Release"
+    pushd ~
+    sudo dnf -y install rpm-build || true
+    # Check out a sparse copy of zfsonlinux.github.com.git so we don't get
+    # all the binaries. We just need a few kilobytes of files to build RPMs.
+    git clone --depth 1 --no-checkout \
+      https://github.com/zfsonlinux/zfsonlinux.github.com.git
+
+    cd zfsonlinux.github.com
+    git sparse-checkout set zfs-release
+    git checkout
+    cd zfs-release
+
+    mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD}
+    cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES
+    cp zfs-release.spec ~/rpmbuild/SPECS/
+    rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec
+
+    # ZFS release RPMs are built. Copy them to the ~/zfs directory just to
+    # keep all the RPMs in the same place.
+    cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs
+    cp ~/rpmbuild/SRPMS/*.rpm ~/zfs
+
+    popd
+    rm -fr ~/rpmbuild
+    echo "##[endgroup]"
+  fi
+
+  if [ -n "$REPO" ] ; then
+    echo "##[group]Repo"
+    copy_rpms_to_repo
+    echo "##[endgroup]"
+  fi
+}
+
+# Build and install ZFS native Debian packages (kmod + utils).
+# $1: (optional) extra arguments passed through to ./configure
+function deb_build_and_install() {
+  extra="${1:-}"
+
+  echo "##[group]Autogen.sh"
+  run ./autogen.sh
+  echo "##[endgroup]"
+
+  echo "##[group]Configure"
+  run ./configure \
+    --prefix=/usr \
+    --enable-pyzfs \
+    --enable-debuginfo $extra
+  echo "##[endgroup]"
+
+  echo "##[group]Build"
+  run make native-deb-kmod native-deb-utils
+  echo "##[endgroup]"
+
+  echo "##[group]Install"
+  # Do kmod install. Note that when you build the native debs, the
+  # packages themselves are placed in parent directory '../' rather than
+  # in the source directory like the rpms are.
+  run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
+    | grep -Ev 'dkms|dracut')
+  echo "##[endgroup]"
+}
+
+# Build a ZFS source tarball via 'make dist' and stage it under
+# /tmp/repo/releases.  Only does anything when --repo was given.
+function build_tarball {
+  if [ -n "$REPO" ] ; then
+    ./autogen.sh
+    ./configure --with-config=srpm
+    make dist
+    mkdir -p /tmp/repo/releases
+    # The tarball name is based off of 'Version' field in the META file.
+    mv *.tar.gz /tmp/repo/releases/
+  fi
+}
+
+# Debug: show kernel cmdline
+if [ -f /proc/cmdline ] ; then
+  cat /proc/cmdline || true
+fi
+
+# Set our hostname to our OS name and version number. Specifically, we set the
+# major and minor number so that when we query the Build Host field in the RPMs
+# we build, we can see what specific version of Fedora/Almalinux we were using
+# to build them. This is helpful for matching up KMOD versions.
+#
+# Examples:
+#
+# rhel8.10
+# almalinux9.5
+# fedora42
+source /etc/os-release
+# NOTE(review): the stray indentation on the next lines looks accidental;
+# it does not change behavior in bash.
+ if which hostnamectl &> /dev/null ; then
+  # Fedora 42+ use hostnamectl
+  sudo hostnamectl set-hostname "$ID$VERSION_ID"
+  sudo hostnamectl set-hostname --pretty "$ID$VERSION_ID"
+else
+  sudo hostname "$ID$VERSION_ID"
+fi
+
+# save some sysinfo
+uname -a > /var/tmp/uname.txt
+
+cd $HOME/zfs
+export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"
+
+extra=""
+if [ -n "$ENABLE_DEBUG" ] ; then
+  extra="--enable-debug"
+fi
+
+# build - dispatch on the OS name parsed from the command line
+case "$OS" in
+  freebsd*)
+    freebsd "$extra"
+    ;;
+  alma*|centos*)
+    rpm_build_and_install "--with-spec=redhat $extra"
+    ;;
+  fedora*)
+    rpm_build_and_install "$extra"
+
+    # Historically, we've always built the release tarballs on Fedora, since
+    # there was one instance long ago where we built them on CentOS 7, and they
+    # didn't work correctly for everyone.
+    if [ -n "$TARBALL" ] ; then
+      build_tarball
+    fi
+    ;;
+  debian*|ubuntu*)
+    deb_build_and_install "$extra"
+    ;;
+  *)
+    linux "$extra"
+    ;;
+esac
+
+
+# building the zfs module was ok
+echo 0 > /var/tmp/build-exitcode.txt
+
+# reset cloud-init configuration and poweroff
+# (backgrounded so this script can still return 0 over ssh first)
+if [ -n "$POWEROFF" ] ; then
+  sudo cloud-init clean --logs
+  sync && sleep 2 && sudo poweroff &
+fi
+exit 0
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build.sh
new file mode 100755
index 000000000000..63c9bccaa446
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 4) configure and build openzfs modules
+#
+# Runs on the github runner: brings the freshly-provisioned VM back up,
+# copies the ZFS source tree into it, then runs the in-VM build script,
+# forwarding all command line arguments to it.
+######################################################################
+echo "Build modules in QEMU machine"
+
+# Bring our VM back up and copy over ZFS source
+.github/workflows/scripts/qemu-prepare-for-build.sh
+
+# Quote "$@" so arguments containing spaces (e.g. values given to
+# --patch-level) keep their word boundaries when handed to ssh.
+ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' "$@"
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
new file mode 100755
index 000000000000..4869c1003e48
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
@@ -0,0 +1,137 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 5) start test machines and load openzfs module
+#
+# Runs on the github runner: clones the provisioned "openzfs" zvol into
+# $VMs test VMs, boots them via cloud-init, starts console and memory
+# logging, and waits until every VM answers ssh.
+######################################################################
+
+set -eu
+
+# read our defined variables
+source /var/tmp/env.txt
+
+# wait for poweroff to succeed
+PID=$(pidof /usr/bin/qemu-system-x86_64)
+tail --pid=$PID -f /dev/null
+sudo virsh undefine --nvram openzfs
+
+# cpu pinning
+CPUSET=("0,1" "2,3")
+
+# additional options for virt-install
+OPTS[0]=""
+OPTS[1]=""
+
+case "$OS" in
+  freebsd*)
+    # FreeBSD needs only 6GiB
+    RAM=6
+    ;;
+  debian13)
+    RAM=8
+    # Boot Debian 13 with uefi=on and secureboot=off (ZFS Kernel Module not signed)
+    OPTS[0]="--boot"
+    OPTS[1]="firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
+    ;;
+  *)
+    # Linux needs more memory, but can be optimized to share it via KSM
+    RAM=8
+    ;;
+esac
+
+# create snapshot we can clone later
+sudo zfs snapshot zpool/openzfs@now
+
+# setup the testing vm's
+PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
+
+# start testing VMs
+for ((i=1; i<=VMs; i++)); do
+  echo "Creating disk for vm$i..."
+  DISK="/dev/zvol/zpool/vm$i"
+  FORMAT="raw"
+  # System disk is a cheap clone of the provisioned image; the tests disk
+  # is a fresh sparse 64G zvol.
+  sudo zfs clone zpool/openzfs@now zpool/vm$i-system
+  sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests
+
+  cat <<EOF > /tmp/user-data
+#cloud-config
+
+fqdn: vm$i
+
+users:
+- name: root
+  shell: $BASH
+- name: zfs
+  sudo: ALL=(ALL) NOPASSWD:ALL
+  shell: $BASH
+  ssh_authorized_keys:
+  - $PUBKEY
+
+growpart:
+  mode: auto
+  devices: ['/']
+  ignore_growroot_disabled: false
+EOF
+
+  # Pin a fixed MAC -> IP mapping so vm$i is always reachable at
+  # 192.168.122.1$i without having to discover DHCP leases.
+  sudo virsh net-update default add ip-dhcp-host \
+    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config
+
+  sudo virt-install \
+    --os-variant $OSv \
+    --name "vm$i" \
+    --cpu host-passthrough \
+    --virt-type=kvm --hvm \
+    --vcpus=$CPU,sockets=1 \
+    --cpuset=${CPUSET[$((i-1))]} \
+    --memory $((1024*RAM)) \
+    --memballoon model=virtio \
+    --graphics none \
+    --cloud-init user-data=/tmp/user-data \
+    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
+    --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
+    --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
+    --import --noautoconsole ${OPTS[0]} ${OPTS[1]}
+done
+
+# generate some memory stats
+# (appended to /var/tmp/stats.txt every 5 minutes via root's crontab)
+cat <<EOF > cronjob.sh
+exec 1>>/var/tmp/stats.txt
+exec 2>&1
+echo "********************************************************************************"
+uptime
+free -m
+zfs list
+EOF
+
+sudo chmod +x cronjob.sh
+sudo mv -f cronjob.sh /root/cronjob.sh
+echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
+sudo crontab crontab.txt
+rm crontab.txt
+
+# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
+# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
+# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
+for ((i=1; i<=VMs; i++)); do
+  mkdir -p $RESPATH/vm$i
+  read "pty" <<< $(sudo virsh ttyconsole vm$i)
+
+  # Create the file so we can tail it, even if there's no output.
+  touch $RESPATH/vm$i/console.txt
+
+  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
+
+  # Write all VM boot lines to the console to aid in debugging failed boots.
+  # The boot lines from all the VMs will be munged together, so prepend each
+  # line with the vm hostname (like 'vm1:').
+  (while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &
+
+done
+echo "Console logging for ${VMs}x $OS started."
+
+
+# check if the machines are okay
+echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
+for ((i=1; i<=VMs; i++)); do
+  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
+done
+echo "All $VMs VMs are up now."
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
new file mode 100755
index 000000000000..ca6ac77f146d
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 6) load openzfs module and run the tests
+#
+# called on runner: qemu-6-tests.sh
+# called on qemu-vm: qemu-6-tests.sh $OS $2/$3
+######################################################################
+
+set -eu
+
+# Prefix one raw log line with "[vmID: HH:MM:SS CTR]".
+# $1: vm id, $2: the log line.
+# Elapsed time is measured against the timestamp in /tmp/tsstart;
+# /tmp/ctr carries the running test counter across invocations (this
+# function runs once per line in a subshell pipeline).
+function prefix() {
+  ID="$1"
+  LINE="$2"
+  CURRENT=$(date +%s)
+  TSSTART=$(cat /tmp/tsstart)
+  DIFF=$((CURRENT-TSSTART))
+  H=$((DIFF/3600))
+  DIFF=$((DIFF-(H*3600)))
+  M=$((DIFF/60))
+  S=$((DIFF-(M*60)))
+
+  # Bump the persistent counter on every "Test:" line.
+  CTR=$(cat /tmp/ctr)
+  echo $LINE| grep -q '^\[.*] Test[: ]' && CTR=$((CTR+1)) && echo $CTR > /tmp/ctr
+
+  BASE="$HOME/work/zfs/zfs"
+  COLOR="$BASE/scripts/zfs-tests-color.sh"
+  # Normalize paths on "Test" lines and colorize them; all other lines
+  # pass through with only the plain vm prefix.
+  CLINE=$(echo $LINE| grep '^\[.*] Test[: ]' \
+      | sed -e 's|^\[.*] Test|Test|g' \
+      | sed -e 's|/usr/local|/usr|g' \
+      | sed -e 's| /usr/share/zfs/zfs-tests/tests/| |g' | $COLOR)
+  if [ -z "$CLINE" ]; then
+    printf "vm${ID}: %s\n" "$LINE"
+  else
+    # [vm2: 00:15:54 256] Test: functional/checksum/setup (run as root) [00:00] [PASS]
+    printf "[vm${ID}: %02d:%02d:%02d %4d] %s\n" \
+        "$H" "$M" "$S" "$CTR" "$CLINE"
+  fi
+}
+
+# called directly on the runner
+if [ -z ${1:-} ]; then
+ cd "/var/tmp"
+ source env.txt
+ SSH=$(which ssh)
+ TESTS='$HOME/zfs/.github/workflows/scripts/qemu-6-tests.sh'
+ echo 0 > /tmp/ctr
+ date "+%s" > /tmp/tsstart
+
+ for ((i=1; i<=VMs; i++)); do
+ IP="192.168.122.1$i"
+ daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
+ $SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
+ # handly line by line and add info prefix
+ stdbuf -oL tail -fq vm${i}log.txt \
+ | while read -r line; do prefix "$i" "$line"; done &
+ echo $! > vm${i}log.pid
+ # don't mix up the initial --- Configuration --- part
+ sleep 0.13
+ done
+
+ # wait for all vm's to finish
+ for ((i=1; i<=VMs; i++)); do
+ tail --pid=$(cat vm${i}.pid) -f /dev/null
+ pid=$(cat vm${i}log.pid)
+ rm -f vm${i}log.pid
+ kill $pid
+ done
+
+ exit 0
+fi
+
+# this part runs inside qemu vm
+export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
+case "$1" in
+ freebsd*)
+ TDIR="/usr/local/share/zfs"
+ sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
+ sudo -E ./zfs/scripts/zfs.sh
+ sudo mv -f /var/tmp/*.txt /tmp
+ sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null
+ sudo mount -o noatime /dev/vtbd1 /var/tmp
+ sudo chmod 1777 /var/tmp
+ sudo mv -f /tmp/*.txt /var/tmp
+ ;;
+ *)
+ # use xfs @ /var/tmp for all distros
+ TDIR="/usr/share/zfs"
+ sudo -E modprobe zfs
+ sudo mv -f /var/tmp/*.txt /tmp
+ sudo mkfs.xfs -fq /dev/vdb
+ sudo mount -o noatime /dev/vdb /var/tmp
+ sudo chmod 1777 /var/tmp
+ sudo mv -f /tmp/*.txt /var/tmp
+ ;;
+esac
+
+# enable io_uring on el9/el10
+case "$1" in
+ almalinux9|almalinux10|centos-stream*)
+ sudo sysctl kernel.io_uring_disabled=0 > /dev/null
+ ;;
+esac
+
+# run functional testings and save exitcode
+cd /var/tmp
+TAGS=$2/$3
+if [ "$4" == "quick" ]; then
+ export RUNFILES="sanity.run"
+fi
+sudo dmesg -c > dmesg-prerun.txt
+mount > mount.txt
+df -h > df-prerun.txt
+$TDIR/zfs-tests.sh -vKO -s 3GB -T $TAGS
+RV=$?
+df -h > df-postrun.txt
+echo $RV > tests-exitcode.txt
+sync
+exit 0
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-7-prepare.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-7-prepare.sh
new file mode 100755
index 000000000000..98a5c24c2521
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-7-prepare.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 7) prepare output of the results
+# - this script pre-creates all needed logfiles for later summary
+######################################################################
+
+set -eu
+
+# read our defined variables
+cd /var/tmp
+source env.txt
+
+mkdir -p $RESPATH
+
+# check if building the module has failed
+# ($VMs only lands in env.txt once the build succeeded and the test VMs
+# were created, so it being unset means the build step died)
+if [ -z ${VMs:-} ]; then
+  cd $RESPATH
+  echo ":exclamation: ZFS module didn't build successfully :exclamation:" \
+    | tee summary.txt | tee /tmp/summary.txt
+  cp /var/tmp/*.txt .
+  tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
+  exit 0
+fi
+
+# build was okay
+BASE="$HOME/work/zfs/zfs"
+MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"
+
+# catch result files of testings (vm's should be there)
+# '|| true' everywhere: a crashed or unreachable VM must not abort
+# collection of the other VMs' results.
+for ((i=1; i<=VMs; i++)); do
+  rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
+  scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
+  scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
+done
+cp -f /var/tmp/*.txt $RESPATH || true
+cd $RESPATH
+
+# prepare result files for summary
+for ((i=1; i<=VMs; i++)); do
+  file="vm$i/build-stderr.txt"
+  test -s $file && mv -f $file build-stderr.txt
+
+  file="vm$i/build-exitcode.txt"
+  test -s $file && mv -f $file build-exitcode.txt
+
+  file="vm$i/uname.txt"
+  test -s $file && mv -f $file uname.txt
+
+  file="vm$i/tests-exitcode.txt"
+  if [ ! -s $file ]; then
+    # XXX - add some tests for kernel panic's here
+    # tail -n 80 vm$i/console.txt | grep XYZ
+    echo 1 > $file
+  fi
+  rv=$(cat vm$i/tests-exitcode.txt)
+  test $rv != 0 && touch /tmp/have_failed_tests
+
+  file="vm$i/current/log"
+  if [ -s $file ]; then
+    cat $file >> log
+    # Keep only log sections belonging to failed/killed tests.
+    awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
+        /\[SKIP\]|\[PASS\]/{ show=0; } show' \
+        $file > /tmp/vm${i}dbg.txt
+  fi
+
+  file="vm${i}log.txt"
+  fileC="/tmp/vm${i}log.txt"
+  if [ -s $file ]; then
+    cat $file >> summary
+    cat $file | $BASE/scripts/zfs-tests-color.sh > $fileC
+  fi
+done
+
+# create summary of tests
+if [ -s summary ]; then
+  $MERGE summary | grep -v '^/' > summary.txt
+  $MERGE summary | $BASE/scripts/zfs-tests-color.sh > /tmp/summary.txt
+  rm -f summary
+else
+  touch summary.txt /tmp/summary.txt
+fi
+
+# create file for debugging
+if [ -s log ]; then
+  awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
+      /\[SKIP\]|\[PASS\]/{ show=0; } show' \
+      log > summary-failure-logs.txt
+  rm -f log
+else
+  touch summary-failure-logs.txt
+fi
+
+# create debug overview for failed tests
+# For each non-SKIP test that was "(expected PASS)", extract its section
+# from the failure logs: once as a ##[group] block for the console
+# (/tmp/failed.txt) and once as <details> markup for the job summary
+# (failed.txt).
+cat summary.txt \
+  | awk '/\(expected PASS\)/{ if ($1!="SKIP") print $2; next; } show' \
+  | while read t; do
+  cat summary-failure-logs.txt \
+    | awk '$0~/Test[: ]/{ show=0; } $0~v{ show=1; } show' v="$t" \
+    > /tmp/fail.txt
+  SIZE=$(stat --printf="%s" /tmp/fail.txt)
+  SIZE=$((SIZE/1024))
+  # Test Summary:
+  echo "##[group]$t ($SIZE KiB)" >> /tmp/failed.txt
+  cat /tmp/fail.txt | $BASE/scripts/zfs-tests-color.sh >> /tmp/failed.txt
+  echo "##[endgroup]" >> /tmp/failed.txt
+  # Job Summary:
+  echo -e "\n<details>\n<summary>$t ($SIZE KiB)</summary><pre>" >> failed.txt
+  cat /tmp/fail.txt >> failed.txt
+  echo "</pre></details>" >> failed.txt
+done
+
+if [ -e /tmp/have_failed_tests ]; then
+  echo ":warning: Some tests failed!" >> failed.txt
+else
+  echo ":thumbsup: All tests passed." >> failed.txt
+fi
+
+if [ ! -s uname.txt ]; then
+  echo ":interrobang: Panic - where is my uname.txt?" > uname.txt
+fi
+
+# artifact ready now
+# -h dereferences symlinks; '|| true' so partial results still upload
+tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-8-summary.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-8-summary.sh
new file mode 100755
index 000000000000..7d1e16567ab4
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-8-summary.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 8) show colored output of results
+######################################################################
+
+set -eu
+
+# read our defined variables
+source /var/tmp/env.txt
+cd $RESPATH
+
+# helper function for showing some content with headline
+# $1: file to print (capped at 400 MiB via dd)
+# $2: headline; when non-empty the output is wrapped in a collapsible
+#     ##[group] block.
+# NOTE(review): SIZE is computed from the global $file rather than $1 -
+# works because every caller passes "$file", but it is fragile; confirm.
+function showfile() {
+  content=$(dd if=$1 bs=1024 count=400k 2>/dev/null)
+  if [ -z "$2" ]; then
+    group1=""
+    group2=""
+  else
+    SIZE=$(stat --printf="%s" "$file")
+    SIZE=$((SIZE/1024))
+    group1="##[group]$2 ($SIZE KiB)"
+    group2="##[endgroup]"
+  fi
+cat <<EOF > tmp$$
+$group1
+$content
+$group2
+EOF
+  cat tmp$$
+  rm -f tmp$$
+}
+
+# overview
+cat /tmp/summary.txt
+echo ""
+
+if [ -f /tmp/have_failed_tests -a -s /tmp/failed.txt ]; then
+  echo "Debuginfo of failed tests:"
+  cat /tmp/failed.txt
+  echo ""
+  cat /tmp/summary.txt | grep -v '^/'
+  echo ""
+fi
+
+# $1: download URL for the full log artifacts
+echo -e "\nFull logs for download:\n $1\n"
+
+for ((i=1; i<=VMs; i++)); do
+  rv=$(cat vm$i/tests-exitcode.txt)
+
+  # NOTE(review): both branches assign the identical value - presumably a
+  # marked/colored variant was intended for failing VMs; confirm.
+  if [ $rv = 0 ]; then
+    vm="vm$i"
+  else
+    vm="vm$i"
+  fi
+
+  file="vm$i/dmesg-prerun.txt"
+  test -s "$file" && showfile "$file" "$vm: dmesg kernel"
+
+  file="/tmp/vm${i}log.txt"
+  test -s "$file" && showfile "$file" "$vm: test results"
+
+  file="vm$i/console.txt"
+  test -s "$file" && showfile "$file" "$vm: serial console"
+
+  file="/tmp/vm${i}dbg.txt"
+  test -s "$file" && showfile "$file" "$vm: failure logfile"
+done
+
+# exit nonzero so the workflow step is marked failed when any test failed
+test -f /tmp/have_failed_tests && exit 1
+exit 0
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-9-summary-page.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-9-summary-page.sh
new file mode 100755
index 000000000000..737dda01b565
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-9-summary-page.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+######################################################################
+# 9) generate github summary page of all the testings
+######################################################################
+
+set -eu
+
+# Append the arguments (echo -e expanded) to the current per-step
+# markdown chunk out-$logfile.md.
+function output() {
+  echo -e $* >> "out-$logfile.md"
+}
+
+# Append a file verbatim to the current markdown chunk.
+function outfile() {
+  cat "$1" >> "out-$logfile.md"
+}
+
+# Like outfile, but wrapped in <pre> so markdown renders it verbatim.
+function outfile_plain() {
+  output "<pre>"
+  cat "$1" >> "out-$logfile.md"
+  output "</pre>"
+}
+
+# Emit at most 1023 KiB of a chunk into the step summary (GitHub caps
+# each step's summary at 1 MiB; see link below).
+function send2github() {
+  test -f "$1" || exit 0
+  dd if="$1" bs=1023k count=1 >> $GITHUB_STEP_SUMMARY
+}
+
+# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
+# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
+# [ ] can not show all error findings here
+# [x] split files into smaller ones and create additional steps
+
+# first call, generate all summaries
+if [ ! -f out-1.md ]; then
+  logfile="1"
+  for tarfile in Logs-functional-*/qemu-*.tar; do
+    rm -rf vm* *.txt
+    if [ ! -s "$tarfile" ]; then
+      output "\n## Functional Tests: unknown\n"
+      output ":exclamation: Tarfile $tarfile is empty :exclamation:"
+      continue
+    fi
+    tar xf "$tarfile"
+    test -s env.txt || continue
+    source env.txt
+    # when uname.txt is there, the other files are also ok
+    test -s uname.txt || continue
+    output "\n## Functional Tests: $OSNAME\n"
+    outfile_plain uname.txt
+    outfile_plain summary.txt
+    outfile failed.txt
+    logfile=$((logfile+1))
+  done
+  send2github out-1.md
+else
+  # later calls ($1 = chunk number): flush the chunk produced above
+  send2github out-$1.md
+fi
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-prepare-for-build.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-prepare-for-build.sh
new file mode 100755
index 000000000000..a5a9e422ba6e
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-prepare-for-build.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# Helper script to run after installing dependencies. This brings the VM back
+# up and copies over the zfs source directory.
+echo "Build modules in QEMU machine"
+sudo virsh start openzfs
+# Wait until sshd inside the freshly booted VM (vm0) answers.
+.github/workflows/scripts/qemu-wait-for-vm.sh vm0
+# Mirror the runner's checkout into the VM (lands in ~zfs/zfs).
+rsync -ar $HOME/work/zfs/zfs zfs@vm0:./
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-test-repo-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-test-repo-vm.sh
new file mode 100755
index 000000000000..e3cafcbb67cc
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-test-repo-vm.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+#
+# Do a test install of ZFS from an external repository.
+#
+# USAGE:
+#
+# ./qemu-test-repo-vm [URL]
+#
+# URL: URL to use instead of http://download.zfsonlinux.org
+#      If blank, use the default repo from zfs-release RPM.
+
+set -e
+
+source /etc/os-release
+OS="$ID"
+VERSION="$VERSION_ID"
+
+ALTHOST=""
+if [ -n "$1" ] ; then
+  ALTHOST="$1"
+fi
+
+# Write summary to /tmp/repo so our artifacts scripts pick it up
+# NOTE(review): plain 'mkdir' under 'set -e' aborts if /tmp/repo already
+# exists - 'mkdir -p' would be safer if this can ever run twice; confirm.
+mkdir /tmp/repo
+SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt
+
+# Install ZFS from one repo and sanity-check it with a throwaway pool.
+# Appends a "repo package baseurl" line to $SUMMARY and removes ZFS again
+# afterwards so the next permutation starts clean.
+#
+# $1: Repo 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod'
+# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to
+#     install from.  Blank means use default from zfs-release RPM.
+function test_install {
+  repo=$1
+  host=""
+  if [ -n "$2" ] ; then
+    host=$2
+  fi
+
+  args="--disablerepo=zfs --enablerepo=$repo"
+
+  # If we supplied an alternate repo URL, and have not already edited
+  # zfs.repo, then update the repo file.
+  if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then
+    sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
+  fi
+
+  sudo dnf -y install $args zfs zfs-test
+
+  # Load modules and create a simple pool as a sanity test.
+  sudo /usr/share/zfs/zfs.sh -r
+  truncate -s 100M /tmp/file
+  sudo zpool create tank /tmp/file
+  sudo zpool status
+
+  # Print out repo name, rpm installed (kmod or dkms), and repo URL
+  baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}')
+  package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms')
+
+  echo "$repo $package $baseurl" >> $SUMMARY
+
+  # Clean up so the next test_install starts from scratch.
+  sudo zpool destroy tank
+  sudo rm /tmp/file
+  sudo dnf -y remove zfs
+}
+
+echo "##[group]Installing from repo"
+# The openzfs docs are the authoritative instructions for the install. Use
+# the specific version of zfs-release RPM it recommends.
+case $OS in
+almalinux*)
+  url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst'
+  name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
+  sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
+  sudo rpm -qi zfs-release
+  test_install zfs $ALTHOST
+  test_install zfs-kmod $ALTHOST
+  test_install zfs-testing $ALTHOST
+  test_install zfs-testing-kmod $ALTHOST
+  ;;
+fedora*)
+  url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
+  name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
+  sudo dnf -y install -y https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
+  test_install zfs $ALTHOST
+  ;;
+esac
+echo "##[endgroup]"
+
+# Write out a simple version of the summary here. Later on we will collate all
+# the summaries and put them into a nice table in the workflow Summary page.
+echo "Summary: "
+cat $SUMMARY
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-wait-for-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-wait-for-vm.sh
new file mode 100755
index 000000000000..e8afdb3f7b98
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-wait-for-vm.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+#
+# Wait for a VM to boot up and become active. This is used in a number of our
+# scripts.
+#
+# $1: VM hostname or IP address
+
+while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
+ ssh 2>/dev/null zfs@$1 "uname -a" && break
+done
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/replace-dupes-with-symlinks.sh b/sys/contrib/openzfs/.github/workflows/scripts/replace-dupes-with-symlinks.sh
new file mode 100755
index 000000000000..5412c954ad2f
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/scripts/replace-dupes-with-symlinks.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Recursively go though a directory structure and replace duplicate files with
+# symlinks. This cuts down our RPM repo size by ~25%.
+#
+# replace-dupes-with-symlinks.sh [DIR]
+#
+# DIR: Directory to traverse. Defaults to current directory if not specified.
+#
+
+src="$1"
+if [ -z "$src" ] ; then
+ src="."
+fi
+
+declare -A db
+
+pushd "$src"
+while read line ; do
+ bn="$(basename $line)"
+ if [ -z "${db[$bn]}" ] ; then
+ # First time this file has been seen
+ db[$bn]="$line"
+ else
+ if diff -b "$line" "${db[$bn]}" &>/dev/null ; then
+ # Files are the same, make a symlink
+ rm "$line"
+ ln -sr "${db[$bn]}" "$line"
+ fi
+ fi
+done <<< "$(find . -type f)"
+popd