Diffstat (limited to 'sys/compat/linuxkpi/common')
-rw-r--r--sys/compat/linuxkpi/common/include/acpi/acpi.h170
-rw-r--r--sys/compat/linuxkpi/common/include/acpi/acpi_bus.h67
-rw-r--r--sys/compat/linuxkpi/common/include/acpi/actbl.h1
-rw-r--r--sys/compat/linuxkpi/common/include/acpi/video.h62
-rw-r--r--sys/compat/linuxkpi/common/include/asm-generic/io.h40
-rw-r--r--sys/compat/linuxkpi/common/include/asm/atomic-long.h137
-rw-r--r--sys/compat/linuxkpi/common/include/asm/atomic.h346
-rw-r--r--sys/compat/linuxkpi/common/include/asm/atomic64.h151
-rw-r--r--sys/compat/linuxkpi/common/include/asm/barrier.h64
-rw-r--r--sys/compat/linuxkpi/common/include/asm/byteorder.h158
-rw-r--r--sys/compat/linuxkpi/common/include/asm/cpufeature.h37
-rw-r--r--sys/compat/linuxkpi/common/include/asm/fcntl.h34
-rw-r--r--sys/compat/linuxkpi/common/include/asm/fpu/api.h40
-rw-r--r--sys/compat/linuxkpi/common/include/asm/hypervisor.h19
-rw-r--r--sys/compat/linuxkpi/common/include/asm/intel-family.h6
-rw-r--r--sys/compat/linuxkpi/common/include/asm/io.h41
-rw-r--r--sys/compat/linuxkpi/common/include/asm/iosf_mbi.h15
-rw-r--r--sys/compat/linuxkpi/common/include/asm/memtype.h18
-rw-r--r--sys/compat/linuxkpi/common/include/asm/msr.h35
-rw-r--r--sys/compat/linuxkpi/common/include/asm/neon.h40
-rw-r--r--sys/compat/linuxkpi/common/include/asm/pgtable.h58
-rw-r--r--sys/compat/linuxkpi/common/include/asm/processor.h63
-rw-r--r--sys/compat/linuxkpi/common/include/asm/set_memory.h126
-rw-r--r--sys/compat/linuxkpi/common/include/asm/smp.h50
-rw-r--r--sys/compat/linuxkpi/common/include/asm/types.h62
-rw-r--r--sys/compat/linuxkpi/common/include/asm/uaccess.h69
-rw-r--r--sys/compat/linuxkpi/common/include/asm/unaligned.h99
-rw-r--r--sys/compat/linuxkpi/common/include/crypto/hash.h94
-rw-r--r--sys/compat/linuxkpi/common/include/linux/acpi.h49
-rw-r--r--sys/compat/linuxkpi/common/include/linux/acpi_amd_wbrf.h97
-rw-r--r--sys/compat/linuxkpi/common/include/linux/agp_backend.h28
-rw-r--r--sys/compat/linuxkpi/common/include/linux/anon_inodes.h48
-rw-r--r--sys/compat/linuxkpi/common/include/linux/aperture.h64
-rw-r--r--sys/compat/linuxkpi/common/include/linux/apple-gmux.h12
-rw-r--r--sys/compat/linuxkpi/common/include/linux/atomic.h35
-rw-r--r--sys/compat/linuxkpi/common/include/linux/average.h90
-rw-r--r--sys/compat/linuxkpi/common/include/linux/backlight.h134
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bcd.h43
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bitfield.h141
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bitmap.h450
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bitops.h437
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bottom_half.h32
-rw-r--r--sys/compat/linuxkpi/common/include/linux/bsearch.h36
-rw-r--r--sys/compat/linuxkpi/common/include/linux/build_bug.h65
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cache.h40
-rw-r--r--sys/compat/linuxkpi/common/include/linux/capability.h51
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cc_platform.h21
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cdev.h146
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cec.h8
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cgroup.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/circ_buf.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cleanup.h46
-rw-r--r--sys/compat/linuxkpi/common/include/linux/clocksource.h36
-rw-r--r--sys/compat/linuxkpi/common/include/linux/compat.h68
-rw-r--r--sys/compat/linuxkpi/common/include/linux/compiler.h133
-rw-r--r--sys/compat/linuxkpi/common/include/linux/completion.h68
-rw-r--r--sys/compat/linuxkpi/common/include/linux/console.h55
-rw-r--r--sys/compat/linuxkpi/common/include/linux/container_of.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cpu.h78
-rw-r--r--sys/compat/linuxkpi/common/include/linux/cpufeature.h43
-rw-r--r--sys/compat/linuxkpi/common/include/linux/crc32.h43
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dcache.h45
-rw-r--r--sys/compat/linuxkpi/common/include/linux/debugfs.h124
-rw-r--r--sys/compat/linuxkpi/common/include/linux/delay.h87
-rw-r--r--sys/compat/linuxkpi/common/include/linux/devcoredump.h81
-rw-r--r--sys/compat/linuxkpi/common/include/linux/device.h718
-rw-r--r--sys/compat/linuxkpi/common/include/linux/device/driver.h33
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dma-attrs.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dma-buf-map.h91
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dma-mapping.h399
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dmapool.h103
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dmi.h58
-rw-r--r--sys/compat/linuxkpi/common/include/linux/dynamic_debug.h8
-rw-r--r--sys/compat/linuxkpi/common/include/linux/efi.h68
-rw-r--r--sys/compat/linuxkpi/common/include/linux/err.h81
-rw-r--r--sys/compat/linuxkpi/common/include/linux/errno.h73
-rw-r--r--sys/compat/linuxkpi/common/include/linux/etherdevice.h140
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ethtool.h57
-rw-r--r--sys/compat/linuxkpi/common/include/linux/eventpoll.h45
-rw-r--r--sys/compat/linuxkpi/common/include/linux/export.h31
-rw-r--r--sys/compat/linuxkpi/common/include/linux/file.h185
-rw-r--r--sys/compat/linuxkpi/common/include/linux/firmware.h116
-rw-r--r--sys/compat/linuxkpi/common/include/linux/fs.h425
-rw-r--r--sys/compat/linuxkpi/common/include/linux/fwnode.h10
-rw-r--r--sys/compat/linuxkpi/common/include/linux/gcd.h48
-rw-r--r--sys/compat/linuxkpi/common/include/linux/gfp.h214
-rw-r--r--sys/compat/linuxkpi/common/include/linux/gpf.h33
-rw-r--r--sys/compat/linuxkpi/common/include/linux/hardirq.h51
-rw-r--r--sys/compat/linuxkpi/common/include/linux/hash.h76
-rw-r--r--sys/compat/linuxkpi/common/include/linux/hashtable.h183
-rw-r--r--sys/compat/linuxkpi/common/include/linux/hdmi.h447
-rw-r--r--sys/compat/linuxkpi/common/include/linux/highmem.h170
-rw-r--r--sys/compat/linuxkpi/common/include/linux/hrtimer.h90
-rw-r--r--sys/compat/linuxkpi/common/include/linux/i2c-algo-bit.h49
-rw-r--r--sys/compat/linuxkpi/common/include/linux/i2c.h177
-rw-r--r--sys/compat/linuxkpi/common/include/linux/idr.h162
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ieee80211.h1245
-rw-r--r--sys/compat/linuxkpi/common/include/linux/if_arp.h35
-rw-r--r--sys/compat/linuxkpi/common/include/linux/if_ether.h82
-rw-r--r--sys/compat/linuxkpi/common/include/linux/if_vlan.h57
-rw-r--r--sys/compat/linuxkpi/common/include/linux/in.h44
-rw-r--r--sys/compat/linuxkpi/common/include/linux/in6.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/inetdevice.h6
-rw-r--r--sys/compat/linuxkpi/common/include/linux/interrupt.h195
-rw-r--r--sys/compat/linuxkpi/common/include/linux/interval_tree.h55
-rw-r--r--sys/compat/linuxkpi/common/include/linux/interval_tree_generic.h99
-rw-r--r--sys/compat/linuxkpi/common/include/linux/io-64-nonatomic-lo-hi.h65
-rw-r--r--sys/compat/linuxkpi/common/include/linux/io-mapping.h130
-rw-r--r--sys/compat/linuxkpi/common/include/linux/io.h566
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ioctl.h38
-rw-r--r--sys/compat/linuxkpi/common/include/linux/iommu.h29
-rw-r--r--sys/compat/linuxkpi/common/include/linux/iopoll.h92
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ioport.h57
-rw-r--r--sys/compat/linuxkpi/common/include/linux/iosys-map.h161
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ip.h73
-rw-r--r--sys/compat/linuxkpi/common/include/linux/irq_work.h90
-rw-r--r--sys/compat/linuxkpi/common/include/linux/irqdomain.h10
-rw-r--r--sys/compat/linuxkpi/common/include/linux/irqreturn.h38
-rw-r--r--sys/compat/linuxkpi/common/include/linux/jhash.h144
-rw-r--r--sys/compat/linuxkpi/common/include/linux/jiffies.h143
-rw-r--r--sys/compat/linuxkpi/common/include/linux/jump_label.h48
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kconfig.h76
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kdev_t.h44
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kernel.h385
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kernel_stat.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kfifo.h122
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kmemleak.h8
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kmod.h48
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kobject.h210
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kref.h129
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kstrtox.h324
-rw-r--r--sys/compat/linuxkpi/common/include/linux/kthread.h166
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ktime.h287
-rw-r--r--sys/compat/linuxkpi/common/include/linux/leds.h41
-rw-r--r--sys/compat/linuxkpi/common/include/linux/limits.h47
-rw-r--r--sys/compat/linuxkpi/common/include/linux/list.h529
-rw-r--r--sys/compat/linuxkpi/common/include/linux/llist.h101
-rw-r--r--sys/compat/linuxkpi/common/include/linux/lockdep.h120
-rw-r--r--sys/compat/linuxkpi/common/include/linux/log2.h41
-rw-r--r--sys/compat/linuxkpi/common/include/linux/math.h76
-rw-r--r--sys/compat/linuxkpi/common/include/linux/math64.h171
-rw-r--r--sys/compat/linuxkpi/common/include/linux/media-bus-format.h8
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mhi.h222
-rw-r--r--sys/compat/linuxkpi/common/include/linux/minmax.h74
-rw-r--r--sys/compat/linuxkpi/common/include/linux/miscdevice.h74
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mm.h479
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mm_types.h93
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mman.h38
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mmap_lock.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mmu_context.h43
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mmu_notifier.h33
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mmzone.h15
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mod_devicetable.h83
-rw-r--r--sys/compat/linuxkpi/common/include/linux/module.h127
-rw-r--r--sys/compat/linuxkpi/common/include/linux/moduleparam.h146
-rw-r--r--sys/compat/linuxkpi/common/include/linux/mutex.h177
-rw-r--r--sys/compat/linuxkpi/common/include/linux/net.h88
-rw-r--r--sys/compat/linuxkpi/common/include/linux/net_dim.h408
-rw-r--r--sys/compat/linuxkpi/common/include/linux/netdev_features.h52
-rw-r--r--sys/compat/linuxkpi/common/include/linux/netdevice.h488
-rw-r--r--sys/compat/linuxkpi/common/include/linux/nl80211.h445
-rw-r--r--sys/compat/linuxkpi/common/include/linux/nodemask.h46
-rw-r--r--sys/compat/linuxkpi/common/include/linux/nospec.h8
-rw-r--r--sys/compat/linuxkpi/common/include/linux/notifier.h58
-rw-r--r--sys/compat/linuxkpi/common/include/linux/numa.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/of.h33
-rw-r--r--sys/compat/linuxkpi/common/include/linux/overflow.h349
-rw-r--r--sys/compat/linuxkpi/common/include/linux/page-flags.h41
-rw-r--r--sys/compat/linuxkpi/common/include/linux/page.h130
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pagemap.h49
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pagevec.h137
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pci.h1537
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pci_ids.h78
-rw-r--r--sys/compat/linuxkpi/common/include/linux/perf_event.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pfn.h42
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pfn_t.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pid.h68
-rw-r--r--sys/compat/linuxkpi/common/include/linux/platform_device.h97
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pm.h100
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pm_qos.h57
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pm_runtime.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/poison.h9
-rw-r--r--sys/compat/linuxkpi/common/include/linux/poll.h47
-rw-r--r--sys/compat/linuxkpi/common/include/linux/power_supply.h42
-rw-r--r--sys/compat/linuxkpi/common/include/linux/preempt.h41
-rw-r--r--sys/compat/linuxkpi/common/include/linux/prefetch.h34
-rw-r--r--sys/compat/linuxkpi/common/include/linux/printk.h97
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h75
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pwm.h100
-rw-r--r--sys/compat/linuxkpi/common/include/linux/qrtr.h41
-rw-r--r--sys/compat/linuxkpi/common/include/linux/radix-tree.h84
-rw-r--r--sys/compat/linuxkpi/common/include/linux/random.h129
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ratelimit.h17
-rw-r--r--sys/compat/linuxkpi/common/include/linux/rbtree.h206
-rw-r--r--sys/compat/linuxkpi/common/include/linux/rculist.h147
-rw-r--r--sys/compat/linuxkpi/common/include/linux/rcupdate.h165
-rw-r--r--sys/compat/linuxkpi/common/include/linux/reboot.h40
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ref_tracker.h93
-rw-r--r--sys/compat/linuxkpi/common/include/linux/refcount.h84
-rw-r--r--sys/compat/linuxkpi/common/include/linux/rhashtable.h87
-rw-r--r--sys/compat/linuxkpi/common/include/linux/rwlock.h57
-rw-r--r--sys/compat/linuxkpi/common/include/linux/rwsem.h85
-rw-r--r--sys/compat/linuxkpi/common/include/linux/scatterlist.h684
-rw-r--r--sys/compat/linuxkpi/common/include/linux/sched.h243
-rw-r--r--sys/compat/linuxkpi/common/include/linux/sched/mm.h43
-rw-r--r--sys/compat/linuxkpi/common/include/linux/semaphore.h68
-rw-r--r--sys/compat/linuxkpi/common/include/linux/seq_file.h108
-rw-r--r--sys/compat/linuxkpi/common/include/linux/seqlock.h184
-rw-r--r--sys/compat/linuxkpi/common/include/linux/shmem_fs.h67
-rw-r--r--sys/compat/linuxkpi/common/include/linux/shrinker.h79
-rw-r--r--sys/compat/linuxkpi/common/include/linux/sizes.h63
-rw-r--r--sys/compat/linuxkpi/common/include/linux/skbuff.h1167
-rw-r--r--sys/compat/linuxkpi/common/include/linux/slab.h284
-rw-r--r--sys/compat/linuxkpi/common/include/linux/smp.h51
-rw-r--r--sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h62
-rw-r--r--sys/compat/linuxkpi/common/include/linux/soc/qcom/qmi.h173
-rw-r--r--sys/compat/linuxkpi/common/include/linux/socket.h78
-rw-r--r--sys/compat/linuxkpi/common/include/linux/sort.h41
-rw-r--r--sys/compat/linuxkpi/common/include/linux/spinlock.h181
-rw-r--r--sys/compat/linuxkpi/common/include/linux/srcu.h54
-rw-r--r--sys/compat/linuxkpi/common/include/linux/stackdepot.h32
-rw-r--r--sys/compat/linuxkpi/common/include/linux/stdarg.h33
-rw-r--r--sys/compat/linuxkpi/common/include/linux/stddef.h31
-rw-r--r--sys/compat/linuxkpi/common/include/linux/string.h329
-rw-r--r--sys/compat/linuxkpi/common/include/linux/string_helpers.h69
-rw-r--r--sys/compat/linuxkpi/common/include/linux/stringify.h35
-rw-r--r--sys/compat/linuxkpi/common/include/linux/suspend.h23
-rw-r--r--sys/compat/linuxkpi/common/include/linux/swap.h71
-rw-r--r--sys/compat/linuxkpi/common/include/linux/sysfs.h487
-rw-r--r--sys/compat/linuxkpi/common/include/linux/tcp.h70
-rw-r--r--sys/compat/linuxkpi/common/include/linux/time.h141
-rw-r--r--sys/compat/linuxkpi/common/include/linux/timer.h94
-rw-r--r--sys/compat/linuxkpi/common/include/linux/tracepoint.h48
-rw-r--r--sys/compat/linuxkpi/common/include/linux/typecheck.h38
-rw-r--r--sys/compat/linuxkpi/common/include/linux/types.h96
-rw-r--r--sys/compat/linuxkpi/common/include/linux/uaccess.h115
-rw-r--r--sys/compat/linuxkpi/common/include/linux/udp.h52
-rw-r--r--sys/compat/linuxkpi/common/include/linux/units.h40
-rw-r--r--sys/compat/linuxkpi/common/include/linux/usb.h318
-rw-r--r--sys/compat/linuxkpi/common/include/linux/utsname.h51
-rw-r--r--sys/compat/linuxkpi/common/include/linux/uuid.h77
-rw-r--r--sys/compat/linuxkpi/common/include/linux/vgaarb.h281
-rw-r--r--sys/compat/linuxkpi/common/include/linux/vmalloc.h42
-rw-r--r--sys/compat/linuxkpi/common/include/linux/wait.h319
-rw-r--r--sys/compat/linuxkpi/common/include/linux/wait_bit.h71
-rw-r--r--sys/compat/linuxkpi/common/include/linux/workqueue.h267
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ww_mutex.h149
-rw-r--r--sys/compat/linuxkpi/common/include/linux/xarray.h149
-rw-r--r--sys/compat/linuxkpi/common/include/net/addrconf.h49
-rw-r--r--sys/compat/linuxkpi/common/include/net/cfg80211.h2140
-rw-r--r--sys/compat/linuxkpi/common/include/net/ieee80211_radiotap.h51
-rw-r--r--sys/compat/linuxkpi/common/include/net/if_inet6.h55
-rw-r--r--sys/compat/linuxkpi/common/include/net/ip.h101
-rw-r--r--sys/compat/linuxkpi/common/include/net/ipv6.h117
-rw-r--r--sys/compat/linuxkpi/common/include/net/mac80211.h2686
-rw-r--r--sys/compat/linuxkpi/common/include/net/netevent.h73
-rw-r--r--sys/compat/linuxkpi/common/include/net/netlink.h53
-rw-r--r--sys/compat/linuxkpi/common/include/net/page_pool.h119
-rw-r--r--sys/compat/linuxkpi/common/include/net/regulatory.h47
-rw-r--r--sys/compat/linuxkpi/common/include/net/tcp.h38
-rw-r--r--sys/compat/linuxkpi/common/include/stdarg.h33
-rw-r--r--sys/compat/linuxkpi/common/include/video/cmdline.h44
-rw-r--r--sys/compat/linuxkpi/common/include/video/mipi_display.h64
-rw-r--r--sys/compat/linuxkpi/common/include/video/vga.h19
-rw-r--r--sys/compat/linuxkpi/common/include/xen/xen.h37
-rw-r--r--sys/compat/linuxkpi/common/src/linux_80211.c8292
-rw-r--r--sys/compat/linuxkpi/common/src/linux_80211.h454
-rw-r--r--sys/compat/linuxkpi/common/src/linux_80211_macops.c756
-rw-r--r--sys/compat/linuxkpi/common/src/linux_acpi.c376
-rw-r--r--sys/compat/linuxkpi/common/src/linux_aperture.c387
-rw-r--r--sys/compat/linuxkpi/common/src/linux_cmdline.c63
-rw-r--r--sys/compat/linuxkpi/common/src/linux_compat.c3020
-rw-r--r--sys/compat/linuxkpi/common/src/linux_current.c343
-rw-r--r--sys/compat/linuxkpi/common/src/linux_devres.c267
-rw-r--r--sys/compat/linuxkpi/common/src/linux_dmi.c147
-rw-r--r--sys/compat/linuxkpi/common/src/linux_domain.c56
-rw-r--r--sys/compat/linuxkpi/common/src/linux_firmware.c247
-rw-r--r--sys/compat/linuxkpi/common/src/linux_folio.c58
-rw-r--r--sys/compat/linuxkpi/common/src/linux_fpu.c99
-rw-r--r--sys/compat/linuxkpi/common/src/linux_hdmi.c1959
-rw-r--r--sys/compat/linuxkpi/common/src/linux_hrtimer.c141
-rw-r--r--sys/compat/linuxkpi/common/src/linux_i2c.c381
-rw-r--r--sys/compat/linuxkpi/common/src/linux_i2cbb.c325
-rw-r--r--sys/compat/linuxkpi/common/src/linux_idr.c813
-rw-r--r--sys/compat/linuxkpi/common/src/linux_interrupt.c251
-rw-r--r--sys/compat/linuxkpi/common/src/linux_kmod.c33
-rw-r--r--sys/compat/linuxkpi/common/src/linux_kobject.c354
-rw-r--r--sys/compat/linuxkpi/common/src/linux_kthread.c181
-rw-r--r--sys/compat/linuxkpi/common/src/linux_lock.c184
-rw-r--r--sys/compat/linuxkpi/common/src/linux_mhi.c89
-rw-r--r--sys/compat/linuxkpi/common/src/linux_netdev.c436
-rw-r--r--sys/compat/linuxkpi/common/src/linux_page.c575
-rw-r--r--sys/compat/linuxkpi/common/src/linux_pci.c2048
-rw-r--r--sys/compat/linuxkpi/common/src/linux_radix.c382
-rw-r--r--sys/compat/linuxkpi/common/src/linux_rcu.c461
-rw-r--r--sys/compat/linuxkpi/common/src/linux_schedule.c475
-rw-r--r--sys/compat/linuxkpi/common/src/linux_seq_file.c301
-rw-r--r--sys/compat/linuxkpi/common/src/linux_shmemfs.c125
-rw-r--r--sys/compat/linuxkpi/common/src/linux_shrinker.c156
-rw-r--r--sys/compat/linuxkpi/common/src/linux_simple_attr.c207
-rw-r--r--sys/compat/linuxkpi/common/src/linux_skbuff.c361
-rw-r--r--sys/compat/linuxkpi/common/src/linux_slab.c330
-rw-r--r--sys/compat/linuxkpi/common/src/linux_tasklet.c277
-rw-r--r--sys/compat/linuxkpi/common/src/linux_usb.c1720
-rw-r--r--sys/compat/linuxkpi/common/src/linux_work.c789
-rw-r--r--sys/compat/linuxkpi/common/src/linux_xarray.c451
-rw-r--r--sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c7
-rw-r--r--sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c7
-rw-r--r--sys/compat/linuxkpi/common/src/lkpi_iic_if.m41
309 files changed, 65190 insertions, 0 deletions
diff --git a/sys/compat/linuxkpi/common/include/acpi/acpi.h b/sys/compat/linuxkpi/common/include/acpi/acpi.h
new file mode 100644
index 000000000000..1e398d05ba20
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/acpi/acpi.h
@@ -0,0 +1,170 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org>
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ACPI_ACPI_H_
+#define _LINUXKPI_ACPI_ACPI_H_
+
+/*
+ * LINUXKPI_WANT_LINUX_ACPI is a temporary workaround to allow drm-kmod
+ * to update all needed branches without breaking builds.
+ * Once that has happened and checks are implemented based on
+ * __FreeBSD_version, we will remove these conditions again.
+ */
+
+/*
+ * FreeBSD import of ACPICA has a typedef for BOOLEAN which conflicts with
+ * amdgpu driver. Workaround it on preprocessor level.
+ */
+#define ACPI_USE_SYSTEM_INTTYPES
+#define BOOLEAN unsigned char
+typedef unsigned char UINT8;
+typedef unsigned short UINT16;
+typedef short INT16;
+typedef unsigned int UINT32;
+typedef int INT32;
+typedef uint64_t UINT64;
+typedef int64_t INT64;
+#include <contrib/dev/acpica/include/acpi.h>
+#undef BOOLEAN
+
+typedef ACPI_IO_ADDRESS acpi_io_address;
+typedef ACPI_HANDLE acpi_handle;
+typedef ACPI_OBJECT_HANDLER acpi_object_handler;
+typedef ACPI_OBJECT_TYPE acpi_object_type;
+typedef ACPI_STATUS acpi_status;
+typedef ACPI_STRING acpi_string;
+typedef ACPI_SIZE acpi_size;
+typedef ACPI_WALK_CALLBACK acpi_walk_callback;
+
+union linuxkpi_acpi_object {
+ acpi_object_type type;
+ struct {
+ acpi_object_type type;
+ UINT64 value;
+ } integer;
+ struct {
+ acpi_object_type type;
+ UINT32 length;
+ char *pointer;
+ } string;
+ struct {
+ acpi_object_type type;
+ UINT32 length;
+ UINT8 *pointer;
+ } buffer;
+ struct {
+ acpi_object_type type;
+ UINT32 count;
+ union linuxkpi_acpi_object *elements;
+ } package;
+ struct {
+ acpi_object_type type;
+ acpi_object_type actual_type;
+ acpi_handle handle;
+ } reference;
+ struct {
+ acpi_object_type type;
+ UINT32 proc_id;
+ acpi_io_address pblk_address;
+ UINT32 pblk_length;
+ } processor;
+ struct {
+ acpi_object_type type;
+ UINT32 system_level;
+ UINT32 resource_order;
+ } power_resource;
+};
+
+#ifdef LINUXKPI_WANT_LINUX_ACPI
+struct linuxkpi_acpi_buffer {
+ acpi_size length; /* Length in bytes of the buffer */
+ void *pointer; /* pointer to buffer */
+};
+
+typedef struct linuxkpi_acpi_buffer lkpi_acpi_buffer_t;
+#else
+typedef ACPI_BUFFER lkpi_acpi_buffer_t;
+#endif
+
+static inline ACPI_STATUS
+acpi_evaluate_object(ACPI_HANDLE Object, ACPI_STRING Pathname,
+ ACPI_OBJECT_LIST *ParameterObjects, lkpi_acpi_buffer_t *ReturnObjectBuffer)
+{
+ return (AcpiEvaluateObject(
+ Object, Pathname, ParameterObjects, (ACPI_BUFFER *)ReturnObjectBuffer));
+}
+
+static inline const char *
+acpi_format_exception(ACPI_STATUS Exception)
+{
+ return (AcpiFormatException(Exception));
+}
+
+static inline ACPI_STATUS
+acpi_get_handle(ACPI_HANDLE Parent, ACPI_STRING Pathname,
+ ACPI_HANDLE *RetHandle)
+{
+ return (AcpiGetHandle(Parent, Pathname, RetHandle));
+}
+
+static inline ACPI_STATUS
+acpi_get_data(ACPI_HANDLE ObjHandle, ACPI_OBJECT_HANDLER Handler, void **Data)
+{
+ return (AcpiGetData(ObjHandle, Handler, Data));
+}
+
+static inline ACPI_STATUS
+acpi_get_name(ACPI_HANDLE Object, UINT32 NameType, lkpi_acpi_buffer_t *RetPathPtr)
+{
+ return (AcpiGetName(Object, NameType, (ACPI_BUFFER *)RetPathPtr));
+}
+
+static inline ACPI_STATUS
+acpi_get_table(ACPI_STRING Signature, UINT32 Instance,
+ ACPI_TABLE_HEADER **OutTable)
+{
+ return (AcpiGetTable(Signature, Instance, OutTable));
+}
+
+static inline void
+acpi_put_table(ACPI_TABLE_HEADER *Table)
+{
+ AcpiPutTable(Table);
+}
+
+#ifdef LINUXKPI_WANT_LINUX_ACPI
+#define acpi_object linuxkpi_acpi_object
+#define acpi_buffer linuxkpi_acpi_buffer
+#endif
+
+#endif /* _LINUXKPI_ACPI_ACPI_H_ */
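(Aside, not part of the patch: a minimal sketch of how a consumer might use the
shims declared above. It assumes the usual kernel headers for printf and errno
are in scope; the "\_SB_" path and the example_ identifiers are placeholders.)

static int
example_find_sb(ACPI_HANDLE *out)
{
	ACPI_STATUS status;
	char path[] = "\\_SB_";

	status = acpi_get_handle(NULL, path, out);
	if (ACPI_FAILURE(status)) {
		printf("acpi_get_handle: %s\n", acpi_format_exception(status));
		return (-ENODEV);
	}
	return (0);
}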
diff --git a/sys/compat/linuxkpi/common/include/acpi/acpi_bus.h b/sys/compat/linuxkpi/common/include/acpi/acpi_bus.h
new file mode 100644
index 000000000000..47195e7d66a6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/acpi/acpi_bus.h
@@ -0,0 +1,67 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ACPI_ACPI_BUS_H_
+#define _LINUXKPI_ACPI_ACPI_BUS_H_
+
+/* Alias struct acpi_device to device_t */
+#define acpi_device _device
+
+typedef char acpi_device_class[20];
+
+struct acpi_bus_event {
+ acpi_device_class device_class;
+ uint32_t type;
+ uint32_t data;
+};
+
+#define acpi_dev_present(...) lkpi_acpi_dev_present(__VA_ARGS__)
+#define acpi_dev_get_first_match_dev(...) \
+ lkpi_acpi_dev_get_first_match_dev(__VA_ARGS__)
+
+ACPI_HANDLE bsd_acpi_get_handle(device_t bsddev);
+bool acpi_check_dsm(ACPI_HANDLE handle, const char *uuid, int rev,
+ uint64_t funcs);
+ACPI_OBJECT * acpi_evaluate_dsm_typed(ACPI_HANDLE handle, const char *uuid,
+ int rev, int func, ACPI_OBJECT *argv4,
+ ACPI_OBJECT_TYPE type);
+int register_acpi_notifier(struct notifier_block *nb);
+int unregister_acpi_notifier(struct notifier_block *nb);
+uint32_t acpi_target_system_state(void);
+bool lkpi_acpi_dev_present(const char *hid, const char *uid,
+ int64_t hrv);
+struct acpi_device *lkpi_acpi_dev_get_first_match_dev(const char *hid,
+ const char *uid, int64_t hrv);
+
+union linuxkpi_acpi_object;
+
+union linuxkpi_acpi_object *
+acpi_evaluate_dsm(ACPI_HANDLE ObjHandle, const guid_t *guid,
+ UINT64 rev, UINT64 func, union linuxkpi_acpi_object *arg);
+
+#endif /* _LINUXKPI_ACPI_ACPI_BUS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/acpi/actbl.h b/sys/compat/linuxkpi/common/include/acpi/actbl.h
new file mode 100644
index 000000000000..dbb7db41bb66
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/acpi/actbl.h
@@ -0,0 +1 @@
+#include <contrib/dev/acpica/include/actbl.h>
diff --git a/sys/compat/linuxkpi/common/include/acpi/video.h b/sys/compat/linuxkpi/common/include/acpi/video.h
new file mode 100644
index 000000000000..fd2ffd6764d0
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/acpi/video.h
@@ -0,0 +1,62 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ACPI_VIDEO_H_
+#define _LINUXKPI_ACPI_VIDEO_H_
+
+#include <sys/types.h>
+#include <sys/errno.h>
+
+#define ACPI_VIDEO_CLASS "video"
+
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+
+static inline int
+acpi_video_register(void)
+{
+
+ return (-ENODEV);
+}
+
+static inline void
+acpi_video_unregister(void)
+{
+}
+
+static inline void
+acpi_video_register_backlight(void)
+{
+}
+
+static inline bool
+acpi_video_backlight_use_native(void)
+{
+ return (true);
+}
+
+#endif /* _LINUXKPI_ACPI_VIDEO_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm-generic/io.h b/sys/compat/linuxkpi/common/include/asm-generic/io.h
new file mode 100644
index 000000000000..8dc7eb6cf453
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm-generic/io.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_ASMGENERIC_IO_H_
+#define _LINUXKPI_ASMGENERIC_IO_H_
+
+#if defined(__i386__) || defined(__amd64__)
+
+#include <machine/cpufunc.h>
+
+#define outb(a,b) outb(b,a)
+#define outw(a,b) outw(b,a)
+#define outl(a,b) outl(b,a)
+
+#endif
+
+#endif /* _LINUXKPI_ASMGENERIC_IO_H_ */
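(Aside, not part of the patch: the argument-swapping macros above bridge the
Linux convention outb(value, port) to FreeBSD's outb(port, value) from
<machine/cpufunc.h>. A hedged sketch of a Linux-style caller follows; the port
number is a made-up placeholder.)

static inline void
example_port_write(void)
{
	/* Written in Linux order: value 0x80 to port 0x3f2. The macro
	 * re-orders the arguments, so FreeBSD's outb(0x3f2, 0x80) runs. */
	outb(0x80, 0x3f2);
}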
diff --git a/sys/compat/linuxkpi/common/include/asm/atomic-long.h b/sys/compat/linuxkpi/common/include/asm/atomic-long.h
new file mode 100644
index 000000000000..db3a94c539e5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/atomic-long.h
@@ -0,0 +1,137 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_ATOMIC_LONG_H_
+#define _LINUXKPI_ASM_ATOMIC_LONG_H_
+
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#define ATOMIC_LONG_INIT(x) { .counter = (x) }
+
+typedef struct {
+ volatile long counter;
+} atomic_long_t;
+
+#define atomic_long_add(i, v) atomic_long_add_return((i), (v))
+#define atomic_long_sub(i, v) atomic_long_sub_return((i), (v))
+#define atomic_long_inc_return(v) atomic_long_add_return(1, (v))
+#define atomic_long_inc_not_zero(v) atomic_long_add_unless((v), 1, 0)
+
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return i + atomic_fetchadd_long(&v->counter, i);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return atomic_fetchadd_long(&v->counter, -i) - i;
+}
+
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+static inline long
+atomic_long_read(atomic_long_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+
+static inline long
+atomic_long_inc(atomic_long_t *v)
+{
+ return atomic_fetchadd_long(&v->counter, 1) + 1;
+}
+
+static inline long
+atomic_long_dec(atomic_long_t *v)
+{
+ return atomic_fetchadd_long(&v->counter, -1) - 1;
+}
+
+static inline long
+atomic_long_xchg(atomic_long_t *v, long val)
+{
+ return atomic_swap_long(&v->counter, val);
+}
+
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ long ret = old;
+
+ for (;;) {
+ if (atomic_fcmpset_long(&v->counter, &ret, new))
+ break;
+ if (ret != old)
+ break;
+ }
+ return (ret);
+}
+
+static inline int
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ long c = atomic_long_read(v);
+
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_fcmpset_long(&v->counter, &c, c + a)))
+ break;
+ }
+ return (c != u);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ long c = atomic_long_read(v);
+
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_fcmpset_long(&v->counter, &c, c + a)))
+ break;
+ }
+ return (c);
+}
+
+static inline long
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ long i = atomic_long_add(-1, v);
+ return i == 0 ;
+}
+
+#endif /* _LINUXKPI_ASM_ATOMIC_LONG_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/atomic.h b/sys/compat/linuxkpi/common/include/asm/atomic.h
new file mode 100644
index 000000000000..edb478af8c82
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/atomic.h
@@ -0,0 +1,346 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_ATOMIC_H_
+#define _LINUXKPI_ASM_ATOMIC_H_
+
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#define ATOMIC_INIT(x) { .counter = (x) }
+
+typedef struct {
+ volatile int counter;
+} atomic_t;
+
+/*------------------------------------------------------------------------*
+ * 32-bit atomic operations
+ *------------------------------------------------------------------------*/
+
+#define atomic_add(i, v) atomic_add_return((i), (v))
+#define atomic_sub(i, v) atomic_sub_return((i), (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
+#define atomic_add_and_test(i, v) (atomic_add_return((i), (v)) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+static inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ return i + atomic_fetchadd_int(&v->counter, i);
+}
+
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ return atomic_fetchadd_int(&v->counter, -i) - i;
+}
+
+static inline void
+atomic_set(atomic_t *v, int i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+static inline void
+atomic_set_release(atomic_t *v, int i)
+{
+ atomic_store_rel_int(&v->counter, i);
+}
+
+static inline void
+atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_set_int(&v->counter, mask);
+}
+
+static inline int
+atomic_read(const atomic_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+
+static inline int
+atomic_inc(atomic_t *v)
+{
+ return atomic_fetchadd_int(&v->counter, 1) + 1;
+}
+
+static inline int
+atomic_dec(atomic_t *v)
+{
+ return atomic_fetchadd_int(&v->counter, -1) - 1;
+}
+
+static inline int
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
+ break;
+ }
+ return (c != u);
+}
+
+static inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
+ break;
+ }
+ return (c);
+}
+
+static inline void
+atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_clear_int(&v->counter, mask);
+}
+
+static inline int
+atomic_xchg(atomic_t *v, int i)
+{
+ return (atomic_swap_int(&v->counter, i));
+}
+
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret = old;
+
+ for (;;) {
+ if (atomic_fcmpset_int(&v->counter, &ret, new))
+ break;
+ if (ret != old)
+ break;
+ }
+ return (ret);
+}
+
+#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
+#define LINUXKPI_ATOMIC_8(...) __VA_ARGS__
+#define LINUXKPI_ATOMIC_16(...) __VA_ARGS__
+#else
+#define LINUXKPI_ATOMIC_8(...)
+#define LINUXKPI_ATOMIC_16(...)
+#endif
+
+#if !(defined(i386) || (defined(__powerpc__) && !defined(__powerpc64__)))
+#define LINUXKPI_ATOMIC_64(...) __VA_ARGS__
+#else
+#define LINUXKPI_ATOMIC_64(...)
+#endif
+
+#define cmpxchg(ptr, old, new) ({ \
+ union { \
+ __typeof(*(ptr)) val; \
+ u8 u8[0]; \
+ u16 u16[0]; \
+ u32 u32[0]; \
+ u64 u64[0]; \
+ } __ret = { .val = (old) }, __new = { .val = (new) }; \
+ \
+ CTASSERT( \
+ LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||) \
+ LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||) \
+ LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||) \
+ sizeof(__ret.val) == 4); \
+ \
+ switch (sizeof(__ret.val)) { \
+ LINUXKPI_ATOMIC_8( \
+ case 1: \
+ while (!atomic_fcmpset_8((volatile u8 *)(ptr), \
+ __ret.u8, __new.u8[0]) && __ret.val == (old)) \
+ ; \
+ break; \
+ ) \
+ LINUXKPI_ATOMIC_16( \
+ case 2: \
+ while (!atomic_fcmpset_16((volatile u16 *)(ptr), \
+ __ret.u16, __new.u16[0]) && __ret.val == (old)) \
+ ; \
+ break; \
+ ) \
+ case 4: \
+ while (!atomic_fcmpset_32((volatile u32 *)(ptr), \
+ __ret.u32, __new.u32[0]) && __ret.val == (old)) \
+ ; \
+ break; \
+ LINUXKPI_ATOMIC_64( \
+ case 8: \
+ while (!atomic_fcmpset_64((volatile u64 *)(ptr), \
+ __ret.u64, __new.u64[0]) && __ret.val == (old)) \
+ ; \
+ break; \
+ ) \
+ } \
+ __ret.val; \
+})
+
+#define cmpxchg64(...) cmpxchg(__VA_ARGS__)
+#define cmpxchg_relaxed(...) cmpxchg(__VA_ARGS__)
+
+#define xchg(ptr, new) ({ \
+ union { \
+ __typeof(*(ptr)) val; \
+ u8 u8[0]; \
+ u16 u16[0]; \
+ u32 u32[0]; \
+ u64 u64[0]; \
+ } __ret, __new = { .val = (new) }; \
+ \
+ CTASSERT( \
+ LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||) \
+ LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||) \
+ LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||) \
+ sizeof(__ret.val) == 4); \
+ \
+ switch (sizeof(__ret.val)) { \
+ LINUXKPI_ATOMIC_8( \
+ case 1: \
+ __ret.val = READ_ONCE(*ptr); \
+ while (!atomic_fcmpset_8((volatile u8 *)(ptr), \
+ __ret.u8, __new.u8[0])) \
+ ; \
+ break; \
+ ) \
+ LINUXKPI_ATOMIC_16( \
+ case 2: \
+ __ret.val = READ_ONCE(*ptr); \
+ while (!atomic_fcmpset_16((volatile u16 *)(ptr), \
+ __ret.u16, __new.u16[0])) \
+ ; \
+ break; \
+ ) \
+ case 4: \
+ __ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr), \
+ __new.u32[0]); \
+ break; \
+ LINUXKPI_ATOMIC_64( \
+ case 8: \
+ __ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr), \
+ __new.u64[0]); \
+ break; \
+ ) \
+ } \
+ __ret.val; \
+})
+
+#define try_cmpxchg(p, op, n) \
+({ \
+ __typeof(p) __op = (__typeof((p)))(op); \
+ __typeof(*(p)) __o = *__op; \
+ __typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n)); \
+ if (__p != __o) \
+ *__op = __p; \
+ (__p == __o); \
+})
+
+#define __atomic_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ __typeof(_po) __po = (_po); \
+ __typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
+
+static inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ int retval;
+ int old;
+
+ old = atomic_read(v);
+ for (;;) {
+ retval = old - 1;
+ if (unlikely(retval < 0))
+ break;
+ if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
+ break;
+ }
+ return (retval);
+}
+
+#define LINUX_ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = atomic_cmpxchg(v, c, c c_op i)) != c) \
+ c = old; \
+}
+
+#define LINUX_ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = atomic_cmpxchg(v, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return (c); \
+}
+
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+
+ return ((atomic_inc_return(v) - 1));
+}
+
+LINUX_ATOMIC_OP(or, |)
+LINUX_ATOMIC_OP(and, &)
+LINUX_ATOMIC_OP(andnot, &~)
+LINUX_ATOMIC_OP(xor, ^)
+
+LINUX_ATOMIC_FETCH_OP(or, |)
+LINUX_ATOMIC_FETCH_OP(and, &)
+LINUX_ATOMIC_FETCH_OP(andnot, &~)
+LINUX_ATOMIC_FETCH_OP(xor, ^)
+
+#endif /* _LINUXKPI_ASM_ATOMIC_H_ */
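(Aside, not part of the patch: a minimal sketch of the reference-count idiom
the 32-bit API above is typically used for; only functions defined in this
header are used, and the example_ names are hypothetical.)

static int
example_try_get(atomic_t *refs)
{
	/* Succeeds (returns non-zero) only if the count was not already 0;
	 * expands to atomic_add_unless(refs, 1, 0). */
	return (atomic_inc_not_zero(refs));
}

static bool
example_put(atomic_t *refs)
{
	/* True only for the caller that drops the last reference. */
	return (atomic_dec_and_test(refs));
}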
diff --git a/sys/compat/linuxkpi/common/include/asm/atomic64.h b/sys/compat/linuxkpi/common/include/asm/atomic64.h
new file mode 100644
index 000000000000..fbfb9254b64c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/atomic64.h
@@ -0,0 +1,151 @@
+/*-
+ * Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_ATOMIC64_H_
+#define _LINUXKPI_ASM_ATOMIC64_H_
+
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+
+typedef struct {
+ volatile int64_t counter;
+} atomic64_t;
+#define ATOMIC64_INIT(x) { .counter = (x) }
+
+/*------------------------------------------------------------------------*
+ * 64-bit atomic operations
+ *------------------------------------------------------------------------*/
+
+#define atomic64_add(i, v) atomic64_add_return((i), (v))
+#define atomic64_sub(i, v) atomic64_sub_return((i), (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_and_test(i, v) (atomic64_add_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+static inline int64_t
+atomic64_fetch_add(int64_t i, atomic64_t *v)
+{
+ return (atomic_fetchadd_64(&v->counter, i));
+}
+
+static inline int64_t
+atomic64_add_return(int64_t i, atomic64_t *v)
+{
+ return i + atomic_fetchadd_64(&v->counter, i);
+}
+
+static inline int64_t
+atomic64_sub_return(int64_t i, atomic64_t *v)
+{
+ return atomic_fetchadd_64(&v->counter, -i) - i;
+}
+
+static inline void
+atomic64_set(atomic64_t *v, int64_t i)
+{
+ atomic_store_rel_64(&v->counter, i);
+}
+
+static inline int64_t
+atomic64_read(atomic64_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+
+static inline int64_t
+atomic64_inc(atomic64_t *v)
+{
+ return atomic_fetchadd_64(&v->counter, 1) + 1;
+}
+
+static inline int64_t
+atomic64_dec(atomic64_t *v)
+{
+ return atomic_fetchadd_64(&v->counter, -1) - 1;
+}
+
+static inline int64_t
+atomic64_add_unless(atomic64_t *v, int64_t a, int64_t u)
+{
+ int64_t c = atomic64_read(v);
+
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
+ break;
+ }
+ return (c != u);
+}
+
+static inline int64_t
+atomic64_fetch_add_unless(atomic64_t *v, int64_t a, int64_t u)
+{
+ int64_t c = atomic64_read(v);
+
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
+ break;
+ }
+ return (c);
+}
+
+static inline int64_t
+atomic64_xchg(atomic64_t *v, int64_t i)
+{
+#if !(defined(__powerpc__) && !defined(__powerpc64__))
+ return (atomic_swap_64(&v->counter, i));
+#else
+ int64_t ret = atomic64_read(v);
+
+ while (!atomic_fcmpset_64(&v->counter, &ret, i))
+ ;
+ return (ret);
+#endif
+}
+
+static inline int64_t
+atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
+{
+ int64_t ret = old;
+
+ for (;;) {
+ if (atomic_fcmpset_64(&v->counter, &ret, new))
+ break;
+ if (ret != old)
+ break;
+ }
+ return (ret);
+}
+
+#endif /* _LINUXKPI_ASM_ATOMIC64_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/barrier.h b/sys/compat/linuxkpi/common/include/asm/barrier.h
new file mode 100644
index 000000000000..39c5139cb322
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/barrier.h
@@ -0,0 +1,64 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_BARRIER_H_
+#define _LINUXKPI_ASM_BARRIER_H_
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+
+#include <asm/atomic.h>
+#include <linux/compiler.h>
+
+/* TODO: Check other archs for atomic_thread_fence_* usability */
+#if defined(__amd64__) || defined(__i386__)
+#define smp_mb() atomic_thread_fence_seq_cst()
+#define smp_wmb() atomic_thread_fence_rel()
+#define smp_rmb() atomic_thread_fence_acq()
+#define smp_store_mb(x, v) do { (void)xchg(&(x), v); } while (0)
+#endif
+
+#ifndef smp_mb
+#define smp_mb() mb()
+#endif
+#ifndef smp_wmb
+#define smp_wmb() wmb()
+#endif
+#ifndef smp_rmb
+#define smp_rmb() rmb()
+#endif
+#ifndef smp_store_mb
+#define smp_store_mb(x, v) do { WRITE_ONCE(x, v); smp_mb(); } while (0)
+#endif
+
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
+#define smp_store_release(p, v) do { smp_mb(); WRITE_ONCE(*p, v); } while (0)
+#define smp_load_acquire(p) ({ typeof(*p) _v = READ_ONCE(*p); smp_mb(); _v; })
+
+#endif /* _LINUXKPI_ASM_BARRIER_H_ */
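(Aside, not part of the patch: the message-passing pattern the release/acquire
macros above implement. The example_ variables are placeholders.)

static int example_payload;
static int example_ready;

static void
example_publish(void)
{
	example_payload = 42;
	/* All stores above become visible before "ready" is observed set. */
	smp_store_release(&example_ready, 1);
}

static int
example_consume(void)
{
	/* If ready reads as 1, the payload store is guaranteed visible. */
	if (smp_load_acquire(&example_ready))
		return (example_payload);
	return (-1);
}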
diff --git a/sys/compat/linuxkpi/common/include/asm/byteorder.h b/sys/compat/linuxkpi/common/include/asm/byteorder.h
new file mode 100644
index 000000000000..ad7a923ca143
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/byteorder.h
@@ -0,0 +1,158 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_BYTEORDER_H_
+#define _LINUXKPI_ASM_BYTEORDER_H_
+
+#include <sys/types.h>
+#include <sys/endian.h>
+#include <asm/types.h>
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define __LITTLE_ENDIAN
+#else
+#define __BIG_ENDIAN
+#endif
+
+#define __cpu_to_le64(x) htole64(x)
+#define cpu_to_le64(x) __cpu_to_le64(x)
+#define __le64_to_cpu(x) le64toh(x)
+#define le64_to_cpu(x) __le64_to_cpu(x)
+#define __cpu_to_le32(x) htole32(x)
+#define cpu_to_le32(x) __cpu_to_le32(x)
+#define __le32_to_cpu(x) le32toh(x)
+#define le32_to_cpu(x) __le32_to_cpu(x)
+#define __cpu_to_le16(x) htole16(x)
+#define cpu_to_le16(x) __cpu_to_le16(x)
+#define __le16_to_cpu(x) le16toh(x)
+#define le16_to_cpu(x) __le16_to_cpu(x)
+#define __cpu_to_be64(x) htobe64(x)
+#define cpu_to_be64(x) __cpu_to_be64(x)
+#define __be64_to_cpu(x) be64toh(x)
+#define be64_to_cpu(x) __be64_to_cpu(x)
+#define __cpu_to_be32(x) htobe32(x)
+#define cpu_to_be32(x) __cpu_to_be32(x)
+#define __be32_to_cpu(x) be32toh(x)
+#define be32_to_cpu(x) __be32_to_cpu(x)
+#define __cpu_to_be16(x) htobe16(x)
+#define cpu_to_be16(x) __cpu_to_be16(x)
+#define __be16_to_cpu(x) be16toh(x)
+#define be16_to_cpu(x) __be16_to_cpu(x)
+
+#define __cpu_to_le64p(x) htole64(*((const uint64_t *)(x)))
+#define cpu_to_le64p(x) __cpu_to_le64p(x)
+#define __le64_to_cpup(x) le64toh(*((const uint64_t *)(x)))
+#define le64_to_cpup(x) __le64_to_cpup(x)
+#define __cpu_to_le32p(x) htole32(*((const uint32_t *)(x)))
+#define cpu_to_le32p(x) __cpu_to_le32p(x)
+#define __le32_to_cpup(x) le32toh(*((const uint32_t *)(x)))
+#define le32_to_cpup(x) __le32_to_cpup(x)
+#define __cpu_to_le16p(x) htole16(*((const uint16_t *)(x)))
+#define cpu_to_le16p(x) __cpu_to_le16p(x)
+#define __le16_to_cpup(x) le16toh(*((const uint16_t *)(x)))
+#define le16_to_cpup(x) __le16_to_cpup(x)
+#define __cpu_to_be64p(x) htobe64(*((const uint64_t *)(x)))
+#define cpu_to_be64p(x) __cpu_to_be64p(x)
+#define __be64_to_cpup(x) be64toh(*((const uint64_t *)(x)))
+#define be64_to_cpup(x) __be64_to_cpup(x)
+#define __cpu_to_be32p(x) htobe32(*((const uint32_t *)(x)))
+#define cpu_to_be32p(x) __cpu_to_be32p(x)
+#define __be32_to_cpup(x) be32toh(*((const uint32_t *)(x)))
+#define be32_to_cpup(x) __be32_to_cpup(x)
+#define __cpu_to_be16p(x) htobe16(*((const uint16_t *)(x)))
+#define cpu_to_be16p(x) __cpu_to_be16p(x)
+#define __be16_to_cpup(x) be16toh(*((const uint16_t *)(x)))
+#define be16_to_cpup(x) __be16_to_cpup(x)
+
+
+#define __cpu_to_le64s(x) do { *((uint64_t *)(x)) = cpu_to_le64p((x)); } while (0)
+#define cpu_to_le64s(x) __cpu_to_le64s(x)
+#define __le64_to_cpus(x) do { *((uint64_t *)(x)) = le64_to_cpup((x)); } while (0)
+#define le64_to_cpus(x) __le64_to_cpus(x)
+#define __cpu_to_le32s(x) do { *((uint32_t *)(x)) = cpu_to_le32p((x)); } while (0)
+#define cpu_to_le32s(x) __cpu_to_le32s(x)
+#define __le32_to_cpus(x) do { *((uint32_t *)(x)) = le32_to_cpup((x)); } while (0)
+#define le32_to_cpus(x) __le32_to_cpus(x)
+#define __cpu_to_le16s(x) do { *((uint16_t *)(x)) = cpu_to_le16p((x)); } while (0)
+#define cpu_to_le16s(x) __cpu_to_le16s(x)
+#define __le16_to_cpus(x) do { *((uint16_t *)(x)) = le16_to_cpup((x)); } while (0)
+#define le16_to_cpus(x) __le16_to_cpus(x)
+#define __cpu_to_be64s(x) do { *((uint64_t *)(x)) = cpu_to_be64p((x)); } while (0)
+#define cpu_to_be64s(x) __cpu_to_be64s(x)
+#define __be64_to_cpus(x) do { *((uint64_t *)(x)) = be64_to_cpup((x)); } while (0)
+#define be64_to_cpus(x) __be64_to_cpus(x)
+#define __cpu_to_be32s(x) do { *((uint32_t *)(x)) = cpu_to_be32p((x)); } while (0)
+#define cpu_to_be32s(x) __cpu_to_be32s(x)
+#define __be32_to_cpus(x) do { *((uint32_t *)(x)) = be32_to_cpup((x)); } while (0)
+#define be32_to_cpus(x) __be32_to_cpus(x)
+#define __cpu_to_be16s(x) do { *((uint16_t *)(x)) = cpu_to_be16p((x)); } while (0)
+#define cpu_to_be16s(x) __cpu_to_be16s(x)
+#define __be16_to_cpus(x) do { *((uint16_t *)(x)) = be16_to_cpup((x)); } while (0)
+#define be16_to_cpus(x) __be16_to_cpus(x)
+
+#define swab16(x) bswap16(x)
+#define swab32(x) bswap32(x)
+#define swab64(x) bswap64(x)
+
+static inline void
+be64_add_cpu(uint64_t *var, uint64_t val)
+{
+ *var = cpu_to_be64(be64_to_cpu(*var) + val);
+}
+
+static inline void
+be32_add_cpu(uint32_t *var, uint32_t val)
+{
+ *var = cpu_to_be32(be32_to_cpu(*var) + val);
+}
+
+static inline void
+be16_add_cpu(uint16_t *var, uint16_t val)
+{
+ *var = cpu_to_be16(be16_to_cpu(*var) + val);
+}
+
+static inline void
+le64_add_cpu(uint64_t *var, uint64_t val)
+{
+ *var = cpu_to_le64(le64_to_cpu(*var) + val);
+}
+
+static inline void
+le32_add_cpu(uint32_t *var, uint32_t val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) + val);
+}
+
+static inline void
+le16_add_cpu(uint16_t *var, uint16_t val)
+{
+ *var = cpu_to_le16(le16_to_cpu(*var) + val);
+}
+
+#endif /* _LINUXKPI_ASM_BYTEORDER_H_ */
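A minimal usage sketch of the conversion macros above; the function name and values are illustrative only and assume this header is included:

static void
byteorder_example(void)
{
	uint32_t v, buf;

	v = cpu_to_le32(0x11223344);	/* CPU order -> little-endian value */
	buf = v;
	v = le32_to_cpup(&buf);		/* "p" variant dereferences a pointer */
	le32_to_cpus(&buf);		/* "s" variant converts in place */
	(void)v;
}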
diff --git a/sys/compat/linuxkpi/common/include/asm/cpufeature.h b/sys/compat/linuxkpi/common/include/asm/cpufeature.h
new file mode 100644
index 000000000000..84ab86af33dd
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/cpufeature.h
@@ -0,0 +1,37 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_ASM_CPUFEATURE_H
+#define _LINUXKPI_ASM_CPUFEATURE_H
+
+#if defined(__amd64__) || defined(__i386__)
+
+#include <sys/types.h>
+#include <machine/md_var.h>
+
+#define X86_FEATURE_CLFLUSH 1
+#define X86_FEATURE_XMM4_1 2
+#define X86_FEATURE_PAT 3
+#define X86_FEATURE_HYPERVISOR 4
+
+static inline bool
+static_cpu_has(uint16_t f)
+{
+ switch (f) {
+ case X86_FEATURE_CLFLUSH:
+ return ((cpu_feature & CPUID_CLFSH) != 0);
+ case X86_FEATURE_XMM4_1:
+ return ((cpu_feature2 & CPUID2_SSE41) != 0);
+ case X86_FEATURE_PAT:
+ return ((cpu_feature & CPUID_PAT) != 0);
+ case X86_FEATURE_HYPERVISOR:
+ return ((cpu_feature2 & CPUID2_HV) != 0);
+ default:
+ return (false);
+ }
+}
+
+#define boot_cpu_has(x) static_cpu_has(x)
+
+#endif
+
+#endif
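A small sketch of how a caller might use these feature checks; the helper name is made up and the code only applies on amd64/i386, where the definitions above are visible:

static bool
can_use_clflush(void)
{
	/* static_cpu_has() maps the Linux feature bit onto cpu_feature*. */
	return (boot_cpu_has(X86_FEATURE_CLFLUSH));
}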
diff --git a/sys/compat/linuxkpi/common/include/asm/fcntl.h b/sys/compat/linuxkpi/common/include/asm/fcntl.h
new file mode 100644
index 000000000000..3820e15039f2
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/fcntl.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_FCNTL_H_
+#define _LINUXKPI_ASM_FCNTL_H_
+
+#include <sys/fcntl.h>
+
+#endif /* _LINUXKPI_ASM_FCNTL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/fpu/api.h b/sys/compat/linuxkpi/common/include/asm/fpu/api.h
new file mode 100644
index 000000000000..a4803bde461f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/fpu/api.h
@@ -0,0 +1,40 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Val Packett <val@packett.cool>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_FPU_API_H_
+#define _LINUXKPI_ASM_FPU_API_H_
+
+#define kernel_fpu_begin() \
+ lkpi_kernel_fpu_begin()
+
+#define kernel_fpu_end() \
+ lkpi_kernel_fpu_end()
+
+extern void lkpi_kernel_fpu_begin(void);
+extern void lkpi_kernel_fpu_end(void);
+
+#endif /* _LINUXKPI_ASM_FPU_API_H_ */
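A brief sketch of the expected call pattern, assuming this header is included; the function name is illustrative:

static void
fpu_section_example(void)
{
	kernel_fpu_begin();	/* expands to lkpi_kernel_fpu_begin() */
	/* FPU/SIMD-using code would go here. */
	kernel_fpu_end();	/* expands to lkpi_kernel_fpu_end() */
}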
diff --git a/sys/compat/linuxkpi/common/include/asm/hypervisor.h b/sys/compat/linuxkpi/common/include/asm/hypervisor.h
new file mode 100644
index 000000000000..f344a2929359
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/hypervisor.h
@@ -0,0 +1,19 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_ASM_HYPERVISOR_H
+#define _LINUXKPI_ASM_HYPERVISOR_H
+
+#if defined(__i386__) || defined(__amd64__)
+
+#define X86_HYPER_NATIVE 1
+#define X86_HYPER_MS_HYPERV 2
+
+static inline bool
+hypervisor_is_type(int type)
+{
+ return (type == X86_HYPER_NATIVE);
+}
+
+#endif
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/asm/intel-family.h b/sys/compat/linuxkpi/common/include/asm/intel-family.h
new file mode 100644
index 000000000000..91ad1d1a8ff3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/intel-family.h
@@ -0,0 +1,6 @@
+/* Public domain. */
+
+#define INTEL_FAM6_ALDERLAKE 0x97
+#define INTEL_FAM6_ALDERLAKE_L 0x9A
+
+#define INTEL_FAM6_ROCKETLAKE 0xA7
diff --git a/sys/compat/linuxkpi/common/include/asm/io.h b/sys/compat/linuxkpi/common/include/asm/io.h
new file mode 100644
index 000000000000..63d1c8f72f8e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/io.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_IO_H_
+#define _LINUXKPI_ASM_IO_H_
+
+#include <sys/param.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <linux/io.h>
+
+#define virt_to_phys(x) vtophys(x)
+
+#endif /* _LINUXKPI_ASM_IO_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/iosf_mbi.h b/sys/compat/linuxkpi/common/include/asm/iosf_mbi.h
new file mode 100644
index 000000000000..a99c9cced6a4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/iosf_mbi.h
@@ -0,0 +1,15 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_ASM_IOSF_MBI_H_
+#define _LINUXKPI_ASM_IOSF_MBI_H_
+
+#define MBI_PMIC_BUS_ACCESS_BEGIN 1
+#define MBI_PMIC_BUS_ACCESS_END 2
+
+#define iosf_mbi_assert_punit_acquired()
+#define iosf_mbi_punit_acquire()
+#define iosf_mbi_punit_release()
+#define iosf_mbi_register_pmic_bus_access_notifier(x) 0
+#define iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(x) 0
+
+#endif /* _LINUXKPI_ASM_IOSF_MBI_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/memtype.h b/sys/compat/linuxkpi/common/include/asm/memtype.h
new file mode 100644
index 000000000000..c433e54fd7bf
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/memtype.h
@@ -0,0 +1,18 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_ASM_MEMTYPE_H_
+#define _LINUXKPI_ASM_MEMTYPE_H_
+
+#if defined(__amd64__) || defined(__i386__)
+
+#include <asm/cpufeature.h>
+
+static inline bool
+pat_enabled(void)
+{
+ return (boot_cpu_has(X86_FEATURE_PAT));
+}
+
+#endif
+
+#endif /* _LINUXKPI_ASM_MEMTYPE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/msr.h b/sys/compat/linuxkpi/common/include/asm/msr.h
new file mode 100644
index 000000000000..6f7c10f2860b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/msr.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_MSR_H_
+#define _LINUXKPI_ASM_MSR_H_
+
+#include <machine/cpufunc.h>
+
+#define rdmsrl(msr, val) ((val) = rdmsr(msr))
+#define rdmsrl_safe(msr, val) rdmsr_safe(msr, val)
+
+#endif /* _LINUXKPI_ASM_MSR_H_ */
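A minimal sketch of the rdmsrl() wrapper; the helper name is made up for illustration:

static uint64_t
read_msr_example(u_int msr)
{
	uint64_t val;

	rdmsrl(msr, val);	/* expands to val = rdmsr(msr) */
	return (val);
}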
diff --git a/sys/compat/linuxkpi/common/include/asm/neon.h b/sys/compat/linuxkpi/common/include/asm/neon.h
new file mode 100644
index 000000000000..0488a90d6664
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/neon.h
@@ -0,0 +1,40 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Val Packett <val@packett.cool>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_NEON_H_
+#define _LINUXKPI_ASM_NEON_H_
+
+#define kernel_neon_begin() \
+ lkpi_kernel_fpu_begin()
+
+#define kernel_neon_end() \
+ lkpi_kernel_fpu_end()
+
+extern void lkpi_kernel_fpu_begin(void);
+extern void lkpi_kernel_fpu_end(void);
+
+#endif /* _LINUXKPI_ASM_NEON_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/pgtable.h b/sys/compat/linuxkpi/common/include/asm/pgtable.h
new file mode 100644
index 000000000000..865662d587db
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/pgtable.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_PGTABLE_H_
+#define _LINUXKPI_ASM_PGTABLE_H_
+
+#include <linux/page.h>
+
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+typedef struct page *pgtable_t;
+
+#define pgprot_decrypted(prot) (prot)
+
+#if defined(__i386__) || defined(__amd64__)
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_PAT 7
+
+#define _PAGE_PRESENT (((pteval_t) 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (((pteval_t) 1) << _PAGE_BIT_RW)
+#define _PAGE_PWT (((pteval_t) 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (((pteval_t) 1) << _PAGE_BIT_PCD)
+#define _PAGE_PAT (((pteval_t) 1) << _PAGE_BIT_PAT)
+#endif
+
+#endif /* _LINUXKPI_ASM_PGTABLE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/processor.h b/sys/compat/linuxkpi/common/include/asm/processor.h
new file mode 100644
index 000000000000..2bc4b6532544
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/processor.h
@@ -0,0 +1,63 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_PROCESSOR_H_
+#define _LINUXKPI_ASM_PROCESSOR_H_
+
+#include <sys/types.h>
+#include <machine/cpufunc.h>
+#include <machine/cpu.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_HYGON 9
+#define X86_VENDOR_NUM 12
+
+#define X86_VENDOR_UNKNOWN 0xff
+
+struct cpuinfo_x86 {
+ uint8_t x86;
+ uint8_t x86_model;
+ uint16_t x86_clflush_size;
+ uint16_t x86_max_cores;
+ uint8_t x86_vendor;
+};
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 *__cpu_data;
+#define cpu_data(cpu) __cpu_data[cpu]
+#endif
+
+#define cpu_relax() cpu_spinwait()
+
+#endif /* _LINUXKPI_ASM_PROCESSOR_H_ */
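A short sketch of how the vendor constants and boot_cpu_data are typically consulted; the helper name is illustrative and the code is x86-only:

static bool
is_intel_example(void)
{
	return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
}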
diff --git a/sys/compat/linuxkpi/common/include/asm/set_memory.h b/sys/compat/linuxkpi/common/include/asm/set_memory.h
new file mode 100644
index 000000000000..1019aaf264a0
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/set_memory.h
@@ -0,0 +1,126 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2016 Matt Macy (mmacy@nextbsd.org)
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_SET_MEMORY_H_
+#define _LINUXKPI_ASM_SET_MEMORY_H_
+
+#include <linux/page.h>
+
+static inline int
+set_memory_uc(unsigned long addr, int numpages)
+{
+ vm_size_t len;
+
+ len = (vm_size_t)numpages << PAGE_SHIFT;
+ return (-pmap_change_attr(addr, len, VM_MEMATTR_UNCACHEABLE));
+}
+
+static inline int
+set_memory_wc(unsigned long addr, int numpages)
+{
+#ifdef VM_MEMATTR_WRITE_COMBINING
+ vm_size_t len;
+
+ len = (vm_size_t)numpages << PAGE_SHIFT;
+ return (-pmap_change_attr(addr, len, VM_MEMATTR_WRITE_COMBINING));
+#else
+ return (set_memory_uc(addr, numpages));
+#endif
+}
+
+static inline int
+set_memory_wb(unsigned long addr, int numpages)
+{
+ vm_size_t len;
+
+ len = (vm_size_t)numpages << PAGE_SHIFT;
+ return (-pmap_change_attr(addr, len, VM_MEMATTR_WRITE_BACK));
+}
+
+static inline int
+set_pages_uc(struct page *page, int numpages)
+{
+ KASSERT(numpages == 1, ("%s: numpages %d", __func__, numpages));
+
+ pmap_page_set_memattr(page, VM_MEMATTR_UNCACHEABLE);
+ return (0);
+}
+
+static inline int
+set_pages_wc(struct page *page, int numpages)
+{
+ KASSERT(numpages == 1, ("%s: numpages %d", __func__, numpages));
+
+#ifdef VM_MEMATTR_WRITE_COMBINING
+ pmap_page_set_memattr(page, VM_MEMATTR_WRITE_COMBINING);
+#else
+ return (set_pages_uc(page, numpages));
+#endif
+ return (0);
+}
+
+static inline int
+set_pages_wb(struct page *page, int numpages)
+{
+ KASSERT(numpages == 1, ("%s: numpages %d", __func__, numpages));
+
+ pmap_page_set_memattr(page, VM_MEMATTR_WRITE_BACK);
+ return (0);
+}
+
+static inline int
+set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ set_pages_wb(pages[i], 1);
+ return (0);
+}
+
+static inline int
+set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ set_pages_wc(pages[i], 1);
+ return (0);
+}
+
+static inline int
+set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ set_pages_uc(pages[i], 1);
+ return (0);
+}
+
+#endif /* _LINUXKPI_ASM_SET_MEMORY_H_ */
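A minimal sketch of the set_memory_*() helpers above; the function and its fallback policy are illustrative, not mandated by the header:

static int
map_wc_example(unsigned long kva, int npages)
{
	int error;

	/* Returns 0 or a negative errno, mirroring the Linux convention. */
	error = set_memory_wc(kva, npages);
	if (error != 0)
		error = set_memory_uc(kva, npages);
	return (error);
}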
diff --git a/sys/compat/linuxkpi/common/include/asm/smp.h b/sys/compat/linuxkpi/common/include/asm/smp.h
new file mode 100644
index 000000000000..27c3a81ef101
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/smp.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_SMP_H_
+#define _LINUXKPI_ASM_SMP_H_
+
+#include <linux/jump_label.h>
+#include <linux/preempt.h>
+#include <asm/fpu/api.h>
+
+#if defined(__i386__) || defined(__amd64__)
+
+#define wbinvd_on_all_cpus() linux_wbinvd_on_all_cpus()
+
+int linux_wbinvd_on_all_cpus(void);
+
+#endif
+
+#define get_cpu() ({ \
+ critical_enter(); \
+ PCPU_GET(cpuid); \
+})
+
+#define put_cpu() \
+ critical_exit()
+
+#endif /* _LINUXKPI_ASM_SMP_H_ */
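A small sketch of the get_cpu()/put_cpu() pair; the helper name is made up:

static int
current_cpu_example(void)
{
	int cpu;

	cpu = get_cpu();	/* critical_enter() + PCPU_GET(cpuid) */
	/* Per-CPU work that must not migrate goes here. */
	put_cpu();		/* critical_exit() */
	return (cpu);
}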
diff --git a/sys/compat/linuxkpi/common/include/asm/types.h b/sys/compat/linuxkpi/common/include/asm/types.h
new file mode 100644
index 000000000000..2e61bcfdb5e6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/types.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_TYPES_H_
+#define _LINUXKPI_ASM_TYPES_H_
+
+#if defined(_KERNEL) || defined(_STANDALONE)
+
+#include <sys/types.h>
+
+typedef uint8_t u8;
+typedef uint8_t __u8;
+typedef uint16_t u16;
+typedef uint16_t __u16;
+typedef uint32_t u32;
+typedef uint32_t __u32;
+typedef uint64_t u64;
+typedef uint64_t __u64;
+
+typedef int8_t s8;
+typedef int8_t __s8;
+typedef int16_t s16;
+typedef int16_t __s16;
+typedef int32_t s32;
+typedef int32_t __s32;
+typedef int64_t s64;
+typedef int64_t __s64;
+
+/* DMA addresses come in generic and 64-bit flavours. */
+typedef vm_paddr_t dma_addr_t;
+typedef vm_paddr_t dma64_addr_t;
+
+typedef unsigned short umode_t;
+
+#endif /* _KERNEL || _STANDALONE */
+
+#endif /* _LINUXKPI_ASM_TYPES_H_ */
diff --git a/sys/compat/linuxkpi/common/include/asm/uaccess.h b/sys/compat/linuxkpi/common/include/asm/uaccess.h
new file mode 100644
index 000000000000..94354f8ddeee
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/uaccess.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_ASM_UACCESS_H_
+#define _LINUXKPI_ASM_UACCESS_H_
+
+#include <linux/uaccess.h>
+
+static inline long
+copy_to_user(void *to, const void *from, unsigned long n)
+{
+ if (linux_copyout(from, to, n) != 0)
+ return n;
+ return 0;
+}
+#define __copy_to_user(...) copy_to_user(__VA_ARGS__)
+
+static inline long
+copy_from_user(void *to, const void *from, unsigned long n)
+{
+ if (linux_copyin(from, to, n) != 0)
+ return n;
+ return 0;
+}
+#define __copy_from_user(...) copy_from_user(__VA_ARGS__)
+#define __copy_in_user(...) copy_from_user(__VA_ARGS__)
+
+#define user_access_begin(ptr, len) access_ok(ptr, len)
+#define user_access_end() do { } while (0)
+
+#define user_write_access_begin(ptr, len) access_ok(ptr, len)
+#define user_write_access_end() do { } while (0)
+
+#define unsafe_get_user(x, ptr, err) do { \
+ if (unlikely(__get_user(x, ptr))) \
+ goto err; \
+} while (0)
+
+#define unsafe_put_user(x, ptr, err) do { \
+ if (unlikely(__put_user(x, ptr))) \
+ goto err; \
+} while (0)
+
+#endif /* _LINUXKPI_ASM_UACCESS_H_ */
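A minimal sketch of the copy_to_user() semantics implemented above; the helper name and types are illustrative:

static long
copyout_example(void *uptr, const uint32_t *kval)
{
	/* Returns the number of bytes NOT copied; 0 means success. */
	return (copy_to_user(uptr, kval, sizeof(*kval)));
}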
diff --git a/sys/compat/linuxkpi/common/include/asm/unaligned.h b/sys/compat/linuxkpi/common/include/asm/unaligned.h
new file mode 100644
index 000000000000..e45846a3b543
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/unaligned.h
@@ -0,0 +1,99 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020,2023 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_UNALIGNED_H
+#define _LINUXKPI_ASM_UNALIGNED_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+static __inline uint16_t
+get_unaligned_le16(const void *p)
+{
+
+ return (le16_to_cpup((const __le16 *)p));
+}
+
+static __inline uint32_t
+get_unaligned_le32(const void *p)
+{
+
+ return (le32_to_cpup((const __le32 *)p));
+}
+
+static __inline void
+put_unaligned_le16(__le16 v, void *p)
+{
+ __le16 x;
+
+ x = cpu_to_le16(v);
+ memcpy(p, &x, sizeof(x));
+}
+
+static __inline void
+put_unaligned_le32(__le32 v, void *p)
+{
+ __le32 x;
+
+ x = cpu_to_le32(v);
+ memcpy(p, &x, sizeof(x));
+}
+
+static __inline void
+put_unaligned_le64(__le64 v, void *p)
+{
+ __le64 x;
+
+ x = cpu_to_le64(v);
+ memcpy(p, &x, sizeof(x));
+}
+
+static __inline uint16_t
+get_unaligned_be16(const void *p)
+{
+
+ return (be16_to_cpup((const __be16 *)p));
+}
+
+static __inline uint32_t
+get_unaligned_be32(const void *p)
+{
+
+ return (be32_to_cpup((const __be32 *)p));
+}
+
+static __inline uint64_t
+get_unaligned_be64(const void *p)
+{
+
+ return (be64_to_cpup((const __be64 *)p));
+}
+
+#endif /* _LINUXKPI_ASM_UNALIGNED_H */
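A short sketch of the unaligned accessors; the function name and the +1 update are purely illustrative:

static uint32_t
unaligned_example(void *wire)
{
	uint32_t host;

	host = get_unaligned_le32(wire);	/* read LE32 at any alignment */
	put_unaligned_le32(host + 1, wire);	/* value is given in CPU order */
	return (host);
}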
diff --git a/sys/compat/linuxkpi/common/include/crypto/hash.h b/sys/compat/linuxkpi/common/include/crypto/hash.h
new file mode 100644
index 000000000000..bf401691722a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/crypto/hash.h
@@ -0,0 +1,94 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_CRYPTO_HASH_H
+#define _LINUXKPI_CRYPTO_HASH_H
+
+#include <linux/kernel.h> /* for pr_debug */
+
+struct crypto_shash {
+};
+
+struct shash_desc {
+ struct crypto_shash *tfm;
+};
+
+static inline struct crypto_shash *
+crypto_alloc_shash(const char *algostr, int x, int y)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static inline void
+crypto_free_shash(struct crypto_shash *csh)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static inline int
+crypto_shash_init(struct shash_desc *desc)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENXIO);
+}
+
+static inline int
+crypto_shash_final(struct shash_desc *desc, uint8_t *mic)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENXIO);
+}
+
+static inline int
+crypto_shash_setkey(struct crypto_shash *csh, const uint8_t *key, size_t keylen)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENXIO);
+}
+
+static inline int
+crypto_shash_update(struct shash_desc *desc, uint8_t *data, size_t datalen)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENXIO);
+}
+
+static inline void
+shash_desc_zero(struct shash_desc *desc)
+{
+
+ explicit_bzero(desc, sizeof(*desc));
+}
+
+/* XXX review this. */
+#define SHASH_DESC_ON_STACK(desc, tfm) \
+ uint8_t ___ ## desc ## _desc[sizeof(struct shash_desc)]; \
+ struct shash_desc *desc = (struct shash_desc *)___ ## desc ## _desc
+
+#endif /* _LINUXKPI_CRYPTO_HASH_H */
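A sketch of the intended call pattern for the shash API above; since the stubs return -ENXIO, this only illustrates how a consumer is expected to chain the calls (names are made up):

static int
shash_example(struct crypto_shash *tfm, uint8_t *data, size_t len,
    uint8_t *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int error;

	desc->tfm = tfm;
	error = crypto_shash_init(desc);
	if (error == 0)
		error = crypto_shash_update(desc, data, len);
	if (error == 0)
		error = crypto_shash_final(desc, mic);
	shash_desc_zero(desc);
	return (error);
}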
diff --git a/sys/compat/linuxkpi/common/include/linux/acpi.h b/sys/compat/linuxkpi/common/include/linux/acpi.h
new file mode 100644
index 000000000000..3e1ec1b20626
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/acpi.h
@@ -0,0 +1,49 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_ACPI_H_
+#define _LINUXKPI_LINUX_ACPI_H_
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+
+#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+
+#define ACPI_HANDLE(dev) \
+ ((dev)->bsddev != NULL ? bsd_acpi_get_handle((dev)->bsddev) : NULL)
+#define acpi_device_handle(dev) \
+ ((dev) != NULL ? bsd_acpi_get_handle(dev) : NULL)
+static inline void acpi_dev_put(struct acpi_device *adev) {}
+#define acpi_handle_debug(handle, fmt, ...)
+
+#endif
+
+#endif /* _LINUXKPI_LINUX_ACPI_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/acpi_amd_wbrf.h b/sys/compat/linuxkpi/common/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..92c2ead41c45
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_ACPI_AMD_WBRF_H_
+#define _LINUXKPI_LINUX_ACPI_AMD_WBRF_H_
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+struct freq_band_range {
+ uint64_t start;
+ uint64_t end;
+};
+
+struct wbrf_ranges_in_out {
+ uint64_t num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+/*
+ * The following functions currently have dummy implementations that, on Linux,
+ * are used when CONFIG_AMD_WBRF is not set at compile time.
+ */
+
+static inline bool
+acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return (false);
+}
+
+static inline int
+acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action,
+ struct wbrf_ranges_in_out *in)
+{
+ return (-ENODEV);
+}
+
+static inline bool
+acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return (false);
+}
+
+static inline int
+amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return (-ENODEV);
+}
+
+static inline int
+amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return (-ENODEV);
+}
+
+static inline int
+amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return (-ENODEV);
+}
+
+#endif /* _LINUXKPI_LINUX_ACPI_AMD_WBRF_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/agp_backend.h b/sys/compat/linuxkpi/common/include/linux/agp_backend.h
new file mode 100644
index 000000000000..c855fd842970
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/agp_backend.h
@@ -0,0 +1,28 @@
+/* Public domain */
+
+#ifndef _LINUXKPI_LINUX_AGP_BACKEND_H_
+#define _LINUXKPI_LINUX_AGP_BACKEND_H_
+
+#include <sys/types.h>
+
+struct agp_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+struct agp_kern_info {
+ struct agp_version version;
+ uint16_t vendor;
+ uint16_t device;
+ unsigned long mode;
+ unsigned long aper_base;
+ size_t aper_size;
+ int max_memory;
+ int current_memory;
+ bool cant_use_aperture;
+ unsigned long page_mask;
+};
+
+struct agp_memory;
+
+#endif /* _LINUXKPI_LINUX_AGP_BACKEND_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/anon_inodes.h b/sys/compat/linuxkpi/common/include/linux/anon_inodes.h
new file mode 100644
index 000000000000..c69f6e152b17
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/anon_inodes.h
@@ -0,0 +1,48 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_ANON_INODES_H_
+#define _LINUXKPI_LINUX_ANON_INODES_H_
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/types.h>
+
+static inline struct file *
+anon_inode_getfile(const char *name __unused,
+ const struct file_operations *fops, void *priv, int flags __unused)
+{
+ struct file *file;
+
+ file = alloc_file(FMODE_READ, fops);
+ file->private_data = priv;
+
+ return (file);
+}
+
+#endif /* _LINUXKPI_LINUX_ANON_INODES_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/aperture.h b/sys/compat/linuxkpi/common/include/linux/aperture.h
new file mode 100644
index 000000000000..7eced3cc3cb1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/aperture.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+#define CONFIG_APERTURE_HELPERS
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name);
+
+int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, name);
+}
+
+#endif
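A minimal sketch of how a KMS driver would use the PCI variant; the driver name string is made up:

static int
example_claim_aperture(struct pci_dev *pdev)
{
	/* Remove any generic framebuffer bound to this device first. */
	return (aperture_remove_conflicting_pci_devices(pdev, "example_drm"));
}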
diff --git a/sys/compat/linuxkpi/common/include/linux/apple-gmux.h b/sys/compat/linuxkpi/common/include/linux/apple-gmux.h
new file mode 100644
index 000000000000..812a782c57d4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/apple-gmux.h
@@ -0,0 +1,12 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_APPLE_GMUX_H
+#define _LINUXKPI_LINUX_APPLE_GMUX_H
+
+static inline bool
+apple_gmux_detect(void *a, void *b)
+{
+ return false;
+}
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/atomic.h b/sys/compat/linuxkpi/common/include/linux/atomic.h
new file mode 100644
index 000000000000..bc76928a7d67
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/atomic.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_ATOMIC_H_
+#define _LINUXKPI_LINUX_ATOMIC_H_
+
+#include <asm/atomic.h>
+#include <asm/atomic64.h>
+#include <asm/atomic-long.h>
+#include <asm/barrier.h>
+
+#endif /* _LINUXKPI_LINUX_ATOMIC_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/average.h b/sys/compat/linuxkpi/common/include/linux/average.h
new file mode 100644
index 000000000000..4191a351c5c6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/average.h
@@ -0,0 +1,90 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_AVERAGE_H
+#define _LINUXKPI_LINUX_AVERAGE_H
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <linux/log2.h>
+
+/* EWMA stands for Exponentially Weighted Moving Average. */
+/*
+ * Z_t = d * X_t + (1 - d) * Z_(t-1); 0 < d <= 1, t >= 1; Roberts (1959).
+ * t : observation number in time.
+ * d : weight for current observation.
+ * Xt : observations over time.
+ * Zt : EWMA value after observation t.
+ *
+ * ewma_*_read() seems to return up to [u]long-sized values; we have to deal
+ * with 32/64-bit. According to the ath5k.h change log this seems to be a
+ * fixed-(_p)recision implementation; assert 2/4 bits for the fractional part.
+ * Also all (_d) values seem to be pow2 which simplifies maths (shift by
+ * d = ilog2(_d) instead of doing division (d = 1/_d)). Keep it this way until
+ * we hit the CTASSERT.
+ */
+
+#define DECLARE_EWMA(_name, _p, _d) \
+ \
+ CTASSERT((sizeof(unsigned long) <= 4) ? (_p < 30) : (_p < 60)); \
+ CTASSERT(_d > 0 && powerof2(_d)); \
+ \
+ struct ewma_ ## _name { \
+ unsigned long zt; \
+ }; \
+ \
+ static __inline void \
+ ewma_ ## _name ## _init(struct ewma_ ## _name *ewma) \
+ { \
+ /* No target (no historical data). */ \
+ ewma->zt = 0; \
+ } \
+ \
+ static __inline void \
+ ewma_ ## _name ## _add(struct ewma_ ## _name *ewma, unsigned long x) \
+ { \
+ unsigned long ztm1 = ewma->zt; /* Z_(t-1). */ \
+ int d = ilog2(_d); \
+ \
+ if (ewma->zt == 0) \
+ ewma->zt = x << (_p); \
+ else \
+ ewma->zt = ((x << (_p)) >> d) + \
+ (((ztm1 << d) - ztm1) >> d); \
+ } \
+ \
+ static __inline unsigned long \
+ ewma_ ## _name ## _read(struct ewma_ ## _name *ewma) \
+ { \
+ return (ewma->zt >> (_p)); \
+ } \
+
+#endif /* _LINUXKPI_LINUX_AVERAGE_H */
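A brief usage sketch of DECLARE_EWMA(); the name "rssi" and the parameters are illustrative (precision 4, weight 8, a power of two as the CTASSERT requires):

/* Declares struct ewma_rssi and ewma_rssi_init()/_add()/_read(). */
DECLARE_EWMA(rssi, 4, 8)

static void
ewma_example(struct ewma_rssi *e)
{
	ewma_rssi_init(e);
	ewma_rssi_add(e, 100);
	(void)ewma_rssi_read(e);	/* running average, here still 100 */
}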
diff --git a/sys/compat/linuxkpi/common/include/linux/backlight.h b/sys/compat/linuxkpi/common/include/linux/backlight.h
new file mode 100644
index 000000000000..4f8f7440925a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/backlight.h
@@ -0,0 +1,134 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_BACKLIGHT_H_
+#define _LINUXKPI_LINUX_BACKLIGHT_H_
+
+#include <linux/notifier.h>
+
+struct backlight_device;
+
+enum backlight_type {
+ BACKLIGHT_RAW = 0,
+};
+
+struct backlight_properties {
+ int type;
+ int max_brightness;
+ int brightness;
+ int power;
+};
+
+enum backlight_notification {
+ BACKLIGHT_REGISTERED,
+ BACKLIGHT_UNREGISTERED,
+};
+
+enum backlight_update_reason {
+ BACKLIGHT_UPDATE_HOTKEY = 0
+};
+
+struct backlight_ops {
+ int options;
+#define BL_CORE_SUSPENDRESUME 1
+ int (*update_status)(struct backlight_device *);
+ int (*get_brightness)(struct backlight_device *);
+};
+
+struct backlight_device {
+ const struct backlight_ops *ops;
+ struct backlight_properties props;
+ void *data;
+ struct device *dev;
+ char *name;
+};
+
+#define bl_get_data(bd) (bd)->data
+
+struct backlight_device *linux_backlight_device_register(const char *name,
+ struct device *dev, void *data, const struct backlight_ops *ops, struct backlight_properties *props);
+void linux_backlight_device_unregister(struct backlight_device *bd);
+#define backlight_device_register(name, dev, data, ops, props) \
+ linux_backlight_device_register(name, dev, data, ops, props)
+#define backlight_device_unregister(bd) linux_backlight_device_unregister(bd)
+
+static inline int
+backlight_update_status(struct backlight_device *bd)
+{
+ return (bd->ops->update_status(bd));
+}
+
+static inline void
+backlight_force_update(struct backlight_device *bd, int reason)
+{
+ bd->props.brightness = bd->ops->get_brightness(bd);
+}
+
+static inline int
+backlight_get_brightness(struct backlight_device *bd)
+{
+
+ return (bd->props.brightness);
+}
+
+static inline int
+backlight_device_set_brightness(struct backlight_device *bd, int brightness)
+{
+
+ if (brightness > bd->props.max_brightness)
+ return (EINVAL);
+ bd->props.brightness = brightness;
+ return (bd->ops->update_status(bd));
+}
+
+static inline int
+backlight_enable(struct backlight_device *bd)
+{
+ if (bd == NULL)
+ return (0);
+ bd->props.power = 0/* FB_BLANK_UNBLANK */;
+ return (backlight_update_status(bd));
+}
+
+static inline int
+backlight_disable(struct backlight_device *bd)
+{
+ if (bd == NULL)
+ return (0);
+ bd->props.power = 4/* FB_BLANK_POWERDOWN */;
+ return (backlight_update_status(bd));
+}
+
+static inline bool
+backlight_is_blank(struct backlight_device *bd)
+{
+
+ return (bd->props.power != 0/* FB_BLANK_UNBLANK */);
+}
+
+#endif /* _LINUXKPI_LINUX_BACKLIGHT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/bcd.h b/sys/compat/linuxkpi/common/include/linux/bcd.h
new file mode 100644
index 000000000000..385819910454
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/bcd.h
@@ -0,0 +1,43 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_BCD_H
+#define _LINUXKPI_LINUX_BCD_H
+
+#include <sys/types.h>
+#include <sys/libkern.h>
+
+/* Compared to the libkern version, this one truncates the argument. */
+static inline uint8_t linuxkpi_bcd2bin(uint8_t x)
+{
+
+ return (bcd2bin(x));
+}
+
+#define bcd2bin(_x) linuxkpi_bcd2bin(_x)
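+
+/*
+ * Worked example: bcd2bin(0x42) == 42; only the low eight bits of the
+ * argument are considered.
+ */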
+
+#endif /* _LINUXKPI_LINUX_BCD_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/bitfield.h b/sys/compat/linuxkpi/common/include/linux/bitfield.h
new file mode 100644
index 000000000000..8a91b0663f37
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/bitfield.h
@@ -0,0 +1,141 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2024 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_BITFIELD_H
+#define _LINUXKPI_LINUX_BITFIELD_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* Use largest possible type. */
+static inline uint64_t ___lsb(uint64_t f) { return (f & -f); }
+static inline uint64_t ___bitmask(uint64_t f) { return (f / ___lsb(f)); }
+
+#define _uX_get_bits(_n) \
+ static __inline uint ## _n ## _t \
+ u ## _n ## _get_bits(uint ## _n ## _t v, uint ## _n ## _t f) \
+ { \
+ return ((v & f) / ___lsb(f)); \
+ }
+
+_uX_get_bits(64)
+_uX_get_bits(32)
+_uX_get_bits(16)
+_uX_get_bits(8)
+
+#define _leX_get_bits(_n) \
+ static __inline uint ## _n ## _t \
+ le ## _n ## _get_bits(__le ## _n v, uint ## _n ## _t f) \
+ { \
+ return ((le ## _n ## _to_cpu(v) & f) / ___lsb(f)); \
+ }
+
+_leX_get_bits(64)
+_leX_get_bits(32)
+_leX_get_bits(16)
+
+#define _uX_encode_bits(_n) \
+ static __inline uint ## _n ## _t \
+ u ## _n ## _encode_bits(uint ## _n ## _t v, uint ## _n ## _t f) \
+ { \
+ return ((v & ___bitmask(f)) * ___lsb(f)); \
+ }
+
+_uX_encode_bits(64)
+_uX_encode_bits(32)
+_uX_encode_bits(16)
+_uX_encode_bits(8)
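+
+/*
+ * Worked example: for the field mask 0xf0, u32_encode_bits(0x3, 0xf0)
+ * yields 0x30 and u32_get_bits(0x5a, 0xf0) yields 0x5.
+ */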
+
+#define _leX_encode_bits(_n) \
+ static __inline uint ## _n ## _t \
+ le ## _n ## _encode_bits(__le ## _n v, uint ## _n ## _t f) \
+ { \
+ return (cpu_to_le ## _n((v & ___bitmask(f)) * ___lsb(f))); \
+ }
+
+_leX_encode_bits(64)
+_leX_encode_bits(32)
+_leX_encode_bits(16)
+
+#define _leXp_replace_bits(_n) \
+ static __inline void \
+ le ## _n ## p_replace_bits(uint ## _n ## _t *p, \
+ uint ## _n ## _t v, uint ## _n ## _t f) \
+ { \
+ *p = (*p & ~(cpu_to_le ## _n(f))) | \
+ le ## _n ## _encode_bits(v, f); \
+ }
+
+_leXp_replace_bits(64)
+_leXp_replace_bits(32)
+_leXp_replace_bits(16)
+
+#define _uXp_replace_bits(_n) \
+ static __inline void \
+ u ## _n ## p_replace_bits(uint ## _n ## _t *p, \
+ uint ## _n ## _t v, uint ## _n ## _t f) \
+ { \
+ *p = (*p & ~f) | u ## _n ## _encode_bits(v, f); \
+ }
+
+_uXp_replace_bits(64)
+_uXp_replace_bits(32)
+_uXp_replace_bits(16)
+_uXp_replace_bits(8)
+
+#define _uX_replace_bits(_n) \
+ static __inline uint ## _n ## _t \
+ u ## _n ## _replace_bits(uint ## _n ## _t p, \
+ uint ## _n ## _t v, uint ## _n ## _t f) \
+ { \
+ return ((p & ~f) | u ## _n ## _encode_bits(v, f)); \
+ }
+
+_uX_replace_bits(64)
+_uX_replace_bits(32)
+_uX_replace_bits(16)
+_uX_replace_bits(8)
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_FIT(_mask, _value) \
+ (!(((typeof(_mask))(_value) << __bf_shf(_mask)) & ~(_mask)))
+
+#define FIELD_PREP(_mask, _value) \
+ (((typeof(_mask))(_value) << __bf_shf(_mask)) & (_mask))
+
+/* This would likely need extra sanity checks compared to FIELD_PREP(). */
+#define FIELD_PREP_CONST(_mask, _value) \
+ (((typeof(_mask))(_value) << __bf_shf(_mask)) & (_mask))
+
+#define FIELD_GET(_mask, _value) \
+ ((typeof(_mask))(((_value) & (_mask)) >> __bf_shf(_mask)))
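+
+/*
+ * Worked example (with GENMASK() from <linux/bitops.h>):
+ * FIELD_PREP(GENMASK(7, 4), 0x3) == 0x30 and
+ * FIELD_GET(GENMASK(7, 4), 0x5a) == 0x5, while
+ * FIELD_FIT(GENMASK(7, 4), 0x1f) is false because 0x1f does not fit
+ * into the four-bit field.
+ */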
+
+#endif /* _LINUXKPI_LINUX_BITFIELD_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/bitmap.h b/sys/compat/linuxkpi/common/include/linux/bitmap.h
new file mode 100644
index 000000000000..f26a0f99dc03
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/bitmap.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_BITMAP_H_
+#define _LINUXKPI_LINUX_BITMAP_H_
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+static inline void
+bitmap_zero(unsigned long *addr, const unsigned int size)
+{
+ memset(addr, 0, BITS_TO_LONGS(size) * sizeof(long));
+}
+
+static inline void
+bitmap_fill(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+
+ memset(addr, 0xff, BIT_WORD(size) * sizeof(long));
+
+ if (tail)
+ addr[BIT_WORD(size)] = BITMAP_LAST_WORD_MASK(tail);
+}
+
+static inline int
+bitmap_full(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int i;
+
+ for (i = 0; i != end; i++) {
+ if (addr[i] != ~0UL)
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if ((addr[end] & mask) != mask)
+ return (0);
+ }
+ return (1);
+}
+
+static inline int
+bitmap_empty(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int i;
+
+ for (i = 0; i != end; i++) {
+ if (addr[i] != 0)
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if ((addr[end] & mask) != 0)
+ return (0);
+ }
+ return (1);
+}
+
+static inline void
+bitmap_set(unsigned long *map, unsigned int start, int nr)
+{
+ const unsigned int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ map += BIT_WORD(start);
+
+ while (nr - bits_to_set >= 0) {
+ *map |= mask_to_set;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ map++;
+ }
+
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *map |= mask_to_set;
+ }
+}
+
+static inline void
+bitmap_clear(unsigned long *map, unsigned int start, int nr)
+{
+ const unsigned int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ map += BIT_WORD(start);
+
+ while (nr - bits_to_clear >= 0) {
+ *map &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ map++;
+ }
+
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *map &= ~mask_to_clear;
+ }
+}
+
+static inline unsigned int
+bitmap_find_next_zero_area_off(const unsigned long *map,
+ const unsigned int size, unsigned int start,
+ unsigned int nr, unsigned int align_mask,
+ unsigned int align_offset)
+{
+ unsigned int index;
+ unsigned int end;
+ unsigned int i;
+
+retry:
+ index = find_next_zero_bit(map, size, start);
+
+ index = (((index + align_offset) + align_mask) & ~align_mask) - align_offset;
+
+ end = index + nr;
+ if (end > size)
+ return (end);
+
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto retry;
+ }
+ return (index);
+}
+
+static inline unsigned int
+bitmap_find_next_zero_area(const unsigned long *map,
+ const unsigned int size, unsigned int start,
+ unsigned int nr, unsigned int align_mask)
+{
+ return (bitmap_find_next_zero_area_off(map, size,
+ start, nr, align_mask, 0));
+}
+
+static inline int
+bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+ int pos;
+ int end;
+
+ for (pos = 0; (end = pos + (1 << order)) <= bits; pos = end) {
+ if (!linux_reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ continue;
+ linux_reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return (pos);
+ }
+ return (-ENOMEM);
+}
+
+static inline int
+bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+ if (!linux_reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ return (-EBUSY);
+ linux_reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return (0);
+}
+
+static inline void
+bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+ linux_reg_op(bitmap, pos, order, REG_OP_RELEASE);
+}
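+
+/*
+ * Illustrative region usage; the local bitmap below is assumed, not part of
+ * this header.  A region of order 2 covers 1 << 2 = 4 aligned bits:
+ *
+ *	unsigned long map[1] = { 0 };
+ *	int pos;
+ *
+ *	pos = bitmap_find_free_region(map, BITS_PER_LONG, 2);
+ *	if (pos >= 0)
+ *		bitmap_release_region(map, pos, 2);
+ */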
+
+static inline unsigned int
+bitmap_weight(unsigned long *addr, const unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int retval = 0;
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ retval += hweight_long(addr[i]);
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ retval += hweight_long(addr[end] & mask);
+ }
+ return (retval);
+}
+
+static inline int
+bitmap_equal(const unsigned long *pa,
+ const unsigned long *pb, unsigned size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ unsigned int i;
+
+ for (i = 0; i != end; i++) {
+ if (pa[i] != pb[i])
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if ((pa[end] ^ pb[end]) & mask)
+ return (0);
+ }
+ return (1);
+}
+
+static inline int
+bitmap_subset(const unsigned long *pa,
+ const unsigned long *pb, unsigned size)
+{
+ const unsigned end = BIT_WORD(size);
+ const unsigned tail = size & (BITS_PER_LONG - 1);
+ unsigned i;
+
+ for (i = 0; i != end; i++) {
+ if (pa[i] & ~pb[i])
+ return (0);
+ }
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if (pa[end] & ~pb[end] & mask)
+ return (0);
+ }
+ return (1);
+}
+
+static inline bool
+bitmap_intersects(const unsigned long *pa, const unsigned long *pb,
+ unsigned size)
+{
+ const unsigned end = BIT_WORD(size);
+ const unsigned tail = size & (BITS_PER_LONG - 1);
+ unsigned i;
+
+ for (i = 0; i != end; i++)
+ if (pa[i] & pb[i])
+ return (true);
+
+ if (tail) {
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+
+ if (pa[end] & pb[end] & mask)
+ return (true);
+ }
+ return (false);
+}
+
+static inline void
+bitmap_complement(unsigned long *dst, const unsigned long *src,
+ const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = ~src[i];
+}
+
+static inline void
+bitmap_copy(unsigned long *dst, const unsigned long *src,
+ const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src[i];
+}
+
+static inline void
+bitmap_to_arr32(uint32_t *dst, const unsigned long *src, unsigned int size)
+{
+ const unsigned int end = howmany(size, 32);
+
+#ifdef __LP64__
+ unsigned int i = 0;
+ while (i < end) {
+ dst[i++] = (uint32_t)(*src & UINT_MAX);
+ if (i < end)
+ dst[i++] = (uint32_t)(*src >> 32);
+ src++;
+ }
+#else
+ bitmap_copy((unsigned long *)dst, src, size);
+#endif
+ if ((size % 32) != 0) /* Linux uses BITS_PER_LONG here, which seems to be a bug; use 32. */
+ dst[end - 1] &= (uint32_t)(UINT_MAX >> (32 - (size % 32)));
+}
+
+static inline void
+bitmap_from_arr32(unsigned long *dst, const uint32_t *src,
+ unsigned int size)
+{
+ const unsigned int end = BIT_WORD(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+
+#ifdef __LP64__
+ const unsigned int end32 = howmany(size, 32);
+ unsigned int i = 0;
+
+ while (i < end32) {
+ dst[i++/2] = (unsigned long) *(src++);
+ if (i < end32)
+ dst[i++/2] |= ((unsigned long) *(src++)) << 32;
+ }
+#else
+ bitmap_copy(dst, (const unsigned long *)src, size);
+#endif
+ if ((size % BITS_PER_LONG) != 0)
+ dst[end] &= BITMAP_LAST_WORD_MASK(tail);
+}
+
+static inline void
+bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] | src2[i];
+}
+
+static inline void
+bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] & src2[i];
+}
+
+static inline void
+bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] & ~src2[i];
+}
+
+static inline void
+bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, const unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ unsigned int i;
+
+ for (i = 0; i != end; i++)
+ dst[i] = src1[i] ^ src2[i];
+}
+
+static inline void
+bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int size)
+{
+ const unsigned int end = BITS_TO_LONGS(size);
+ const unsigned int tail = size & (BITS_PER_LONG - 1);
+ const unsigned long mask = BITMAP_LAST_WORD_MASK(tail);
+ const unsigned int off = BIT_WORD(shift);
+ const unsigned int rem = shift & (BITS_PER_LONG - 1);
+ unsigned long left, right;
+ unsigned int i, srcpos;
+
+ for (i = 0, srcpos = off; srcpos < end; i++, srcpos++) {
+ right = src[srcpos];
+ left = 0;
+
+ if (srcpos == end - 1)
+ right &= mask;
+
+ if (rem != 0) {
+ right >>= rem;
+ if (srcpos + 1 < end) {
+ left = src[srcpos + 1];
+ if (srcpos + 1 == end - 1)
+ left &= mask;
+ left <<= (BITS_PER_LONG - rem);
+ }
+ }
+ dst[i] = left | right;
+ }
+ if (off != 0)
+ memset(dst + end - off, 0, off * sizeof(unsigned long));
+}
+
+static inline unsigned long *
+bitmap_alloc(unsigned int size, gfp_t flags)
+{
+ return (kmalloc_array(BITS_TO_LONGS(size),
+ sizeof(unsigned long), flags));
+}
+
+static inline unsigned long *
+bitmap_zalloc(unsigned int size, gfp_t flags)
+{
+ return (bitmap_alloc(size, flags | __GFP_ZERO));
+}
+
+static inline void
+bitmap_free(const unsigned long *bitmap)
+{
+ kfree(bitmap);
+}
+
+#endif /* _LINUXKPI_LINUX_BITMAP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/bitops.h b/sys/compat/linuxkpi/common/include/linux/bitops.h
new file mode 100644
index 000000000000..bc776a0db9c4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/bitops.h
@@ -0,0 +1,437 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_BITOPS_H_
+#define _LINUXKPI_LINUX_BITOPS_H_
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#include <sys/libkern.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#ifdef __LP64__
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif
+
+#define BITS_PER_LONG_LONG 64
+
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n)))
+#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
+#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
+#define GENMASK_ULL(h, l) (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
+#define BITS_PER_BYTE 8
+#define BITS_PER_TYPE(t) (sizeof(t) * BITS_PER_BYTE)
+#define BITS_TO_BYTES(n) howmany((n), BITS_PER_BYTE)
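+
+/*
+ * Worked example: GENMASK(15, 8) == 0xff00UL and
+ * GENMASK_ULL(39, 32) == 0xff00000000ULL.
+ */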
+
+#define hweight8(x) bitcount((uint8_t)(x))
+#define hweight16(x) bitcount16(x)
+#define hweight32(x) bitcount32(x)
+#define hweight64(x) bitcount64(x)
+#define hweight_long(x) bitcountl(x)
+
+#define HWEIGHT8(x) (bitcount8((uint8_t)(x)))
+#define HWEIGHT16(x) (bitcount16(x))
+#define HWEIGHT32(x) (bitcount32(x))
+#define HWEIGHT64(x) (bitcount64(x))
+
+static inline int
+__ffs(int mask)
+{
+ return (ffs(mask) - 1);
+}
+
+static inline int
+__fls(int mask)
+{
+ return (fls(mask) - 1);
+}
+
+static inline int
+__ffsl(long mask)
+{
+ return (ffsl(mask) - 1);
+}
+
+static inline unsigned long
+__ffs64(uint64_t mask)
+{
+ return (ffsll(mask) - 1);
+}
+
+static inline int
+__flsl(long mask)
+{
+ return (flsl(mask) - 1);
+}
+
+static inline int
+fls64(uint64_t mask)
+{
+ return (flsll(mask));
+}
+
+static inline uint32_t
+ror32(uint32_t word, unsigned int shift)
+{
+ return ((word >> shift) | (word << (32 - shift)));
+}
+
+#define ffz(mask) __ffs(~(mask))
+
+static inline int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ long mask;
+ int bit;
+
+ for (bit = 0; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (*addr == 0)
+ continue;
+ return (bit + __ffsl(*addr));
+ }
+ if (size) {
+ mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+static inline unsigned long
+find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ long mask;
+ int bit;
+
+ for (bit = 0; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (~(*addr) == 0)
+ continue;
+ return (bit + __ffsl(~(*addr)));
+ }
+ if (size) {
+ mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+static inline unsigned long
+find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ long mask;
+ int offs;
+ int bit;
+ int pos;
+
+ pos = size / BITS_PER_LONG;
+ offs = size % BITS_PER_LONG;
+ bit = BITS_PER_LONG * pos;
+ addr += pos;
+ if (offs) {
+ mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
+ if (mask)
+ return (bit + __flsl(mask));
+ }
+ while (pos--) {
+ addr--;
+ bit -= BITS_PER_LONG;
+ if (*addr)
+ return (bit + __flsl(*addr));
+ }
+ return (size);
+}
+
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ long mask;
+ int offs;
+ int bit;
+ int pos;
+
+ if (offset >= size)
+ return (size);
+ pos = offset / BITS_PER_LONG;
+ offs = offset % BITS_PER_LONG;
+ bit = BITS_PER_LONG * pos;
+ addr += pos;
+ if (offs) {
+ mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
+ if (mask)
+ return (bit + __ffsl(mask));
+ if (size - bit <= BITS_PER_LONG)
+ return (size);
+ bit += BITS_PER_LONG;
+ addr++;
+ }
+ for (size -= bit; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (*addr == 0)
+ continue;
+ return (bit + __ffsl(*addr));
+ }
+ if (size) {
+ mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+static inline unsigned long
+find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ long mask;
+ int offs;
+ int bit;
+ int pos;
+
+ if (offset >= size)
+ return (size);
+ pos = offset / BITS_PER_LONG;
+ offs = offset % BITS_PER_LONG;
+ bit = BITS_PER_LONG * pos;
+ addr += pos;
+ if (offs) {
+ mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
+ if (mask)
+ return (bit + __ffsl(mask));
+ if (size - bit <= BITS_PER_LONG)
+ return (size);
+ bit += BITS_PER_LONG;
+ addr++;
+ }
+ for (size -= bit; size >= BITS_PER_LONG;
+ size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+ if (~(*addr) == 0)
+ continue;
+ return (bit + __ffsl(~(*addr)));
+ }
+ if (size) {
+ mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
+ if (mask)
+ bit += __ffsl(mask);
+ else
+ bit += size;
+ }
+ return (bit);
+}
+
+#define __set_bit(i, a) \
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define set_bit(i, a) \
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define __clear_bit(i, a) \
+ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define clear_bit(i, a) \
+ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define clear_bit_unlock(i, a) \
+ atomic_clear_rel_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define test_bit(i, a) \
+ !!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
+
+static inline void
+__assign_bit(long bit, volatile unsigned long *addr, bool value)
+{
+ if (value)
+ __set_bit(bit, addr);
+ else
+ __clear_bit(bit, addr);
+}
+
+static inline int
+test_and_clear_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+
+ val = *var;
+ while (!atomic_fcmpset_long(var, &val, val & ~bit))
+ ;
+ return !!(val & bit);
+}
+
+static inline int
+__test_and_clear_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+
+ val = *var;
+ *var &= ~bit;
+
+ return !!(val & bit);
+}
+
+static inline int
+test_and_set_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+
+ val = *var;
+ while (!atomic_fcmpset_long(var, &val, val | bit))
+ ;
+ return !!(val & bit);
+}
+
+static inline int
+__test_and_set_bit(long bit, volatile unsigned long *var)
+{
+ long val;
+
+ var += BIT_WORD(bit);
+ bit %= BITS_PER_LONG;
+ bit = (1UL << bit);
+
+ val = *var;
+ *var |= bit;
+
+ return !!(val & bit);
+}
+
+enum {
+ REG_OP_ISFREE,
+ REG_OP_ALLOC,
+ REG_OP_RELEASE,
+};
+
+static inline int
+linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+ int nbits_reg;
+ int index;
+ int offset;
+ int nlongs_reg;
+ int nbitsinlong;
+ unsigned long mask;
+ int i;
+ int ret = 0;
+
+ nbits_reg = 1 << order;
+ index = pos / BITS_PER_LONG;
+ offset = pos - (index * BITS_PER_LONG);
+ nlongs_reg = BITS_TO_LONGS(nbits_reg);
+ nbitsinlong = MIN(nbits_reg, BITS_PER_LONG);
+
+ mask = (1UL << (nbitsinlong - 1));
+ mask += mask - 1;
+ mask <<= offset;
+
+ switch (reg_op) {
+ case REG_OP_ISFREE:
+ for (i = 0; i < nlongs_reg; i++) {
+ if (bitmap[index + i] & mask)
+ goto done;
+ }
+ ret = 1;
+ break;
+
+ case REG_OP_ALLOC:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] |= mask;
+ break;
+
+ case REG_OP_RELEASE:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] &= ~mask;
+ break;
+ }
+done:
+ return ret;
+}
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
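+
+/*
+ * Illustrative iteration; the locals below are assumed.  For mask 0x5 the
+ * loop body runs for bits 0 and 2:
+ *
+ *	unsigned long mask = 0x5;
+ *	int b;
+ *
+ *	for_each_set_bit(b, &mask, BITS_PER_LONG)
+ *		printf("bit %d is set\n", b);
+ */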
+
+static inline uint64_t
+sign_extend64(uint64_t value, int index)
+{
+ uint8_t shift = 63 - index;
+
+ return ((int64_t)(value << shift) >> shift);
+}
+
+static inline uint32_t
+sign_extend32(uint32_t value, int index)
+{
+ uint8_t shift = 31 - index;
+
+ return ((int32_t)(value << shift) >> shift);
+}
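+
+/*
+ * Worked example: sign_extend32(0x80, 7) == 0xffffff80 and
+ * sign_extend32(0x7f, 7) == 0x7f.
+ */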
+
+#endif /* _LINUXKPI_LINUX_BITOPS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/bottom_half.h b/sys/compat/linuxkpi/common/include/linux/bottom_half.h
new file mode 100644
index 000000000000..12b170845cbc
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/bottom_half.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2017 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_BOTTOM_HALF_H_
+#define _LINUXKPI_LINUX_BOTTOM_HALF_H_
+
+extern void local_bh_enable(void);
+extern void local_bh_disable(void);
+
+#endif /* _LINUXKPI_LINUX_BOTTOM_HALF_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/bsearch.h b/sys/compat/linuxkpi/common/include/linux/bsearch.h
new file mode 100644
index 000000000000..fb67109e4bba
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/bsearch.h
@@ -0,0 +1,36 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_BSEARCH_H
+#define _LINUXKPI_LINUX_BSEARCH_H
+
+#include <sys/libkern.h>
+
+#endif /* _LINUXKPI_LINUX_BSEARCH_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/build_bug.h b/sys/compat/linuxkpi/common/include/linux/build_bug.h
new file mode 100644
index 000000000000..6a026376cfc8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/build_bug.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ * Copyright (c) 2018 Johannes Lundberg <johalun0@gmail.com>
+ * Copyright (c) 2021 The FreeBSD Foundation
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ * Copyright (c) 2023 Serenity Cyber Security, LLC
+ *
+ * Portions of this software were developed by Bjoern A. Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_BUILD_BUG_H_
+#define _LINUXKPI_LINUX_BUILD_BUG_H_
+
+#include <sys/param.h>
+
+#include <linux/compiler.h>
+
+/*
+ * BUILD_BUG_ON() can be used inside functions, where _Static_assert() does
+ * not seem to work.  Use an old-school-ish CTASSERT from before commit
+ * a3085588a88fa58eb5b1eaae471999e1995a29cf, but also make sure we do not
+ * end up with an unused typedef or variable. The compiler should optimise
+ * it away entirely.
+ */
+#define _O_CTASSERT(x) _O__CTASSERT(x, __LINE__)
+#define _O__CTASSERT(x, y) _O___CTASSERT(x, y)
+#define _O___CTASSERT(x, y) while (0) { \
+ typedef char __assert_line_ ## y[(x) ? 1 : -1]; \
+ __assert_line_ ## y _x __unused; \
+ _x[0] = '\0'; \
+}
+
+#define BUILD_BUG() do { CTASSERT(0); } while (0)
+#define BUILD_BUG_ON(x) do { _O_CTASSERT(!(x)) } while (0)
+#define BUILD_BUG_ON_MSG(x, msg) BUILD_BUG_ON(x)
+#define BUILD_BUG_ON_NOT_POWER_OF_2(x) BUILD_BUG_ON(!powerof2(x))
+#define BUILD_BUG_ON_INVALID(expr) while (0) { (void)(expr); }
+#define BUILD_BUG_ON_ZERO(x) ((int)sizeof(struct { int:-((x) != 0); }))
+
+#define static_assert(x, ...) __static_assert(x, ##__VA_ARGS__, #x)
+#define __static_assert(x, msg, ...) _Static_assert(x, msg)
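+
+/*
+ * Illustrative use; "struct foo" below is assumed.  BUILD_BUG_ON() is meant
+ * for use inside function bodies, while static_assert() maps to
+ * _Static_assert():
+ *
+ *	static_assert(sizeof(uint32_t) == 4, "uint32_t size");
+ *
+ *	static void
+ *	f(void)
+ *	{
+ *		BUILD_BUG_ON(sizeof(struct foo) > 64);
+ *	}
+ */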
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/cache.h b/sys/compat/linuxkpi/common/include/linux/cache.h
new file mode 100644
index 000000000000..b02b28d08ea9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cache.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_CACHE_H_
+#define _LINUXKPI_LINUX_CACHE_H_
+
+#include <sys/param.h>
+
+#define cache_line_size() CACHE_LINE_SIZE
+#define L1_CACHE_BYTES CACHE_LINE_SIZE
+#define L1_CACHE_ALIGN(x) ALIGN(x, CACHE_LINE_SIZE)
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#endif /* _LINUXKPI_LINUX_CACHE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/capability.h b/sys/compat/linuxkpi/common/include/linux/capability.h
new file mode 100644
index 000000000000..e3dacd4e9f15
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/capability.h
@@ -0,0 +1,51 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2015 Rimvydas Jasinskas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * linux/capability.h
+ *
+ * Simple capable() priv_check helper
+ */
+
+#ifndef _LINUXKPI_LINUX_CAPABILITY_H
+#define _LINUXKPI_LINUX_CAPABILITY_H
+
+#include <sys/types.h>
+#include <sys/proc.h>
+#include <sys/priv.h>
+
+#define CAP_SYS_ADMIN PRIV_DRIVER
+#define CAP_SYS_NICE PRIV_SCHED_SETPRIORITY
+
+static inline bool
+capable(const int tryme)
+{
+ return (priv_check(curthread, tryme) == 0);
+}
+
+#endif /* _LINUXKPI_LINUX_CAPABILITY_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/cc_platform.h b/sys/compat/linuxkpi/common/include/linux/cc_platform.h
new file mode 100644
index 000000000000..1544c141614b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cc_platform.h
@@ -0,0 +1,21 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_CC_PLATFORM_H_
+#define _LINUXKPI_LINUX_CC_PLATFORM_H_
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+enum cc_attr {
+ CC_ATTR_MEM_ENCRYPT,
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+};
+
+static inline bool
+cc_platform_has(enum cc_attr attr __unused)
+{
+
+ return (false);
+}
+
+#endif /* _LINUXKPI_LINUX_CC_PLATFORM_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/cdev.h b/sys/compat/linuxkpi/common/include/linux/cdev.h
new file mode 100644
index 000000000000..d989db14c2f8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cdev.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_CDEV_H_
+#define _LINUXKPI_LINUX_CDEV_H_
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+
+#include <asm/atomic-long.h>
+
+struct device;
+struct file_operations;
+struct inode;
+struct module;
+
+extern struct cdevsw linuxcdevsw;
+extern const struct kobj_type linux_cdev_ktype;
+extern const struct kobj_type linux_cdev_static_ktype;
+
+struct linux_cdev {
+ struct kobject kobj;
+ struct module *owner;
+ struct cdev *cdev;
+ dev_t dev;
+ const struct file_operations *ops;
+ u_int refs;
+ u_int siref;
+};
+
+struct linux_cdev *cdev_alloc(void);
+
+static inline void
+cdev_init(struct linux_cdev *cdev, const struct file_operations *ops)
+{
+
+ kobject_init(&cdev->kobj, &linux_cdev_static_ktype);
+ cdev->ops = ops;
+ cdev->refs = 1;
+}
+
+static inline void
+cdev_put(struct linux_cdev *p)
+{
+ kobject_put(&p->kobj);
+}
+
+static inline int
+cdev_add(struct linux_cdev *cdev, dev_t dev, unsigned count)
+{
+ struct make_dev_args args;
+ int error;
+
+ if (count != 1)
+ return (-EINVAL);
+
+ cdev->dev = dev;
+
+ /* Set up arguments for make_dev_s(). */
+ make_dev_args_init(&args);
+ args.mda_devsw = &linuxcdevsw;
+ args.mda_uid = 0;
+ args.mda_gid = 0;
+ args.mda_mode = 0700;
+ args.mda_si_drv1 = cdev;
+
+ error = make_dev_s(&args, &cdev->cdev, "%s",
+ kobject_name(&cdev->kobj));
+ if (error)
+ return (-error);
+
+ kobject_get(cdev->kobj.parent);
+ return (0);
+}
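+
+/*
+ * Illustrative sketch; my_fops and major are assumed, not part of this
+ * header.  The kobject must be named before cdev_add() because make_dev_s()
+ * uses kobject_name() for the device node:
+ *
+ *	static struct linux_cdev my_cdev;
+ *
+ *	cdev_init(&my_cdev, &my_fops);
+ *	kobject_set_name(&my_cdev.kobj, "mydev");
+ *	if (cdev_add(&my_cdev, MKDEV(major, 0), 1) != 0)
+ *		return (-ENXIO);
+ */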
+
+static inline int
+cdev_add_ext(struct linux_cdev *cdev, dev_t dev, uid_t uid, gid_t gid, int mode)
+{
+ struct make_dev_args args;
+ int error;
+
+ cdev->dev = dev;
+
+ /* Set up arguments for make_dev_s(). */
+ make_dev_args_init(&args);
+ args.mda_devsw = &linuxcdevsw;
+ args.mda_uid = uid;
+ args.mda_gid = gid;
+ args.mda_mode = mode;
+ args.mda_si_drv1 = cdev;
+
+ error = make_dev_s(&args, &cdev->cdev, "%s/%d",
+ kobject_name(&cdev->kobj), MINOR(dev));
+ if (error)
+ return (-error);
+
+ kobject_get(cdev->kobj.parent);
+ return (0);
+}
+
+static inline void
+cdev_del(struct linux_cdev *cdev)
+{
+ kobject_put(&cdev->kobj);
+}
+
+struct linux_cdev *linux_find_cdev(const char *name, unsigned major, unsigned minor);
+
+int linux_cdev_device_add(struct linux_cdev *, struct device *);
+void linux_cdev_device_del(struct linux_cdev *, struct device *);
+
+#define cdev_device_add(...) \
+ linux_cdev_device_add(__VA_ARGS__)
+#define cdev_device_del(...) \
+ linux_cdev_device_del(__VA_ARGS__)
+
+#define cdev linux_cdev
+
+#endif /* _LINUXKPI_LINUX_CDEV_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/cec.h b/sys/compat/linuxkpi/common/include/linux/cec.h
new file mode 100644
index 000000000000..e0854d87d85c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cec.h
@@ -0,0 +1,8 @@
+/* Public domain */
+
+#ifndef _LINUXKPI_LINUX_CEC_H_
+#define _LINUXKPI_LINUX_CEC_H_
+
+#define CEC_PHYS_ADDR_INVALID 0xffff
+
+#endif /* _LINUXKPI_LINUX_CEC_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/cgroup.h b/sys/compat/linuxkpi/common/include/linux/cgroup.h
new file mode 100644
index 000000000000..a9dd22fd0f4c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cgroup.h
@@ -0,0 +1,34 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_CGROUP_H_
+#define _LINUXKPI_LINUX_CGROUP_H_
+
+#include <linux/kernel_stat.h>
+
+#endif /* _LINUXKPI_LINUX_CGROUP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/circ_buf.h b/sys/compat/linuxkpi/common/include/linux/circ_buf.h
new file mode 100644
index 000000000000..53d7fa736ef8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/circ_buf.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_CIRC_BUF_H_
+#define _LINUXKPI_LINUX_CIRC_BUF_H_
+
+#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size) - 1))
+#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head) + 1),(size))
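+
+/*
+ * Worked example: size must be a power of two.  With size 16, head 14 and
+ * tail 3, CIRC_CNT(14, 3, 16) == 11 and CIRC_SPACE(14, 3, 16) == 4, so
+ * count plus space always equals size - 1.
+ */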
+
+#endif /* _LINUXKPI_LINUX_CIRC_BUF_H_ */
+
diff --git a/sys/compat/linuxkpi/common/include/linux/cleanup.h b/sys/compat/linuxkpi/common/include/linux/cleanup.h
new file mode 100644
index 000000000000..01f234f0cbe7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cleanup.h
@@ -0,0 +1,46 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ */
+
+#ifndef _LINUXKPI_LINUX_CLEANUP_H
+#define _LINUXKPI_LINUX_CLEANUP_H
+
+#define __cleanup(_f) __attribute__((__cleanup__(_f)))
+
+/*
+ * Note: "_T" are special as they are exposed into common code for
+ * statements. Extra care should be taken when changing the code.
+ */
+#define DEFINE_GUARD(_n, _dt, _lock, _unlock) \
+ \
+ typedef _dt guard_ ## _n ## _t; \
+ \
+ static inline _dt \
+ guard_ ## _n ## _create( _dt _T) \
+ { \
+ _dt c; \
+ \
+ c = ({ _lock; _T; }); \
+ return (c); \
+ } \
+ \
+ static inline void \
+ guard_ ## _n ## _destroy(_dt *t) \
+ { \
+ _dt _T; \
+ \
+ _T = *t; \
+ if (_T) { _unlock; }; \
+ }
+
+/* We need to keep these calls unique. */
+#define guard(_n) \
+ guard_ ## _n ## _t guard_ ## _n ## _ ## __COUNTER__ \
+ __cleanup(guard_ ## _n ## _destroy) = guard_ ## _n ## _create
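+
+/*
+ * Illustrative sketch; the mutex guard below is an assumed example and is
+ * not defined by this header:
+ *
+ *	DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+ *
+ *	void
+ *	f(struct mutex *m)
+ *	{
+ *		guard(mutex)(m);
+ *		... the mutex is dropped automatically on scope exit ...
+ *	}
+ */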
+
+#endif /* _LINUXKPI_LINUX_CLEANUP_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/clocksource.h b/sys/compat/linuxkpi/common/include/linux/clocksource.h
new file mode 100644
index 000000000000..3e7664c3e57e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/clocksource.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_CLOCKSOURCE_H
+#define _LINUXKPI_LINUX_CLOCKSOURCE_H
+
+#include <asm/types.h>
+
+#define CLOCKSOURCE_MASK(x) ((u64)(-1ULL >> ((-(x)) & 63)))
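+
+/*
+ * Worked example: CLOCKSOURCE_MASK(32) == 0xffffffffULL and
+ * CLOCKSOURCE_MASK(64) == ~0ULL.
+ */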
+
+#endif /* _LINUXKPI_LINUX_CLOCKSOURCE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/compat.h b/sys/compat/linuxkpi/common/include/linux/compat.h
new file mode 100644
index 000000000000..8a5a6918bb7c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/compat.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_COMPAT_H_
+#define _LINUXKPI_LINUX_COMPAT_H_
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+
+struct domainset;
+struct thread;
+struct task_struct;
+
+extern int linux_alloc_current(struct thread *, int flags);
+extern void linux_free_current(struct task_struct *);
+extern struct domainset *linux_get_vm_domain_set(int node);
+
+#define __current_unallocated(td) \
+ __predict_false((td)->td_lkpi_task == NULL)
+
+static inline void
+linux_set_current(struct thread *td)
+{
+ if (__current_unallocated(td))
+ lkpi_alloc_current(td, M_WAITOK);
+}
+
+static inline int
+linux_set_current_flags(struct thread *td, int flags)
+{
+ if (__current_unallocated(td))
+ return (lkpi_alloc_current(td, flags));
+ return (0);
+}
+
+#define compat_ptr(x) ((void *)(uintptr_t)x)
+#define ptr_to_compat(x) ((uintptr_t)x)
+
+typedef void fpu_safe_exec_cb_t(void *ctx);
+void lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx);
+
+#endif /* _LINUXKPI_LINUX_COMPAT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/compiler.h b/sys/compat/linuxkpi/common/include/linux/compiler.h
new file mode 100644
index 000000000000..fb5ad3bf4fe4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/compiler.h
@@ -0,0 +1,133 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * Copyright (c) 2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_COMPILER_H_
+#define _LINUXKPI_LINUX_COMPILER_H_
+
+#include <sys/cdefs.h>
+
+#define __user
+#define __kernel
+#define __safe
+#define __force
+#define __nocast
+#define __iomem
+#define __chk_user_ptr(x) ((void)0)
+#define __chk_io_ptr(x) ((void)0)
+#define __builtin_warning(x, y...) (1)
+#define __acquires(x)
+#define __releases(x)
+#define __acquire(x) do { } while (0)
+#define __release(x) do { } while (0)
+#define __cond_lock(x,c) (c)
+#define __bitwise
+#define __devinitdata
+#ifndef __deprecated
+#define __deprecated
+#endif
+#define __init
+#define __initconst
+#define __devinit
+#define __devexit
+#define __exit
+#define __rcu
+#define __percpu
+#define __weak __weak_symbol
+#define __malloc
+#define __attribute_const__ __attribute__((__const__))
+#undef __always_inline
+#define __always_inline inline
+#define noinline __noinline
+#define noinline_for_stack __noinline
+#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
+#define ____cacheline_aligned_in_smp __aligned(CACHE_LINE_SIZE)
+#define fallthrough /* FALLTHROUGH */ do { } while(0)
+
+#if __has_attribute(__nonstring__)
+#define __nonstring __attribute__((__nonstring__))
+#else
+#define __nonstring
+#endif
+#if __has_attribute(__counted_by__)
+#define __counted_by(_x) __attribute__((__counted_by__(_x)))
+#else
+#define __counted_by(_x)
+#endif
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define typeof(x) __typeof(x)
+
+#define uninitialized_var(x) x = x
+#define __maybe_unused __unused
+#define __always_unused __unused
+#define __must_check __result_use_check
+
+#define __printf(a,b) __printflike(a,b)
+
+#define __diag_push()
+#define __diag_pop()
+#define __diag_ignore_all(...)
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#define lower_32_bits(n) ((u32)(n))
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#define WRITE_ONCE(x,v) do { \
+ barrier(); \
+ (*(volatile __typeof(x) *)(uintptr_t)&(x)) = (v); \
+ barrier(); \
+} while (0)
+
+#define READ_ONCE(x) ({ \
+ __typeof(x) __var = ({ \
+ barrier(); \
+ (*(const volatile __typeof(x) *)&(x)); \
+ }); \
+ barrier(); \
+ __var; \
+})
+
+#define lockless_dereference(p) READ_ONCE(p)
+
+#define _AT(T,X) ((T)(X))
+
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#define __must_be_array(a) __same_type(a, &(a)[0])
+
+#define sizeof_field(_s, _m) sizeof(((_s *)0)->_m)
+
+#define is_signed_type(t) ((t)-1 < (t)1)
+#define is_unsigned_type(t) ((t)-1 > (t)1)
+
+#endif /* _LINUXKPI_LINUX_COMPILER_H_ */
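Usage note (not part of the diff): READ_ONCE()/WRITE_ONCE() force single, untorn accesses through a volatile cast, which is what code sharing a plain variable with an interrupt handler relies on. A minimal sketch with hypothetical names:

    #include <linux/compiler.h>

    /* Hypothetical flag shared between a wait loop and an interrupt handler. */
    static int example_done;

    static void
    example_irq_handler(void)
    {
            WRITE_ONCE(example_done, 1);            /* single store, not torn or elided */
    }

    static void
    example_busy_wait(void)
    {
            while (!READ_ONCE(example_done))        /* fresh load on every iteration */
                    barrier();
    }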
diff --git a/sys/compat/linuxkpi/common/include/linux/completion.h b/sys/compat/linuxkpi/common/include/linux/completion.h
new file mode 100644
index 000000000000..9f8bebb4cf82
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/completion.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_COMPLETION_H_
+#define _LINUXKPI_LINUX_COMPLETION_H_
+
+#include <linux/errno.h>
+
+struct completion {
+ unsigned int done;
+};
+
+#define INIT_COMPLETION(c) \
+ ((c).done = 0)
+#define init_completion(c) \
+ do { (c)->done = 0; } while (0)
+#define reinit_completion(c) \
+ do { (c)->done = 0; } while (0)
+#define complete(c) \
+ linux_complete_common((c), 0)
+#define complete_all(c) \
+ linux_complete_common((c), 1)
+#define wait_for_completion(c) \
+ linux_wait_for_common((c), 0)
+#define wait_for_completion_interruptible(c) \
+ linux_wait_for_common((c), 1)
+#define wait_for_completion_timeout(c, timeout) \
+ linux_wait_for_timeout_common((c), (timeout), 0)
+#define wait_for_completion_interruptible_timeout(c, timeout) \
+ linux_wait_for_timeout_common((c), (timeout), 1)
+#define try_wait_for_completion(c) \
+ linux_try_wait_for_completion(c)
+#define completion_done(c) \
+ linux_completion_done(c)
+
+extern void linux_complete_common(struct completion *, int);
+extern int linux_wait_for_common(struct completion *, int);
+extern unsigned long linux_wait_for_timeout_common(struct completion *,
+ unsigned long, int);
+extern int linux_try_wait_for_completion(struct completion *);
+extern int linux_completion_done(struct completion *);
+
+#endif /* _LINUXKPI_LINUX_COMPLETION_H_ */
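Usage note (not part of the diff): the wrappers above map one-to-one onto the linux_*_common() functions, so completions follow the usual producer/consumer pattern. A minimal sketch with a hypothetical example_softc:

    #include <linux/completion.h>

    struct example_softc {
            struct completion fw_done;
    };

    /* Interrupt path: firmware signalled that it finished loading. */
    static void
    example_fw_irq(struct example_softc *sc)
    {
            complete(&sc->fw_done);
    }

    static int
    example_load_fw(struct example_softc *sc, unsigned long timeout_jiffies)
    {
            init_completion(&sc->fw_done);
            /* ... kick off the (hypothetical) firmware load ... */
            if (wait_for_completion_timeout(&sc->fw_done, timeout_jiffies) == 0)
                    return (-ETIMEDOUT);
            return (0);
    }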
diff --git a/sys/compat/linuxkpi/common/include/linux/console.h b/sys/compat/linuxkpi/common/include/linux/console.h
new file mode 100644
index 000000000000..09f486203815
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/console.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_CONSOLE_H_
+#define _LINUXKPI_LINUX_CONSOLE_H_
+
+#include <linux/types.h>
+
+static inline void
+console_lock(void)
+{
+}
+
+static inline int
+console_trylock(void)
+{
+ return (1);
+}
+
+static inline void
+console_unlock(void)
+{
+}
+
+static inline bool
+vgacon_text_force(void)
+{
+
+ return (false);
+}
+
+#endif /* _LINUXKPI_LINUX_CONSOLE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/container_of.h b/sys/compat/linuxkpi/common/include/linux/container_of.h
new file mode 100644
index 000000000000..7210d531b055
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/container_of.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2017 Matt Macy <mmacy@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_CONTAINER_OF_H
+#define _LINUXKPI_LINUX_CONTAINER_OF_H
+
+#include <sys/stdint.h>
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define container_of(ptr, type, member) \
+({ \
+ const __typeof(((type *)0)->member) *__p = (ptr); \
+ (type *)((uintptr_t)__p - offsetof(type, member)); \
+})
+
+#define container_of_const(ptr, type, member) \
+ _Generic(ptr, \
+ const typeof(*(ptr)) *: \
+ (const type *)container_of(ptr, type, member), \
+ default: \
+ container_of(ptr, type, member) \
+ )
+
+#define typeof_member(type, member) __typeof(((type *)0)->member)
+
+#endif
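Usage note (not part of the diff): container_of() recovers the enclosing structure from a pointer to one of its members, which is how embedded list nodes are resolved back to their owners. A minimal sketch with hypothetical names:

    #include <linux/container_of.h>
    #include <linux/list.h>

    struct example_item {
            int                     value;
            struct list_head        entry;  /* embedded list linkage */
    };

    static int
    example_value(struct list_head *node)
    {
            struct example_item *item;

            item = container_of(node, struct example_item, entry);
            return (item->value);
    }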
diff --git a/sys/compat/linuxkpi/common/include/linux/cpu.h b/sys/compat/linuxkpi/common/include/linux/cpu.h
new file mode 100644
index 000000000000..43ec3d66a2e3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cpu.h
@@ -0,0 +1,78 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_CPU_H
+#define _LINUXKPI_LINUX_CPU_H
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/cpuset.h>
+#include <sys/smp.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+
+typedef cpuset_t cpumask_t;
+
+extern cpumask_t cpu_online_mask;
+
+cpumask_t *lkpi_get_static_single_cpu_mask(int);
+
+static __inline int
+cpumask_next(int cpuid, cpumask_t mask)
+{
+
+ /*
+ * Drivers may legitimately pass -1 as the starting cpuid,
+ * but -1 is never a valid cpuid stored in a set!
+ */
+ KASSERT((cpuid >= -1 && cpuid <= MAXCPU), ("%s: invalid cpuid %d\n",
+ __func__, cpuid));
+ KASSERT(!CPU_EMPTY(&mask), ("%s: empty CPU mask", __func__));
+
+ do {
+ cpuid++;
+#ifdef SMP
+ if (cpuid > mp_maxid)
+#endif
+ cpuid = 0;
+ } while (!CPU_ISSET(cpuid, &mask));
+ return (cpuid);
+}
+
+static __inline void
+cpumask_set_cpu(int cpu, cpumask_t *mask)
+{
+
+ CPU_SET(cpu, mask);
+}
+
+#define cpumask_of(_cpu) (lkpi_get_static_single_cpu_mask(_cpu))
+
+#endif /* _LINUXKPI_LINUX_CPU_H */
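Usage note (not part of the diff): cpumask_next() wraps past mp_maxid, so iteration code has to notice when it returns to its starting CPU. A minimal sketch, assuming the hypothetical helper only runs while at least one CPU is online:

    #include <linux/cpu.h>

    static int
    example_count_online_cpus(void)
    {
            int cpu, first, count;

            first = cpumask_next(-1, cpu_online_mask);
            count = 1;
            for (cpu = cpumask_next(first, cpu_online_mask); cpu != first;
                cpu = cpumask_next(cpu, cpu_online_mask))
                    count++;
            return (count);
    }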
diff --git a/sys/compat/linuxkpi/common/include/linux/cpufeature.h b/sys/compat/linuxkpi/common/include/linux/cpufeature.h
new file mode 100644
index 000000000000..746d1a7164a8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/cpufeature.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_CPUFEATURE_H_
+#define _LINUXKPI_LINUX_CPUFEATURE_H_
+
+/*
+ * Linux includes the following header. We don't have it on FreeBSD yet, so
+ * keep the include commented out for now. It is still referenced here
+ * because consumers of this header sometimes rely, knowingly or not, on
+ * the namespace pollution it brings in.
+ */
+/* #include <linux/init.h> */
+#include <linux/mod_devicetable.h>
+#include <asm/cpufeature.h>
+
+#endif /* _LINUXKPI_LINUX_CPUFEATURE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/crc32.h b/sys/compat/linuxkpi/common/include/linux/crc32.h
new file mode 100644
index 000000000000..e6d39fa7c5ff
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/crc32.h
@@ -0,0 +1,43 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_CRC32_H
+#define _LINUXKPI_LINUX_CRC32_H
+
+#include <sys/gsb_crc32.h>
+
+static __inline uint32_t
+crc32_le(uint32_t crc, const void *data, size_t len)
+{
+
+ return (crc32_raw(data, len, crc));
+}
+
+#endif /* _LINUXKPI_LINUX_CRC32_H */
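Usage note (not part of the diff): crc32_le() is a thin wrapper around crc32_raw(), so the seed convention (0 or ~0, with or without a final inversion) remains the caller's choice, as in Linux. A one-line hypothetical example:

    #include <linux/crc32.h>

    static uint32_t
    example_fw_checksum(const void *blob, size_t len)
    {
            return (crc32_le(~0U, blob, len));
    }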
diff --git a/sys/compat/linuxkpi/common/include/linux/dcache.h b/sys/compat/linuxkpi/common/include/linux/dcache.h
new file mode 100644
index 000000000000..992d6f7c2720
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dcache.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2017 Limelight Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_DCACHE_H
+#define _LINUXKPI_LINUX_DCACHE_H
+
+#include <sys/vnode.h>
+
+#include <fs/pseudofs/pseudofs.h>
+
+struct dentry {
+ struct vnode *d_inode;
+ struct pfs_node *d_pfs_node; /* FreeBSD specific field */
+};
+
+static inline struct vnode *
+d_inode(const struct dentry *dentry)
+{
+ return (dentry->d_inode);
+}
+
+#endif /* _LINUXKPI_LINUX_DCACHE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/debugfs.h b/sys/compat/linuxkpi/common/include/linux/debugfs.h
new file mode 100644
index 000000000000..4d146e085a7b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/debugfs.h
@@ -0,0 +1,124 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016-2018, Matthew Macy <mmacy@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_DEBUGFS_H_
+#define _LINUXKPI_LINUX_DEBUGFS_H_
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+MALLOC_DECLARE(M_DFSINT);
+
+struct debugfs_reg32 {
+ char *name;
+ unsigned long offset;
+};
+
+struct debugfs_regset32 {
+ const struct debugfs_reg32 *regs;
+ int nregs;
+};
+
+struct debugfs_blob_wrapper {
+ void *data;
+ size_t size;
+};
+
+static inline bool
+debugfs_initialized(void)
+{
+
+ return (true);
+}
+
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+/* TODO: We currently ignore the `file_size` argument. */
+struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
+
+struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
+    struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ const struct file_operations *fops_ro,
+ const struct file_operations *fops_wo);
+
+struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
+
+struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
+ const char *dest);
+
+struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
+
+void debugfs_remove(struct dentry *dentry);
+
+void debugfs_remove_recursive(struct dentry *dentry);
+
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt)
+
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ uint8_t *value);
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ uint16_t *value);
+void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
+ uint32_t *value);
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ uint64_t *value);
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ uint8_t *value);
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ uint16_t *value);
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ uint32_t *value);
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ uint64_t *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
+void debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent,
+ atomic_t *value);
+void debugfs_create_str(const char *name, umode_t mode, struct dentry *parent,
+ char **value);
+
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
+ struct dentry *parent, struct debugfs_blob_wrapper *value);
+
+#endif /* _LINUXKPI_LINUX_DEBUGFS_H_ */
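Usage note (not part of the diff): a minimal sketch of exporting a couple of driver counters through this debugfs shim; the example_stats structure and node names are hypothetical:

    #include <linux/debugfs.h>
    #include <linux/errno.h>

    struct example_stats {
            struct dentry   *dir;
            uint32_t        rx_frames;
            bool            enabled;
    };

    static int
    example_debugfs_attach(struct example_stats *st)
    {
            st->dir = debugfs_create_dir("example", NULL);
            if (st->dir == NULL)
                    return (-ENOMEM);
            debugfs_create_u32("rx_frames", 0444, st->dir, &st->rx_frames);
            debugfs_create_bool("enabled", 0644, st->dir, &st->enabled);
            return (0);
    }

    static void
    example_debugfs_detach(struct example_stats *st)
    {
            debugfs_remove_recursive(st->dir);
    }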
diff --git a/sys/compat/linuxkpi/common/include/linux/delay.h b/sys/compat/linuxkpi/common/include/linux/delay.h
new file mode 100644
index 000000000000..f19d1a759c26
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/delay.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
+ * Copyright (c) 2014 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_DELAY_H_
+#define _LINUXKPI_LINUX_DELAY_H_
+
+#include <linux/jiffies.h>
+#include <sys/systm.h>
+
+static inline void
+linux_msleep(unsigned int ms)
+{
+ /* guard against invalid values */
+ if (ms == 0)
+ ms = 1;
+ pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK);
+}
+
+#undef msleep
+#define msleep(ms) linux_msleep(ms)
+
+#undef msleep_interruptible
+#define msleep_interruptible(ms) linux_msleep_interruptible(ms)
+
+#define udelay(t) DELAY(t)
+
+static inline void
+mdelay(unsigned long msecs)
+{
+ while (msecs--)
+ DELAY(1000);
+}
+
+static inline void
+ndelay(unsigned long x)
+{
+ DELAY(howmany(x, 1000));
+}
+
+static inline void
+usleep_range(unsigned long min, unsigned long max)
+{
+ /* guard against invalid values */
+ if (min == 0)
+ min = 1;
+ pause_sbt("lnxsleep", ustosbt(min), 0, C_HARDCLOCK);
+}
+
+extern unsigned int linux_msleep_interruptible(unsigned int ms);
+
+static inline void
+fsleep(unsigned long us)
+{
+
+ if (us < 10)
+ udelay(us);
+ else
+ usleep_range(us, us);
+}
+
+#endif /* _LINUXKPI_LINUX_DELAY_H_ */
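Usage note (not part of the diff): udelay() busy-waits while msleep()/usleep_range() sleep, so register polling typically bounds a sleeping wait with a retry count. A minimal sketch with a hypothetical ready bit:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int
    example_wait_ready(volatile uint32_t *reg, unsigned int tries)
    {
            while (tries-- > 0) {
                    if ((*reg & 0x1) != 0)
                            return (0);
                    usleep_range(100, 200); /* sleeps; not for atomic context */
            }
            return (-ETIMEDOUT);
    }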
diff --git a/sys/compat/linuxkpi/common/include/linux/devcoredump.h b/sys/compat/linuxkpi/common/include/linux/devcoredump.h
new file mode 100644
index 000000000000..5fa06c6595a8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/devcoredump.h
@@ -0,0 +1,81 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_DEVCOREDUMP_H
+#define _LINUXKPI_LINUX_DEVCOREDUMP_H
+
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+
+static inline void
+_lkpi_dev_coredumpsg_free(struct scatterlist *table)
+{
+ struct scatterlist *iter;
+ struct page *p;
+ int i;
+
+ iter = table;
+ for_each_sg(table, iter, sg_nents(table), i) {
+ p = sg_page(iter);
+ if (p)
+ __free_page(p);
+ }
+
+ /* XXX what about chained tables? */
+ kfree(table);
+}
+
+static inline void
+dev_coredumpv(struct device *dev __unused, void *data, size_t datalen __unused,
+ gfp_t gfp __unused)
+{
+
+ /* UNIMPLEMENTED */
+ vfree(data);
+}
+
+static inline void
+dev_coredumpsg(struct device *dev __unused, struct scatterlist *table,
+ size_t datalen __unused, gfp_t gfp __unused)
+{
+
+ /* UNIMPLEMENTED */
+ _lkpi_dev_coredumpsg_free(table);
+}
+
+static inline void
+_devcd_free_sgtable(struct scatterlist *table)
+{
+ /* UNIMPLEMENTED */
+ _lkpi_dev_coredumpsg_free(table);
+}
+
+#endif /* _LINUXKPI_LINUX_DEVCOREDUMP_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/device.h b/sys/compat/linuxkpi/common/include/linux/device.h
new file mode 100644
index 000000000000..2556b0c45e49
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/device.h
@@ -0,0 +1,718 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2021-2022 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_DEVICE_H_
+#define _LINUXKPI_LINUX_DEVICE_H_
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/kdev_t.h>
+#include <linux/backlight.h>
+#include <linux/pm.h>
+#include <linux/idr.h>
+#include <linux/overflow.h>
+#include <linux/ratelimit.h> /* via linux/dev_printk.h */
+#include <linux/fwnode.h>
+#include <asm/atomic.h>
+
+#include <sys/bus.h>
+#include <sys/backlight.h>
+
+struct device;
+
+struct class {
+ const char *name;
+ struct kobject kobj;
+ devclass_t bsdclass;
+ const struct dev_pm_ops *pm;
+ const struct attribute_group **dev_groups;
+ void (*class_release)(struct class *class);
+ void (*dev_release)(struct device *dev);
+ char * (*devnode)(struct device *dev, umode_t *mode);
+};
+
+struct dev_pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*suspend_late)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*resume_early)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*freeze_late)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*thaw_early)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*poweroff_late)(struct device *dev);
+ int (*restore)(struct device *dev);
+ int (*restore_early)(struct device *dev);
+ int (*suspend_noirq)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_idle)(struct device *dev);
+};
+
+struct device_driver {
+ const char *name;
+ const struct dev_pm_ops *pm;
+
+ void (*shutdown) (struct device *);
+};
+
+struct device_type {
+ const char *name;
+};
+
+struct device {
+ struct device *parent;
+ struct list_head irqents;
+ device_t bsddev;
+ /*
+ * The following flag is used to determine if the LinuxKPI is
+ * responsible for detaching the BSD device or not. If the
+ * LinuxKPI got the BSD device using devclass_get_device(), it
+ * must not try to detach or delete it, because it's already
+ * done somewhere else.
+ */
+ bool bsddev_attached_here;
+ struct device_driver *driver;
+ struct device_type *type;
+ dev_t devt;
+ struct class *class;
+ void (*release)(struct device *dev);
+ struct kobject kobj;
+ void *dma_priv;
+ void *driver_data;
+ unsigned int irq;
+#define LINUX_IRQ_INVALID 65535
+ unsigned int irq_start;
+ unsigned int irq_end;
+ const struct attribute_group **groups;
+ struct fwnode_handle *fwnode;
+ struct cdev *backlight_dev;
+ struct backlight_device *bd;
+
+ spinlock_t devres_lock;
+ struct list_head devres_head;
+
+ struct dev_pm_info power;
+};
+
+extern struct device linux_root_device;
+extern struct kobject linux_class_root;
+extern const struct kobj_type linux_dev_ktype;
+extern const struct kobj_type linux_class_ktype;
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct class *, struct class_attribute *, char *);
+ ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t);
+ const void *(*namespace)(struct class *, const struct class_attribute *);
+};
+
+#define CLASS_ATTR(_name, _mode, _show, _store) \
+ struct class_attribute class_attr_##_name = \
+ { { #_name, NULL, _mode }, _show, _store }
+
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *,
+ struct device_attribute *, char *);
+ ssize_t (*store)(struct device *,
+ struct device_attribute *, const char *,
+ size_t);
+};
+
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+#define DEVICE_ATTR_RW(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+ struct class_attribute attr;
+ char *str;
+};
+
+static inline ssize_t
+show_class_attr_string(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ struct class_attribute_string *cs;
+ cs = container_of(attr, struct class_attribute_string, attr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
+}
+
+/* Currently read-only only */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+ struct class_attribute_string class_attr_##_name = \
+ _CLASS_ATTR_STRING(_name, _mode, _str)
+
+#define dev_printk(lvl, dev, fmt, ...) \
+ device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+
+#define dev_emerg(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define dev_dbg(dev, fmt, ...) do { } while (0)
+
+#define dev_WARN(dev, fmt, ...) \
+ device_printf((dev)->bsddev, "%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define dev_WARN_ONCE(dev, condition, fmt, ...) do { \
+ static bool __dev_WARN_ONCE; \
+ bool __ret_warn_on = (condition); \
+ if (unlikely(__ret_warn_on)) { \
+ if (!__dev_WARN_ONCE) { \
+ __dev_WARN_ONCE = true; \
+ device_printf((dev)->bsddev, "%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__); \
+ } \
+ } \
+} while (0)
+
+#define dev_info_once(dev, ...) do { \
+ static bool __dev_info_once; \
+ if (!__dev_info_once) { \
+ __dev_info_once = true; \
+ dev_info(dev, __VA_ARGS__); \
+ } \
+} while (0)
+
+#define dev_warn_once(dev, ...) do { \
+ static bool __dev_warn_once; \
+ if (!__dev_warn_once) { \
+ __dev_warn_once = 1; \
+ dev_warn(dev, __VA_ARGS__); \
+ } \
+} while (0)
+
+#define dev_err_once(dev, ...) do { \
+ static bool __dev_err_once; \
+ if (!__dev_err_once) { \
+ __dev_err_once = 1; \
+ dev_err(dev, __VA_ARGS__); \
+ } \
+} while (0)
+
+#define dev_dbg_once(dev, ...) do { \
+ static bool __dev_dbg_once; \
+ if (!__dev_dbg_once) { \
+ __dev_dbg_once = 1; \
+ dev_dbg(dev, __VA_ARGS__); \
+ } \
+} while (0)
+
+#define dev_err_ratelimited(dev, ...) do { \
+ static linux_ratelimit_t __ratelimited; \
+ if (linux_ratelimited(&__ratelimited)) \
+ dev_err(dev, __VA_ARGS__); \
+} while (0)
+
+#define dev_warn_ratelimited(dev, ...) do { \
+ static linux_ratelimit_t __ratelimited; \
+ if (linux_ratelimited(&__ratelimited)) \
+ dev_warn(dev, __VA_ARGS__); \
+} while (0)
+
+#define dev_dbg_ratelimited(dev, ...) do { \
+ static linux_ratelimit_t __ratelimited; \
+ if (linux_ratelimited(&__ratelimited)) \
+ dev_dbg(dev, __VA_ARGS__); \
+} while (0)
+
+/* Public and LinuxKPI internal devres functions. */
+void *lkpi_devres_alloc(void(*release)(struct device *, void *), size_t, gfp_t);
+void lkpi_devres_add(struct device *, void *);
+void lkpi_devres_free(void *);
+void *lkpi_devres_find(struct device *, void(*release)(struct device *, void *),
+ int (*match)(struct device *, void *, void *), void *);
+int lkpi_devres_destroy(struct device *, void(*release)(struct device *, void *),
+ int (*match)(struct device *, void *, void *), void *);
+#define devres_alloc(_r, _s, _g) lkpi_devres_alloc(_r, _s, _g)
+#define devres_add(_d, _p) lkpi_devres_add(_d, _p)
+#define devres_free(_p) lkpi_devres_free(_p)
+#define devres_find(_d, _rfn, _mfn, _mp) \
+ lkpi_devres_find(_d, _rfn, _mfn, _mp)
+#define devres_destroy(_d, _rfn, _mfn, _mp) \
+ lkpi_devres_destroy(_d, _rfn, _mfn, _mp)
+void lkpi_devres_release_free_list(struct device *);
+void lkpi_devres_unlink(struct device *, void *);
+void lkpi_devm_kmalloc_release(struct device *, void *);
+#define devm_kfree(_d, _p) lkpi_devm_kmalloc_release(_d, _p)
+
+static inline const char *
+dev_driver_string(const struct device *dev)
+{
+ driver_t *drv;
+ const char *str = "";
+
+ if (dev->bsddev != NULL) {
+ drv = device_get_driver(dev->bsddev);
+ if (drv != NULL)
+ str = drv->name;
+ }
+
+ return (str);
+}
+
+static inline void *
+dev_get_drvdata(const struct device *dev)
+{
+
+ return dev->driver_data;
+}
+
+static inline void
+dev_set_drvdata(struct device *dev, void *data)
+{
+
+ dev->driver_data = data;
+}
+
+static inline struct device *
+get_device(struct device *dev)
+{
+
+ if (dev)
+ kobject_get(&dev->kobj);
+
+ return (dev);
+}
+
+static inline char *
+dev_name(const struct device *dev)
+{
+
+ return kobject_name(&dev->kobj);
+}
+
+static inline bool
+dev_is_removable(struct device *dev)
+{
+
+ return (false);
+}
+
+#define dev_set_name(_dev, _fmt, ...) \
+ kobject_set_name(&(_dev)->kobj, (_fmt), ##__VA_ARGS__)
+
+static inline void
+put_device(struct device *dev)
+{
+
+ if (dev)
+ kobject_put(&dev->kobj);
+}
+
+struct class *lkpi_class_create(const char *name);
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60400
+#define class_create(name) lkpi_class_create(name)
+#else
+#define class_create(owner, name) lkpi_class_create(name)
+#endif
+
+static inline int
+class_register(struct class *class)
+{
+
+ class->bsdclass = devclass_create(class->name);
+ kobject_init(&class->kobj, &linux_class_ktype);
+ kobject_set_name(&class->kobj, class->name);
+ kobject_add(&class->kobj, &linux_class_root, class->name);
+
+ return (0);
+}
+
+static inline void
+class_unregister(struct class *class)
+{
+
+ kobject_put(&class->kobj);
+}
+
+static inline struct device *kobj_to_dev(struct kobject *kobj)
+{
+ return container_of(kobj, struct device, kobj);
+}
+
+struct device *device_create(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const char *fmt, ...);
+struct device *device_create_groups_vargs(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const struct attribute_group **groups,
+ const char *fmt, va_list args);
+
+/*
+ * Devices are registered and created for exporting to sysfs. Create
+ * implies register, and register assumes the device fields have been
+ * set up appropriately before it is called.
+ */
+static inline void
+device_initialize(struct device *dev)
+{
+ device_t bsddev = NULL;
+ int unit = -1;
+
+ if (dev->devt) {
+ unit = MINOR(dev->devt);
+ bsddev = devclass_get_device(dev->class->bsdclass, unit);
+ dev->bsddev_attached_here = false;
+ } else if (dev->parent == NULL) {
+ bsddev = devclass_get_device(dev->class->bsdclass, 0);
+ dev->bsddev_attached_here = false;
+ } else {
+ dev->bsddev_attached_here = true;
+ }
+
+ if (bsddev == NULL && dev->parent != NULL) {
+ bsddev = device_add_child(dev->parent->bsddev,
+ dev->class->kobj.name, unit);
+ }
+
+ if (bsddev != NULL)
+ device_set_softc(bsddev, dev);
+
+ dev->bsddev = bsddev;
+ MPASS(dev->bsddev != NULL);
+ kobject_init(&dev->kobj, &linux_dev_ktype);
+
+ spin_lock_init(&dev->devres_lock);
+ INIT_LIST_HEAD(&dev->devres_head);
+}
+
+static inline int
+device_add(struct device *dev)
+{
+ if (dev->bsddev != NULL) {
+ if (dev->devt == 0)
+ dev->devt = makedev(0, device_get_unit(dev->bsddev));
+ }
+ kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev));
+
+ if (dev->groups)
+ return (sysfs_create_groups(&dev->kobj, dev->groups));
+
+ return (0);
+}
+
+static inline void
+device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static inline struct device *
+device_create_with_groups(struct class *class,
+ struct device *parent, dev_t devt, void *drvdata,
+ const struct attribute_group **groups, const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = device_create_groups_vargs(class, parent, devt, drvdata,
+ groups, fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+
+static inline bool
+device_is_registered(struct device *dev)
+{
+
+ return (dev->bsddev != NULL);
+}
+
+static inline int
+device_register(struct device *dev)
+{
+ device_t bsddev = NULL;
+ int unit = -1;
+
+ if (device_is_registered(dev))
+ goto done;
+
+ if (dev->devt) {
+ unit = MINOR(dev->devt);
+ bsddev = devclass_get_device(dev->class->bsdclass, unit);
+ dev->bsddev_attached_here = false;
+ } else if (dev->parent == NULL) {
+ bsddev = devclass_get_device(dev->class->bsdclass, 0);
+ dev->bsddev_attached_here = false;
+ } else {
+ dev->bsddev_attached_here = true;
+ }
+ if (bsddev == NULL && dev->parent != NULL) {
+ bsddev = device_add_child(dev->parent->bsddev,
+ dev->class->kobj.name, unit);
+ }
+ if (bsddev != NULL) {
+ if (dev->devt == 0)
+ dev->devt = makedev(0, device_get_unit(bsddev));
+ device_set_softc(bsddev, dev);
+ }
+ dev->bsddev = bsddev;
+done:
+ kobject_init(&dev->kobj, &linux_dev_ktype);
+ kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev));
+
+ sysfs_create_groups(&dev->kobj, dev->class->dev_groups);
+
+ return (0);
+}
+
+static inline void
+device_unregister(struct device *dev)
+{
+ device_t bsddev;
+
+ sysfs_remove_groups(&dev->kobj, dev->class->dev_groups);
+
+ bsddev = dev->bsddev;
+ dev->bsddev = NULL;
+
+ if (bsddev != NULL && dev->bsddev_attached_here) {
+ bus_topo_lock();
+ device_delete_child(device_get_parent(bsddev), bsddev);
+ bus_topo_unlock();
+ }
+ put_device(dev);
+}
+
+static inline void
+device_del(struct device *dev)
+{
+ device_t bsddev;
+
+ bsddev = dev->bsddev;
+ dev->bsddev = NULL;
+
+ if (bsddev != NULL && dev->bsddev_attached_here) {
+ bus_topo_lock();
+ device_delete_child(device_get_parent(bsddev), bsddev);
+ bus_topo_unlock();
+ }
+}
+
+static inline void
+device_destroy(struct class *class, dev_t devt)
+{
+ device_t bsddev;
+ int unit;
+
+ unit = MINOR(devt);
+ bsddev = devclass_get_device(class->bsdclass, unit);
+ if (bsddev != NULL)
+ device_unregister(device_get_softc(bsddev));
+}
+
+static inline void
+device_release_driver(struct device *dev)
+{
+
+#if 0
+ /* This leads to panics. Disabled temporarily; kept here for a later rework. */
+
+ /* We also need to cleanup LinuxKPI bits. What else? */
+ lkpi_devres_release_free_list(dev);
+ dev_set_drvdata(dev, NULL);
+ /* Do not call dev->release! */
+
+ bus_topo_lock();
+ if (device_is_attached(dev->bsddev))
+ device_detach(dev->bsddev);
+ bus_topo_unlock();
+#endif
+}
+
+static inline int
+device_reprobe(struct device *dev)
+{
+ int error;
+
+ device_release_driver(dev);
+ bus_topo_lock();
+ error = device_probe_and_attach(dev->bsddev);
+ bus_topo_unlock();
+
+ return (-error);
+}
+
+static inline void
+device_set_wakeup_enable(struct device *dev __unused, bool enable __unused)
+{
+
+ /*
+ * XXX-BZ TODO This is used by wireless drivers supporting WoWLAN which
+ * we currently do not support.
+ */
+}
+
+static inline int
+device_wakeup_enable(struct device *dev)
+{
+
+ device_set_wakeup_enable(dev, true);
+ return (0);
+}
+
+static inline bool
+device_iommu_mapped(struct device *dev __unused)
+{
+ return (false);
+}
+
+#define dev_pm_set_driver_flags(dev, flags) do { \
+} while (0)
+
+static inline void
+linux_class_kfree(struct class *class)
+{
+
+ kfree(class);
+}
+
+static inline void
+class_destroy(struct class *class)
+{
+
+ if (class == NULL)
+ return;
+ class_unregister(class);
+}
+
+static inline int
+device_create_file(struct device *dev, const struct device_attribute *attr)
+{
+
+ if (dev)
+ return sysfs_create_file(&dev->kobj, &attr->attr);
+ return -EINVAL;
+}
+
+static inline void
+device_remove_file(struct device *dev, const struct device_attribute *attr)
+{
+
+ if (dev)
+ sysfs_remove_file(&dev->kobj, &attr->attr);
+}
+
+static inline int
+class_create_file(struct class *class, const struct class_attribute *attr)
+{
+
+ if (class)
+ return sysfs_create_file(&class->kobj, &attr->attr);
+ return -EINVAL;
+}
+
+static inline void
+class_remove_file(struct class *class, const struct class_attribute *attr)
+{
+
+ if (class)
+ sysfs_remove_file(&class->kobj, &attr->attr);
+}
+
+#define dev_to_node(dev) linux_dev_to_node(dev)
+#define of_node_to_nid(node) -1
+int linux_dev_to_node(struct device *);
+
+char *kvasprintf(gfp_t, const char *, va_list);
+char *kasprintf(gfp_t, const char *, ...);
+char *lkpi_devm_kasprintf(struct device *, gfp_t, const char *, ...);
+
+#define devm_kasprintf(_dev, _gfp, _fmt, ...) \
+ lkpi_devm_kasprintf(_dev, _gfp, _fmt, ##__VA_ARGS__)
+
+static __inline void *
+devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ void *p;
+
+ p = lkpi_devres_alloc(lkpi_devm_kmalloc_release, size, gfp);
+ if (p != NULL)
+ lkpi_devres_add(dev, p);
+
+ return (p);
+}
+
+static inline void *
+devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ void *dst;
+
+ if (len == 0)
+ return (NULL);
+
+ dst = devm_kmalloc(dev, len, gfp);
+ if (dst != NULL)
+ memcpy(dst, src, len);
+
+ return (dst);
+}
+
+#define devm_kzalloc(_dev, _size, _gfp) \
+ devm_kmalloc((_dev), (_size), (_gfp) | __GFP_ZERO)
+
+#define devm_kcalloc(_dev, _sizen, _size, _gfp) \
+ devm_kmalloc((_dev), ((_sizen) * (_size)), (_gfp) | __GFP_ZERO)
+
+int lkpi_devm_add_action(struct device *dev, void (*action)(void *), void *data);
+#define devm_add_action(dev, action, data) \
+ lkpi_devm_add_action(dev, action, data)
+int lkpi_devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data);
+#define devm_add_action_or_reset(dev, action, data) \
+ lkpi_devm_add_action_or_reset(dev, action, data)
+
+int lkpi_devm_device_add_group(struct device *dev, const struct attribute_group *group);
+#define devm_device_add_group(dev, group) \
+ lkpi_devm_device_add_group(dev, group)
+
+#endif /* _LINUXKPI_LINUX_DEVICE_H_ */
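Usage note (not part of the diff): devm_kzalloc() and devm_add_action_or_reset() tie allocations and teardown callbacks to the device's devres list, so nothing needs to be freed by hand on the error paths. A probe-style sketch with hypothetical names:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct example_priv {
            void    *regs;
    };

    static void
    example_disable_hw(void *arg)
    {
            struct example_priv *priv = arg;

            /* ... quiesce the (hypothetical) hardware described by priv ... */
            (void)priv;
    }

    static int
    example_probe(struct device *dev)
    {
            struct example_priv *priv;
            int error;

            priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
            if (priv == NULL)
                    return (-ENOMEM);

            error = devm_add_action_or_reset(dev, example_disable_hw, priv);
            if (error != 0)
                    return (error);

            dev_set_drvdata(dev, priv);
            dev_info(dev, "example device attached\n");
            return (0);
    }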
diff --git a/sys/compat/linuxkpi/common/include/linux/device/driver.h b/sys/compat/linuxkpi/common/include/linux/device/driver.h
new file mode 100644
index 000000000000..03b510c9c8b7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/device/driver.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Bjoern A. Zeeb
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#ifndef LINUXKPI_LINUX_DEVICE_DRIVER_H
+#define LINUXKPI_LINUX_DEVICE_DRIVER_H
+
+#include <sys/cdefs.h>
+#include <linux/module.h>
+
+#define module_driver(_drv, _regf, _unregf) \
+static inline int \
+__CONCAT(__CONCAT(_, _drv), _init)(void) \
+{ \
+ return (_regf(&(_drv))); \
+} \
+ \
+static inline void \
+__CONCAT(__CONCAT(_, _drv), _exit)(void) \
+{ \
+ _unregf(&(_drv)); \
+} \
+ \
+module_init(__CONCAT(__CONCAT(_, _drv), _init)); \
+module_exit(__CONCAT(__CONCAT(_, _drv), _exit))
+
+#endif /* LINUXKPI_LINUX_DEVICE_DRIVER_H */
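Usage note (not part of the diff): module_driver() stamps out the module init/exit boilerplate for a driver object, given its register and unregister helpers. A sketch with a hypothetical bus:

    #include <linux/device/driver.h>

    struct example_driver {
            const char *name;
    };

    static int
    example_register(struct example_driver *drv)
    {
            return (0);     /* hypothetical bus registration */
    }

    static void
    example_unregister(struct example_driver *drv)
    {
    }

    static struct example_driver example_drv = {
            .name = "example",
    };

    /* Expands to module_init()/module_exit() handlers wrapping the two helpers. */
    module_driver(example_drv, example_register, example_unregister);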
diff --git a/sys/compat/linuxkpi/common/include/linux/dma-attrs.h b/sys/compat/linuxkpi/common/include/linux/dma-attrs.h
new file mode 100644
index 000000000000..c9cfa9b621d5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dma-attrs.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_DMA_ATTR_H_
+#define _LINUXKPI_LINUX_DMA_ATTR_H_
+
+#define DMA_ATTR_WRITE_BARRIER (1 << 0)
+#define DMA_ATTR_WEAK_ORDERING (1 << 1)
+#define DMA_ATTR_WRITE_COMBINE (1 << 2)
+#define DMA_ATTR_NON_CONSISTENT (1 << 3)
+#define DMA_ATTR_NO_KERNEL_MAPPING (1 << 4)
+#define DMA_ATTR_SKIP_CPU_SYNC (1 << 5)
+#define DMA_ATTR_FORCE_CONTIGUOUS (1 << 6)
+#define DMA_ATTR_ALLOC_SINGLE_PAGES (1 << 7)
+#define DMA_ATTR_NO_WARN (1 << 8)
+#define DMA_ATTR_PRIVILEGED (1 << 9)
+
+struct dma_attrs {
+ unsigned long flags;
+};
+#define DEFINE_DMA_ATTRS(x) struct dma_attrs x = { }
+
+static inline void
+init_dma_attrs(struct dma_attrs *attrs)
+{
+ attrs->flags = 0;
+}
+
+#endif /* _LINUXKPI_LINUX_DMA_ATTR_H_ */
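Usage note (not part of the diff): the DMA_ATTR_* bits are plain flags; in the newer interface they are usually OR'ed together and passed as the `unsigned long attrs` argument of the mapping routines rather than wrapped in struct dma_attrs. A tiny hypothetical helper:

    #include <linux/types.h>
    #include <linux/dma-attrs.h>

    static unsigned long
    example_streaming_attrs(bool skip_cpu_sync)
    {
            unsigned long attrs;

            attrs = DMA_ATTR_NO_WARN;
            if (skip_cpu_sync)
                    attrs |= DMA_ATTR_SKIP_CPU_SYNC;
            return (attrs);
    }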
diff --git a/sys/compat/linuxkpi/common/include/linux/dma-buf-map.h b/sys/compat/linuxkpi/common/include/linux/dma-buf-map.h
new file mode 100644
index 000000000000..567ce3b072b3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dma-buf-map.h
@@ -0,0 +1,91 @@
+/* Public domain. */
+
+#ifndef _LINUX_DMA_BUF_MAP_H
+#define _LINUX_DMA_BUF_MAP_H
+
+#include <linux/io.h>
+#include <linux/string.h>
+
+struct dma_buf_map {
+ union {
+ void *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+static inline void
+dma_buf_map_incr(struct dma_buf_map *dbm, size_t n)
+{
+ if (dbm->is_iomem)
+ dbm->vaddr_iomem += n;
+ else
+ dbm->vaddr += n;
+}
+
+static inline void
+dma_buf_map_memcpy_to(struct dma_buf_map *dbm, const void *src, size_t len)
+{
+ if (dbm->is_iomem)
+ memcpy_toio(dbm->vaddr_iomem, src, len);
+ else
+ memcpy(dbm->vaddr, src, len);
+}
+
+static inline bool
+dma_buf_map_is_null(const struct dma_buf_map *dbm)
+{
+ if (dbm->is_iomem)
+ return (dbm->vaddr_iomem == NULL);
+ else
+ return (dbm->vaddr == NULL);
+}
+
+static inline bool
+dma_buf_map_is_set(const struct dma_buf_map *dbm)
+{
+ if (dbm->is_iomem)
+ return (dbm->vaddr_iomem != NULL);
+ else
+ return (dbm->vaddr != NULL);
+}
+
+static inline bool
+dma_buf_map_is_equal(
+ const struct dma_buf_map *dbm_a, const struct dma_buf_map *dbm_b)
+{
+ if (dbm_a->is_iomem != dbm_b->is_iomem)
+ return (false);
+
+ if (dbm_a->is_iomem)
+ return (dbm_a->vaddr_iomem == dbm_b->vaddr_iomem);
+ else
+ return (dbm_a->vaddr == dbm_b->vaddr);
+}
+
+static inline void
+dma_buf_map_clear(struct dma_buf_map *dbm)
+{
+ if (dbm->is_iomem) {
+ dbm->vaddr_iomem = NULL;
+ dbm->is_iomem = false;
+ } else {
+ dbm->vaddr = NULL;
+ }
+}
+
+static inline void
+dma_buf_map_set_vaddr_iomem(struct dma_buf_map *dbm, void *addr)
+{
+ dbm->vaddr_iomem = addr;
+ dbm->is_iomem = true;
+}
+
+static inline void
+dma_buf_map_set_vaddr(struct dma_buf_map *dbm, void *addr)
+{
+ dbm->vaddr = addr;
+ dbm->is_iomem = false;
+}
+
+#endif
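Usage note (not part of the diff): because struct dma_buf_map records whether it wraps I/O memory, one copy helper can serve both mapping types. A minimal sketch with hypothetical names:

    #include <linux/dma-buf-map.h>

    static void
    example_copy_to_map(const struct dma_buf_map *map, size_t offset,
        const void *src, size_t len)
    {
            struct dma_buf_map dst = *map;

            if (dma_buf_map_is_null(&dst))
                    return;
            dma_buf_map_incr(&dst, offset);
            dma_buf_map_memcpy_to(&dst, src, len);
    }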
diff --git a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
new file mode 100644
index 000000000000..2d8e1196d3d3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
@@ -0,0 +1,399 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_DMA_MAPPING_H_
+#define _LINUXKPI_LINUX_DMA_MAPPING_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/dma-attrs.h>
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+#include <linux/page.h>
+#include <linux/sizes.h>
+
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/uma_align_mask.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct dma_map_ops {
+ void* (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir);
+ void (*sync_single_range_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_single_range_for_device)(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ int is_phys;
+};
+
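+/*
+ * Written as (2 << (n - 1)) - 1 so that n == 64 produces an all-ones
+ * mask without an undefined 64-bit shift.
+ */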
+#define DMA_BIT_MASK(n) ((2ULL << ((n) - 1)) - 1ULL)
+
+int linux_dma_tag_init(struct device *, u64);
+int linux_dma_tag_init_coherent(struct device *, u64);
+void *linux_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+void *linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len); /* backward compat */
+dma_addr_t lkpi_dma_map_phys(struct device *, vm_paddr_t, size_t,
+ enum dma_data_direction, unsigned long);
+void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t size); /* backward compat */
+void lkpi_dma_unmap(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction, unsigned long);
+int linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ unsigned long attrs __unused);
+void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents __unused, enum dma_data_direction direction,
+ unsigned long attrs __unused);
+void linuxkpi_dma_sync(struct device *, dma_addr_t, size_t, bus_dmasync_op_t);
+
+static inline int
+dma_supported(struct device *dev, u64 dma_mask)
+{
+
+ /* XXX busdma takes care of this elsewhere. */
+ return (1);
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+
+ if (!dev->dma_priv || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ return (linux_dma_tag_init(dev, dma_mask));
+}
+
+static inline int
+dma_set_coherent_mask(struct device *dev, u64 dma_mask)
+{
+
+ if (!dev->dma_priv || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ return (linux_dma_tag_init_coherent(dev, dma_mask));
+}
+
+static inline int
+dma_set_mask_and_coherent(struct device *dev, u64 dma_mask)
+{
+ int r;
+
+ r = dma_set_mask(dev, dma_mask);
+ if (r == 0)
+ dma_set_coherent_mask(dev, dma_mask);
+ return (r);
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ return (linux_dma_alloc_coherent(dev, size, dma_handle, flag));
+}
+
+static inline void *
+dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+
+ return (dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO));
+}
+
+static inline void *
+dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+
+ return (linuxkpi_dmam_alloc_coherent(dev, size, dma_handle, flag));
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+
+ lkpi_dma_unmap(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
+ kmem_free(cpu_addr, size);
+}
+
+static inline dma_addr_t
+dma_map_page_attrs(struct device *dev, struct page *page, size_t offset,
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
+{
+
+ return (lkpi_dma_map_phys(dev, page_to_phys(page) + offset, size,
+ direction, attrs));
+}
+
+/* linux_dma_(un)map_sg_attrs does not support attrs yet */
+#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
+ linux_dma_map_sg_attrs(dev, sgl, nents, dir, 0)
+
+#define dma_unmap_sg_attrs(dev, sg, nents, dir, attrs) \
+ linux_dma_unmap_sg_attrs(dev, sg, nents, dir, 0)
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+
+ return (lkpi_dma_map_phys(dev, page_to_phys(page) + offset, size,
+ direction, 0));
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+
+ lkpi_dma_unmap(dev, dma_address, size, direction, 0);
+}
+
+static inline dma_addr_t
+dma_map_resource(struct device *dev, phys_addr_t paddr, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ return (lkpi_dma_map_phys(dev, paddr, size, direction, attrs));
+}
+
+static inline void
+dma_unmap_resource(struct device *dev, dma_addr_t dma, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ lkpi_dma_unmap(dev, dma, size, direction, attrs);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma, size_t size,
+ enum dma_data_direction direction)
+{
+ bus_dmasync_op_t op;
+
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
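+ /*
+ * For bidirectional mappings issue a POSTREAD sync here, then
+ * break so that the common sync below adds a PREREAD.
+ */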
+ op = BUS_DMASYNC_POSTREAD;
+ linuxkpi_dma_sync(dev, dma, size, op);
+ op = BUS_DMASYNC_PREREAD;
+ break;
+ case DMA_TO_DEVICE:
+ op = BUS_DMASYNC_POSTWRITE;
+ break;
+ case DMA_FROM_DEVICE:
+ op = BUS_DMASYNC_POSTREAD;
+ break;
+ default:
+ return;
+ }
+
+ linuxkpi_dma_sync(dev, dma, size, op);
+}
+
+static inline void
+dma_sync_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma,
+ size_t size, enum dma_data_direction direction)
+{
+ bus_dmasync_op_t op;
+
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
+ op = BUS_DMASYNC_PREWRITE;
+ break;
+ case DMA_TO_DEVICE:
+ op = BUS_DMASYNC_PREREAD;
+ break;
+ case DMA_FROM_DEVICE:
+ op = BUS_DMASYNC_PREWRITE;
+ break;
+ default:
+ return;
+ }
+
+ linuxkpi_dma_sync(dev, dma, size, op);
+}
+
+/* (20250329) These four stubs appear to be unused. */
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ pr_debug("%s:%d: TODO dir %d\n", __func__, __LINE__, direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ pr_debug("%s:%d: TODO dir %d\n", __func__, __LINE__, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ pr_debug("%s:%d: TODO dir %d\n", __func__, __LINE__, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ pr_debug("%s:%d: TODO dir %d\n", __func__, __LINE__, direction);
+}
+
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+
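+ /* A DMA address of 0 is also treated as a mapping error. */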
+ if (dma_addr == 0 || dma_addr == DMA_MAPPING_ERROR)
+ return (-ENOMEM);
+ return (0);
+}
+
+static inline unsigned int
+dma_set_max_seg_size(struct device *dev, unsigned int size)
+{
+ return (0);
+}
+
+static inline dma_addr_t
+_dma_map_single_attrs(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ return (lkpi_dma_map_phys(dev, vtophys(ptr), size,
+ direction, attrs));
+}
+
+static inline void
+_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma, size_t size,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ lkpi_dma_unmap(dev, dma, size, direction, attrs);
+}
+
+static inline size_t
+dma_max_mapping_size(struct device *dev)
+{
+
+ return (SCATTERLIST_MAX_SEGMENT);
+}
+
+#define dma_map_single_attrs(dev, ptr, size, dir, attrs) \
+ _dma_map_single_attrs(dev, ptr, size, dir, 0)
+
+#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
+ _dma_unmap_single_attrs(dev, dma_addr, size, dir, 0)
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
+
+#define DEFINE_DMA_UNMAP_ADDR(name) dma_addr_t name
+#define DEFINE_DMA_UNMAP_LEN(name) __u32 name
+#define dma_unmap_addr(p, name) ((p)->name)
+#define dma_unmap_addr_set(p, name, v) (((p)->name) = (v))
+#define dma_unmap_len(p, name) ((p)->name)
+#define dma_unmap_len_set(p, name, v) (((p)->name) = (v))
+
+#define dma_get_cache_alignment() (uma_get_cache_align_mask() + 1)
+
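+/*
+ * Map an entire sg_table; on success sgt->nents is updated to the
+ * number of entries actually mapped.
+ */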
+static inline int
+dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int nents;
+
+ nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->nents, dir, attrs);
+ if (nents < 0)
+ return (nents);
+ sgt->nents = nents;
+ return (0);
+}
+
+static inline void
+dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->nents, dir, attrs);
+}
+
+#endif /* _LINUXKPI_LINUX_DMA_MAPPING_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/dmapool.h b/sys/compat/linuxkpi/common/include/linux/dmapool.h
new file mode 100644
index 000000000000..8501a32e30b7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dmapool.h
@@ -0,0 +1,103 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_DMAPOOL_H_
+#define _LINUXKPI_LINUX_DMAPOOL_H_
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+struct dma_pool;
+struct dma_pool *linux_dma_pool_create(char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary);
+void linux_dma_pool_destroy(struct dma_pool *pool);
+void lkpi_dmam_pool_destroy(struct device *, void *);
+void *linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle);
+void linux_dma_pool_free(struct dma_pool *pool, void *vaddr,
+ dma_addr_t dma_addr);
+
+static inline struct dma_pool *
+dma_pool_create(char *name, struct device *dev, size_t size,
+ size_t align, size_t boundary)
+{
+
+ return (linux_dma_pool_create(name, dev, size, align, boundary));
+}
+
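+/*
+ * Managed variant: the pool is registered as a device resource and is
+ * destroyed through lkpi_dmam_pool_destroy() when the device goes away.
+ */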
+static inline struct dma_pool *
+dmam_pool_create(/* const */ char *name, struct device *dev, size_t size,
+ size_t align, size_t boundary)
+{
+ struct dma_pool **pp;
+
+ pp = devres_alloc(lkpi_dmam_pool_destroy, sizeof(*pp), GFP_KERNEL);
+ if (pp == NULL)
+ return (NULL);
+ *pp = linux_dma_pool_create(name, dev, size, align, boundary);
+ if (*pp == NULL) {
+ devres_free(pp);
+ return (NULL);
+ }
+
+ devres_add(dev, pp);
+ return (*pp);
+}
+
+static inline void
+dma_pool_destroy(struct dma_pool *pool)
+{
+
+ linux_dma_pool_destroy(pool);
+}
+
+static inline void *
+dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
+{
+
+ return (linux_dma_pool_alloc(pool, mem_flags, handle));
+}
+
+static inline void *
+dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
+{
+
+ return (dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle));
+}
+
+static inline void
+dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
+{
+
+ linux_dma_pool_free(pool, vaddr, dma_addr);
+}
+
+#endif /* _LINUXKPI_LINUX_DMAPOOL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/dmi.h b/sys/compat/linuxkpi/common/include/linux/dmi.h
new file mode 100644
index 000000000000..d9760ee0324f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dmi.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __LINUXKPI_LINUX_DMI_H__
+#define __LINUXKPI_LINUX_DMI_H__
+
+#include <sys/types.h>
+#include <linux/mod_devicetable.h>
+
+struct dmi_header {
+ uint8_t type;
+ uint8_t length;
+ uint16_t handle;
+};
+
+int linux_dmi_check_system(const struct dmi_system_id *);
+bool linux_dmi_match(enum dmi_field, const char *);
+const struct dmi_system_id *linux_dmi_first_match(const struct dmi_system_id *);
+const char *linux_dmi_get_system_info(int);
+
+#define dmi_check_system(sysid) linux_dmi_check_system(sysid)
+#define dmi_match(f, str) linux_dmi_match(f, str)
+#define dmi_first_match(sysid) linux_dmi_first_match(sysid)
+#define dmi_get_system_info(sysid) linux_dmi_get_system_info(sysid)
+
+static inline int
+dmi_walk(void (*callbackf)(const struct dmi_header *, void *), void *arg)
+{
+
+ return (-ENXIO);
+}
+
+#endif /* __LINUXKPI_LINUX_DMI_H__ */
diff --git a/sys/compat/linuxkpi/common/include/linux/dynamic_debug.h b/sys/compat/linuxkpi/common/include/linux/dynamic_debug.h
new file mode 100644
index 000000000000..12915eec3b68
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/dynamic_debug.h
@@ -0,0 +1,8 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_DYNAMIC_DEBUG_H
+#define _LINUXKPI_LINUX_DYNAMIC_DEBUG_H
+
+#define DECLARE_DYNDBG_CLASSMAP(a, b, c, ...)
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/efi.h b/sys/compat/linuxkpi/common/include/linux/efi.h
new file mode 100644
index 000000000000..aa33371bd0e8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/efi.h
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_EFI_H_
+#define _LINUXKPI_LINUX_EFI_H_
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/linker.h>
+#include <sys/systm.h>
+
+#include <machine/md_var.h>
+#include <machine/metadata.h>
+
+#define EFI_BOOT 0
+
+static inline bool
+__efi_enabled(int feature)
+{
+ bool enabled = false;
+
+ switch (feature) {
+ case EFI_BOOT:
+#ifdef __amd64__
+ /* Use cached value on amd64 */
+ enabled = efi_boot;
+#elif defined(MODINFOMD_EFI_MAP)
+ enabled = preload_search_info(preload_kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL;
+#endif
+ break;
+ default:
+ break;
+ }
+
+ return (enabled);
+}
+
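+/* Only EFI_BOOT is accepted; any other feature fails to compile. */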
+#define efi_enabled(x) ({ \
+ _Static_assert((x) == EFI_BOOT, "unsupported feature"); \
+ __efi_enabled(x); \
+})
+
+#endif /* _LINUXKPI_LINUX_EFI_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/err.h b/sys/compat/linuxkpi/common/include/linux/err.h
new file mode 100644
index 000000000000..3d19949e641e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/err.h
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_ERR_H_
+#define _LINUXKPI_LINUX_ERR_H_
+
+#include <sys/types.h>
+
+#include <linux/compiler.h>
+
+#define MAX_ERRNO 4095
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (uintptr_t)-MAX_ERRNO)
+
+static inline void *
+ERR_PTR(long error)
+{
+ return (void *)(intptr_t)error;
+}
+
+static inline long
+PTR_ERR(const void *ptr)
+{
+ return (intptr_t)ptr;
+}
+
+static inline bool
+IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((uintptr_t)ptr);
+}
+
+static inline bool
+IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((uintptr_t)ptr);
+}
+
+static inline void *
+ERR_CAST(const void *ptr)
+{
+ return __DECONST(void *, ptr);
+}
+
+static inline int
+PTR_ERR_OR_ZERO(const void *ptr)
+{
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+ else
+ return 0;
+}
+
+#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
+
+#endif /* _LINUXKPI_LINUX_ERR_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/errno.h b/sys/compat/linuxkpi/common/include/linux/errno.h
new file mode 100644
index 000000000000..d634675d43d0
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/errno.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_ERRNO_H_
+#define _LINUXKPI_LINUX_ERRNO_H_
+
+#include <sys/errno.h>
+
+#define EBADRQC 56 /* Bad request code */
+#define EBADSLT 57 /* Invalid slot */
+#define ENOKEY 126 /* Required key not available */
+
+#define ECHRNG EDOM
+#define ETIME ETIMEDOUT
+#define ECOMM ESTALE
+#define ENODATA ECONNREFUSED
+#define ENOIOCTLCMD ENOIOCTL
+/* Use the same value as Linux, because BSD's ERESTART is negative. */
+#define ERESTARTSYS 512
+#define ENOTSUPP EOPNOTSUPP
+#define ENONET EHOSTDOWN
+#define EHWPOISON 133 /* Memory page hardware error */
+
+/*
+ * The error numbers below are arbitrary and do not resemble the numbers
+ * used in Linux. They should not be returned to user space.
+ */
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514
+#define ERESTART_RESTARTBLOCK 516
+#define EPROBE_DEFER 517
+#define EOPENSTALE 518
+#define EBADHANDLE 521
+#define ENOTSYNC 522
+#define EBADCOOKIE 523
+#define ETOOSMALL 525
+#define ESERVERFAULT 526
+#define EBADTYPE 527
+#define EJUKEBOX 528
+#define EIOCBQUEUED 529
+#define ERFKILL 530
+#define EBADE 531
+#define ENOMEDIUM 532
+#define ENOSR 533
+#define ELNRNG 534
+#define ENAVAIL 535
+
+#endif /* _LINUXKPI_LINUX_ERRNO_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/etherdevice.h b/sys/compat/linuxkpi/common/include/linux/etherdevice.h
new file mode 100644
index 000000000000..1f2d6cf22d7e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/etherdevice.h
@@ -0,0 +1,140 @@
+/*-
+ * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_ETHERDEVICE_H_
+#define _LINUXKPI_LINUX_ETHERDEVICE_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include <sys/random.h>
+#include <sys/libkern.h>
+
+#define ETH_MODULE_SFF_8079 1
+#define ETH_MODULE_SFF_8079_LEN 256
+#define ETH_MODULE_SFF_8472 2
+#define ETH_MODULE_SFF_8472_LEN 512
+#define ETH_MODULE_SFF_8636 3
+#define ETH_MODULE_SFF_8636_LEN 256
+#define ETH_MODULE_SFF_8436 4
+#define ETH_MODULE_SFF_8436_LEN 256
+
+struct ethtool_eeprom {
+ u32 offset;
+ u32 len;
+};
+
+struct ethtool_modinfo {
+ u32 type;
+ u32 eeprom_len;
+};
+
+static inline bool
+is_zero_ether_addr(const u8 * addr)
+{
+ return ((addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]) ==
+ 0x00);
+}
+
+static inline bool
+is_unicast_ether_addr(const u8 * addr)
+{
+ return ((addr[0] & 0x01) == 0x00);
+}
+
+static inline bool
+is_multicast_ether_addr(const u8 * addr)
+{
+ return ((addr[0] & 0x01) == 0x01);
+}
+
+static inline bool
+is_broadcast_ether_addr(const u8 * addr)
+{
+ return ((addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) ==
+ 0xff);
+}
+
+static inline bool
+is_valid_ether_addr(const u8 * addr)
+{
+ return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
+}
+
+static inline void
+ether_addr_copy(u8 * dst, const u8 * src)
+{
+ memcpy(dst, src, 6);
+}
+
+static inline bool
+ether_addr_equal_unaligned(const u8 *pa, const u8 *pb)
+{
+ return (memcmp(pa, pb, 6) == 0);
+}
+#define ether_addr_equal(_pa, _pb) ether_addr_equal_unaligned(_pa, _pb)
+
+static inline bool
+ether_addr_equal_64bits(const u8 *pa, const u8 *pb)
+{
+ return (memcmp(pa, pb, 6) == 0);
+}
+
+static inline void
+eth_broadcast_addr(u8 *pa)
+{
+ memset(pa, 0xff, 6);
+}
+
+static inline void
+eth_zero_addr(u8 *pa)
+{
+ memset(pa, 0, 6);
+}
+
+static inline void
+random_ether_addr(u8 *dst)
+{
+ arc4random_buf(dst, 6);
+
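+ /* Clear the multicast bit and set the locally administered bit. */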
+ dst[0] &= 0xfe;
+ dst[0] |= 0x02;
+}
+
+static inline void
+eth_random_addr(u8 *dst)
+{
+
+ random_ether_addr(dst);
+}
+
+static inline int
+device_get_mac_address(struct device *dev, char *dst)
+{
+
+ /* XXX get mac address from FDT? */
+ return (-ENOENT);
+}
+
+#endif /* _LINUXKPI_LINUX_ETHERDEVICE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/ethtool.h b/sys/compat/linuxkpi/common/include/linux/ethtool.h
new file mode 100644
index 000000000000..f5567cd7ea40
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ethtool.h
@@ -0,0 +1,57 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_ETHTOOL_H_
+#define _LINUXKPI_LINUX_ETHTOOL_H_
+
+#include <linux/types.h>
+
+#define ETH_GSTRING_LEN (2 * IF_NAMESIZE) /* Increase if not large enough */
+
+#define ETHTOOL_FWVERS_LEN 32
+
+struct ethtool_stats {
+ uint8_t __dummy[0];
+};
+
+enum ethtool_ss {
+ ETH_SS_STATS,
+};
+
+struct ethtool_drvinfo {
+ char driver[32];
+ char version[32];
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ char bus_info[32];
+};
+
+struct net_device;
+struct ethtool_ops {
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+};
+
+#endif /* _LINUXKPI_LINUX_ETHTOOL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/eventpoll.h b/sys/compat/linuxkpi/common/include/linux/eventpoll.h
new file mode 100644
index 000000000000..e77e6d689f86
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/eventpoll.h
@@ -0,0 +1,45 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022, Jake Freeland <jfree@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_EVENTPOLL_H_
+#define _LINUXKPI_LINUX_EVENTPOLL_H_
+
+#include <sys/poll.h>
+
+#define EPOLLIN POLLIN
+#define EPOLLPRI POLLPRI
+#define EPOLLOUT POLLOUT
+#define EPOLLERR POLLERR
+#define EPOLLHUP POLLHUP
+#define EPOLLNVAL POLLNVAL
+#define EPOLLRDNORM POLLRDNORM
+#define EPOLLRDBAND POLLRDBAND
+#define EPOLLWRNORM POLLWRNORM
+#define EPOLLWRBAND POLLWRBAND
+#define EPOLLRDHUP POLLRDHUP
+
+#endif /* _LINUXKPI_LINUX_EVENTPOLL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/export.h b/sys/compat/linuxkpi/common/include/linux/export.h
new file mode 100644
index 000000000000..f48bd6af45d3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/export.h
@@ -0,0 +1,31 @@
+/*-
+ * Copyright (c) 2018 Johannes Lundberg <johalun0@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_EXPORT_H
+#define _LINUXKPI_LINUX_EXPORT_H
+
+#define EXPORT_SYMBOL(name)
+#define EXPORT_SYMBOL_GPL(name)
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/file.h b/sys/compat/linuxkpi/common/include/linux/file.h
new file mode 100644
index 000000000000..f6e988c2d88e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/file.h
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_FILE_H_
+#define _LINUXKPI_LINUX_FILE_H_
+
+#include <sys/param.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/refcount.h>
+#include <sys/capsicum.h>
+#include <sys/proc.h>
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+struct linux_file;
+
+#undef file
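+/*
+ * 'file' is remapped to linux_file at the end of this header; undefine
+ * it here so the native struct file can be referenced directly.
+ */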
+
+extern const struct fileops linuxfileops;
+
+static inline struct linux_file *
+linux_fget(unsigned int fd)
+{
+ struct file *file;
+
+ /* lookup file pointer by file descriptor index */
+ if (fget_unlocked(curthread, fd, &cap_no_rights, &file) != 0)
+ return (NULL);
+
+ /* check if file handle really belongs to us */
+ if (file->f_data == NULL ||
+ file->f_ops != &linuxfileops) {
+ fdrop(file, curthread);
+ return (NULL);
+ }
+ return ((struct linux_file *)file->f_data);
+}
+
+extern void linux_file_free(struct linux_file *filp);
+
+static inline void
+fput(struct linux_file *filp)
+{
+ if (refcount_release(filp->_file == NULL ?
+ &filp->f_count : &filp->_file->f_count)) {
+ linux_file_free(filp);
+ }
+}
+
+static inline unsigned int
+file_count(struct linux_file *filp)
+{
+ return (filp->_file == NULL ?
+ filp->f_count : filp->_file->f_count);
+}
+
+static inline void
+put_unused_fd(unsigned int fd)
+{
+ struct file *file;
+
+ if (fget_unlocked(curthread, fd, &cap_no_rights, &file) != 0) {
+ return;
+ }
+ /*
+ * NOTE: We should only get here when the "fd" has not been
+ * installed, so no need to free the associated Linux file
+ * structure.
+ */
+ fdclose(curthread, file, fd);
+
+ /* drop extra reference */
+ fdrop(file, curthread);
+}
+
+static inline void
+fd_install(unsigned int fd, struct linux_file *filp)
+{
+ struct file *file;
+
+ if (fget_unlocked(curthread, fd, &cap_no_rights, &file) != 0) {
+ filp->_file = NULL;
+ } else {
+ filp->_file = file;
+ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
+
+ /* transfer reference count from "filp" to "file" */
+ while (refcount_release(&filp->f_count) == 0)
+ refcount_acquire(&file->f_count);
+ }
+
+ /* drop the extra reference */
+ fput(filp);
+}
+
+static inline int
+get_unused_fd(void)
+{
+ struct file *file;
+ int error;
+ int fd;
+
+ error = falloc(curthread, &file, &fd, 0);
+ if (error)
+ return -error;
+ /* drop the extra reference */
+ fdrop(file, curthread);
+ return fd;
+}
+
+static inline int
+get_unused_fd_flags(int flags)
+{
+ struct file *file;
+ int error;
+ int fd;
+
+ error = falloc(curthread, &file, &fd, flags);
+ if (error)
+ return -error;
+ /* drop the extra reference */
+ fdrop(file, curthread);
+ return fd;
+}
+
+extern struct linux_file *linux_file_alloc(void);
+
+static inline struct linux_file *
+alloc_file(int mode, const struct file_operations *fops)
+{
+ struct linux_file *filp;
+
+ filp = linux_file_alloc();
+ filp->f_op = fops;
+ filp->f_mode = mode;
+
+ return (filp);
+}
+
+struct fd {
+ struct linux_file *linux_file;
+};
+
+static inline void
+fdput(struct fd fd)
+{
+ fput(fd.linux_file);
+}
+
+static inline struct fd
+fdget(unsigned int fd)
+{
+ struct linux_file *f = linux_fget(fd);
+ return (struct fd){f};
+}
+
+#define file linux_file
+#define fget(...) linux_fget(__VA_ARGS__)
+
+#endif /* _LINUXKPI_LINUX_FILE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/firmware.h b/sys/compat/linuxkpi/common/include/linux/firmware.h
new file mode 100644
index 000000000000..a6330ddafb55
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/firmware.h
@@ -0,0 +1,116 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_FIRMWARE_H
+#define _LINUXKPI_LINUX_FIRMWARE_H
+
+#include <sys/types.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct firmware;
+
+struct linuxkpi_firmware {
+ size_t size;
+ const uint8_t *data;
+ /* XXX Does Linux expose anything else? */
+
+ /* This is LinuxKPI implementation private. */
+ const struct firmware *fbdfw;
+};
+
+int linuxkpi_request_firmware_nowait(struct module *, bool, const char *,
+ struct device *, gfp_t, void *,
+ void(*cont)(const struct linuxkpi_firmware *, void *));
+int linuxkpi_request_firmware(const struct linuxkpi_firmware **,
+ const char *, struct device *);
+int linuxkpi_firmware_request_nowarn(const struct linuxkpi_firmware **,
+ const char *, struct device *);
+void linuxkpi_release_firmware(const struct linuxkpi_firmware *);
+int linuxkpi_request_partial_firmware_into_buf(const struct linuxkpi_firmware **,
+ const char *, struct device *, uint8_t *, size_t, size_t);
+
+static __inline int
+request_firmware_nowait(struct module *mod, bool _t,
+ const char *fw_name, struct device *dev, gfp_t gfp, void *drv,
+ void(*cont)(const struct linuxkpi_firmware *, void *))
+{
+
+ return (linuxkpi_request_firmware_nowait(mod, _t, fw_name, dev, gfp,
+ drv, cont));
+}
+
+static __inline int
+request_firmware(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev)
+{
+
+ return (linuxkpi_request_firmware(fw, fw_name, dev));
+}
+
+static __inline int
+request_firmware_direct(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev)
+{
+
+ return (linuxkpi_request_firmware(fw, fw_name, dev));
+}
+
+static __inline int
+firmware_request_nowarn(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev)
+{
+
+ return (linuxkpi_firmware_request_nowarn(fw, fw_name, dev));
+}
+
+static __inline void
+release_firmware(const struct linuxkpi_firmware *fw)
+{
+
+ linuxkpi_release_firmware(fw);
+}
+
+static inline int
+request_partial_firmware_into_buf(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev, void *buf, size_t buflen,
+ size_t offset)
+{
+
+ return (linuxkpi_request_partial_firmware_into_buf(fw, fw_name,
+ dev, buf, buflen, offset));
+}
+
+#define firmware linuxkpi_firmware
+
+#endif /* _LINUXKPI_LINUX_FIRMWARE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/fs.h b/sys/compat/linuxkpi/common/include/linux/fs.h
new file mode 100644
index 000000000000..f1568ad6282d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/fs.h
@@ -0,0 +1,425 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_FS_H_
+#define _LINUXKPI_LINUX_FS_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/vnode.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/dcache.h>
+#include <linux/capability.h>
+#include <linux/wait_bit.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+
+struct module;
+struct kiocb;
+struct iovec;
+struct dentry;
+struct page;
+struct file_lock;
+struct pipe_inode_info;
+struct vm_area_struct;
+struct poll_table_struct;
+struct files_struct;
+struct pfs_node;
+struct linux_cdev;
+
+#define inode vnode
+#define i_cdev v_rdev
+#define i_private v_data
+
+#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
+#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
+
+typedef struct files_struct *fl_owner_t;
+
+struct file_operations;
+
+struct linux_file_wait_queue {
+ struct wait_queue wq;
+ struct wait_queue_head *wqh;
+ atomic_t state;
+#define LINUX_FWQ_STATE_INIT 0
+#define LINUX_FWQ_STATE_NOT_READY 1
+#define LINUX_FWQ_STATE_QUEUED 2
+#define LINUX_FWQ_STATE_READY 3
+#define LINUX_FWQ_STATE_MAX 4
+};
+
+struct linux_file {
+ struct file *_file;
+ const struct file_operations *f_op;
+ void *private_data;
+ int f_flags;
+ int f_mode; /* Just starting mode. */
+ struct dentry *f_dentry;
+ struct dentry f_dentry_store;
+ struct selinfo f_selinfo;
+ struct sigio *f_sigio;
+ struct vnode *f_vnode;
+#define f_inode f_vnode
+ volatile u_int f_count;
+
+ /* anonymous shmem object */
+ vm_object_t f_shmem;
+
+ /* kqfilter support */
+ int f_kqflags;
+#define LINUX_KQ_FLAG_HAS_READ (1 << 0)
+#define LINUX_KQ_FLAG_HAS_WRITE (1 << 1)
+#define LINUX_KQ_FLAG_NEED_READ (1 << 2)
+#define LINUX_KQ_FLAG_NEED_WRITE (1 << 3)
+ /* protects f_selinfo.si_note */
+ spinlock_t f_kqlock;
+ struct linux_file_wait_queue f_wait_queue;
+
+ /* pointer to associated character device, if any */
+ struct linux_cdev *f_cdev;
+
+ struct rcu_head rcu;
+};
+
+#define file linux_file
+#define fasync_struct sigio *
+
+#define fasync_helper(fd, filp, on, queue) \
+({ \
+ if ((on)) \
+ *(queue) = &(filp)->f_sigio; \
+ else \
+ *(queue) = NULL; \
+ 0; \
+})
+
+#define kill_fasync(queue, sig, pollstat) \
+do { \
+ if (*(queue) != NULL) \
+ pgsigio(*(queue), (sig), 0); \
+} while (0)
+
+typedef int (*filldir_t)(void *, const char *, int, off_t, u64, unsigned);
+
+struct file_operations {
+ struct module *owner;
+ ssize_t (*read)(struct linux_file *, char __user *, size_t, off_t *);
+ ssize_t (*write)(struct linux_file *, const char __user *, size_t, off_t *);
+ unsigned int (*poll) (struct linux_file *, struct poll_table_struct *);
+ long (*unlocked_ioctl)(struct linux_file *, unsigned int, unsigned long);
+ long (*compat_ioctl)(struct linux_file *, unsigned int, unsigned long);
+ int (*mmap)(struct linux_file *, struct vm_area_struct *);
+ int (*open)(struct inode *, struct file *);
+ int (*release)(struct inode *, struct linux_file *);
+ int (*fasync)(int, struct linux_file *, int);
+
+/*
+ * Seeking is not supported in FreeBSD.  To stay aligned with Linux code,
+ * llseek() is provided here only so that it can be mapped to no_llseek(),
+ * which returns an illegal-seek error.
+ */
+ off_t (*llseek)(struct linux_file *, off_t, int);
+/*
+ * Not supported in FreeBSD. That's ok, we never call it and it allows some
+ * drivers like DRM drivers to compile without changes.
+ */
+ void (*show_fdinfo)(struct seq_file *, struct file *);
+#if 0
+ /* We do not support these methods. Don't permit them to compile. */
+ loff_t (*llseek)(struct file *, loff_t, int);
+ ssize_t (*aio_read)(struct kiocb *, const struct iovec *,
+ unsigned long, loff_t);
+ ssize_t (*aio_write)(struct kiocb *, const struct iovec *,
+ unsigned long, loff_t);
+ int (*readdir)(struct file *, void *, filldir_t);
+ int (*ioctl)(struct inode *, struct file *, unsigned int,
+ unsigned long);
+ int (*flush)(struct file *, fl_owner_t id);
+ int (*fsync)(struct file *, struct dentry *, int datasync);
+ int (*aio_fsync)(struct kiocb *, int datasync);
+ int (*lock)(struct file *, int, struct file_lock *);
+ ssize_t (*sendpage)(struct file *, struct page *, int, size_t,
+ loff_t *, int);
+ unsigned long (*get_unmapped_area)(struct file *, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+ int (*check_flags)(int);
+ int (*flock)(struct file *, int, struct file_lock *);
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+ loff_t *, size_t, unsigned int);
+ ssize_t (*splice_read)(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
+ int (*setlease)(struct file *, long, struct file_lock **);
+#endif
+};
+#define fops_get(fops) (fops)
+#define replace_fops(f, fops) ((f)->f_op = (fops))
+
+#define FMODE_READ FREAD
+#define FMODE_WRITE FWRITE
+#define FMODE_EXEC FEXEC
+#define FMODE_UNSIGNED_OFFSET 0x2000
+int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops);
+int __register_chrdev_p(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops, uid_t uid,
+ gid_t gid, int mode);
+void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name);
+
+static inline void
+unregister_chrdev(unsigned int major, const char *name)
+{
+
+ __unregister_chrdev(major, 0, 256, name);
+}
+
+static inline int
+register_chrdev(unsigned int major, const char *name,
+ const struct file_operations *fops)
+{
+
+ return (__register_chrdev(major, 0, 256, name, fops));
+}
+
+static inline int
+register_chrdev_p(unsigned int major, const char *name,
+ const struct file_operations *fops, uid_t uid, gid_t gid, int mode)
+{
+
+ return (__register_chrdev_p(major, 0, 256, name, fops, uid, gid, mode));
+}
+
+static inline int
+register_chrdev_region(dev_t dev, unsigned range, const char *name)
+{
+
+ return 0;
+}
+
+static inline void
+unregister_chrdev_region(dev_t dev, unsigned range)
+{
+
+ return;
+}
+
+static inline int
+alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
+ const char *name)
+{
+
+ return 0;
+}
+
+/* No current support for seek op in FreeBSD */
+static inline int
+nonseekable_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static inline int
+simple_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+extern unsigned int linux_iminor(struct inode *);
+#define iminor(...) linux_iminor(__VA_ARGS__)
+
+static inline struct linux_file *
+get_file(struct linux_file *f)
+{
+
+ refcount_acquire(f->_file == NULL ? &f->f_count : &f->_file->f_count);
+ return (f);
+}
+
+struct linux_file * linux_get_file_rcu(struct linux_file **f);
+struct linux_file * get_file_active(struct linux_file **f);
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION < 60700
+static inline bool
+get_file_rcu(struct linux_file *f)
+{
+ return (refcount_acquire_if_not_zero(
+ f->_file == NULL ? &f->f_count : &f->_file->f_count));
+}
+#else
+#define get_file_rcu(f) linux_get_file_rcu(f)
+#endif
+
+static inline struct inode *
+igrab(struct inode *inode)
+{
+ int error;
+
+ error = vget(inode, 0);
+ if (error)
+ return (NULL);
+
+ return (inode);
+}
+
+static inline void
+iput(struct inode *inode)
+{
+
+ vrele(inode);
+}
+
+static inline loff_t
+no_llseek(struct file *file, loff_t offset, int whence)
+{
+
+ return (-ESPIPE);
+}
+
+static inline loff_t
+default_llseek(struct file *file, loff_t offset, int whence)
+{
+ return (no_llseek(file, offset, whence));
+}
+
+static inline loff_t
+generic_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ return (no_llseek(file, offset, whence));
+}
+
+static inline loff_t
+noop_llseek(struct linux_file *file, loff_t offset, int whence)
+{
+
+ return (file->_file->f_offset);
+}
+
+static inline struct vnode *
+file_inode(const struct linux_file *file)
+{
+
+ return (file->f_vnode);
+}
+
+static inline int
+call_mmap(struct linux_file *file, struct vm_area_struct *vma)
+{
+
+ return (file->f_op->mmap(file, vma));
+}
+
+static inline void
+i_size_write(struct inode *inode, loff_t i_size)
+{
+}
+
+/*
+ * simple_read_from_buffer: copy data from kernel-space origin
+ * buffer into user-space destination buffer
+ *
+ * @dest: destination buffer
+ * @read_size: number of bytes to be transferred
+ * @ppos: starting transfer position pointer
+ * @orig: origin buffer
+ * @buf_size: size of destination and origin buffers
+ *
+ * Return value:
+ * On success, total bytes copied with *ppos incremented accordingly.
+ * On failure, negative value.
+ */
+static inline ssize_t
+simple_read_from_buffer(void __user *dest, size_t read_size, loff_t *ppos,
+ void *orig, size_t buf_size)
+{
+ void *p, *read_pos = ((char *) orig) + *ppos;
+ size_t buf_remain = buf_size - *ppos;
+
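+ /*
+ * buf_remain is unsigned; a *ppos beyond the end of the buffer wraps
+ * the subtraction and is caught by the "> buf_size" check below.
+ */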
+ if (buf_remain < 0 || buf_remain > buf_size)
+ return -EINVAL;
+
+ if (read_size > buf_remain)
+ read_size = buf_remain;
+
+ /*
+ * XXX At the time of commit only debugfs consumers could be
+ * identified.  If other callers start using this function we
+ * may have to revise this: normally we would call
+ * copy_to_user() here, but lindebugfs returns the result and
+ * the copyout is done elsewhere for us.
+ */
+ p = memcpy(dest, read_pos, read_size);
+ if (p != NULL)
+ *ppos += read_size;
+
+ return (read_size);
+}
+
+MALLOC_DECLARE(M_LSATTR);
+
+#define __DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt, __wrfunc)\
+static inline int \
+__fops ## _open(struct inode *inode, struct file *filp) \
+{ \
+ return (simple_attr_open(inode, filp, __get, __set, __fmt)); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+ .write = __wrfunc, \
+ .llseek = no_llseek \
+}
+
+#define DEFINE_SIMPLE_ATTRIBUTE(fops, get, set, fmt) \
+ __DEFINE_SIMPLE_ATTRIBUTE(fops, get, set, fmt, simple_attr_write)
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops, get, set, fmt) \
+ __DEFINE_SIMPLE_ATTRIBUTE(fops, get, set, fmt, simple_attr_write_signed)
+
+int simple_attr_open(struct inode *inode, struct file *filp,
+ int (*get)(void *, uint64_t *), int (*set)(void *, uint64_t),
+ const char *fmt);
+
+int simple_attr_release(struct inode *inode, struct file *filp);
+
+ssize_t simple_attr_read(struct file *filp, char *buf, size_t read_size, loff_t *ppos);
+
+ssize_t simple_attr_write(struct file *filp, const char *buf, size_t write_size, loff_t *ppos);
+
+ssize_t simple_attr_write_signed(struct file *filp, const char *buf,
+ size_t write_size, loff_t *ppos);
+
+#endif /* _LINUXKPI_LINUX_FS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/fwnode.h b/sys/compat/linuxkpi/common/include/linux/fwnode.h
new file mode 100644
index 000000000000..a1fbc1b6d6a3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/fwnode.h
@@ -0,0 +1,10 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_FWNODE_H_
+#define _LINUXKPI_LINUX_FWNODE_H_
+
+struct fwnode_handle {
+ struct fwnode_handle *secondary;
+};
+
+#endif /* _LINUXKPI_LINUX_FWNODE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/gcd.h b/sys/compat/linuxkpi/common/include/linux/gcd.h
new file mode 100644
index 000000000000..5ca0540e5102
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/gcd.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_GCD_H_
+#define _LINUXKPI_LINUX_GCD_H_
+
+static inline unsigned long
+gcd(unsigned long a, unsigned long b)
+{
+ unsigned long c;
+
+ c = a % b;
+ while (c != 0) {
+ a = b;
+ b = c;
+ c = a % b;
+ }
+
+ return (b);
+}
+
+#endif
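
The helper above is Euclid's algorithm; because it computes a % b before the loop, b must be non-zero. A worked one-liner, purely illustrative:

	#include <linux/gcd.h>

	/* 54 % 24 = 6, 24 % 6 = 0, so this yields 6; b == 0 would divide by zero. */
	unsigned long g = gcd(54, 24);
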
diff --git a/sys/compat/linuxkpi/common/include/linux/gfp.h b/sys/compat/linuxkpi/common/include/linux/gfp.h
new file mode 100644
index 000000000000..4c4caa621789
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -0,0 +1,214 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_GFP_H_
+#define _LINUXKPI_LINUX_GFP_H_
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <linux/page.h>
+
+#include <vm/vm_param.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+
+#define __GFP_NOWARN 0
+#define __GFP_HIGHMEM 0
+#define __GFP_ZERO M_ZERO
+#define __GFP_NOMEMALLOC 0
+#define __GFP_RECLAIM 0
+#define __GFP_RECLAIMABLE 0
+#define __GFP_RETRY_MAYFAIL 0
+#define __GFP_MOVABLE 0
+#define __GFP_COMP 0
+#define __GFP_KSWAPD_RECLAIM 0
+
+#define __GFP_IO 0
+#define __GFP_NO_KSWAPD 0
+#define __GFP_KSWAPD_RECLAIM 0
+#define __GFP_WAIT M_WAITOK
+#define __GFP_DMA32 (1U << 24) /* LinuxKPI only */
+#define __GFP_NORETRY (1U << 25) /* LinuxKPI only */
+#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
+#define __GFP_NOFAIL M_WAITOK
+
+#define GFP_NOWAIT M_NOWAIT
+#define GFP_ATOMIC (M_NOWAIT | M_USE_RESERVE)
+#define GFP_KERNEL M_WAITOK
+#define GFP_USER M_WAITOK
+#define GFP_HIGHUSER M_WAITOK
+#define GFP_HIGHUSER_MOVABLE M_WAITOK
+#define GFP_IOFS M_NOWAIT
+#define GFP_NOIO M_NOWAIT
+#define GFP_NOFS M_NOWAIT
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_TEMPORARY M_NOWAIT
+#define GFP_NATIVE_MASK (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_ZERO)
+#define GFP_TRANSHUGE 0
+#define GFP_TRANSHUGE_LIGHT 0
+
+CTASSERT((__GFP_DMA32 & GFP_NATIVE_MASK) == 0);
+CTASSERT((__GFP_BITS_MASK & GFP_NATIVE_MASK) == GFP_NATIVE_MASK);
+
+struct page_frag_cache {
+ void *va;
+ int pagecnt_bias;
+};
+
+/*
+ * Page management for unmapped pages:
+ */
+struct page *linux_alloc_pages(gfp_t flags, unsigned int order);
+void linux_free_pages(struct page *page, unsigned int order);
+void *linuxkpi_page_frag_alloc(struct page_frag_cache *, size_t, gfp_t);
+void linuxkpi_page_frag_free(void *);
+void linuxkpi__page_frag_cache_drain(struct page *, size_t);
+
+static inline struct page *
+alloc_page(gfp_t flags)
+{
+
+ return (linux_alloc_pages(flags, 0));
+}
+
+static inline struct page *
+alloc_pages(gfp_t flags, unsigned int order)
+{
+
+ return (linux_alloc_pages(flags, order));
+}
+
+static inline struct page *
+alloc_pages_node(int node_id, gfp_t flags, unsigned int order)
+{
+
+ return (linux_alloc_pages(flags, order));
+}
+
+static inline void
+__free_pages(struct page *page, unsigned int order)
+{
+
+ linux_free_pages(page, order);
+}
+
+static inline void
+__free_page(struct page *page)
+{
+
+ linux_free_pages(page, 0);
+}
+
+static inline struct page *
+dev_alloc_pages(unsigned int order)
+{
+ return (linux_alloc_pages(GFP_ATOMIC, order));
+}
+
+struct folio *folio_alloc(gfp_t gfp, unsigned int order);
+
+/*
+ * Page management for mapped pages:
+ */
+vm_offset_t linux_alloc_kmem(gfp_t flags, unsigned int order);
+void linux_free_kmem(vm_offset_t, unsigned int order);
+
+static inline vm_offset_t
+get_zeroed_page(gfp_t flags)
+{
+
+ return (linux_alloc_kmem(flags | __GFP_ZERO, 0));
+}
+
+static inline vm_offset_t
+__get_free_page(gfp_t flags)
+{
+
+ return (linux_alloc_kmem(flags, 0));
+}
+
+static inline vm_offset_t
+__get_free_pages(gfp_t flags, unsigned int order)
+{
+
+ return (linux_alloc_kmem(flags, order));
+}
+
+static inline void
+free_pages(uintptr_t addr, unsigned int order)
+{
+ if (addr == 0)
+ return;
+
+ linux_free_kmem(addr, order);
+}
+
+static inline void
+free_page(uintptr_t addr)
+{
+ if (addr == 0)
+ return;
+
+ linux_free_kmem(addr, 0);
+}
+
+static inline void *
+page_frag_alloc(struct page_frag_cache *pfc, size_t fragsz, gfp_t gfp)
+{
+
+ return (linuxkpi_page_frag_alloc(pfc, fragsz, gfp));
+}
+
+static inline void
+page_frag_free(void *addr)
+{
+
+ linuxkpi_page_frag_free(addr);
+}
+
+static inline void
+__page_frag_cache_drain(struct page *page, size_t count)
+{
+
+ linuxkpi__page_frag_cache_drain(page, count);
+}
+
+static inline bool
+gfpflags_allow_blocking(const gfp_t gfp_flags)
+{
+ return ((gfp_flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK);
+}
+
+#define SetPageReserved(page) do { } while (0) /* NOP */
+#define ClearPageReserved(page) do { } while (0) /* NOP */
+
+#endif /* _LINUXKPI_LINUX_GFP_H_ */
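
Since the GFP_* constants above simply map to malloc(9) M_* flags, a consumer sketch stays short. Illustrative only, hypothetical names:

	#include <linux/gfp.h>

	/* GFP_KERNEL maps to M_WAITOK, so this may sleep. */
	static struct page *
	example_page_get(void)
	{
		return (alloc_page(GFP_KERNEL | __GFP_ZERO));
	}

	static void
	example_page_put(struct page *page)
	{
		__free_page(page);
	}
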
diff --git a/sys/compat/linuxkpi/common/include/linux/gpf.h b/sys/compat/linuxkpi/common/include/linux/gpf.h
new file mode 100644
index 000000000000..01e883a94728
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/gpf.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_GPF_H_
+#define _LINUXKPI_LINUX_GPF_H_
+
+#include <linux/mmzone.h>
+
+#endif /* _LINUXKPI_LINUX_GPF_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/hardirq.h b/sys/compat/linuxkpi/common/include/linux/hardirq.h
new file mode 100644
index 000000000000..f79451dd0d35
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/hardirq.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_HARDIRQ_H_
+#define _LINUXKPI_LINUX_HARDIRQ_H_
+
+#include <linux/types.h>
+#include <linux/lockdep.h>
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+
+#define synchronize_irq(irq) _intr_drain((irq))
+
+/*
+ * FIXME: In the i915 driver's `intel_engine_cs.c` file,
+ * `synchronize_hardirq()` was replaced by `synchronize_rcu()` with the
+ * following comment:
+ * "Is it enough to wait that all cpu have context-switched?"
+ *
+ * See commit f6d50b7af554e21c380486d6f41c8537b265c777 in drm-kmod.
+ */
+#define synchronize_hardirq(irq) _intr_drain((irq))
+
+#endif /* _LINUXKPI_LINUX_HARDIRQ_H_ */
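
Both macros above resolve to _intr_drain(9). A minimal, hypothetical use:

	#include <linux/hardirq.h>

	static void
	example_quiesce(int irq)
	{
		/* Returns once any in-flight handler for this irq has finished. */
		synchronize_irq(irq);
	}
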
diff --git a/sys/compat/linuxkpi/common/include/linux/hash.h b/sys/compat/linuxkpi/common/include/linux/hash.h
new file mode 100644
index 000000000000..c75814c96724
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/hash.h
@@ -0,0 +1,76 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
+ * Copyright (c) 2013 François Tigeot
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_HASH_H_
+#define _LINUXKPI_LINUX_HASH_H_
+
+#include <sys/hash.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <asm/types.h>
+
+#include <linux/bitops.h>
+
+static inline u64
+hash_64(u64 val, u8 bits)
+{
+ u64 ret;
+ u8 x;
+
+ ret = bits;
+
+ for (x = 0; x != sizeof(ret); x++) {
+ u64 chunk = (val >> (8 * x)) & 0xFF;
+ ret = HASHSTEP(ret, chunk);
+ }
+ return (ret >> (64 - bits));
+}
+
+static inline u32
+hash_32(u32 val, u8 bits)
+{
+ u32 ret;
+ u8 x;
+
+ ret = bits;
+
+ for (x = 0; x != sizeof(ret); x++) {
+ u32 chunk = (val >> (8 * x)) & 0xFF;
+ ret = HASHSTEP(ret, chunk);
+ }
+ return (ret >> (32 - bits));
+}
+
+#if BITS_PER_LONG == 64
+#define hash_long(...) hash_64(__VA_ARGS__)
+#else
+#define hash_long(...) hash_32(__VA_ARGS__)
+#endif
+
+#endif /* _LINUXKPI_LINUX_HASH_H_ */
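
A small sketch of how the folding hashes above are typically consumed, illustrative only:

	#include <linux/hash.h>

	#define	EXAMPLE_HASH_BITS	8	/* 256 buckets */

	static inline unsigned int
	example_bucket(u64 key)
	{
		/* hash_64() keeps the top EXAMPLE_HASH_BITS bits of the folded value. */
		return (hash_64(key, EXAMPLE_HASH_BITS));
	}
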
diff --git a/sys/compat/linuxkpi/common/include/linux/hashtable.h b/sys/compat/linuxkpi/common/include/linux/hashtable.h
new file mode 100644
index 000000000000..55755c354959
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/hashtable.h
@@ -0,0 +1,183 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_HASHTABLE_H
+#define _LINUXKPI_LINUX_HASHTABLE_H
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+
+#include <ck_queue.h>
+
+struct lkpi_hash_entry {
+ CK_LIST_ENTRY(lkpi_hash_entry) entry;
+};
+
+struct lkpi_hash_head {
+ CK_LIST_HEAD(, lkpi_hash_entry) head;
+};
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct lkpi_hash_head name[1UL << (bits)]
+
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+ struct lkpi_hash_head name[1UL << (bits)] __read_mostly
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct lkpi_hash_head name[1UL << (bits)]
+
+#define HASH_SIZE(name) ARRAY_SIZE(name)
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+#define hash_min(...) \
+ hash_long(__VA_ARGS__)
+
+static inline void
+__hash_init(struct lkpi_hash_head *ht, unsigned long size)
+{
+ unsigned long x;
+
+ for (x = 0; x != size; x++)
+ CK_LIST_INIT(&ht[x].head);
+}
+
+#define hash_init(ht) \
+ __hash_init(ht, HASH_SIZE(ht))
+
+#define hash_add(...) \
+ hash_add_rcu(__VA_ARGS__)
+
+static inline void
+__hash_node_type_assert(struct hlist_node *node)
+{
+ /*
+ * Unfortunately, Linux does not have a dedicated type for hash
+ * table node entries.  The sole purpose of this function is to
+ * check the type of the passed argument.
+ */
+ CTASSERT(sizeof(struct lkpi_hash_entry) == sizeof(*node));
+}
+
+#define hash_add_rcu(ht, node, key) do { \
+ struct lkpi_hash_head *__head = &(ht)[hash_min(key, HASH_BITS(ht))]; \
+ __hash_node_type_assert(node); \
+ CK_LIST_INSERT_HEAD(&__head->head, \
+ (struct lkpi_hash_entry *)(node), entry); \
+} while (0)
+
+static inline bool
+hash_hashed(struct hlist_node *node)
+{
+ return (((struct lkpi_hash_entry *)node)->entry.cle_prev != NULL);
+}
+
+static inline bool
+__hash_empty(struct lkpi_hash_head *ht, unsigned long size)
+{
+ unsigned long x;
+
+ for (x = 0; x != size; x++) {
+ if (!CK_LIST_EMPTY(&ht[x].head))
+ return (false);
+ }
+ return (true);
+}
+
+#define hash_empty(ht) \
+ __hash_empty(ht, HASH_SIZE(ht))
+
+#define hash_del(...) \
+ hash_del_rcu(__VA_ARGS__)
+
+static inline void
+hash_del_rcu(struct hlist_node *node)
+{
+ CK_LIST_REMOVE((struct lkpi_hash_entry *)node, entry);
+ memset(node, 0, sizeof(*node));
+}
+
+#define __hash_first(ht, type, member) ({ \
+ const struct lkpi_hash_entry *__first = CK_LIST_FIRST(&(ht)->head); \
+ __hash_node_type_assert(&((type *)0)->member); \
+ (__first != NULL ? container_of((const void *)__first, type, member) : NULL); \
+})
+
+#define __hash_next(obj, type, member) ({ \
+ const struct lkpi_hash_entry *__next = \
+ CK_LIST_NEXT((struct lkpi_hash_entry *)&(obj)->member, entry); \
+ __hash_node_type_assert(&(obj)->member); \
+ (__next != NULL ? container_of((const void *)__next, type, member) : NULL); \
+})
+
+#define hash_for_each(...) \
+ hash_for_each_rcu(__VA_ARGS__)
+
+#define hash_for_each_rcu(name, bkt, obj, member) \
+ for ((bkt) = 0, (obj) = NULL; (obj) == NULL && \
+ (bkt) != HASH_SIZE(name); (bkt)++) \
+ for ((obj) = __hash_first(&(name)[bkt], \
+ __typeof(*(obj)), member); \
+ (obj) != NULL; \
+ (obj) = __hash_next(obj, \
+ __typeof(*(obj)), member))
+
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, (obj) = NULL; (obj) == NULL && \
+ (bkt) != HASH_SIZE(name); (bkt)++) \
+ for ((obj) = __hash_first(&(name)[bkt], \
+ __typeof(*(obj)), member); \
+ (obj) != NULL && ((tmp) = &__hash_next(obj, \
+ __typeof(*(obj)), member)->member, 1); \
+ (obj) = container_of(tmp, __typeof(*(obj)), member))
+
+#define hash_for_each_possible(...) \
+ hash_for_each_possible_rcu(__VA_ARGS__)
+
+#define hash_for_each_possible_rcu_notrace(...) \
+ hash_for_each_possible_rcu(__VA_ARGS__)
+
+#define hash_for_each_possible_rcu(name, obj, member, key) \
+ for ((obj) = __hash_first(&(name)[hash_min(key, HASH_BITS(name))], \
+ __typeof(*(obj)), member); \
+ (obj) != NULL; \
+ (obj) = __hash_next(obj, __typeof(*(obj)), member))
+
+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
+ for ((obj) = __hash_first(&(name)[hash_min(key, HASH_BITS(name))], \
+ __typeof(*(obj)), member); \
+ (obj) != NULL && ((tmp) = &__hash_next(obj, \
+ __typeof(*(obj)), member)->member, 1); \
+ (obj) = container_of(tmp, __typeof(*(obj)), member))
+
+#endif /* _LINUXKPI_LINUX_HASHTABLE_H */
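
A minimal consumer of the table macros above, illustrative only; all example_* names are hypothetical:

	#include <linux/hashtable.h>

	struct example_obj {
		unsigned long id;
		struct hlist_node node;
	};

	static DEFINE_HASHTABLE(example_table, 4);	/* 16 buckets */

	static void
	example_init(void)
	{
		hash_init(example_table);
	}

	static void
	example_insert(struct example_obj *obj)
	{
		hash_add(example_table, &obj->node, obj->id);
	}

	static struct example_obj *
	example_lookup(unsigned long id)
	{
		struct example_obj *obj;

		hash_for_each_possible(example_table, obj, node, id) {
			if (obj->id == id)
				return (obj);
		}
		return (NULL);
	}
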
diff --git a/sys/compat/linuxkpi/common/include/linux/hdmi.h b/sys/compat/linuxkpi/common/include/linux/hdmi.h
new file mode 100644
index 000000000000..e07578167d69
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/hdmi.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_HDMI_H_
+#define __LINUX_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum hdmi_packet_type {
+ HDMI_PACKET_TYPE_NULL = 0x00,
+ HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01,
+ HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02,
+ HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03,
+ HDMI_PACKET_TYPE_ACP = 0x04,
+ HDMI_PACKET_TYPE_ISRC1 = 0x05,
+ HDMI_PACKET_TYPE_ISRC2 = 0x06,
+ HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07,
+ HDMI_PACKET_TYPE_DST_AUDIO = 0x08,
+ HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09,
+ HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a,
+ /* + enum hdmi_infoframe_type */
+};
+
+enum hdmi_infoframe_type {
+ HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
+ HDMI_INFOFRAME_TYPE_AVI = 0x82,
+ HDMI_INFOFRAME_TYPE_SPD = 0x83,
+ HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+ HDMI_INFOFRAME_TYPE_DRM = 0x87,
+};
+
+#define HDMI_IEEE_OUI 0x000c03
+#define HDMI_FORUM_IEEE_OUI 0xc45dd8
+#define HDMI_INFOFRAME_HEADER_SIZE 4
+#define HDMI_AVI_INFOFRAME_SIZE 13
+#define HDMI_SPD_INFOFRAME_SIZE 25
+#define HDMI_AUDIO_INFOFRAME_SIZE 10
+#define HDMI_DRM_INFOFRAME_SIZE 26
+#define HDMI_VENDOR_INFOFRAME_SIZE 4
+
+#define HDMI_INFOFRAME_SIZE(type) \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+};
+
+enum hdmi_colorspace {
+ HDMI_COLORSPACE_RGB,
+ HDMI_COLORSPACE_YUV422,
+ HDMI_COLORSPACE_YUV444,
+ HDMI_COLORSPACE_YUV420,
+ HDMI_COLORSPACE_RESERVED4,
+ HDMI_COLORSPACE_RESERVED5,
+ HDMI_COLORSPACE_RESERVED6,
+ HDMI_COLORSPACE_IDO_DEFINED,
+};
+
+enum hdmi_scan_mode {
+ HDMI_SCAN_MODE_NONE,
+ HDMI_SCAN_MODE_OVERSCAN,
+ HDMI_SCAN_MODE_UNDERSCAN,
+ HDMI_SCAN_MODE_RESERVED,
+};
+
+enum hdmi_colorimetry {
+ HDMI_COLORIMETRY_NONE,
+ HDMI_COLORIMETRY_ITU_601,
+ HDMI_COLORIMETRY_ITU_709,
+ HDMI_COLORIMETRY_EXTENDED,
+};
+
+enum hdmi_picture_aspect {
+ HDMI_PICTURE_ASPECT_NONE,
+ HDMI_PICTURE_ASPECT_4_3,
+ HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_64_27,
+ HDMI_PICTURE_ASPECT_256_135,
+ HDMI_PICTURE_ASPECT_RESERVED,
+};
+
+enum hdmi_active_aspect {
+ HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
+ HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
+ HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
+ HDMI_ACTIVE_ASPECT_PICTURE = 8,
+ HDMI_ACTIVE_ASPECT_4_3 = 9,
+ HDMI_ACTIVE_ASPECT_16_9 = 10,
+ HDMI_ACTIVE_ASPECT_14_9 = 11,
+ HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
+ HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
+ HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
+};
+
+enum hdmi_extended_colorimetry {
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
+ HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
+ HDMI_EXTENDED_COLORIMETRY_OPRGB,
+
+ /* The following EC values are only defined in CEA-861-F. */
+ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
+ HDMI_EXTENDED_COLORIMETRY_BT2020,
+ HDMI_EXTENDED_COLORIMETRY_RESERVED,
+};
+
+enum hdmi_quantization_range {
+ HDMI_QUANTIZATION_RANGE_DEFAULT,
+ HDMI_QUANTIZATION_RANGE_LIMITED,
+ HDMI_QUANTIZATION_RANGE_FULL,
+ HDMI_QUANTIZATION_RANGE_RESERVED,
+};
+
+/* non-uniform picture scaling */
+enum hdmi_nups {
+ HDMI_NUPS_UNKNOWN,
+ HDMI_NUPS_HORIZONTAL,
+ HDMI_NUPS_VERTICAL,
+ HDMI_NUPS_BOTH,
+};
+
+enum hdmi_ycc_quantization_range {
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED,
+ HDMI_YCC_QUANTIZATION_RANGE_FULL,
+};
+
+enum hdmi_content_type {
+ HDMI_CONTENT_TYPE_GRAPHICS,
+ HDMI_CONTENT_TYPE_PHOTO,
+ HDMI_CONTENT_TYPE_CINEMA,
+ HDMI_CONTENT_TYPE_GAME,
+};
+
+enum hdmi_metadata_type {
+ HDMI_STATIC_METADATA_TYPE1 = 0,
+};
+
+enum hdmi_eotf {
+ HDMI_EOTF_TRADITIONAL_GAMMA_SDR,
+ HDMI_EOTF_TRADITIONAL_GAMMA_HDR,
+ HDMI_EOTF_SMPTE_ST2084,
+ HDMI_EOTF_BT_2100_HLG,
+};
+
+struct hdmi_avi_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ bool itc;
+ unsigned char pixel_repeat;
+ enum hdmi_colorspace colorspace;
+ enum hdmi_scan_mode scan_mode;
+ enum hdmi_colorimetry colorimetry;
+ enum hdmi_picture_aspect picture_aspect;
+ enum hdmi_active_aspect active_aspect;
+ enum hdmi_extended_colorimetry extended_colorimetry;
+ enum hdmi_quantization_range quantization_range;
+ enum hdmi_nups nups;
+ unsigned char video_code;
+ enum hdmi_ycc_quantization_range ycc_quantization_range;
+ enum hdmi_content_type content_type;
+ unsigned short top_bar;
+ unsigned short bottom_bar;
+ unsigned short left_bar;
+ unsigned short right_bar;
+};
+
+/* DRM Infoframe as per CTA 861.G spec */
+struct hdmi_drm_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_eotf eotf;
+ enum hdmi_metadata_type metadata_type;
+ struct {
+ u16 x, y;
+ } display_primaries[3];
+ struct {
+ u16 x, y;
+ } white_point;
+ u16 max_display_mastering_luminance;
+ u16 min_display_mastering_luminance;
+ u16 max_cll;
+ u16 max_fall;
+};
+
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
+int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame);
+ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size);
+
+enum hdmi_spd_sdi {
+ HDMI_SPD_SDI_UNKNOWN,
+ HDMI_SPD_SDI_DSTB,
+ HDMI_SPD_SDI_DVDP,
+ HDMI_SPD_SDI_DVHS,
+ HDMI_SPD_SDI_HDDVR,
+ HDMI_SPD_SDI_DVC,
+ HDMI_SPD_SDI_DSC,
+ HDMI_SPD_SDI_VCD,
+ HDMI_SPD_SDI_GAME,
+ HDMI_SPD_SDI_PC,
+ HDMI_SPD_SDI_BD,
+ HDMI_SPD_SDI_SACD,
+ HDMI_SPD_SDI_HDDVD,
+ HDMI_SPD_SDI_PMP,
+};
+
+struct hdmi_spd_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ char vendor[8];
+ char product[16];
+ enum hdmi_spd_sdi sdi;
+};
+
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product);
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
+
+enum hdmi_audio_coding_type {
+ HDMI_AUDIO_CODING_TYPE_STREAM,
+ HDMI_AUDIO_CODING_TYPE_PCM,
+ HDMI_AUDIO_CODING_TYPE_AC3,
+ HDMI_AUDIO_CODING_TYPE_MPEG1,
+ HDMI_AUDIO_CODING_TYPE_MP3,
+ HDMI_AUDIO_CODING_TYPE_MPEG2,
+ HDMI_AUDIO_CODING_TYPE_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_DTS,
+ HDMI_AUDIO_CODING_TYPE_ATRAC,
+ HDMI_AUDIO_CODING_TYPE_DSD,
+ HDMI_AUDIO_CODING_TYPE_EAC3,
+ HDMI_AUDIO_CODING_TYPE_DTS_HD,
+ HDMI_AUDIO_CODING_TYPE_MLP,
+ HDMI_AUDIO_CODING_TYPE_DST,
+ HDMI_AUDIO_CODING_TYPE_WMA_PRO,
+ HDMI_AUDIO_CODING_TYPE_CXT,
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_SIZE_STREAM,
+ HDMI_AUDIO_SAMPLE_SIZE_16,
+ HDMI_AUDIO_SAMPLE_SIZE_20,
+ HDMI_AUDIO_SAMPLE_SIZE_24,
+};
+
+enum hdmi_audio_sample_frequency {
+ HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_32000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_44100,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_48000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_88200,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_96000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_176400,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_192000,
+};
+
+enum hdmi_audio_coding_type_ext {
+ /* Refer to Audio Coding Type (CT) field in Data Byte 1 */
+ HDMI_AUDIO_CODING_TYPE_EXT_CT,
+
+ /*
+ * The next three CXT values are defined in CEA-861-E only.
+ * They do not exist in older versions, and in CEA-861-F they are
+ * defined as 'Not in use'.
+ */
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
+
+ /* The following CXT values are only defined in CEA-861-F. */
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_EXT_DRA,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
+};
+
+struct hdmi_audio_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned char channels;
+ enum hdmi_audio_coding_type coding_type;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_sample_frequency sample_frequency;
+ enum hdmi_audio_coding_type_ext coding_type_ext;
+ unsigned char channel_allocation;
+ unsigned char level_shift_value;
+ bool downmix_inhibit;
+
+};
+
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+#ifdef __linux__
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
+#endif
+
+enum hdmi_3d_structure {
+ HDMI_3D_STRUCTURE_INVALID = -1,
+ HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
+ HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
+ HDMI_3D_STRUCTURE_L_DEPTH,
+ HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
+ HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
+};
+
+
+struct hdmi_vendor_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ u8 vic;
+ enum hdmi_3d_structure s3d_struct;
+ unsigned int s3d_ext_data;
+};
+
+/* HDR Metadata as per 861.G spec */
+struct hdr_static_metadata {
+ __u8 eotf;
+ __u8 metadata_type;
+ __u16 max_cll;
+ __u16 max_fall;
+ __u16 min_cll;
+};
+
+/**
+ * struct hdr_sink_metadata - HDR sink metadata
+ *
+ * Metadata Information read from Sink's EDID
+ */
+struct hdr_sink_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_static_metadata hdmi_type1;
+ };
+};
+
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
+
+union hdmi_vendor_any_infoframe {
+ struct {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ } any;
+ struct hdmi_vendor_infoframe hdmi;
+};
+
+/**
+ * union hdmi_infoframe - overall union of all abstract infoframe representations
+ * @any: generic infoframe
+ * @avi: avi infoframe
+ * @spd: spd infoframe
+ * @vendor: union of all vendor infoframes
+ * @audio: audio infoframe
+ * @drm: Dynamic Range and Mastering infoframe
+ *
+ * This is used by the generic pack function. This works since all infoframes
+ * have the same header which also indicates which type of infoframe should be
+ * packed.
+ */
+union hdmi_infoframe {
+ struct hdmi_any_infoframe any;
+ struct hdmi_avi_infoframe avi;
+ struct hdmi_spd_infoframe spd;
+ union hdmi_vendor_any_infoframe vendor;
+ struct hdmi_audio_infoframe audio;
+ struct hdmi_drm_infoframe drm;
+};
+
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
+void hdmi_infoframe_log(const char *level, struct device *dev,
+ const union hdmi_infoframe *frame);
+
+#endif /* __LINUX_HDMI_H_ */
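
The pack routines are implemented elsewhere in the LinuxKPI; a hypothetical caller building an AVI infoframe, illustrative only:

	#include <linux/hdmi.h>

	static ssize_t
	example_pack_avi(void *buf, size_t len)
	{
		struct hdmi_avi_infoframe frame;

		hdmi_avi_infoframe_init(&frame);
		frame.colorspace = HDMI_COLORSPACE_RGB;
		frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;

		/* Number of bytes written to buf on success, negative errno otherwise. */
		return (hdmi_avi_infoframe_pack(&frame, buf, len));
	}
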
diff --git a/sys/compat/linuxkpi/common/include/linux/highmem.h b/sys/compat/linuxkpi/common/include/linux/highmem.h
new file mode 100644
index 000000000000..58a9cdcdf60f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/highmem.h
@@ -0,0 +1,170 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_HIGHMEM_H_
+#define _LINUXKPI_LINUX_HIGHMEM_H_
+
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
+#include <linux/mm.h>
+#include <linux/page.h>
+
+#define PageHighMem(p) (0)
+
+static inline struct page *
+kmap_to_page(void *addr)
+{
+
+ return (virt_to_page(addr));
+}
+
+static inline void *
+kmap(struct page *page)
+{
+ struct sf_buf *sf;
+
+ if (PMAP_HAS_DMAP) {
+ return ((void *)PHYS_TO_DMAP(page_to_phys(page)));
+ } else {
+ sched_pin();
+ sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
+ if (sf == NULL) {
+ sched_unpin();
+ return (NULL);
+ }
+ return ((void *)sf_buf_kva(sf));
+ }
+}
+
+static inline void *
+kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ vm_memattr_t attr = pgprot2cachemode(prot);
+
+ if (attr != VM_MEMATTR_DEFAULT) {
+ page->flags |= PG_FICTITIOUS;
+ pmap_page_set_memattr(page, attr);
+ }
+ return (kmap(page));
+}
+
+static inline void *
+kmap_atomic(struct page *page)
+{
+
+ return (kmap_atomic_prot(page, VM_PROT_ALL));
+}
+
+static inline void *
+kmap_local_page(struct page *page)
+{
+ return (kmap(page));
+}
+
+static inline void *
+kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+
+ return (kmap_atomic_prot(page, prot));
+}
+
+static inline void
+kunmap(struct page *page)
+{
+ struct sf_buf *sf;
+
+ if (!PMAP_HAS_DMAP) {
+ /* look up the page's existing sf_buf; this takes an extra reference */
+ sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
+
+ /* drop both the lookup reference and the one taken by kmap() */
+ sf_buf_free(sf);
+ sf_buf_free(sf);
+
+ sched_unpin();
+ }
+}
+
+static inline void
+kunmap_atomic(void *vaddr)
+{
+
+ if (!PMAP_HAS_DMAP)
+ kunmap(virt_to_page(vaddr));
+}
+
+static inline void
+kunmap_local(void *addr)
+{
+
+ kunmap_atomic(addr);
+}
+
+static inline void
+memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
+{
+ char *from;
+
+ KASSERT(offset + len <= PAGE_SIZE,
+ ("%s: memcpy from page %p to address %p: "
+ "offset+len (%zu+%zu) would go beyond page end",
+ __func__, page, to, offset, len));
+
+ from = kmap_local_page(page);
+ memcpy(to, from + offset, len);
+ kunmap_local(from);
+}
+
+static inline void
+memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
+{
+ char *to;
+
+ KASSERT(offset + len <= PAGE_SIZE,
+ ("%s: memcpy from address %p to page %p: "
+ "offset+len (%zu+%zu) would go beyond page end",
+ __func__, from, page, offset, len));
+
+ to = kmap_local_page(page);
+ memcpy(to + offset, from, len);
+ kunmap_local(to);
+}
+
+#endif /* _LINUXKPI_LINUX_HIGHMEM_H_ */
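
A hypothetical consumer that fills a page through the mapping helpers above (memcpy_to_page() is the one-call shorthand for the same pattern):

	#include <linux/highmem.h>

	static void
	example_fill_page(struct page *page, const char *src, size_t len)
	{
		char *dst;

		dst = kmap_local_page(page);
		if (dst == NULL)	/* sf_buf_alloc(SFB_NOWAIT) can fail */
			return;
		memcpy(dst, src, len);	/* caller guarantees len <= PAGE_SIZE */
		kunmap_local(dst);
	}
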
diff --git a/sys/compat/linuxkpi/common/include/linux/hrtimer.h b/sys/compat/linuxkpi/common/include/linux/hrtimer.h
new file mode 100644
index 000000000000..88f9487d0b85
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/hrtimer.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_HRTIMER_H_
+#define _LINUXKPI_LINUX_HRTIMER_H_
+
+#include <sys/_callout.h>
+#include <sys/_mutex.h>
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+#include <linux/timer.h>
+
+enum hrtimer_mode {
+ HRTIMER_MODE_REL,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL,
+};
+
+enum hrtimer_restart {
+ HRTIMER_RESTART,
+ HRTIMER_NORESTART,
+};
+
+struct hrtimer {
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct mtx mtx;
+ struct callout callout;
+ s64 expires; /* relative time in nanoseconds */
+ s64 precision; /* in nanoseconds */
+};
+
+#define hrtimer_active(hrtimer) linux_hrtimer_active(hrtimer)
+#define hrtimer_try_to_cancel(hrtimer) linux_hrtimer_try_to_cancel(hrtimer)
+#define hrtimer_cancel(hrtimer) linux_hrtimer_cancel(hrtimer)
+
+#define hrtimer_init(hrtimer, clock, mode) do { \
+ CTASSERT((clock) == CLOCK_MONOTONIC); \
+ CTASSERT((mode) == HRTIMER_MODE_REL); \
+ linux_hrtimer_init(hrtimer); \
+} while (0)
+
+#define hrtimer_set_expires(hrtimer, time) \
+ linux_hrtimer_set_expires(hrtimer, time)
+
+#define hrtimer_start(hrtimer, time, mode) do { \
+ CTASSERT((mode) == HRTIMER_MODE_REL); \
+ linux_hrtimer_start(hrtimer, time); \
+} while (0)
+
+#define hrtimer_start_range_ns(hrtimer, time, prec, mode) do { \
+ CTASSERT((mode) == HRTIMER_MODE_REL); \
+ linux_hrtimer_start_range_ns(hrtimer, time, prec); \
+} while (0)
+
+#define hrtimer_forward_now(hrtimer, interval) do { \
+ linux_hrtimer_forward_now(hrtimer, interval); \
+} while (0)
+
+bool linux_hrtimer_active(struct hrtimer *);
+int linux_hrtimer_try_to_cancel(struct hrtimer *);
+int linux_hrtimer_cancel(struct hrtimer *);
+void linux_hrtimer_init(struct hrtimer *);
+void linux_hrtimer_set_expires(struct hrtimer *, ktime_t);
+void linux_hrtimer_start(struct hrtimer *, ktime_t);
+void linux_hrtimer_start_range_ns(struct hrtimer *, ktime_t, int64_t);
+void linux_hrtimer_forward_now(struct hrtimer *, ktime_t);
+
+#endif /* _LINUXKPI_LINUX_HRTIMER_H_ */
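
A one-shot relative timer as permitted by the CTASSERTs above (CLOCK_MONOTONIC and HRTIMER_MODE_REL only). Illustrative; ms_to_ktime() is assumed to come from <linux/ktime.h>:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer example_timer;

	static enum hrtimer_restart
	example_timer_fn(struct hrtimer *t)
	{
		/* ... one-shot work ... */
		return (HRTIMER_NORESTART);
	}

	static void
	example_timer_arm(void)
	{
		hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		example_timer.function = example_timer_fn;
		hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
	}
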
diff --git a/sys/compat/linuxkpi/common/include/linux/i2c-algo-bit.h b/sys/compat/linuxkpi/common/include/linux/i2c-algo-bit.h
new file mode 100644
index 000000000000..4e8f00f9bebc
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/i2c-algo-bit.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUX_I2C_ALGO_BIT_H_
+#define _LINUX_I2C_ALGO_BIT_H_
+
+#include <linux/i2c.h>
+
+struct i2c_algo_bit_data {
+ void *data;
+ void (*setsda) (void *data, int state);
+ void (*setscl) (void *data, int state);
+ int (*getsda) (void *data);
+ int (*getscl) (void *data);
+ int (*pre_xfer) (struct i2c_adapter *);
+ void (*post_xfer) (struct i2c_adapter *);
+
+ int udelay;
+ int timeout;
+};
+
+int lkpi_i2c_bit_add_bus(struct i2c_adapter *adapter);
+
+#define i2c_bit_add_bus(adapter) lkpi_i2c_bit_add_bus(adapter)
+
+#endif /* _LINUX_I2C_ALGO_BIT_H_ */
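
A hypothetical bit-banged bus registration; the GPIO callbacks are stubs and every example_* name is made up:

	#include <linux/i2c.h>
	#include <linux/i2c-algo-bit.h>

	static void example_setsda(void *data, int state) { /* drive SDA */ }
	static void example_setscl(void *data, int state) { /* drive SCL */ }
	static int example_getsda(void *data) { return (1); /* sample SDA */ }
	static int example_getscl(void *data) { return (1); /* sample SCL */ }

	static struct i2c_algo_bit_data example_bit_data = {
		.setsda = example_setsda,
		.setscl = example_setscl,
		.getsda = example_getsda,
		.getscl = example_getscl,
		.udelay = 10,		/* delay step, microseconds */
		.timeout = 100,
	};

	static int
	example_bit_attach(struct i2c_adapter *adapter)
	{
		adapter->algo_data = &example_bit_data;
		return (i2c_bit_add_bus(adapter));
	}
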
diff --git a/sys/compat/linuxkpi/common/include/linux/i2c.h b/sys/compat/linuxkpi/common/include/linux/i2c.h
new file mode 100644
index 000000000000..f24d282586f6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/i2c.h
@@ -0,0 +1,177 @@
+/*-
+ * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUX_I2C_H_
+#define _LINUX_I2C_H_
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+
+#include <linux/device.h>
+
+#define I2C_MAX_ADAPTER_NAME_LENGTH 32
+
+#define I2C_M_RD 0x0001
+#define I2C_M_NOSTART 0x0002
+#define I2C_M_STOP 0x0004
+
+/* Functionality bits not backed by this implementation; stubbed to 0. */
+#define I2C_FUNC_I2C 0
+#define I2C_FUNC_SMBUS_EMUL 0
+#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0
+#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0
+#define I2C_FUNC_10BIT_ADDR 0
+
+#define I2C_CLASS_HWMON 0x1
+#define I2C_CLASS_DDC 0x8
+#define I2C_CLASS_SPD 0x80
+
+struct i2c_adapter {
+ struct module *owner;
+ unsigned int class;
+
+ char name[I2C_MAX_ADAPTER_NAME_LENGTH];
+ struct device dev;
+
+ const struct i2c_lock_operations *lock_ops;
+ const struct i2c_algorithm *algo;
+ const struct i2c_adapter_quirks *quirks;
+ void *algo_data;
+
+ int retries;
+ void *data;
+};
+
+struct i2c_msg {
+ uint16_t addr;
+ uint16_t flags;
+ uint16_t len;
+ uint8_t *buf;
+};
+
+struct i2c_algorithm {
+ int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int);
+ uint32_t (*functionality)(struct i2c_adapter *);
+};
+
+struct i2c_lock_operations {
+ void (*lock_bus)(struct i2c_adapter *, unsigned int);
+ int (*trylock_bus)(struct i2c_adapter *, unsigned int);
+ void (*unlock_bus)(struct i2c_adapter *, unsigned int);
+};
+
+struct i2c_adapter_quirks {
+ uint64_t flags;
+ int max_num_msgs;
+ uint16_t max_write_len;
+ uint16_t max_read_len;
+ uint16_t max_comb_1st_msg_len;
+ uint16_t max_comb_2nd_msg_len;
+};
+
+#define I2C_AQ_COMB BIT(0)
+#define I2C_AQ_COMB_WRITE_FIRST BIT(1)
+#define I2C_AQ_COMB_READ_SECOND BIT(2)
+#define I2C_AQ_COMB_SAME_ADDR BIT(3)
+#define I2C_AQ_COMB_WRITE_THEN_READ \
+ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \
+ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
+#define I2C_AQ_NO_CLK_STRETCH BIT(4)
+#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
+#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
+#define I2C_AQ_NO_ZERO_LEN \
+ (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
+#define I2C_AQ_NO_REP_START BIT(7)
+
+int lkpi_i2c_add_adapter(struct i2c_adapter *adapter);
+int lkpi_i2c_del_adapter(struct i2c_adapter *adapter);
+
+int lkpi_i2cbb_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int nmsgs);
+
+#define i2c_add_adapter(adapter) lkpi_i2c_add_adapter(adapter)
+#define i2c_del_adapter(adapter) lkpi_i2c_del_adapter(adapter)
+
+#define i2c_get_adapter(x) NULL
+#define i2c_put_adapter(x)
+
+static inline int
+do_i2c_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int nmsgs)
+{
+ int ret, retries;
+
+ retries = adapter->retries == 0 ? 1 : adapter->retries;
+ for (; retries != 0; retries--) {
+ if (adapter->algo != NULL && adapter->algo->master_xfer != NULL)
+ ret = adapter->algo->master_xfer(adapter, msgs, nmsgs);
+ else
+ ret = lkpi_i2cbb_transfer(adapter, msgs, nmsgs);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ return (ret);
+}
+
+static inline int
+i2c_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int nmsgs)
+{
+ int ret;
+
+ if (adapter->algo == NULL && adapter->algo_data == NULL)
+ return (-EOPNOTSUPP);
+
+ if (adapter->lock_ops)
+ adapter->lock_ops->lock_bus(adapter, 0);
+
+ ret = do_i2c_transfer(adapter, msgs, nmsgs);
+
+ if (adapter->lock_ops)
+ adapter->lock_ops->unlock_bus(adapter, 0);
+
+ return (ret);
+}
+
+/* Unlocked version of i2c_transfer */
+static inline int
+__i2c_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int nmsgs)
+{
+ return (do_i2c_transfer(adapter, msgs, nmsgs));
+}
+
+static inline void
+i2c_set_adapdata(struct i2c_adapter *adapter, void *data)
+{
+ adapter->data = data;
+}
+
+static inline void *
+i2c_get_adapdata(struct i2c_adapter *adapter)
+{
+ return (adapter->data);
+}
+
+#endif /* _LINUX_I2C_H_ */
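
A typical register read as a write-then-read pair, routed through i2c_transfer() above (hypothetical helper, no claims about a specific driver):

	#include <linux/i2c.h>

	static int
	example_read_reg(struct i2c_adapter *adap, uint16_t addr, uint8_t reg,
	    uint8_t *val)
	{
		struct i2c_msg msgs[2] = {
			{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
		};

		/* Locks the bus when lock_ops is set and retries on -EAGAIN. */
		return (i2c_transfer(adap, msgs, 2));
	}
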
diff --git a/sys/compat/linuxkpi/common/include/linux/idr.h b/sys/compat/linuxkpi/common/include/linux/idr.h
new file mode 100644
index 000000000000..535d8ce07fb4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/idr.h
@@ -0,0 +1,162 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IDR_H_
+#define _LINUXKPI_LINUX_IDR_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/limits.h>
+#include <sys/mutex.h>
+
+#include <linux/radix-tree.h>
+#include <linux/gpf.h>
+#include <linux/types.h>
+
+#define IDR_BITS 5
+#define IDR_SIZE (1 << IDR_BITS)
+#define IDR_MASK (IDR_SIZE - 1)
+
+#define MAX_ID_SHIFT ((sizeof(int) * NBBY) - 1)
+#define MAX_ID_BIT (1U << MAX_ID_SHIFT)
+#define MAX_ID_MASK (MAX_ID_BIT - 1)
+#define MAX_LEVEL ((MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS)
+
+#define MAX_IDR_SHIFT (sizeof(int)*8 - 1)
+#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+#define MAX_IDR_MASK (MAX_IDR_BIT - 1)
+
+struct idr_layer {
+ unsigned long bitmap;
+ struct idr_layer *ary[IDR_SIZE];
+};
+
+struct idr {
+ struct mtx lock;
+ struct idr_layer *top;
+ struct idr_layer *free;
+ int layers;
+ int next_cyclic_id;
+};
+
+/* NOTE: It is the application's responsibility to destroy the IDR. */
+#define DEFINE_IDR(name) \
+ struct idr name; \
+ SYSINIT(name##_idr_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST, \
+ idr_init, &(name))
+
+/* NOTE: It is the application's responsibility to destroy the IDA. */
+#define DEFINE_IDA(name) \
+ struct ida name; \
+ SYSINIT(name##_ida_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST, \
+ ida_init, &(name))
+
+void idr_preload(gfp_t gfp_mask);
+void idr_preload_end(void);
+void *idr_find(struct idr *idp, int id);
+void *idr_get_next(struct idr *idp, int *nextid);
+bool idr_is_empty(struct idr *idp);
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
+int idr_get_new(struct idr *idp, void *ptr, int *id);
+int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
+void *idr_replace(struct idr *idp, void *ptr, int id);
+void *idr_remove(struct idr *idp, int id);
+void idr_remove_all(struct idr *idp);
+void idr_destroy(struct idr *idp);
+void idr_init(struct idr *idp);
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t);
+int idr_alloc_cyclic(struct idr *idp, void *ptr, int start, int end, gfp_t);
+int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data);
+
+#define idr_for_each_entry(idp, entry, id) \
+ for ((id) = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++(id))
+
+#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
+#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1)
+#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
+
+struct ida_bitmap {
+ long nr_busy;
+ unsigned long bitmap[IDA_BITMAP_LONGS];
+};
+
+struct ida {
+ struct idr idr;
+ struct ida_bitmap *free_bitmap;
+};
+
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
+int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
+void ida_remove(struct ida *ida, int id);
+void ida_destroy(struct ida *ida);
+void ida_init(struct ida *ida);
+
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+ gfp_t gfp_mask);
+void ida_simple_remove(struct ida *ida, unsigned int id);
+
+static inline void
+ida_free(struct ida *ida, int id)
+{
+
+ ida_remove(ida, id);
+}
+
+static inline int
+ida_get_new(struct ida *ida, int *p_id)
+{
+
+ return (ida_get_new_above(ida, 0, p_id));
+}
+
+static inline int
+ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
+{
+ return (ida_simple_get(ida, min, UINT_MAX, gfp));
+}
+
+static inline int
+ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
+{
+
+ return (ida_simple_get(ida, 0, max, gfp));
+}
+
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+ return (ida_alloc_max(ida, ~0u, gfp));
+}
+
+static inline bool
+ida_is_empty(struct ida *ida)
+{
+
+ return (idr_is_empty(&ida->idr));
+}
+
+#endif /* _LINUXKPI_LINUX_IDR_H_ */
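
A hypothetical consumer handing out small integer ids with the IDR declared above (GFP_KERNEL comes from <linux/gfp.h>):

	#include <linux/gfp.h>
	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);		/* the consumer must destroy it, see NOTE above */

	static int
	example_register(void *obj)
	{
		/* Returns an id in [1, 1000) on success, or a negative errno. */
		return (idr_alloc(&example_idr, obj, 1, 1000, GFP_KERNEL));
	}

	static void *
	example_resolve(int id)
	{
		return (idr_find(&example_idr, id));
	}
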
diff --git a/sys/compat/linuxkpi/common/include/linux/ieee80211.h b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
new file mode 100644
index 000000000000..3644ef80861b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
@@ -0,0 +1,1245 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IEEE80211_H
+#define _LINUXKPI_LINUX_IEEE80211_H
+
+#include <sys/types.h>
+#include <net80211/ieee80211.h>
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+
+/* linux_80211.c */
+extern int linuxkpi_debug_80211;
+#ifndef D80211_TODO
+#define D80211_TODO 0x1
+#endif
+#define TODO(fmt, ...) if (linuxkpi_debug_80211 & D80211_TODO) \
+ printf("%s:%d: XXX LKPI80211 TODO " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+
+/* 9.4.2.55 Management MIC element (CMAC-256, GMAC-128, and GMAC-256). */
+struct ieee80211_mmie_16 {
+ uint8_t element_id;
+ uint8_t length;
+ uint16_t key_id;
+ uint8_t ipn[6];
+ uint8_t mic[16];
+};
+
+#define IEEE80211_CCMP_HDR_LEN 8 /* 802.11i .. net80211 comment */
+#define IEEE80211_CCMP_PN_LEN 6
+#define IEEE80211_CCMP_MIC_LEN 8 /* || 16 */
+#define IEEE80211_CCMP_256_HDR_LEN 8
+#define IEEE80211_CCMP_256_MIC_LEN 16
+#define IEEE80211_GCMP_HDR_LEN 8
+#define IEEE80211_GCMP_MIC_LEN 16
+#define IEEE80211_GCMP_PN_LEN 6
+#define IEEE80211_GMAC_PN_LEN 6
+#define IEEE80211_CMAC_PN_LEN 6
+
+#define IEEE80211_MAX_PN_LEN 16
+
+#define IEEE80211_INVAL_HW_QUEUE ((uint8_t)-1)
+
+#define IEEE80211_MAX_AMPDU_BUF_HT IEEE80211_AGGR_BAWMAX
+#define IEEE80211_MAX_AMPDU_BUF_HE 256
+#define IEEE80211_MAX_AMPDU_BUF_EHT 1024
+
+#define IEEE80211_MAX_FRAME_LEN 2352
+#define IEEE80211_MAX_DATA_LEN (2300 + IEEE80211_CRC_LEN)
+
+#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095 /* 9.3.2.1 Format of Data frames; non-VHT non-DMG STA */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
+#define IEEE80211_MAX_RTS_THRESHOLD 2346 /* net80211::IEEE80211_RTS_MAX */
+
+#define IEEE80211_MIN_ACTION_SIZE 23 /* ? */
+
+/* Wi-Fi Peer-to-Peer (P2P) Technical Specification */
+#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7f
+#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
+
+/* 802.11-2016, 9.2.4.5.1, Table 9-6 QoS Control Field */
+#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
+#define IEEE80211_QOS_CTL_TID_MASK IEEE80211_QOS_TID
+#define IEEE80211_QOS_CTL_EOSP 0x0010
+#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080
+#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060
+#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020
+#define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100
+
+enum ieee80211_rate_flags {
+ IEEE80211_RATE_SHORT_PREAMBLE = BIT(0),
+};
+
+enum ieee80211_rate_control_changed_flags {
+ IEEE80211_RC_BW_CHANGED = BIT(0),
+ IEEE80211_RC_NSS_CHANGED = BIT(1),
+ IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
+ IEEE80211_RC_SMPS_CHANGED = BIT(3),
+};
+
+#define IEEE80211_SCTL_FRAG IEEE80211_SEQ_FRAG_MASK
+#define IEEE80211_SCTL_SEQ IEEE80211_SEQ_SEQ_MASK
+
+#define IEEE80211_TKIP_ICV_LEN 4
+#define IEEE80211_TKIP_IV_LEN 8 /* WEP + KID + EXT */
+
+/* 802.11-2016, 9.4.2.158.3 Supported VHT-MCS and NSS Set field. */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) /* part of tx_highest */
+
+#define IEEE80211_VHT_MAX_AMPDU_1024K 7 /* 9.4.2.56.3 A-MPDU Parameters field, Table 9-163 */
+
+#define IEEE80211_WEP_IV_LEN 3 /* net80211: IEEE80211_WEP_IVLEN */
+#define IEEE80211_WEP_ICV_LEN 4
+
+#define WLAN_AUTH_OPEN __LINE__ /* TODO FIXME brcmfmac */
+#define WLAN_CAPABILITY_IBSS __LINE__ /* TODO FIXME no longer used? */
+#define WLAN_CAPABILITY_SHORT_PREAMBLE __LINE__ /* TODO FIXME brcmfmac */
+#define WLAN_CAPABILITY_SHORT_SLOT_TIME __LINE__ /* TODO FIXME brcmfmac */
+
+enum wlan_ht_cap_sm_ps {
+ WLAN_HT_CAP_SM_PS_STATIC = 0,
+ WLAN_HT_CAP_SM_PS_DYNAMIC,
+ WLAN_HT_CAP_SM_PS_INVALID,
+ WLAN_HT_CAP_SM_PS_DISABLED,
+};
+
+#define WLAN_MAX_KEY_LEN 32
+#define WLAN_PMKID_LEN 16
+#define WLAN_PMK_LEN_SUITE_B_192 48
+
+enum ieee80211_key_len {
+ WLAN_KEY_LEN_WEP40 = 5,
+ WLAN_KEY_LEN_WEP104 = 13,
+ WLAN_KEY_LEN_TKIP = 32,
+ WLAN_KEY_LEN_CCMP = 16,
+ WLAN_KEY_LEN_CCMP_256 = 32,
+ WLAN_KEY_LEN_GCMP = 16,
+ WLAN_KEY_LEN_AES_CMAC = 16,
+ WLAN_KEY_LEN_GCMP_256 = 32,
+ WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+ WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+ WLAN_KEY_LEN_BIP_GMAC_256 = 32,
+};
+
+/* 802.11-2020, 9.4.2.55.3, Table 9-185 Subfields of the A-MPDU Parameters field */
+enum ieee80211_min_mpdu_start_spacing {
+ IEEE80211_HT_MPDU_DENSITY_NONE = 0,
+#if 0
+ IEEE80211_HT_MPDU_DENSITY_XXX = 1, /* 1/4 us */
+#endif
+ IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 us */
+ IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 us */
+ IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 us */
+ IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4us */
+ IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8us */
+ IEEE80211_HT_MPDU_DENSITY_16 = 7, /* 16us */
+};
+
+/* 9.4.2.57, Table 9-168, HT Operation element fields and subfields */
+#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080 /* B24.. */
+
+#define IEEE80211_FCTL_FTYPE IEEE80211_FC0_TYPE_MASK
+#define IEEE80211_FCTL_STYPE IEEE80211_FC0_SUBTYPE_MASK
+#define IEEE80211_FCTL_ORDER (IEEE80211_FC1_ORDER << 8)
+#define IEEE80211_FCTL_PROTECTED (IEEE80211_FC1_PROTECTED << 8)
+#define IEEE80211_FCTL_FROMDS (IEEE80211_FC1_DIR_FROMDS << 8)
+#define IEEE80211_FCTL_TODS (IEEE80211_FC1_DIR_TODS << 8)
+#define IEEE80211_FCTL_MOREFRAGS (IEEE80211_FC1_MORE_FRAG << 8)
+#define IEEE80211_FCTL_PM (IEEE80211_FC1_PWR_MGT << 8)
+
+#define IEEE80211_FTYPE_MGMT IEEE80211_FC0_TYPE_MGT
+#define IEEE80211_FTYPE_CTL IEEE80211_FC0_TYPE_CTL
+#define IEEE80211_FTYPE_DATA IEEE80211_FC0_TYPE_DATA
+
+#define IEEE80211_STYPE_ASSOC_REQ IEEE80211_FC0_SUBTYPE_ASSOC_REQ
+#define IEEE80211_STYPE_REASSOC_REQ IEEE80211_FC0_SUBTYPE_REASSOC_REQ
+#define IEEE80211_STYPE_PROBE_REQ IEEE80211_FC0_SUBTYPE_PROBE_REQ
+#define IEEE80211_STYPE_DISASSOC IEEE80211_FC0_SUBTYPE_DISASSOC
+#define IEEE80211_STYPE_AUTH IEEE80211_FC0_SUBTYPE_AUTH
+#define IEEE80211_STYPE_DEAUTH IEEE80211_FC0_SUBTYPE_DEAUTH
+#define IEEE80211_STYPE_CTS IEEE80211_FC0_SUBTYPE_CTS
+#define IEEE80211_STYPE_RTS IEEE80211_FC0_SUBTYPE_RTS
+#define IEEE80211_STYPE_ACTION IEEE80211_FC0_SUBTYPE_ACTION
+#define IEEE80211_STYPE_DATA IEEE80211_FC0_SUBTYPE_DATA
+#define IEEE80211_STYPE_QOS_DATA IEEE80211_FC0_SUBTYPE_QOS_DATA
+#define IEEE80211_STYPE_QOS_NULLFUNC IEEE80211_FC0_SUBTYPE_QOS_NULL
+#define IEEE80211_STYPE_QOS_CFACK 0xd0 /* XXX-BZ reserved? */
+
+#define	IEEE80211_NUM_ACS		4	/* net80211::WME_NUM_AC */
+
+#define IEEE80211_MAX_SSID_LEN 32 /* 9.4.2.2 SSID element, net80211: IEEE80211_NWID_LEN */
+
+
+/* Figure 9-27, BAR Control field */
+#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
+#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
+
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE 1 /* TODO FIXME ax? */
+#define IEEE80211_PPE_THRES_NSS_MASK 2 /* TODO FIXME ax? */
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS 3 /* TODO FIXME ax? */
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 8 /* TODO FIXME ax? */
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE 16 /* TODO FIXME ax? */
+
+/* 802.11-2012, Table 8-130-HT Operation element fields and subfields, HT Protection */
+#define IEEE80211_HT_OP_MODE_PROTECTION IEEE80211_HTINFO_OPMODE /* Mask. */
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONE IEEE80211_HTINFO_OPMODE_PURE /* No protection */
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER IEEE80211_HTINFO_OPMODE_PROTOPT /* Nonmember protection */
+#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ IEEE80211_HTINFO_OPMODE_HT20PR /* 20 MHz protection */
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED IEEE80211_HTINFO_OPMODE_MIXED /* Non-HT mixed */
+
+
+/* 9.6.13.1, Table 9-342 TDLS Action field values. */
+enum ieee80211_tdls_action_code {
+ WLAN_TDLS_SETUP_REQUEST = 0,
+ WLAN_TDLS_SETUP_RESPONSE = 1,
+ WLAN_TDLS_SETUP_CONFIRM = 2,
+ WLAN_TDLS_TEARDOWN = 3,
+ WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4,
+ WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5,
+ WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6,
+ WLAN_TDLS_PEER_PSM_REQUEST = 7,
+ WLAN_TDLS_PEER_PSM_RESPONSE = 8,
+ WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9,
+ WLAN_TDLS_DISCOVERY_REQUEST = 10,
+ /* 11-255 reserved */
+};
+
+/* 802.11-2020 9.4.2.26, Table 9-153. Extended Capabilities field. */
+/* This is split up into octets CAPA1 = octet 1, ... */
+#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2 % 8)
+#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(22 % 8)
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(23 % 8)
+#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(62 % 8)
+#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(63 % 8)
+#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(64 % 8)
+#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(77 % 8)
+#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(78 % 8)
+#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(79 % 8)
+
+#define WLAN_EXT_CAPA11_EMA_SUPPORT 0x00 /* XXX TODO FIXME */
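The WLAN_EXT_CAPA* bits above are numbered across the whole Extended Capabilities field, so BIT(n % 8) gives the bit's position within the octet that carries it. A hedged sketch testing one of them against a received element body (the example_ name and parameters are hypothetical):

/*
 * Sketch: test the Operating Mode Notification bit (bit 62) in an
 * Extended Capabilities element body.  Octet 8 (ext_capa[7]) carries
 * bits 56..63, which is what the WLAN_EXT_CAPA8_* masks index into.
 */
static inline bool
example_has_opmode_notif(const uint8_t *ext_capa, size_t len)
{

	if (len < 8)
		return (false);
	return ((ext_capa[7] & WLAN_EXT_CAPA8_OPMODE_NOTIF) != 0);
}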
+
+
+/* iwlwifi/mvm/utils:: for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++) */
+/* It would be much easier if these constants were defined to the same values as net80211's. */
+enum ieee80211_ac_numbers {
+ IEEE80211_AC_VO = 0, /* net80211::WME_AC_VO */
+ IEEE80211_AC_VI = 1, /* net80211::WME_AC_VI */
+ IEEE80211_AC_BE = 2, /* net80211::WME_AC_BE */
+ IEEE80211_AC_BK = 3, /* net80211::WME_AC_BK */
+};
+
+#define IEEE80211_MAX_QUEUES 16 /* Assume IEEE80211_NUM_TIDS for the moment. */
+
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO 1
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI 2
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK 4
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE 8
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0xf
+
+
+/* Define the LinuxKPI names directly to the net80211 ones. */
+#define IEEE80211_HT_CAP_LDPC_CODING IEEE80211_HTCAP_LDPC
+#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 IEEE80211_HTCAP_CHWIDTH40
+#define IEEE80211_HT_CAP_SM_PS IEEE80211_HTCAP_SMPS
+#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
+#define IEEE80211_HT_CAP_GRN_FLD IEEE80211_HTCAP_GREENFIELD
+#define IEEE80211_HT_CAP_SGI_20 IEEE80211_HTCAP_SHORTGI20
+#define IEEE80211_HT_CAP_SGI_40 IEEE80211_HTCAP_SHORTGI40
+#define IEEE80211_HT_CAP_TX_STBC IEEE80211_HTCAP_TXSTBC
+#define IEEE80211_HT_CAP_RX_STBC IEEE80211_HTCAP_RXSTBC
+#define IEEE80211_HT_CAP_RX_STBC_SHIFT IEEE80211_HTCAP_RXSTBC_S
+#define IEEE80211_HT_CAP_MAX_AMSDU IEEE80211_HTCAP_MAXAMSDU
+#define IEEE80211_HT_CAP_DSSSCCK40 IEEE80211_HTCAP_DSSSCCK40
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT IEEE80211_HTCAP_LSIGTXOPPROT
+
+#define IEEE80211_HT_MCS_TX_DEFINED 0x0001
+#define IEEE80211_HT_MCS_TX_RX_DIFF 0x0002
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0c
+#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
+#define IEEE80211_HT_MCS_MASK_LEN 10
+
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0xf
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+
+struct ieee80211_mcs_info {
+ uint8_t rx_mask[IEEE80211_HT_MCS_MASK_LEN];
+ uint16_t rx_highest;
+ uint8_t tx_params;
+ uint8_t __reserved[3];
+} __packed;
+
+/* 802.11-2020, 9.4.2.55.1 HT Capabilities element structure */
+struct ieee80211_ht_cap {
+ uint16_t cap_info;
+ uint8_t ampdu_params_info;
+ struct ieee80211_mcs_info mcs;
+ uint16_t extended_ht_cap_info;
+ uint32_t tx_BF_cap_info;
+ uint8_t antenna_selection_info;
+} __packed;
+
+#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
+#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
+
+enum ieee80211_ht_max_ampdu_len {
+ IEEE80211_HT_MAX_AMPDU_64K
+};
+
+enum ieee80211_ampdu_mlme_action {
+ IEEE80211_AMPDU_RX_START,
+ IEEE80211_AMPDU_RX_STOP,
+ IEEE80211_AMPDU_TX_OPERATIONAL,
+ IEEE80211_AMPDU_TX_START,
+ IEEE80211_AMPDU_TX_STOP_CONT,
+ IEEE80211_AMPDU_TX_STOP_FLUSH,
+ IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
+};
+
+#define IEEE80211_AMPDU_TX_START_IMMEDIATE 1
+#define IEEE80211_AMPDU_TX_START_DELAY_ADDBA 2
+
+enum ieee80211_chanctx_switch_mode {
+ CHANCTX_SWMODE_REASSIGN_VIF,
+ CHANCTX_SWMODE_SWAP_CONTEXTS,
+};
+
+enum ieee80211_chanctx_change_flags {
+ IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(0),
+ IEEE80211_CHANCTX_CHANGE_RADAR = BIT(1),
+ IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(2),
+ IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(3),
+ IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(5),
+ IEEE80211_CHANCTX_CHANGE_MIN_DEF = BIT(6),
+};
+
+enum ieee80211_frame_release_type {
+ IEEE80211_FRAME_RELEASE_PSPOLL = 1,
+ IEEE80211_FRAME_RELEASE_UAPSD = 2,
+};
+
+enum ieee80211_p2p_attr_ids {
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+};
+
+enum ieee80211_reconfig_type {
+ IEEE80211_RECONFIG_TYPE_RESTART,
+ IEEE80211_RECONFIG_TYPE_SUSPEND,
+};
+
+enum ieee80211_roc_type {
+ IEEE80211_ROC_TYPE_MGMT_TX,
+ IEEE80211_ROC_TYPE_NORMAL,
+};
+
+enum ieee80211_smps_mode {
+ IEEE80211_SMPS_OFF,
+ IEEE80211_SMPS_STATIC,
+ IEEE80211_SMPS_DYNAMIC,
+ IEEE80211_SMPS_AUTOMATIC,
+ IEEE80211_SMPS_NUM_MODES,
+};
+
+/* net80211::IEEE80211_S_* different but represents the state machine. */
+/* Note: order here is important! */
+enum ieee80211_sta_state {
+ IEEE80211_STA_NOTEXIST = 0,
+ IEEE80211_STA_NONE = 1,
+ IEEE80211_STA_AUTH = 2,
+ IEEE80211_STA_ASSOC = 3,
+ IEEE80211_STA_AUTHORIZED = 4, /* 802.1x */
+};
+
+enum ieee80211_tx_info_flags {
+ /* XXX TODO .. right shift numbers - not sure where that came from? */
+ IEEE80211_TX_CTL_AMPDU = BIT(0),
+ IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1),
+ IEEE80211_TX_CTL_NO_ACK = BIT(2),
+ IEEE80211_TX_CTL_SEND_AFTER_DTIM = BIT(3),
+ IEEE80211_TX_CTL_TX_OFFCHAN = BIT(4),
+ IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(5),
+ IEEE80211_TX_STATUS_EOSP = BIT(6),
+ IEEE80211_TX_STAT_ACK = BIT(7),
+ IEEE80211_TX_STAT_AMPDU = BIT(8),
+ IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(9),
+ IEEE80211_TX_STAT_TX_FILTERED = BIT(10),
+ IEEE80211_TX_STAT_NOACK_TRANSMITTED = BIT(11),
+ IEEE80211_TX_CTL_FIRST_FRAGMENT = BIT(12),
+ IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(13),
+ IEEE80211_TX_CTL_NO_CCK_RATE = BIT(14),
+ IEEE80211_TX_CTL_INJECTED = BIT(15),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(16),
+ IEEE80211_TX_CTL_USE_MINRATE = BIT(17),
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(18),
+ IEEE80211_TX_CTL_LDPC = BIT(19),
+ IEEE80211_TX_CTL_STBC = BIT(20),
+} __packed;
+
+enum ieee80211_tx_status_flags {
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = BIT(0),
+};
+
+enum ieee80211_tx_control_flags {
+ /* XXX TODO .. right shift numbers */
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+ IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
+ IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
+ IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(3),
+ IEEE80211_TX_CTRL_MLO_LINK = 0xF0000000, /* This is IEEE80211_LINK_UNSPECIFIED on the high bits. */
+};
+
+enum ieee80211_tx_rate_flags {
+ /* XXX TODO .. right shift numbers */
+ IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(0),
+ IEEE80211_TX_RC_80_MHZ_WIDTH = BIT(1),
+ IEEE80211_TX_RC_160_MHZ_WIDTH = BIT(2),
+ IEEE80211_TX_RC_GREEN_FIELD = BIT(3),
+ IEEE80211_TX_RC_MCS = BIT(4),
+ IEEE80211_TX_RC_SHORT_GI = BIT(5),
+ IEEE80211_TX_RC_VHT_MCS = BIT(6),
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(7),
+};
+
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
+
+#define IEEE80211_HT_CTL_LEN 4
+
+struct ieee80211_hdr { /* net80211::ieee80211_frame_addr4 */
+ __le16 frame_control;
+ __le16 duration_id;
+ uint8_t addr1[ETH_ALEN];
+ uint8_t addr2[ETH_ALEN];
+ uint8_t addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ uint8_t addr4[ETH_ALEN];
+};
+
+struct ieee80211_hdr_3addr { /* net80211::ieee80211_frame */
+ __le16 frame_control;
+ __le16 duration_id;
+ uint8_t addr1[ETH_ALEN];
+ uint8_t addr2[ETH_ALEN];
+ uint8_t addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+};
+
+struct ieee80211_qos_hdr {		/* net80211::ieee80211_qosframe */
+ __le16 frame_control;
+ __le16 duration_id;
+ uint8_t addr1[ETH_ALEN];
+ uint8_t addr2[ETH_ALEN];
+ uint8_t addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ __le16 qos_ctrl;
+};
+
+struct ieee80211_vendor_ie {
+};
+
+/* 802.11-2020, Table 9-359-Block Ack Action field values */
+enum ieee80211_back {
+ WLAN_ACTION_ADDBA_REQ = 0,
+};
+
+enum ieee80211_sa_query {
+ WLAN_ACTION_SA_QUERY_RESPONSE = 1,
+};
+
+/* 802.11-2020, Table 9-51-Category values */
+enum ieee80211_category {
+ WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_SA_QUERY = 8, /* net80211::IEEE80211_ACTION_CAT_SA_QUERY */
+};
+
+/* 802.11-2020, 9.3.3.2 Format of Management frames */
+struct ieee80211_mgmt {
+ __le16 frame_control;
+ __le16 duration_id;
+ uint8_t da[ETH_ALEN];
+ uint8_t sa[ETH_ALEN];
+ uint8_t bssid[ETH_ALEN];
+ __le16 seq_ctrl;
+ union {
+ /* 9.3.3.3 Beacon frame format */
+ struct {
+ uint64_t timestamp;
+ uint16_t beacon_int;
+ uint16_t capab_info;
+ uint8_t variable[0];
+ } beacon;
+ /* 9.3.3.5 Association Request frame format */
+ struct {
+ uint16_t capab_info;
+ uint16_t listen_interval;
+ uint8_t variable[0];
+ } assoc_req;
+ /* 9.3.3.10 Probe Request frame format */
+ struct {
+ uint8_t variable[0];
+ } probe_req;
+ /* 9.3.3.11 Probe Response frame format */
+ struct {
+ uint64_t timestamp;
+ uint16_t beacon_int;
+ uint16_t capab_info;
+ uint8_t variable[0];
+ } probe_resp;
+ /* 9.3.3.14 Action frame format */
+ struct {
+ /* 9.4.1.11 Action field */
+ uint8_t category;
+ /* 9.6.8 Public Action details */
+ union {
+ /* 9.6.2.5 TPC Report frame format */
+ struct {
+ uint8_t spec_mgmt;
+ uint8_t dialog_token;
+ /* uint32_t tpc_rep_elem:: */
+ uint8_t tpc_elem_id;
+ uint8_t tpc_elem_length;
+ uint8_t tpc_elem_tx_power;
+ uint8_t tpc_elem_link_margin;
+ } tpc_report;
+ /* 9.6.8.33 Fine Timing Measurement frame format */
+ struct {
+ uint8_t dialog_token;
+ uint8_t follow_up;
+ uint8_t tod[6];
+ uint8_t toa[6];
+ uint16_t tod_error;
+ uint16_t toa_error;
+ uint8_t variable[0];
+ } ftm;
+ /* 802.11-2016, 9.6.5.2 ADDBA Request frame format */
+ struct {
+ uint8_t action_code;
+ uint8_t dialog_token;
+ uint16_t capab;
+ uint16_t timeout;
+ uint16_t start_seq_num;
+ /* Optional follows... */
+ uint8_t variable[0];
+ } addba_req;
+ /* XXX */
+ struct {
+ uint8_t dialog_token;
+ } wnm_timing_msr;
+ } u;
+ } action;
+ DECLARE_FLEX_ARRAY(uint8_t, body);
+ } u;
+};
+
+struct ieee80211_cts { /* net80211::ieee80211_frame_cts */
+ __le16 frame_control;
+ __le16 duration;
+ uint8_t ra[ETH_ALEN];
+} __packed;
+
+struct ieee80211_rts { /* net80211::ieee80211_frame_rts */
+ __le16 frame_control;
+ __le16 duration;
+ uint8_t ra[ETH_ALEN];
+ uint8_t ta[ETH_ALEN];
+} __packed;
+
+#define MHZ_TO_KHZ(_f) ((_f) * 1000)
+#define DBI_TO_MBI(_g) ((_g) * 100)
+#define MBI_TO_DBI(_x) ((_x) / 100)
+#define DBM_TO_MBM(_g) ((_g) * 100)
+#define MBM_TO_DBM(_x) ((_x) / 100)
+
+#define IEEE80211_SEQ_TO_SN(_seqn) (((_seqn) & IEEE80211_SEQ_SEQ_MASK) >> \
+ IEEE80211_SEQ_SEQ_SHIFT)
+#define IEEE80211_SN_TO_SEQ(_sn) (((_sn) << IEEE80211_SEQ_SEQ_SHIFT) & \
+ IEEE80211_SEQ_SEQ_MASK)
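A hedged sketch of the two conversion macros above, pulling the 12-bit sequence number out of a received header; le16toh() is assumed to be available, as it is for the htole16() users further down in this file:

/*
 * Sketch: return the sequence number of a frame.
 */
static inline uint16_t
example_frame_sn(const struct ieee80211_hdr *hdr)
{

	return (IEEE80211_SEQ_TO_SN(le16toh(hdr->seq_ctrl)));
}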
+
+/* Time unit (TU) conversions; see net80211: IEEE80211_DUR_TU. */
+#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies(_tu) * 1024)
+#define TU_TO_EXP_TIME(_tu) (jiffies + TU_TO_JIFFIES(_tu))
+
+/* 9.4.2.21.1, Table 9-82. */
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11
+
+/* 9.4.2.1, Table 9-77. Element IDs. */
+enum ieee80211_eid {
+ WLAN_EID_SSID = 0,
+ WLAN_EID_SUPP_RATES = 1,
+ WLAN_EID_DS_PARAMS = 3,
+ WLAN_EID_TIM = 5,
+ WLAN_EID_COUNTRY = 7, /* IEEE80211_ELEMID_COUNTRY */
+ WLAN_EID_REQUEST = 10,
+ WLAN_EID_QBSS_LOAD = 11, /* IEEE80211_ELEMID_BSSLOAD */
+ WLAN_EID_CHANNEL_SWITCH = 37,
+ WLAN_EID_MEASURE_REPORT = 39,
+ WLAN_EID_HT_CAPABILITY = 45, /* IEEE80211_ELEMID_HTCAP */
+ WLAN_EID_RSN = 48, /* IEEE80211_ELEMID_RSN */
+ WLAN_EID_EXT_SUPP_RATES = 50,
+ WLAN_EID_EXT_NON_INHERITANCE = 56,
+ WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+ WLAN_EID_MULTIPLE_BSSID = 71, /* IEEE80211_ELEMID_MULTIBSSID */
+ WLAN_EID_MULTI_BSSID_IDX = 85,
+ WLAN_EID_EXT_CAPABILITY = 127,
+ WLAN_EID_VHT_CAPABILITY = 191, /* IEEE80211_ELEMID_VHT_CAP */
+ WLAN_EID_S1G_TWT = 216,
+ WLAN_EID_VENDOR_SPECIFIC = 221, /* IEEE80211_ELEMID_VENDOR */
+};
+
+enum ieee80211_eid_ext {
+ WLAN_EID_EXT_HE_CAPABILITY = 35,
+};
+
+#define for_each_element(_elem, _data, _len) \
+ for (_elem = (const struct element *)(_data); \
+ (((const uint8_t *)(_data) + (_len) - (const uint8_t *)_elem) >= sizeof(*_elem)) && \
+ (((const uint8_t *)(_data) + (_len) - (const uint8_t *)_elem) >= (sizeof(*_elem) + _elem->datalen)); \
+ _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(_elem, _eid, _data, _len) \
+ for_each_element(_elem, _data, _len) \
+ if (_elem->id == (_eid))
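for_each_element() walks a buffer of information elements; struct element (id, datalen, data[]) is declared elsewhere in the LinuxKPI headers. A hedged sketch that returns the first SSID element in an IE buffer (the example_ name is hypothetical):

/*
 * Sketch: scan an IE buffer for the SSID element.  Assumes struct element
 * { uint8_t id; uint8_t datalen; uint8_t data[]; } as used by the macro.
 */
static inline const struct element *
example_find_ssid(const uint8_t *ies, size_t ies_len)
{
	const struct element *elem;

	for_each_element_id(elem, WLAN_EID_SSID, ies, ies_len)
		return (elem);		/* first match wins */
	return (NULL);
}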
+
+/* 9.4.1.7, Table 9-45. Reason codes. */
+enum ieee80211_reason_code {
+ /* reserved = 0, */
+ WLAN_REASON_UNSPECIFIED = 1,
+ WLAN_REASON_DEAUTH_LEAVING = 3, /* LEAVING_NETWORK_DEAUTH */
+ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26,
+};
+
+/* 9.4.1.9, Table 9-46. Status codes. */
+enum ieee80211_status_code {
+ WLAN_STATUS_SUCCESS = 0,
+ WLAN_STATUS_AUTH_TIMEOUT = 16, /* REJECTED_SEQUENCE_TIMEOUT */
+};
+
+/* 9.3.1.22 Trigger frame format; 80211ax-2021 */
+struct ieee80211_trigger {
+ __le16 frame_control;
+ __le16 duration_id;
+ uint8_t ra[ETH_ALEN];
+ uint8_t ta[ETH_ALEN];
+ __le64 common_info; /* 8+ really */
+ uint8_t variable[];
+};
+
+/* Table 9-29c-Trigger Type subfield encoding */
+enum {
+ IEEE80211_TRIGGER_TYPE_BASIC = 0x0,
+ IEEE80211_TRIGGER_TYPE_MU_BAR = 0x2,
+#if 0
+ /* Not seen yet. */
+ BFRP = 0x1,
+ MU-RTS = 0x3,
+ BSRP = 0x4,
+ GCR MU-BAR = 0x5,
+ BQRP = 0x6,
+ NFRP = 0x7,
+ /* 0x8..0xf reserved */
+#endif
+ IEEE80211_TRIGGER_TYPE_MASK = 0xf
+};
+
+#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3
+
+/* 802.11-2020, Figure 9-687-Control field format; 802.11ax-2021 */
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+/* 802.11-2020, Figure 9-688-Request Type field format; 802.11ax-2021 */
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD (BIT(1) | BIT(2) | BIT(3))
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID (BIT(7) | BIT(8) | BIT(9))
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP (BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+struct ieee80211_twt_params {
+ int mantissa, min_twt_dur, twt;
+ uint16_t req_type;
+};
+
+struct ieee80211_twt_setup {
+ int control;
+ struct ieee80211_twt_params *params;
+};
+
+/* 802.11-2020, Table 9-297-TWT Setup Command field values */
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST = 0,
+ TWT_SETUP_CMD_SUGGEST = 1,
+ /* DEMAND = 2, */
+ /* GROUPING = 3, */
+ TWT_SETUP_CMD_ACCEPT = 4,
+ /* ALTERNATE = 5 */
+ TWT_SETUP_CMD_DICTATE = 6,
+ TWT_SETUP_CMD_REJECT = 7,
+};
+
+struct ieee80211_bssid_index {
+ int bssid_index;
+};
+
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+};
+
+/*
+ * 802.11ax-2021, Table 9-277-Meaning of Maximum Transmit Power Count subfield
+ * if Maximum Transmit Power Interpretation subfield is 1 or 3
+ */
+#define IEEE80211_MAX_NUM_PWR_LEVEL 8
+
+/*
+ * 802.11ax-2021, Table 9-275a-Maximum Transmit Power Interpretation subfield
+ * encoding (4) * Table E-12-Regulatory Info subfield encoding in the
+ * United States (2)
+ */
+#define IEEE80211_TPE_MAX_IE_NUM 8
+
+/* 802.11ax-2021, 9.4.2.161 Transmit Power Envelope element */
+struct ieee80211_tx_pwr_env {
+ uint8_t tx_power_info;
+ uint8_t tx_power[IEEE80211_MAX_NUM_PWR_LEVEL];
+};
+
+/* 802.11ax-2021, Figure 9-617-Transmit Power Information field format */
+/* These are field masks (3bit/3bit/2bit). */
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x07
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xc0
+
+/*
+ * 802.11ax-2021, Table 9-275a-Maximum Transmit Power Interpretation subfield
+ * encoding
+ */
+enum ieee80211_tx_pwr_interpretation_subfield_enc {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+enum ieee80211_tx_pwr_category_6ghz {
+ IEEE80211_TPE_CAT_6GHZ_DEFAULT,
+};
+
+/* 802.11-2020, 9.4.2.27 BSS Load element */
+struct ieee80211_bss_load_elem {
+ uint16_t sta_count;
+ uint8_t channel_util;
+ uint16_t avail_adm_capa;
+};
+
+struct ieee80211_p2p_noa_desc {
+ uint32_t count; /* uint8_t ? */
+ uint32_t duration;
+ uint32_t interval;
+ uint32_t start_time;
+};
+
+struct ieee80211_p2p_noa_attr {
+ uint8_t index;
+ uint8_t oppps_ctwindow;
+ struct ieee80211_p2p_noa_desc desc[4];
+};
+
+
+/* net80211: IEEE80211_IS_CTL() */
+static __inline bool
+ieee80211_is_ctl(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_TYPE_CTL);
+
+ return (fc == v);
+}
+
+/* net80211: IEEE80211_IS_DATA() */
+static __inline bool
+ieee80211_is_data(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_TYPE_DATA);
+
+ return (fc == v);
+}
+
+/* net80211: IEEE80211_IS_QOSDATA() */
+static __inline bool
+ieee80211_is_data_qos(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_QOS_DATA | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_QOS_DATA | IEEE80211_FC0_TYPE_DATA);
+
+ return (fc == v);
+}
+
+/* net80211: IEEE80211_IS_MGMT() */
+static __inline bool
+ieee80211_is_mgmt(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
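The predicates above operate on the little-endian Frame Control field alone. A hedged sketch classifying a received header with them (the example_ name is hypothetical); note that QoS data is tested before plain data, since it is a subtype of it:

/*
 * Sketch: dispatch on the frame type using the helpers above.  fc is the
 * little-endian Frame Control field taken from the 802.11 header.
 */
static inline const char *
example_frame_kind(const struct ieee80211_hdr *hdr)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_mgmt(fc))
		return ("mgmt");
	if (ieee80211_is_ctl(fc))
		return ("ctl");
	if (ieee80211_is_data_qos(fc))
		return ("qos-data");
	if (ieee80211_is_data(fc))
		return ("data");
	return ("unknown");
}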
+
+
+/* Derived from net80211::ieee80211_anyhdrsize. */
+static __inline unsigned int
+ieee80211_hdrlen(__le16 fc)
+{
+ unsigned int size;
+
+ if (ieee80211_is_ctl(fc)) {
+ switch (fc & htole16(IEEE80211_FC0_SUBTYPE_MASK)) {
+ case htole16(IEEE80211_FC0_SUBTYPE_CTS):
+ case htole16(IEEE80211_FC0_SUBTYPE_ACK):
+ return sizeof(struct ieee80211_frame_ack);
+ case htole16(IEEE80211_FC0_SUBTYPE_BAR):
+ return sizeof(struct ieee80211_frame_bar);
+ }
+ return (sizeof(struct ieee80211_frame_min));
+ }
+
+ size = sizeof(struct ieee80211_frame);
+ if (ieee80211_is_data(fc)) {
+ if ((fc & htole16(IEEE80211_FC1_DIR_MASK << 8)) ==
+ htole16(IEEE80211_FC1_DIR_DSTODS << 8))
+ size += IEEE80211_ADDR_LEN;
+ if ((fc & htole16(IEEE80211_FC0_SUBTYPE_QOS_DATA |
+ IEEE80211_FC0_TYPE_MASK)) ==
+ htole16(IEEE80211_FC0_SUBTYPE_QOS_DATA |
+ IEEE80211_FC0_TYPE_DATA))
+ size += sizeof(uint16_t);
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+#ifdef __notyet__
+ printf("XXX-BZ %s: TODO? fc %#04x size %u\n",
+ __func__, fc, size);
+#endif
+ ;
+ }
+
+ return (size);
+}
+
+static inline bool
+ieee80211_is_trigger(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_TRIGGER | IEEE80211_FC0_TYPE_CTL);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_action(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_ACTION | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_probe_resp(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_PROBE_RESP | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_auth(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_AUTH | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_assoc_req(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_ASSOC_REQ | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_assoc_resp(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_ASSOC_RESP | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_reassoc_req(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_REASSOC_REQ | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_reassoc_resp(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_REASSOC_RESP | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_disassoc(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_DISASSOC | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_data_present(__le16 fc)
+{
+ __le16 v;
+
+ /* If it is a data frame and NODATA is not present. */
+ fc &= htole16(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_NODATA);
+ v = htole16(IEEE80211_FC0_TYPE_DATA);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_deauth(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_DEAUTH | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_beacon(__le16 fc)
+{
+ __le16 v;
+
+ /*
+	 * As far as I understand, this comes in LE; unlike FreeBSD, where
+	 * we get the entire frame header and u8[], here we only get the
+	 * 9.2.4.1 Frame Control field.  Mask and compare.
+ */
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_BEACON | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+
+static __inline bool
+ieee80211_is_probe_req(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_PROBE_REQ | IEEE80211_FC0_TYPE_MGT);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_has_protected(__le16 fc)
+{
+
+ return (fc & htole16(IEEE80211_FC1_PROTECTED << 8));
+}
+
+static __inline bool
+ieee80211_is_back_req(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_BAR | IEEE80211_FC0_TYPE_CTL);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+ __le16 fc;
+
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ fc = mgmt->frame_control;
+
+	/* 11.2.2 Bufferable MMPDUs, 802.11-2020. */
+ /* XXX we do not care about IBSS yet. */
+
+ if (!ieee80211_is_mgmt(fc))
+ return (false);
+ if (ieee80211_is_action(fc)) /* XXX FTM? */
+ return (true); /* XXX false? */
+ if (ieee80211_is_disassoc(fc))
+ return (true);
+ if (ieee80211_is_deauth(fc))
+ return (true);
+
+ TODO();
+
+ return (false);
+}
+
+static __inline bool
+ieee80211_is_nullfunc(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_NODATA | IEEE80211_FC0_TYPE_DATA);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_qos_nullfunc(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_QOS_NULL | IEEE80211_FC0_TYPE_DATA);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_is_any_nullfunc(__le16 fc)
+{
+
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
+}
+
+static inline bool
+ieee80211_is_pspoll(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK);
+ v = htole16(IEEE80211_FC0_SUBTYPE_PS_POLL | IEEE80211_FC0_TYPE_CTL);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_has_a4(__le16 fc)
+{
+ __le16 v;
+
+ fc &= htole16((IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_DIR_FROMDS) << 8);
+ v = htole16((IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_DIR_FROMDS) << 8);
+
+ return (fc == v);
+}
+
+static __inline bool
+ieee80211_has_order(__le16 fc)
+{
+
+ return (fc & htole16(IEEE80211_FC1_ORDER << 8));
+}
+
+static __inline bool
+ieee80211_has_retry(__le16 fc)
+{
+
+ return (fc & htole16(IEEE80211_FC1_RETRY << 8));
+}
+
+
+static __inline bool
+ieee80211_has_fromds(__le16 fc)
+{
+
+ return (fc & htole16(IEEE80211_FC1_DIR_FROMDS << 8));
+}
+
+static __inline bool
+ieee80211_has_tods(__le16 fc)
+{
+
+ return (fc & htole16(IEEE80211_FC1_DIR_TODS << 8));
+}
+
+static __inline uint8_t *
+ieee80211_get_SA(struct ieee80211_hdr *hdr)
+{
+
+ if (ieee80211_has_a4(hdr->frame_control))
+ return (hdr->addr4);
+ if (ieee80211_has_fromds(hdr->frame_control))
+ return (hdr->addr3);
+ return (hdr->addr2);
+}
+
+static __inline uint8_t *
+ieee80211_get_DA(struct ieee80211_hdr *hdr)
+{
+
+ if (ieee80211_has_tods(hdr->frame_control))
+ return (hdr->addr3);
+ return (hdr->addr1);
+}
+
+static __inline bool
+ieee80211_is_frag(struct ieee80211_hdr *hdr)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+ieee80211_is_first_frag(__le16 fc)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+ieee80211_is_ftm(struct sk_buff *skb)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+ieee80211_is_timing_measurement(struct sk_buff *skb)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+ieee80211_has_pm(__le16 fc)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+ieee80211_has_morefrags(__le16 fc)
+{
+
+ fc &= htole16(IEEE80211_FC1_MORE_FRAG << 8);
+ return (fc != 0);
+}
+
+static __inline u8 *
+ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_has_a4(hdr->frame_control))
+ return (u8 *)hdr + 30;
+ else
+ return (u8 *)hdr + 24;
+}
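A hedged sketch combining ieee80211_get_qos_ctl() with the QoS Control masks defined earlier in this file to extract the TID of a QoS data frame (the example_ name is hypothetical):

/*
 * Sketch: return the TID of a QoS data frame, or 0 for non-QoS frames.
 * The QoS Control field follows the address fields, little-endian.
 */
static inline uint8_t
example_frame_tid(struct ieee80211_hdr *hdr)
{
	const u8 *qos;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return (0);
	qos = ieee80211_get_qos_ctl(hdr);
	return (qos[0] & IEEE80211_QOS_CTL_TID_MASK);
}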
+
+#endif /* _LINUXKPI_LINUX_IEEE80211_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/if_arp.h b/sys/compat/linuxkpi/common/include/linux/if_arp.h
new file mode 100644
index 000000000000..6201c3a1c284
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/if_arp.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IF_ARP_H_
+#define _LINUXKPI_LINUX_IF_ARP_H_
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if_arp.h>
+#endif /* _LINUXKPI_LINUX_IF_ARP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/if_ether.h b/sys/compat/linuxkpi/common/include/linux/if_ether.h
new file mode 100644
index 000000000000..6676e8fc142f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/if_ether.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2021-2022 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IF_ETHER_H_
+#define _LINUXKPI_LINUX_IF_ETHER_H_
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <net/ethernet.h>
+
+#define ETH_HLEN ETHER_HDR_LEN /* Total octets in header. */
+#ifndef ETH_ALEN
+#define ETH_ALEN ETHER_ADDR_LEN
+#endif
+#define ETH_FRAME_LEN (ETHER_MAX_LEN - ETHER_CRC_LEN)
+#define ETH_FCS_LEN 4 /* Octets in the FCS */
+#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header)
+ * that VLAN requires. */
+/*
+ * defined Ethernet Protocol ID's.
+ */
+#define ETH_P_ARP ETHERTYPE_ARP
+#define ETH_P_IP ETHERTYPE_IP
+#define ETH_P_IPV6 ETHERTYPE_IPV6
+#define ETH_P_MPLS_UC ETHERTYPE_MPLS
+#define ETH_P_MPLS_MC ETHERTYPE_MPLS_MCAST
+#define ETH_P_8021Q ETHERTYPE_VLAN
+#define ETH_P_8021AD ETHERTYPE_QINQ
+#define ETH_P_PAE ETHERTYPE_PAE
+#define ETH_P_802_2 ETHERTYPE_8023
+#define ETH_P_IPX ETHERTYPE_IPX
+#define ETH_P_AARP ETHERTYPE_AARP
+#define ETH_P_802_3_MIN 0x05DD /* See comment in sys/net/ethernet.h */
+#define ETH_P_LINK_CTL 0x886C /* ITU-T G.989.2 */
+#define ETH_P_TDLS 0x890D /* 802.11z-2010, see wpa. */
+
+struct ethhdr {
+ uint8_t h_dest[ETH_ALEN];
+ uint8_t h_source[ETH_ALEN];
+ uint16_t h_proto;
+} __packed;
+
+static inline struct ethhdr *
+eth_hdr(const struct sk_buff *skb)
+{
+ struct ethhdr *hdr;
+
+ hdr = (struct ethhdr *)skb_mac_header(skb);
+ return (hdr);
+}
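A hedged sketch using eth_hdr() above to recognize an 802.1X/EAPOL frame; it assumes the skb already has its MAC header marked and that htons() is available as elsewhere in the kernel:

/*
 * Sketch: true if the Ethernet frame carried by skb is an 802.1X/EAPOL
 * frame.  h_proto is stored in network byte order.
 */
static inline bool
example_is_eapol(const struct sk_buff *skb)
{
	const struct ethhdr *eh = eth_hdr(skb);

	return (eh != NULL && eh->h_proto == htons(ETH_P_PAE));
}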
+
+#endif /* _LINUXKPI_LINUX_IF_ETHER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/if_vlan.h b/sys/compat/linuxkpi/common/include/linux/if_vlan.h
new file mode 100644
index 000000000000..3d1c61db1882
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/if_vlan.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IF_VLAN_H_
+#define _LINUXKPI_LINUX_IF_VLAN_H_
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_var.h>
+#include <net/if_vlan_var.h>
+#include <net/if_types.h>
+
+#define VLAN_N_VID 4096
+
+static inline int
+is_vlan_dev(struct ifnet *ifp)
+{
+ return (if_gettype(ifp) == IFT_L2VLAN);
+}
+
+static inline uint16_t
+vlan_dev_vlan_id(struct ifnet *ifp)
+{
+ uint16_t vtag;
+ if (VLAN_TAG(ifp, &vtag) == 0)
+ return (vtag);
+ return (0);
+}
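A hedged sketch tying the two helpers above together (the example_ name is hypothetical):

/*
 * Sketch: return the VLAN id of ifp, or 0 if it is not a VLAN interface.
 */
static inline uint16_t
example_ifp_vid(struct ifnet *ifp)
{

	if (!is_vlan_dev(ifp))
		return (0);
	return (vlan_dev_vlan_id(ifp));
}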
+
+#endif /* _LINUXKPI_LINUX_IF_VLAN_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/in.h b/sys/compat/linuxkpi/common/include/linux/in.h
new file mode 100644
index 000000000000..5cc92416c7da
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/in.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IN_H_
+#define _LINUXKPI_LINUX_IN_H_
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <netinet/in.h>
+#include <asm/byteorder.h>
+
+#define ipv4_is_zeronet(be) IN_ZERONET(ntohl(be))
+#define ipv4_is_loopback(be) IN_LOOPBACK(ntohl(be))
+#define ipv4_is_multicast(be) IN_MULTICAST(ntohl(be))
+#define ipv4_is_lbcast(be) ((be) == INADDR_BROADCAST)
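A hedged sketch using the classification macros above; the argument is an IPv4 address in network byte order and the example_ name is hypothetical:

/*
 * Sketch: true if the network-byte-order IPv4 address addr is a plausible
 * unicast destination, i.e. neither unspecified, loopback, multicast nor
 * the limited broadcast address.
 */
static inline bool
example_ipv4_is_unicast_dst(uint32_t addr)
{

	return (!ipv4_is_zeronet(addr) && !ipv4_is_loopback(addr) &&
	    !ipv4_is_multicast(addr) && !ipv4_is_lbcast(addr));
}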
+
+#endif /* _LINUXKPI_LINUX_IN_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/in6.h b/sys/compat/linuxkpi/common/include/linux/in6.h
new file mode 100644
index 000000000000..79be45b6819a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/in6.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IN6_H_
+#define _LINUXKPI_LINUX_IN6_H_
+
+#include "opt_inet6.h"
+
+#endif /* _LINUXKPI_LINUX_IN6_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/inetdevice.h b/sys/compat/linuxkpi/common/include/linux/inetdevice.h
new file mode 100644
index 000000000000..ea256cd084a8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/inetdevice.h
@@ -0,0 +1,6 @@
+#ifndef _LINUXKPI_LINUX_INETDEVICE_H_
+#define _LINUXKPI_LINUX_INETDEVICE_H_
+
+#include <linux/netdevice.h>
+
+#endif /* _LINUXKPI_LINUX_INETDEVICE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/interrupt.h b/sys/compat/linuxkpi/common/include/linux/interrupt.h
new file mode 100644
index 000000000000..dfd9816da8be
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/interrupt.h
@@ -0,0 +1,195 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_INTERRUPT_H_
+#define _LINUXKPI_LINUX_INTERRUPT_H_
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/irqreturn.h>
+#include <linux/hardirq.h>
+
+#include <sys/param.h>
+#include <sys/interrupt.h>
+
+typedef irqreturn_t (*irq_handler_t)(int, void *);
+
+#define IRQF_SHARED 0x0004 /* Historically */
+#define IRQF_NOBALANCING 0
+
+#define IRQ_DISABLE_UNLAZY 0
+
+#define IRQ_NOTCONNECTED (1U << 31)
+
+int lkpi_request_irq(struct device *, unsigned int, irq_handler_t,
+ irq_handler_t, unsigned long, const char *, void *);
+int lkpi_enable_irq(unsigned int);
+void lkpi_disable_irq(unsigned int);
+int lkpi_bind_irq_to_cpu(unsigned int, int);
+void lkpi_free_irq(unsigned int, void *);
+void lkpi_devm_free_irq(struct device *, unsigned int, void *);
+
+static inline int
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *arg)
+{
+
+ return (lkpi_request_irq(NULL, irq, handler, NULL, flags, name, arg));
+}
+
+static inline int
+request_threaded_irq(int irq, irq_handler_t handler,
+ irq_handler_t thread_handler, unsigned long flags,
+ const char *name, void *arg)
+{
+
+ return (lkpi_request_irq(NULL, irq, handler, thread_handler,
+ flags, name, arg));
+}
+
+static inline int
+devm_request_irq(struct device *dev, int irq,
+ irq_handler_t handler, unsigned long flags, const char *name, void *arg)
+{
+
+ return (lkpi_request_irq(dev, irq, handler, NULL, flags, name, arg));
+}
+
+static inline int
+devm_request_threaded_irq(struct device *dev, int irq,
+ irq_handler_t handler, irq_handler_t thread_handler,
+ unsigned long flags, const char *name, void *arg)
+{
+
+ return (lkpi_request_irq(dev, irq, handler, thread_handler,
+ flags, name, arg));
+}
+
+static inline int
+enable_irq(unsigned int irq)
+{
+ return (lkpi_enable_irq(irq));
+}
+
+static inline void
+disable_irq(unsigned int irq)
+{
+ lkpi_disable_irq(irq);
+}
+
+static inline void
+disable_irq_nosync(unsigned int irq)
+{
+ lkpi_disable_irq(irq);
+}
+
+static inline int
+bind_irq_to_cpu(unsigned int irq, int cpu_id)
+{
+ return (lkpi_bind_irq_to_cpu(irq, cpu_id));
+}
+
+static inline void
+free_irq(unsigned int irq, void *device)
+{
+ lkpi_free_irq(irq, device);
+}
+
+static inline void
+devm_free_irq(struct device *xdev, unsigned int irq, void *p)
+{
+ lkpi_devm_free_irq(xdev, irq, p);
+}
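A hedged sketch of the request_irq()/free_irq() wrappers above; the handler, the softc pointer, and the IRQ number are hypothetical driver details:

/*
 * Sketch: attach a shared interrupt handler and tear it down again.
 */
static irqreturn_t
example_intr(int irq, void *arg)
{
	/* struct example_softc *sc = arg; */
	return (IRQ_HANDLED);
}

static int
example_setup_intr(unsigned int irq, void *sc)
{
	int error;

	error = request_irq(irq, example_intr, IRQF_SHARED, "example", sc);
	if (error != 0)
		return (error);
	/* ... later, on teardown ... */
	free_irq(irq, sc);
	return (0);
}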
+
+static inline int
+irq_set_affinity_hint(int vector, const cpumask_t *mask)
+{
+ int error;
+
+ if (mask != NULL)
+ error = intr_setaffinity(vector, CPU_WHICH_IRQ, mask);
+ else
+ error = intr_setaffinity(vector, CPU_WHICH_IRQ, cpuset_root);
+
+ return (-error);
+}
+
+static inline struct msi_desc *
+irq_get_msi_desc(unsigned int irq)
+{
+
+ return (lkpi_pci_msi_desc_alloc(irq));
+}
+
+static inline void
+irq_set_status_flags(unsigned int irq __unused, unsigned long flags __unused)
+{
+}
+
+/*
+ * LinuxKPI tasklet support
+ */
+struct tasklet_struct;
+typedef void tasklet_func_t(unsigned long);
+typedef void tasklet_callback_t(struct tasklet_struct *);
+
+struct tasklet_struct {
+ TAILQ_ENTRY(tasklet_struct) entry;
+ tasklet_func_t *func;
+ /* Our "state" implementation is different. Avoid same name as Linux. */
+ volatile u_int tasklet_state;
+ atomic_t count;
+ unsigned long data;
+ tasklet_callback_t *callback;
+ bool use_callback;
+};
+
+#define DECLARE_TASKLET(_name, _func, _data) \
+struct tasklet_struct _name = { .func = (_func), .data = (_data) }
+
+#define tasklet_hi_schedule(t) tasklet_schedule(t)
+
+/* Some other compat code in the tree has this defined as well. */
+#define from_tasklet(_dev, _t, _field) \
+ container_of(_t, typeof(*(_dev)), _field)
+
+void tasklet_setup(struct tasklet_struct *, tasklet_callback_t *);
+extern void tasklet_schedule(struct tasklet_struct *);
+extern void tasklet_kill(struct tasklet_struct *);
+extern void tasklet_init(struct tasklet_struct *, tasklet_func_t *,
+ unsigned long data);
+extern void tasklet_enable(struct tasklet_struct *);
+extern void tasklet_disable(struct tasklet_struct *);
+extern void tasklet_disable_nosync(struct tasklet_struct *);
+extern int tasklet_trylock(struct tasklet_struct *);
+extern void tasklet_unlock(struct tasklet_struct *);
+extern void tasklet_unlock_wait(struct tasklet_struct *ts);
+#define tasklet_unlock_spin_wait(ts) tasklet_unlock_wait(ts)
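A hedged sketch of the callback-style tasklet interface declared above (the example_ names are hypothetical):

/*
 * Sketch: set up a tasklet with the callback interface, to be scheduled
 * later with tasklet_schedule() and killed on teardown with tasklet_kill().
 */
struct example_state {
	struct tasklet_struct tl;
	int pending;
};

static void
example_tasklet_cb(struct tasklet_struct *t)
{
	struct example_state *st = from_tasklet(st, t, tl);

	st->pending = 0;	/* deferred work runs here */
}

static inline void
example_state_init(struct example_state *st)
{

	tasklet_setup(&st->tl, example_tasklet_cb);
}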
+
+#endif /* _LINUXKPI_LINUX_INTERRUPT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/interval_tree.h b/sys/compat/linuxkpi/common/include/linux/interval_tree.h
new file mode 100644
index 000000000000..1eb8a2fb9181
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/interval_tree.h
@@ -0,0 +1,55 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_INTERVAL_TREE_H
+#define _LINUXKPI_LINUX_INTERVAL_TREE_H
+
+#include <linux/rbtree.h>
+
+struct interval_tree_node {
+ struct rb_node rb;
+ unsigned long start;
+ unsigned long last;
+};
+
+#define interval_tree_iter_first(...) \
+ lkpi_interval_tree_iter_first(__VA_ARGS__)
+#define interval_tree_iter_next(...) \
+ lkpi_interval_tree_iter_next(__VA_ARGS__)
+#define interval_tree_insert(...) lkpi_interval_tree_insert(__VA_ARGS__)
+#define interval_tree_remove(...) lkpi_interval_tree_remove(__VA_ARGS__)
+
+struct interval_tree_node *lkpi_interval_tree_iter_first(
+ struct rb_root_cached *, unsigned long, unsigned long);
+struct interval_tree_node *lkpi_interval_tree_iter_next(
+ struct interval_tree_node *, unsigned long, unsigned long);
+void lkpi_interval_tree_insert(struct interval_tree_node *,
+ struct rb_root_cached *);
+void lkpi_interval_tree_remove(struct interval_tree_node *,
+ struct rb_root_cached *);
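A hedged sketch of the interval-tree API above, counting the nodes that overlap an inclusive [start, last] query range (the example_ name is hypothetical):

/*
 * Sketch: count the nodes in 'root' overlapping [start, last].
 */
static inline unsigned int
example_count_overlaps(struct rb_root_cached *root,
    unsigned long start, unsigned long last)
{
	struct interval_tree_node *node;
	unsigned int n = 0;

	for (node = interval_tree_iter_first(root, start, last);
	    node != NULL;
	    node = interval_tree_iter_next(node, start, last))
		n++;
	return (n);
}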
+
+#endif /* _LINUXKPI_LINUX_INTERVAL_TREE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/interval_tree_generic.h b/sys/compat/linuxkpi/common/include/linux/interval_tree_generic.h
new file mode 100644
index 000000000000..3ed6e105cbda
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/interval_tree_generic.h
@@ -0,0 +1,99 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Mark Kettenis <kettenis@OpenBSD.org>
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/rbtree.h>
+
+#define INTERVAL_TREE_DEFINE(type, field, valtype, dummy, START, LAST, \
+ attr, name) \
+ __IT_DEFINE_ITER_FROM(type, field, valtype, START, LAST, name) \
+ __IT_DEFINE_ITER_FIRST(type, valtype, attr, name) \
+ __IT_DEFINE_ITER_NEXT(type, field, valtype, attr, name) \
+ __IT_DEFINE_INSERT(type, field, START, attr, name) \
+ __IT_DEFINE_REMOVE(type, field, attr, name)
+
+#define __IT_DEFINE_ITER_FROM(type, field, valtype, START, LAST, name) \
+static inline type * \
+name##_iter_from(struct rb_node *rb, valtype start, valtype last) \
+{ \
+ type *node; \
+ \
+ while (rb != NULL) { \
+ node = rb_entry(rb, type, field); \
+ if (LAST(node) >= start && START(node) <= last) \
+ return (node); \
+ else if (START(node) > last) \
+ break; \
+ rb = rb_next(rb); \
+ } \
+ return (NULL); \
+}
+
+#define __IT_DEFINE_ITER_FIRST(type, valtype, attr, name) \
+attr type * \
+name##_iter_first(struct rb_root_cached *root, valtype start, valtype last) \
+{ \
+ return (name##_iter_from(rb_first_cached(root), start, last)); \
+}
+
+#define __IT_DEFINE_ITER_NEXT(type, field, valtype, attr, name) \
+attr type * \
+name##_iter_next(type *node, valtype start, valtype last) \
+{ \
+ return (name##_iter_from(rb_next(&node->field), start, last)); \
+}
+
+#define __IT_DEFINE_INSERT(type, field, START, attr, name) \
+attr void \
+name##_insert(type *node, struct rb_root_cached *root) \
+{ \
+ struct rb_node **iter = &root->rb_root.rb_node; \
+ struct rb_node *parent = NULL; \
+ type *iter_node; \
+ bool min_entry = true; \
+ \
+ while (*iter != NULL) { \
+ parent = *iter; \
+ iter_node = rb_entry(parent, type, field); \
+ if (START(node) < START(iter_node)) \
+ iter = &parent->rb_left; \
+ else { \
+ iter = &parent->rb_right; \
+ min_entry = false; \
+ } \
+ } \
+ \
+ rb_link_node(&node->field, parent, iter); \
+ rb_insert_color_cached(&node->field, root, min_entry); \
+}
+
+#define __IT_DEFINE_REMOVE(type, field, attr, name) \
+attr void \
+name##_remove(type *node, struct rb_root_cached *root) \
+{ \
+ rb_erase_cached(&node->field, root); \
+}
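
For illustration only, a hypothetical instantiation of the macro above; struct foo_range, the FOO_* accessors and the foo_it prefix are invented names. The fourth argument (the augmented-subtree field on Linux) is unused by this shim.

#include <linux/interval_tree_generic.h>

struct foo_range {
	struct rb_node	rb;
	unsigned long	start;
	unsigned long	last;
};

#define FOO_START(n)	((n)->start)
#define FOO_LAST(n)	((n)->last)

/*
 * Expands to foo_it_iter_first(), foo_it_iter_next(), foo_it_insert() and
 * foo_it_remove(), all operating on a struct rb_root_cached.
 */
INTERVAL_TREE_DEFINE(struct foo_range, rb, unsigned long, last,
    FOO_START, FOO_LAST, static inline, foo_it)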
diff --git a/sys/compat/linuxkpi/common/include/linux/io-64-nonatomic-lo-hi.h b/sys/compat/linuxkpi/common/include/linux/io-64-nonatomic-lo-hi.h
new file mode 100644
index 000000000000..844b3ef171d5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/io-64-nonatomic-lo-hi.h
@@ -0,0 +1,65 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Felix Palmen
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IO_64_NONATOMIC_LO_HI_H_
+#define _LINUXKPI_LINUX_IO_64_NONATOMIC_LO_HI_H_
+
+#include <linux/io.h>
+
+static inline uint64_t
+lo_hi_readq(const volatile void *addr)
+{
+ const volatile uint32_t *p = addr;
+ uint32_t l, h;
+
+ __io_br();
+ l = le32toh(__raw_readl(p));
+ h = le32toh(__raw_readl(p + 1));
+ __io_ar();
+
+ return (l + ((uint64_t)h << 32));
+}
+
+static inline void
+lo_hi_writeq(uint64_t v, volatile void *addr)
+{
+ volatile uint32_t *p = addr;
+
+ __io_bw();
+ __raw_writel(htole32(v), p);
+ __raw_writel(htole32(v >> 32), p + 1);
+ __io_aw();
+}
+
+#ifndef readq
+#define readq(addr) lo_hi_readq(addr)
+#endif
+
+#ifndef writeq
+#define writeq(v, addr) lo_hi_writeq(v, addr)
+#endif
+
+#endif /* _LINUXKPI_LINUX_IO_64_NONATOMIC_LO_HI_H_ */
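
A sketch of the intended use: where no native readq() exists, the fallback above turns one 64-bit access into two 32-bit accesses, low word first. The register offset and function are hypothetical.

#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>

static uint64_t
read_counter64(void *mmio_base)
{
	/* Not atomic: only safe if the device latches the 64-bit value. */
	return (readq((char *)mmio_base + 0x10));
}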
diff --git a/sys/compat/linuxkpi/common/include/linux/io-mapping.h b/sys/compat/linuxkpi/common/include/linux/io-mapping.h
new file mode 100644
index 000000000000..f5f2fbc5c2cb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/io-mapping.h
@@ -0,0 +1,130 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IO_MAPPING_H_
+#define _LINUXKPI_LINUX_IO_MAPPING_H_
+
+#include <sys/types.h>
+#include <machine/vm.h>
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+struct io_mapping {
+ unsigned long base;
+ unsigned long size;
+ void *mem;
+ vm_memattr_t attr;
+};
+
+struct io_mapping *io_mapping_create_wc(resource_size_t base, unsigned long size);
+
+static inline struct io_mapping *
+io_mapping_init_wc(struct io_mapping *mapping, resource_size_t base,
+ unsigned long size)
+{
+
+ mapping->base = base;
+ mapping->size = size;
+#ifdef VM_MEMATTR_WRITE_COMBINING
+ mapping->mem = ioremap_wc(base, size);
+ mapping->attr = VM_MEMATTR_WRITE_COMBINING;
+#else
+ mapping->mem = ioremap_nocache(base, size);
+ mapping->attr = VM_MEMATTR_UNCACHEABLE;
+#endif
+ return (mapping);
+}
+
+static inline void
+io_mapping_fini(struct io_mapping *mapping)
+{
+
+ iounmap(mapping->mem);
+}
+
+static inline void
+io_mapping_free(struct io_mapping *mapping)
+{
+
+	io_mapping_fini(mapping);

+ kfree(mapping);
+}
+
+static inline void *
+io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
+{
+
+ return ((char *)mapping->mem + offset);
+}
+
+static inline void
+io_mapping_unmap_atomic(void *vaddr)
+{
+}
+
+static inline void *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+
+ return (io_mapping_map_atomic_wc(mapping, offset));
+}
+
+static inline void
+io_mapping_unmap_local(void *vaddr __unused)
+{
+}
+
+static inline void *
+io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset,
+ unsigned long size)
+{
+
+ return ((char *)mapping->mem + offset);
+}
+
+int lkpi_io_mapping_map_user(struct io_mapping *iomap,
+ struct vm_area_struct *vma, unsigned long addr, unsigned long pfn,
+ unsigned long size);
+
+static inline int
+io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size)
+{
+ return (lkpi_io_mapping_map_user(iomap, vma, addr, pfn, size));
+}
+
+static inline void
+io_mapping_unmap(void *vaddr)
+{
+}
+
+#endif /* _LINUXKPI_LINUX_IO_MAPPING_H_ */
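
A hypothetical usage pattern for the shim above; the BAR address, size and register poke are placeholders. Since the init path ioremaps the whole range, the map/unmap helpers merely offset into it.

#include <linux/errno.h>
#include <linux/io-mapping.h>

static int
poke_bar(resource_size_t bar_base, unsigned long bar_size)
{
	struct io_mapping *iomap;
	void *p;

	iomap = io_mapping_create_wc(bar_base, bar_size);
	if (iomap == NULL)
		return (-ENOMEM);

	p = io_mapping_map_wc(iomap, 0, 4);	/* pointer into the aperture */
	writel(0x1, p);
	io_mapping_unmap(p);			/* no-op in this shim */

	io_mapping_free(iomap);
	return (0);
}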
diff --git a/sys/compat/linuxkpi/common/include/linux/io.h b/sys/compat/linuxkpi/common/include/linux/io.h
new file mode 100644
index 000000000000..2d6fef4e7c52
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/io.h
@@ -0,0 +1,566 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IO_H_
+#define _LINUXKPI_LINUX_IO_H_
+
+#include <sys/endian.h>
+#include <sys/types.h>
+
+#include <machine/vm.h>
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <asm-generic/io.h>
+#include <linux/types.h>
+#if !defined(__arm__)
+#include <asm/set_memory.h>
+#endif
+
+/*
+ * XXX This is all x86 specific. It should be bus space access.
+ */
+
+/* rmb and wmb are declared in machine/atomic.h, which should be included first. */
+#ifndef __io_br
+#define __io_br() __compiler_membar()
+#endif
+
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar() rmb()
+#else
+#define __io_ar() __compiler_membar()
+#endif
+#endif
+
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() __compiler_membar()
+#endif
+#endif
+
+#ifndef __io_aw
+#define __io_aw() __compiler_membar()
+#endif
+
+/* Access MMIO registers atomically, without memory barriers or byte swapping. */
+
+static inline uint8_t
+__raw_readb(const volatile void *addr)
+{
+ return (*(const volatile uint8_t *)addr);
+}
+#define __raw_readb(addr) __raw_readb(addr)
+
+static inline void
+__raw_writeb(uint8_t v, volatile void *addr)
+{
+ *(volatile uint8_t *)addr = v;
+}
+#define __raw_writeb(v, addr) __raw_writeb(v, addr)
+
+static inline uint16_t
+__raw_readw(const volatile void *addr)
+{
+ return (*(const volatile uint16_t *)addr);
+}
+#define __raw_readw(addr) __raw_readw(addr)
+
+static inline void
+__raw_writew(uint16_t v, volatile void *addr)
+{
+ *(volatile uint16_t *)addr = v;
+}
+#define __raw_writew(v, addr) __raw_writew(v, addr)
+
+static inline uint32_t
+__raw_readl(const volatile void *addr)
+{
+ return (*(const volatile uint32_t *)addr);
+}
+#define __raw_readl(addr) __raw_readl(addr)
+
+static inline void
+__raw_writel(uint32_t v, volatile void *addr)
+{
+ *(volatile uint32_t *)addr = v;
+}
+#define __raw_writel(v, addr) __raw_writel(v, addr)
+
+#ifdef __LP64__
+static inline uint64_t
+__raw_readq(const volatile void *addr)
+{
+ return (*(const volatile uint64_t *)addr);
+}
+#define __raw_readq(addr) __raw_readq(addr)
+
+static inline void
+__raw_writeq(uint64_t v, volatile void *addr)
+{
+ *(volatile uint64_t *)addr = v;
+}
+#define __raw_writeq(v, addr) __raw_writeq(v, addr)
+#endif
+
+#define mmiowb() barrier()
+
+/* Access little-endian MMIO registers atomically with memory barriers. */
+
+#undef readb
+static inline uint8_t
+readb(const volatile void *addr)
+{
+ uint8_t v;
+
+ __io_br();
+ v = *(const volatile uint8_t *)addr;
+ __io_ar();
+ return (v);
+}
+#define readb(addr) readb(addr)
+
+#undef writeb
+static inline void
+writeb(uint8_t v, volatile void *addr)
+{
+ __io_bw();
+ *(volatile uint8_t *)addr = v;
+ __io_aw();
+}
+#define writeb(v, addr) writeb(v, addr)
+
+#undef readw
+static inline uint16_t
+readw(const volatile void *addr)
+{
+ uint16_t v;
+
+ __io_br();
+ v = le16toh(__raw_readw(addr));
+ __io_ar();
+ return (v);
+}
+#define readw(addr) readw(addr)
+
+#undef writew
+static inline void
+writew(uint16_t v, volatile void *addr)
+{
+ __io_bw();
+ __raw_writew(htole16(v), addr);
+ __io_aw();
+}
+#define writew(v, addr) writew(v, addr)
+
+#undef readl
+static inline uint32_t
+readl(const volatile void *addr)
+{
+ uint32_t v;
+
+ __io_br();
+ v = le32toh(__raw_readl(addr));
+ __io_ar();
+ return (v);
+}
+#define readl(addr) readl(addr)
+
+#undef writel
+static inline void
+writel(uint32_t v, volatile void *addr)
+{
+ __io_bw();
+ __raw_writel(htole32(v), addr);
+ __io_aw();
+}
+#define writel(v, addr) writel(v, addr)
+
+#undef readq
+#undef writeq
+#ifdef __LP64__
+static inline uint64_t
+readq(const volatile void *addr)
+{
+ uint64_t v;
+
+ __io_br();
+ v = le64toh(__raw_readq(addr));
+ __io_ar();
+ return (v);
+}
+#define readq(addr) readq(addr)
+
+static inline void
+writeq(uint64_t v, volatile void *addr)
+{
+ __io_bw();
+ __raw_writeq(htole64(v), addr);
+ __io_aw();
+}
+#define writeq(v, addr) writeq(v, addr)
+#endif
+
+/* Access little-endian MMIO registers atomically without memory barriers. */
+
+#undef readb_relaxed
+static inline uint8_t
+readb_relaxed(const volatile void *addr)
+{
+ return (__raw_readb(addr));
+}
+#define readb_relaxed(addr) readb_relaxed(addr)
+
+#undef writeb_relaxed
+static inline void
+writeb_relaxed(uint8_t v, volatile void *addr)
+{
+ __raw_writeb(v, addr);
+}
+#define writeb_relaxed(v, addr) writeb_relaxed(v, addr)
+
+#undef readw_relaxed
+static inline uint16_t
+readw_relaxed(const volatile void *addr)
+{
+ return (le16toh(__raw_readw(addr)));
+}
+#define readw_relaxed(addr) readw_relaxed(addr)
+
+#undef writew_relaxed
+static inline void
+writew_relaxed(uint16_t v, volatile void *addr)
+{
+ __raw_writew(htole16(v), addr);
+}
+#define writew_relaxed(v, addr) writew_relaxed(v, addr)
+
+#undef readl_relaxed
+static inline uint32_t
+readl_relaxed(const volatile void *addr)
+{
+ return (le32toh(__raw_readl(addr)));
+}
+#define readl_relaxed(addr) readl_relaxed(addr)
+
+#undef writel_relaxed
+static inline void
+writel_relaxed(uint32_t v, volatile void *addr)
+{
+ __raw_writel(htole32(v), addr);
+}
+#define writel_relaxed(v, addr) writel_relaxed(v, addr)
+
+#undef readq_relaxed
+#undef writeq_relaxed
+#ifdef __LP64__
+static inline uint64_t
+readq_relaxed(const volatile void *addr)
+{
+ return (le64toh(__raw_readq(addr)));
+}
+#define readq_relaxed(addr) readq_relaxed(addr)
+
+static inline void
+writeq_relaxed(uint64_t v, volatile void *addr)
+{
+ __raw_writeq(htole64(v), addr);
+}
+#define writeq_relaxed(v, addr) writeq_relaxed(v, addr)
+#endif
+
+/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
+
+#undef ioread8
+static inline uint8_t
+ioread8(const volatile void *addr)
+{
+ return (readb(addr));
+}
+#define ioread8(addr) ioread8(addr)
+
+#undef ioread16
+static inline uint16_t
+ioread16(const volatile void *addr)
+{
+ return (readw(addr));
+}
+#define ioread16(addr) ioread16(addr)
+
+#undef ioread16be
+static inline uint16_t
+ioread16be(const volatile void *addr)
+{
+ uint16_t v;
+
+ __io_br();
+ v = (be16toh(__raw_readw(addr)));
+ __io_ar();
+
+ return (v);
+}
+#define ioread16be(addr) ioread16be(addr)
+
+#undef ioread32
+static inline uint32_t
+ioread32(const volatile void *addr)
+{
+ return (readl(addr));
+}
+#define ioread32(addr) ioread32(addr)
+
+#undef ioread32be
+static inline uint32_t
+ioread32be(const volatile void *addr)
+{
+ uint32_t v;
+
+ __io_br();
+ v = (be32toh(__raw_readl(addr)));
+ __io_ar();
+
+ return (v);
+}
+#define ioread32be(addr) ioread32be(addr)
+
+#ifdef __LP64__
+#undef ioread64
+static inline uint64_t
+ioread64(const volatile void *addr)
+{
+ return (readq(addr));
+}
+#define ioread64(addr) ioread64(addr)
+#endif
+
+#undef iowrite8
+static inline void
+iowrite8(uint8_t v, volatile void *addr)
+{
+ writeb(v, addr);
+}
+#define iowrite8(v, addr) iowrite8(v, addr)
+
+#undef iowrite16
+static inline void
+iowrite16(uint16_t v, volatile void *addr)
+{
+ writew(v, addr);
+}
+#define iowrite16(v, addr)	iowrite16(v, addr)
+
+#undef iowrite32
+static inline void
+iowrite32(uint32_t v, volatile void *addr)
+{
+ writel(v, addr);
+}
+#define iowrite32(v, addr) iowrite32(v, addr)
+
+#undef iowrite32be
+static inline void
+iowrite32be(uint32_t v, volatile void *addr)
+{
+ __io_bw();
+ __raw_writel(htobe32(v), addr);
+ __io_aw();
+}
+#define iowrite32be(v, addr) iowrite32be(v, addr)
+
+#if defined(__i386__) || defined(__amd64__)
+#define _outb(data, port) outb((data), (port))
+#endif
+
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
+void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
+#else
+static __inline void *
+_ioremap_attr(vm_paddr_t _phys_addr, unsigned long _size, int _attr)
+{
+ return (NULL);
+}
+#endif
+
+struct device;
+static inline void *
+devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
+{
+ return (NULL);
+}
+
+#ifdef VM_MEMATTR_DEVICE
+#define ioremap_nocache(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
+#define ioremap_wt(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
+#define ioremap(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
+#else
+#define ioremap_nocache(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
+#define ioremap_wt(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
+#define ioremap(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
+#endif
+#ifdef VM_MEMATTR_WRITE_COMBINING
+#define ioremap_wc(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
+#else
+#define ioremap_wc(addr, size) ioremap_nocache(addr, size)
+#endif
+#define ioremap_cache(addr, size) \
+ _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
+void iounmap(void *addr);
+
+#define memset_io(a, b, c) memset((a), (b), (c))
+#define memcpy_fromio(a, b, c) memcpy((a), (b), (c))
+#define memcpy_toio(a, b, c) memcpy((a), (b), (c))
+
+static inline void
+__iowrite32_copy(void *to, const void *from, size_t count)
+{
+ const uint32_t *src;
+ uint32_t *dst;
+ int i;
+
+ for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
+ __raw_writel(*src, dst);
+}
+
+static inline void
+__iowrite64_copy(void *to, const void *from, size_t count)
+{
+#ifdef __LP64__
+ const uint64_t *src;
+ uint64_t *dst;
+ int i;
+
+ for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
+ __raw_writeq(*src, dst);
+#else
+ __iowrite32_copy(to, from, count * 2);
+#endif
+}
+
+static inline void
+__ioread32_copy(void *to, const void *from, size_t count)
+{
+ const uint32_t *src;
+ uint32_t *dst;
+ int i;
+
+ for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
+ *dst = __raw_readl(src);
+}
+
+static inline void
+__ioread64_copy(void *to, const void *from, size_t count)
+{
+#ifdef __LP64__
+ const uint64_t *src;
+ uint64_t *dst;
+ int i;
+
+ for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
+ *dst = __raw_readq(src);
+#else
+ __ioread32_copy(to, from, count * 2);
+#endif
+}
+
+enum {
+ MEMREMAP_WB = 1 << 0,
+ MEMREMAP_WT = 1 << 1,
+ MEMREMAP_WC = 1 << 2,
+};
+
+static inline void *
+memremap(resource_size_t offset, size_t size, unsigned long flags)
+{
+ void *addr = NULL;
+
+ if ((flags & MEMREMAP_WB) &&
+ (addr = ioremap_cache(offset, size)) != NULL)
+ goto done;
+ if ((flags & MEMREMAP_WT) &&
+ (addr = ioremap_wt(offset, size)) != NULL)
+ goto done;
+ if ((flags & MEMREMAP_WC) &&
+ (addr = ioremap_wc(offset, size)) != NULL)
+ goto done;
+done:
+ return (addr);
+}
+
+static inline void
+memunmap(void *addr)
+{
+ /* XXX May need to check if this is RAM */
+ iounmap(addr);
+}
+
+#define IOMEM_ERR_PTR(err) (void __iomem *)ERR_PTR(err)
+
+#define __MTRR_ID_BASE 1
+int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
+void lkpi_arch_phys_wc_del(int);
+#define arch_phys_wc_add(...) lkpi_arch_phys_wc_add(__VA_ARGS__)
+#define arch_phys_wc_del(...) lkpi_arch_phys_wc_del(__VA_ARGS__)
+#define arch_phys_wc_index(x) \
+ (((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
+
+static inline int
+arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+#if defined(__amd64__)
+ vm_offset_t va;
+
+ va = PHYS_TO_DMAP(start);
+ return (-pmap_change_attr(va, size, VM_MEMATTR_WRITE_COMBINING));
+#else
+ return (0);
+#endif
+}
+
+static inline void
+arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+#if defined(__amd64__)
+ vm_offset_t va;
+
+ va = PHYS_TO_DMAP(start);
+
+ pmap_change_attr(va, size, VM_MEMATTR_WRITE_BACK);
+#endif
+}
+
+#endif /* _LINUXKPI_LINUX_IO_H_ */
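
A short sketch of the read-modify-write idiom these accessors are meant for; the register offset and bit are invented.

#include <linux/io.h>

static void
set_enable_bit(void *mmio_base)
{
	uint32_t v;

	v = readl((char *)mmio_base + 0x04);	/* barrier + le32-to-host */
	writel(v | 0x1, (char *)mmio_base + 0x04); /* host-to-le32 + barrier */
}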
diff --git a/sys/compat/linuxkpi/common/include/linux/ioctl.h b/sys/compat/linuxkpi/common/include/linux/ioctl.h
new file mode 100644
index 000000000000..77c01224e6a5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ioctl.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_IOCTL_H_
+#define _LINUXKPI_LINUX_IOCTL_H_
+
+#include <sys/ioccom.h>
+
+#define _IOC_SIZE(cmd) IOCPARM_LEN(cmd)
+#define _IOC_TYPE(cmd) IOCGROUP(cmd)
+#define _IOC_NR(cmd) ((cmd) & 0xff)
+
+#endif /* _LINUXKPI_LINUX_IOCTL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/iommu.h b/sys/compat/linuxkpi/common/include/linux/iommu.h
new file mode 100644
index 000000000000..391d9778a0c8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/iommu.h
@@ -0,0 +1,29 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_IOMMU_H_
+#define _LINUXKPI_LINUX_IOMMU_H_
+
+#include <linux/device.h>
+
+#define __IOMMU_DOMAIN_PAGING (1U << 0)
+#define __IOMMU_DOMAIN_DMA_API (1U << 1)
+#define __IOMMU_DOMAIN_PT (1U << 2)
+#define __IOMMU_DOMAIN_DMA_FQ (1U << 3)
+
+#define IOMMU_DOMAIN_BLOCKED (0U)
+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | __IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | __IOMMU_DOMAIN_DMA_API | __IOMMU_DOMAIN_DMA_FQ)
+
+struct iommu_domain {
+ unsigned int type;
+};
+
+static inline struct iommu_domain *
+iommu_get_domain_for_dev(struct device *dev __unused)
+{
+ return (NULL);
+}
+
+#endif /* _LINUXKPI_LINUX_IOMMU_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/iopoll.h b/sys/compat/linuxkpi/common/include/linux/iopoll.h
new file mode 100644
index 000000000000..8d0498a26da1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/iopoll.h
@@ -0,0 +1,92 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IOPOLL_H
+#define _LINUXKPI_LINUX_IOPOLL_H
+
+#include <sys/types.h>
+#include <sys/time.h>
+#include <linux/delay.h>
+
+#define read_poll_timeout(_pollfp, _var, _cond, _us, _to, _early_sleep, ...) \
+({ \
+ struct timeval __now, __end; \
+ if (_to) { \
+ __end.tv_sec = (_to) / USEC_PER_SEC; \
+ __end.tv_usec = (_to) % USEC_PER_SEC; \
+ microtime(&__now); \
+ timevaladd(&__end, &__now); \
+ } \
+ \
+ if ((_early_sleep) && (_us) > 0) \
+ usleep_range(_us, _us); \
+ do { \
+ (_var) = _pollfp(__VA_ARGS__); \
+ if (_cond) \
+ break; \
+ if (_to) { \
+ microtime(&__now); \
+ if (timevalcmp(&__now, &__end, >)) \
+ break; \
+ } \
+ if ((_us) != 0) \
+ usleep_range(_us, _us); \
+ } while (1); \
+ (_cond) ? 0 : (-ETIMEDOUT); \
+})
+
+#define readx_poll_timeout(_pollfp, _addr, _var, _cond, _us, _to) \
+ read_poll_timeout(_pollfp, _var, _cond, _us, _to, false, _addr)
+
+#define read_poll_timeout_atomic(_pollfp, _var, _cond, _us, _to, _early_sleep, ...) \
+({ \
+ struct timeval __now, __end; \
+ if (_to) { \
+ __end.tv_sec = (_to) / USEC_PER_SEC; \
+ __end.tv_usec = (_to) % USEC_PER_SEC; \
+ microtime(&__now); \
+ timevaladd(&__end, &__now); \
+ } \
+ \
+ if ((_early_sleep) && (_us) > 0) \
+ DELAY(_us); \
+ do { \
+ (_var) = _pollfp(__VA_ARGS__); \
+ if (_cond) \
+ break; \
+ if (_to) { \
+ microtime(&__now); \
+ if (timevalcmp(&__now, &__end, >)) \
+ break; \
+ } \
+ if ((_us) != 0) \
+ DELAY(_us); \
+ } while (1); \
+ (_cond) ? 0 : (-ETIMEDOUT); \
+})
+
+#endif /* _LINUXKPI_LINUX_IOPOLL_H */
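
A hypothetical caller of read_poll_timeout(), polling a 32-bit status register every 10us and giving up after 100ms; HYP_STATUS_READY and the register pointer are assumptions.

#include <linux/io.h>
#include <linux/iopoll.h>

#define HYP_STATUS_READY	0x00000001

static int
wait_until_ready(void *status_reg)
{
	uint32_t val;

	/* Returns 0 once the condition holds, -ETIMEDOUT otherwise. */
	return (read_poll_timeout(readl, val, (val & HYP_STATUS_READY) != 0,
	    10, 100000, false, status_reg));
}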
diff --git a/sys/compat/linuxkpi/common/include/linux/ioport.h b/sys/compat/linuxkpi/common/include/linux/ioport.h
new file mode 100644
index 000000000000..444f3ad94602
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ioport.h
@@ -0,0 +1,57 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IOPORT_H
+#define _LINUXKPI_LINUX_IOPORT_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define DEFINE_RES_MEM(_start, _size) \
+ (struct resource) { \
+ .start = (_start), \
+ .end = (_start) + (_size) - 1, \
+ }
+
+struct resource {
+ resource_size_t start;
+ resource_size_t end;
+};
+
+static inline resource_size_t
+resource_size(const struct resource *r)
+{
+ return (r->end - r->start + 1);
+}
+
+static inline bool
+resource_contains(struct resource *a, struct resource *b)
+{
+ return (a->start <= b->start && a->end >= b->end);
+}
+
+#endif
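
A tiny hypothetical example of the resource helpers; the base address and length are placeholders.

#include <linux/ioport.h>

static resource_size_t
window_size(void)
{
	/* A 4 KiB MMIO window; resource_size() yields 0x1000 (end is inclusive). */
	struct resource win = DEFINE_RES_MEM(0xfe000000, 0x1000);

	return (resource_size(&win));
}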
diff --git a/sys/compat/linuxkpi/common/include/linux/iosys-map.h b/sys/compat/linuxkpi/common/include/linux/iosys-map.h
new file mode 100644
index 000000000000..66c442b8668f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/iosys-map.h
@@ -0,0 +1,161 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_IOSYS_MAP_H
+#define _LINUXKPI_LINUX_IOSYS_MAP_H
+
+#include <linux/io.h>
+#include <linux/string.h>
+
+struct iosys_map {
+ union {
+ void *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+#ifdef __OpenBSD__
+ bus_space_handle_t bsh;
+ bus_size_t size;
+#endif
+};
+
+#define IOSYS_MAP_INIT_OFFSET(_ism_src_p, _off) ({ \
+ struct iosys_map ism_dst = *(_ism_src_p); \
+ iosys_map_incr(&ism_dst, _off); \
+ ism_dst; \
+})
+
+static inline void
+iosys_map_incr(struct iosys_map *ism, size_t n)
+{
+ if (ism->is_iomem)
+ ism->vaddr_iomem += n;
+ else
+ ism->vaddr += n;
+}
+
+static inline void
+iosys_map_memcpy_to(struct iosys_map *ism, size_t off, const void *src,
+ size_t len)
+{
+ if (ism->is_iomem)
+ memcpy_toio(ism->vaddr_iomem + off, src, len);
+ else
+ memcpy(ism->vaddr + off, src, len);
+}
+
+static inline bool
+iosys_map_is_null(const struct iosys_map *ism)
+{
+ if (ism->is_iomem)
+ return (ism->vaddr_iomem == NULL);
+ else
+ return (ism->vaddr == NULL);
+}
+
+static inline bool
+iosys_map_is_set(const struct iosys_map *ism)
+{
+ if (ism->is_iomem)
+ return (ism->vaddr_iomem != NULL);
+ else
+ return (ism->vaddr != NULL);
+}
+
+static inline bool
+iosys_map_is_equal(const struct iosys_map *ism_a,
+ const struct iosys_map *ism_b)
+{
+ if (ism_a->is_iomem != ism_b->is_iomem)
+ return (false);
+
+ if (ism_a->is_iomem)
+ return (ism_a->vaddr_iomem == ism_b->vaddr_iomem);
+ else
+ return (ism_a->vaddr == ism_b->vaddr);
+}
+
+static inline void
+iosys_map_clear(struct iosys_map *ism)
+{
+ if (ism->is_iomem) {
+ ism->vaddr_iomem = NULL;
+ ism->is_iomem = false;
+ } else {
+ ism->vaddr = NULL;
+ }
+}
+
+static inline void
+iosys_map_set_vaddr_iomem(struct iosys_map *ism, void *addr)
+{
+ ism->vaddr_iomem = addr;
+ ism->is_iomem = true;
+}
+
+static inline void
+iosys_map_set_vaddr(struct iosys_map *ism, void *addr)
+{
+ ism->vaddr = addr;
+ ism->is_iomem = false;
+}
+
+static inline void
+iosys_map_memset(struct iosys_map *ism, size_t off, int value, size_t len)
+{
+ if (ism->is_iomem)
+ memset_io(ism->vaddr_iomem + off, value, len);
+ else
+ memset(ism->vaddr + off, value, len);
+}
+
+#ifdef __LP64__
+#define _iosys_map_readq(_addr) readq(_addr)
+#define _iosys_map_writeq(_val, _addr) writeq(_val, _addr)
+#else
+#define _iosys_map_readq(_addr) ({ \
+ uint64_t val; \
+ memcpy_fromio(&val, _addr, sizeof(uint64_t)); \
+ val; \
+})
+#define _iosys_map_writeq(_val, _addr) \
+ memcpy_toio(_addr, &(_val), sizeof(uint64_t))
+#endif
+
+#define iosys_map_rd(_ism, _off, _type) ({ \
+ _type val; \
+ if ((_ism)->is_iomem) { \
+ void *addr = (_ism)->vaddr_iomem + (_off); \
+ val = _Generic(val, \
+ uint8_t : readb(addr), \
+ uint16_t: readw(addr), \
+ uint32_t: readl(addr), \
+ uint64_t: _iosys_map_readq(addr)); \
+ } else \
+ val = READ_ONCE(*(_type *)((_ism)->vaddr + (_off))); \
+ val; \
+})
+#define iosys_map_wr(_ism, _off, _type, _val) ({ \
+ _type val = (_val); \
+ if ((_ism)->is_iomem) { \
+ void *addr = (_ism)->vaddr_iomem + (_off); \
+ _Generic(val, \
+ uint8_t : writeb(val, addr), \
+ uint16_t: writew(val, addr), \
+ uint32_t: writel(val, addr), \
+ uint64_t: _iosys_map_writeq(val, addr)); \
+ } else \
+ WRITE_ONCE(*(_type *)((_ism)->vaddr + (_off)), val); \
+})
+
+#define iosys_map_rd_field(_ism, _off, _type, _field) ({ \
+ _type *s; \
+ iosys_map_rd(_ism, (_off) + offsetof(_type, _field), \
+ __typeof(s->_field)); \
+})
+#define iosys_map_wr_field(_ism, _off, _type, _field, _val) ({ \
+ _type *s; \
+ iosys_map_wr(_ism, (_off) + offsetof(_type, _field), \
+ __typeof(s->_field), _val); \
+})
+
+#endif
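
For illustration, a hypothetical reader that goes through the _Generic-dispatched accessors above; struct hyp_fw_header and the mapping source are invented.

#include <linux/iosys-map.h>

struct hyp_fw_header {
	uint32_t	magic;
	uint32_t	version;
};

static uint32_t
fw_version(void *iomem_kva)
{
	struct iosys_map map;

	/* Marking the map as I/O memory routes reads through readl() etc. */
	iosys_map_set_vaddr_iomem(&map, iomem_kva);
	return (iosys_map_rd_field(&map, 0, struct hyp_fw_header, version));
}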
diff --git a/sys/compat/linuxkpi/common/include/linux/ip.h b/sys/compat/linuxkpi/common/include/linux/ip.h
new file mode 100644
index 000000000000..137cf89e7dcb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ip.h
@@ -0,0 +1,73 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IP_H
+#define _LINUXKPI_LINUX_IP_H
+
+#include <sys/types.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+#include <linux/skbuff.h>
+
+/* (u) unconfirmed structure field names; using FreeBSD's in the meantime. */
+struct iphdr {
+ uint8_t ip_hl:4, ip_ver:4; /* (u) */
+ uint8_t ip_tos; /* (u) */
+ uint16_t ip_len; /* (u) */
+ uint16_t id;
+ uint16_t ip_off; /* (u) */
+ uint8_t ip_ttl; /* (u) */
+ uint8_t protocol;
+ uint16_t check;
+ uint32_t saddr;
+ uint32_t daddr;
+};
+
+static __inline struct iphdr *
+ip_hdr(struct sk_buff *skb)
+{
+
+ return (struct iphdr *)skb_network_header(skb);
+}
+
+static __inline void
+ip_send_check(struct iphdr *iph)
+{
+
+ /* Clear the checksum before computing! */
+ iph->check = 0;
+ /* An IPv4 header is the same everywhere even if names differ. */
+ iph->check = in_cksum_hdr((const void *)iph);
+}
+
+#endif /* _LINUXKPI_LINUX_IP_H */
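
A small hedged example of the two helpers; the TTL tweak is hypothetical and only shows the zero-then-recompute checksum pattern.

#include <linux/ip.h>

static void
decrement_ttl(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->ip_ttl--;
	ip_send_check(iph);	/* zeroes iph->check, then recomputes it */
}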
diff --git a/sys/compat/linuxkpi/common/include/linux/irq_work.h b/sys/compat/linuxkpi/common/include/linux/irq_work.h
new file mode 100644
index 000000000000..7c4019bc0242
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/irq_work.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IRQ_WORK_H_
+#define _LINUXKPI_LINUX_IRQ_WORK_H_
+
+#include <sys/param.h>
+#include <sys/taskqueue.h>
+
+#include <linux/llist.h>
+#include <linux/workqueue.h>
+
+#define LKPI_IRQ_WORK_STD_TQ system_wq->taskqueue
+#define LKPI_IRQ_WORK_FAST_TQ linux_irq_work_tq
+
+#ifdef LKPI_IRQ_WORK_USE_FAST_TQ
+#define LKPI_IRQ_WORK_TQ LKPI_IRQ_WORK_FAST_TQ
+#else
+#define LKPI_IRQ_WORK_TQ LKPI_IRQ_WORK_STD_TQ
+#endif
+
+struct irq_work;
+typedef void (*irq_work_func_t)(struct irq_work *);
+
+struct irq_work {
+ struct task irq_task;
+ union {
+ struct llist_node llnode;
+ struct {
+ struct llist_node llist;
+ } node;
+ };
+ irq_work_func_t func;
+};
+
+extern struct taskqueue *linux_irq_work_tq;
+
+#define DEFINE_IRQ_WORK(name, _func) struct irq_work name = { \
+ .irq_task = TASK_INITIALIZER(0, linux_irq_work_fn, &(name)), \
+ .func = (_func), \
+}
+
+void linux_irq_work_fn(void *, int);
+
+static inline void
+init_irq_work(struct irq_work *irqw, irq_work_func_t func)
+{
+ TASK_INIT(&irqw->irq_task, 0, linux_irq_work_fn, irqw);
+ irqw->func = func;
+}
+
+static inline bool
+irq_work_queue(struct irq_work *irqw)
+{
+ return (taskqueue_enqueue_flags(LKPI_IRQ_WORK_TQ, &irqw->irq_task,
+ TASKQUEUE_FAIL_IF_PENDING) == 0);
+}
+
+static inline void
+irq_work_sync(struct irq_work *irqw)
+{
+ taskqueue_drain(LKPI_IRQ_WORK_TQ, &irqw->irq_task);
+}
+
+#endif /* _LINUXKPI_LINUX_IRQ_WORK_H_ */
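
A hypothetical producer/consumer pair for the irq_work shim; the function and work names are invented.

#include <linux/irq_work.h>

static void
hyp_deferred(struct irq_work *work)
{
	/* Runs later from the selected taskqueue, not in the caller's context. */
}

static DEFINE_IRQ_WORK(hyp_work, hyp_deferred);

static void
hyp_from_interrupt(void)
{
	/* Enqueues once; returns false if the task is already pending. */
	irq_work_queue(&hyp_work);
}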
diff --git a/sys/compat/linuxkpi/common/include/linux/irqdomain.h b/sys/compat/linuxkpi/common/include/linux/irqdomain.h
new file mode 100644
index 000000000000..c7788e51cc89
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/irqdomain.h
@@ -0,0 +1,10 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_IRQDOMAIN_H
+#define _LINUXKPI_LINUX_IRQDOMAIN_H
+
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/radix-tree.h>
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/irqreturn.h b/sys/compat/linuxkpi/common/include/linux/irqreturn.h
new file mode 100644
index 000000000000..ff2618449d5e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/irqreturn.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2017 Limelight Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_IRQRETURN_H
+#define _LINUXKPI_LINUX_IRQRETURN_H
+
+typedef enum irqreturn {
+ IRQ_NONE = 0,
+ IRQ_HANDLED = 1,
+ IRQ_WAKE_THREAD = 2
+} irqreturn_t;
+
+#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
+
+#endif /* _LINUXKPI_LINUX_IRQRETURN_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/jhash.h b/sys/compat/linuxkpi/common/include/linux/jhash.h
new file mode 100644
index 000000000000..25e2c04f1965
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/jhash.h
@@ -0,0 +1,144 @@
+#ifndef _LINUXKPI_LINUX_JHASH_H_
+#define _LINUXKPI_LINUX_JHASH_H_
+
+#include <asm/types.h>
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault. -DaveM
+ */
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value */
+#define JHASH_GOLDEN_RATIO 0x9e3779b9
+
+/* The most generic version: hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+ const u8 *k = key;
+
+ len = length;
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+
+ while (len >= 12) {
+ a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+ b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+ c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+ __jhash_mix(a,b,c);
+
+ k += 12;
+ len -= 12;
+ }
+
+ c += length;
+ switch (len) {
+ case 11: c += ((u32)k[10]<<24);
+ case 10: c += ((u32)k[9]<<16);
+ case 9 : c += ((u32)k[8]<<8);
+ case 8 : b += ((u32)k[7]<<24);
+ case 7 : b += ((u32)k[6]<<16);
+ case 6 : b += ((u32)k[5]<<8);
+ case 5 : b += k[4];
+ case 4 : a += ((u32)k[3]<<24);
+ case 3 : a += ((u32)k[2]<<16);
+ case 2 : a += ((u32)k[1]<<8);
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+/* A special optimized version that handles one or more u32s.
+ * The length parameter here is the number of u32s in the key.
+ */
+static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+ len = length;
+
+ while (len >= 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ k += 3; len -= 3;
+ }
+
+ c += length * 4;
+
+ switch (len) {
+ case 2 : b += k[1];
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+/* A special ultra-optimized version that knows it is hashing exactly
+ * 3, 2 or 1 word(s).
+ *
+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
+ * done at the end is not done here.
+ */
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += JHASH_GOLDEN_RATIO;
+ b += JHASH_GOLDEN_RATIO;
+ c += initval;
+
+ __jhash_mix(a, b, c);
+
+ return c;
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return jhash_3words(a, b, 0, initval);
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+ return jhash_3words(a, 0, 0, initval);
+}
+
+#endif /* _LINUXKPI_LINUX_JHASH_H_ */
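
A hedged example of jhash2() as a bucket function; struct hyp_flow_key and the seed handling are invented, and nbuckets must be non-zero.

#include <linux/jhash.h>

struct hyp_flow_key {
	u32	saddr;
	u32	daddr;
	u32	ports;
};

static u32
hyp_flow_bucket(const struct hyp_flow_key *key, u32 seed, u32 nbuckets)
{
	/* jhash2() takes the key length in 32-bit words, not bytes. */
	return (jhash2((const u32 *)key,
	    sizeof(*key) / sizeof(u32), seed) % nbuckets);
}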
diff --git a/sys/compat/linuxkpi/common/include/linux/jiffies.h b/sys/compat/linuxkpi/common/include/linux/jiffies.h
new file mode 100644
index 000000000000..c2409726e874
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/jiffies.h
@@ -0,0 +1,143 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_JIFFIES_H_
+#define _LINUXKPI_LINUX_JIFFIES_H_
+
+#include <linux/types.h>
+#include <linux/time.h>
+
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/time.h>
+
+extern unsigned long jiffies; /* defined in sys/kern/subr_ticks.S */
+#define jiffies_64 jiffies /* XXX-MJ wrong on 32-bit platforms */
+#define jiffies_to_msecs(x) ((unsigned int)(((int64_t)(int)(x)) * 1000 / hz))
+
+#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)
+
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_after32(a, b) ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)
+#define time_before(a, b) time_after(b,a)
+#define time_before32(a, b) time_after32(b, a)
+#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b) time_after_eq(b, a)
+#define time_in_range(a,b,c) \
+ (time_after_eq(a,b) && time_before_eq(a,c))
+#define time_is_after_eq_jiffies(a) time_after_eq(a, jiffies)
+#define time_is_after_jiffies(a) time_after(a, jiffies)
+#define time_is_before_jiffies(a) time_before(a, jiffies)
+
+#define HZ hz
+
+extern uint64_t lkpi_nsec2hz_rem;
+extern uint64_t lkpi_nsec2hz_div;
+extern uint64_t lkpi_nsec2hz_max;
+
+extern uint64_t lkpi_usec2hz_rem;
+extern uint64_t lkpi_usec2hz_div;
+extern uint64_t lkpi_usec2hz_max;
+
+extern uint64_t lkpi_msec2hz_rem;
+extern uint64_t lkpi_msec2hz_div;
+extern uint64_t lkpi_msec2hz_max;
+
+static inline unsigned long
+msecs_to_jiffies(uint64_t msec)
+{
+ uint64_t result;
+
+ if (msec > lkpi_msec2hz_max)
+ msec = lkpi_msec2hz_max;
+ result = howmany(msec * lkpi_msec2hz_rem, lkpi_msec2hz_div);
+ if (result > MAX_JIFFY_OFFSET)
+ result = MAX_JIFFY_OFFSET;
+
+ return ((unsigned long)result);
+}
+
+static inline unsigned long
+usecs_to_jiffies(uint64_t usec)
+{
+ uint64_t result;
+
+ if (usec > lkpi_usec2hz_max)
+ usec = lkpi_usec2hz_max;
+ result = howmany(usec * lkpi_usec2hz_rem, lkpi_usec2hz_div);
+ if (result > MAX_JIFFY_OFFSET)
+ result = MAX_JIFFY_OFFSET;
+
+ return ((unsigned long)result);
+}
+
+static inline uint64_t
+nsecs_to_jiffies64(uint64_t nsec)
+{
+
+ if (nsec > lkpi_nsec2hz_max)
+ nsec = lkpi_nsec2hz_max;
+ return (howmany(nsec * lkpi_nsec2hz_rem, lkpi_nsec2hz_div));
+}
+
+static inline unsigned long
+nsecs_to_jiffies(uint64_t nsec)
+{
+
+ if (sizeof(unsigned long) >= sizeof(uint64_t)) {
+ if (nsec > lkpi_nsec2hz_max)
+ nsec = lkpi_nsec2hz_max;
+ } else {
+ if (nsec > (lkpi_nsec2hz_max >> 32))
+ nsec = (lkpi_nsec2hz_max >> 32);
+ }
+ return (howmany(nsec * lkpi_nsec2hz_rem, lkpi_nsec2hz_div));
+}
+
+static inline uint64_t
+jiffies_to_nsecs(unsigned long j)
+{
+
+ return ((1000000000ULL / hz) * (uint64_t)j);
+}
+
+static inline uint64_t
+jiffies_to_usecs(unsigned long j)
+{
+
+ return ((1000000ULL / hz) * (uint64_t)j);
+}
+
+static inline uint64_t
+get_jiffies_64(void)
+{
+
+ return ((uint64_t)jiffies);
+}
+
+#endif /* _LINUXKPI_LINUX_JIFFIES_H_ */
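
A minimal sketch of the tick-based deadline idiom these macros support; the 250 ms figure and function are arbitrary.

#include <linux/jiffies.h>

static bool
hyp_deadline_passed(unsigned long start)
{
	/* True once at least 250 ms worth of ticks have elapsed since start. */
	return (time_after(jiffies, start + msecs_to_jiffies(250)));
}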
diff --git a/sys/compat/linuxkpi/common/include/linux/jump_label.h b/sys/compat/linuxkpi/common/include/linux/jump_label.h
new file mode 100644
index 000000000000..444754a0ff82
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/jump_label.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016-2020 François Tigeot <ftigeot@wolfpond.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_JUMP_LABEL_H_
+#define _LINUXKPI_LINUX_JUMP_LABEL_H_
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/bug.h>
+
+#define DEFINE_STATIC_KEY_FALSE(key) bool key = false
+
+static inline void
+static_branch_enable(bool *flag)
+{
+ *flag = true;
+}
+
+static inline bool
+static_branch_likely(bool *flag)
+{
+ return *flag;
+}
+
+#endif /* _LINUXKPI_LINUX_JUMP_LABEL_H_ */
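
A hypothetical use of the bool-backed static key shim; the key and function names are invented.

#include <linux/jump_label.h>

/* In this shim a "static key" is a plain bool, not a patched branch. */
static DEFINE_STATIC_KEY_FALSE(hyp_feature_key);

static void
hyp_enable_feature(void)
{
	static_branch_enable(&hyp_feature_key);
}

static int
hyp_use_feature(void)
{
	return (static_branch_likely(&hyp_feature_key) ? 1 : 0);
}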
diff --git a/sys/compat/linuxkpi/common/include/linux/kconfig.h b/sys/compat/linuxkpi/common/include/linux/kconfig.h
new file mode 100644
index 000000000000..c1d186b56e1f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kconfig.h
@@ -0,0 +1,76 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_KCONFIG_H_
+#define _LINUXKPI_LINUX_KCONFIG_H_
+
+/*
+ * Checking if an option is defined would be easy if we could do CPP inside CPP.
+ * The defined cases, whether -Dxxx or -Dxxx=1, are easy to deal with; in
+ * either case the defined value is "1".  A more general -Dxxx=<c> case would
+ * require more effort to handle all possible "true" values; hopefully we do
+ * not have to do that as well.
+ * The real problem is the undefined case.  To avoid it we use the
+ * concat/varargs trick: "yyy" ## xxx can produce two arguments if xxx is "1",
+ * by having a #define for yyy_1 which is "ignore,".
+ * Otherwise we just get "yyy".
+ * (Be careful about variable substitutions in macros, though.)
+ * This way a (true, false) problem becomes either a (don't care, true, false)
+ * or a (don't care true, false) argument list.  A variadic macro can then
+ * select only argument #2, which is always well known and defined, and that
+ * is exactly what we need.  Use 1 for true and 0 for false so that
+ * "#if IS_*()" pre-compiler checks also work; they do not like "#if true".
+ */
+#define ___XAB_1 dontcare,
+#define ___IS_XAB(_ignore, _x, ...) (_x)
+#define __IS_XAB(_x) ___IS_XAB(_x 1, 0)
+#define _IS_XAB(_x) __IS_XAB(__CONCAT(___XAB_, _x))
+
+/* This is if CONFIG_ccc=y. */
+#define IS_BUILTIN(_x) _IS_XAB(_x)
+/* This is if CONFIG_ccc=m. */
+#define IS_MODULE(_x) _IS_XAB(_x ## _MODULE)
+/* This is if CONFIG_ccc is compiled in(=y) or a module(=m). */
+#define IS_ENABLED(_x) (IS_BUILTIN(_x) || IS_MODULE(_x))
+/*
+ * This is a weird case. If CONFIG_ccc is builtin (=y) this returns true;
+ * or if CONFIG_ccc is a module (=m) and the caller is built as a module
+ * (-DMODULE defined) this returns true, but if the caller is not a module
+ * (-DMODULE not defined, which means caller is BUILTIN) then it returns
+ * false. In other words, a module can reach the kernel, a module can reach
+ * a module, but the kernel cannot reach a module, and code never compiled
+ * cannot be reached either.
+ * XXX -- I'd hope the module-to-module case would be handled by a proper
+ * module dependency definition (MODULE_DEPEND() in FreeBSD).
+ */
+#define IS_REACHABLE(_x) (IS_BUILTIN(_x) || \
+ (IS_MODULE(_x) && IS_BUILTIN(MODULE)))
+
+#endif /* _LINUXKPI_LINUX_KCONFIG_H_ */
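
A hypothetical CONFIG_HYP_FEATURE option showing that IS_ENABLED() expands to 0 or 1 and therefore also works in #if.

#include <linux/kconfig.h>

/*
 * Built with -DCONFIG_HYP_FEATURE=1 the first branch is kept; with the
 * option undefined the stub is.
 */
#if IS_ENABLED(CONFIG_HYP_FEATURE)
static const int hyp_feature_present = 1;
#else
static const int hyp_feature_present = 0;
#endif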
diff --git a/sys/compat/linuxkpi/common/include/linux/kdev_t.h b/sys/compat/linuxkpi/common/include/linux/kdev_t.h
new file mode 100644
index 000000000000..988dd771254a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kdev_t.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_KDEV_T_H_
+#define _LINUXKPI_LINUX_KDEV_T_H_
+
+#include <sys/types.h>
+
+#define MAJOR(dev) major(dev)
+#define MINOR(dev) minor(dev)
+#define MKDEV(ma, mi) makedev(ma, mi)
+
+static inline uint16_t
+old_encode_dev(dev_t dev)
+{
+ return ((MAJOR(dev) << 8) | MINOR(dev));
+}
+
+#endif /* _LINUXKPI_LINUX_KDEV_T_H_ */
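
These wrappers map directly onto FreeBSD's major()/minor()/makedev(); a small sketch with made-up device numbers:

    dev_t dev;
    uint16_t packed;

    dev = MKDEV(226, 1);			/* makedev(226, 1) */
    packed = old_encode_dev(dev);		/* (226 << 8) | 1 == 0xe201 */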
diff --git a/sys/compat/linuxkpi/common/include/linux/kernel.h b/sys/compat/linuxkpi/common/include/linux/kernel.h
new file mode 100644
index 000000000000..11a13cbd49b4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kernel.h
@@ -0,0 +1,385 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * Copyright (c) 2014-2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_KERNEL_H_
+#define _LINUXKPI_LINUX_KERNEL_H_
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/libkern.h>
+#include <sys/stat.h>
+#include <sys/smp.h>
+#include <sys/stddef.h>
+#include <sys/syslog.h>
+#include <sys/time.h>
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/kstrtox.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/stringify.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/typecheck.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/kconfig.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+#include <linux/stdarg.h>
+
+#define KERN_CONT ""
+#define KERN_EMERG "<0>"
+#define KERN_ALERT "<1>"
+#define KERN_CRIT "<2>"
+#define KERN_ERR "<3>"
+#define KERN_WARNING "<4>"
+#define KERN_NOTICE "<5>"
+#define KERN_INFO "<6>"
+#define KERN_DEBUG "<7>"
+
+#define S8_C(x) x
+#define U8_C(x) x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## LL
+#define U64_C(x) x ## ULL
+
+#define BUG() panic("BUG at %s:%d", __FILE__, __LINE__)
+#define BUG_ON(cond) do { \
+ if (cond) { \
+ panic("BUG ON %s failed at %s:%d", \
+ __stringify(cond), __FILE__, __LINE__); \
+ } \
+} while (0)
+
+extern int linuxkpi_warn_dump_stack;
+#define WARN_ON(cond) ({ \
+ bool __ret = (cond); \
+ if (__ret) { \
+ printf("WARNING %s failed at %s:%d\n", \
+ __stringify(cond), __FILE__, __LINE__); \
+ if (linuxkpi_warn_dump_stack) \
+ linux_dump_stack(); \
+ } \
+ unlikely(__ret); \
+})
+
+#define WARN_ON_SMP(cond) WARN_ON(cond)
+
+#define WARN_ON_ONCE(cond) ({ \
+ static bool __warn_on_once; \
+ bool __ret = (cond); \
+ if (__ret && !__warn_on_once) { \
+ __warn_on_once = 1; \
+ printf("WARNING %s failed at %s:%d\n", \
+ __stringify(cond), __FILE__, __LINE__); \
+ if (linuxkpi_warn_dump_stack) \
+ linux_dump_stack(); \
+ } \
+ unlikely(__ret); \
+})
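
Unlike BUG_ON(), the WARN_ON() variants evaluate to the (unlikely-annotated) truth of the condition, so they can drive control flow directly. A sketch with an illustrative pointer check:

    if (WARN_ON(ptr == NULL))	/* logs "WARNING ... failed at file:line",   */
    	return (-EINVAL);	/* plus a stack trace if                     */
    				/* linuxkpi_warn_dump_stack is non-zero.     */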
+
+#define oops_in_progress SCHEDULER_STOPPED()
+
+#undef ALIGN
+#define ALIGN(x, y) roundup2((x), (y))
+#define ALIGN_DOWN(x, y) rounddown2(x, y)
+#undef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((__typeof(p))ALIGN((uintptr_t)(p), (a)))
+#define IS_ALIGNED(x, a) (((x) & ((__typeof(x))(a) - 1)) == 0)
+#define __KERNEL_DIV_ROUND_UP(x, n) howmany(x, n)
+#define FIELD_SIZEOF(t, f) sizeof(((t *)0)->f)
+
+#define printk(...) printf(__VA_ARGS__)
+#define vprintk(f, a) vprintf(f, a)
+
+#define PTR_IF(x, p) ((x) ? (p) : NULL)
+
+#define asm __asm
+
+extern void linux_dump_stack(void);
+#define dump_stack() linux_dump_stack()
+
+struct va_format {
+ const char *fmt;
+ va_list *va;
+};
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	ssize_t ssize = size;
+	int i;
+
+	i = vsnprintf(buf, size, fmt, args);
+
+	/* Return the number of characters actually stored, not required. */
+	if (i < ssize)
+		return (i);
+	return ((ssize > 0) ? (ssize - 1) : 0);
+}
+
+static inline int
+scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return (i);
+}
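
The difference from snprintf() is the return value: scnprintf()/vscnprintf() report how many characters were actually stored in the buffer rather than how many would have been needed. A sketch with an arbitrary 8-byte buffer:

    char buf[8];
    int n;

    n = snprintf(buf, sizeof(buf), "%s", "123456789");	/* n == 9 (wanted) */
    n = scnprintf(buf, sizeof(buf), "%s", "123456789");	/* n == 7 (stored) */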
+
+/*
+ * The "pr_debug()" and "pr_devel()" macros should produce zero code
+ * unless DEBUG is defined:
+ */
+#ifdef DEBUG
+extern int linuxkpi_debug;
+#define pr_debug(fmt, ...) \
+ do { \
+ if (linuxkpi_debug) \
+ log(LOG_DEBUG, fmt, ##__VA_ARGS__); \
+ } while (0)
+#define pr_devel(fmt, ...) \
+ log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) \
+ ({ if (0) log(LOG_DEBUG, fmt, ##__VA_ARGS__); 0; })
+#define pr_devel(fmt, ...) \
+ ({ if (0) log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__); 0; })
+#endif
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define printk_once(...) do { \
+ static bool __print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ printk(__VA_ARGS__); \
+ } \
+} while (0)
+
+/*
+ * Log a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define log_once(level,...) do { \
+ static bool __log_once; \
+ \
+ if (unlikely(!__log_once)) { \
+ __log_once = true; \
+ log(level, __VA_ARGS__); \
+ } \
+} while (0)
+
+#define pr_emerg(fmt, ...) \
+ log(LOG_EMERG, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+ log(LOG_ALERT, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+ log(LOG_CRIT, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ log(LOG_ERR, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_once(fmt, ...) \
+ log_once(LOG_ERR, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ log(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn(...) \
+ pr_warning(__VA_ARGS__)
+#define pr_warn_once(fmt, ...) \
+ log_once(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice(fmt, ...) \
+ log(LOG_NOTICE, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ log(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_once(fmt, ...) \
+ log_once(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+#define pr_warn_ratelimited(...) do { \
+ static linux_ratelimit_t __ratelimited; \
+ if (linux_ratelimited(&__ratelimited)) \
+ pr_warning(__VA_ARGS__); \
+} while (0)
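
A sketch of how the _once and _ratelimited variants behave on a hot path; the function and message are purely illustrative:

    static void
    example_rx_error(int err)
    {
    	/* Logged only the first time this statement is reached. */
    	pr_warn_once("RX error %d; suppressing further reports\n", err);

    	/* Logged at most once per second (via ppsratecheck()). */
    	pr_warn_ratelimited("RX error %d\n", err);
    }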
+
+#ifndef WARN
+#define WARN(condition, ...) ({ \
+ bool __ret_warn_on = (condition); \
+ if (unlikely(__ret_warn_on)) \
+ pr_warning(__VA_ARGS__); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN_ONCE
+#define WARN_ONCE(condition, ...) ({ \
+ bool __ret_warn_on = (condition); \
+ if (unlikely(__ret_warn_on)) \
+ pr_warn_once(__VA_ARGS__); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define u64_to_user_ptr(val) ((void *)(uintptr_t)(val))
+
+#define _RET_IP_ __builtin_return_address(0)
+
+#define offsetofend(t, m) \
+ (offsetof(t, m) + sizeof((((t *)0)->m)))
+
+#define smp_processor_id() PCPU_GET(cpuid)
+#define num_possible_cpus() mp_ncpus
+#define num_online_cpus() mp_ncpus
+
+#if defined(__i386__) || defined(__amd64__)
+extern bool linux_cpu_has_clflush;
+#define cpu_has_clflush linux_cpu_has_clflush
+#endif
+
+typedef struct linux_ratelimit {
+ struct timeval lasttime;
+ int counter;
+} linux_ratelimit_t;
+
+static inline bool
+linux_ratelimited(linux_ratelimit_t *rl)
+{
+ return (ppsratecheck(&rl->lasttime, &rl->counter, 1));
+}
+
+#define __is_constexpr(x) \
+ __builtin_constant_p(x)
+
+/*
+ * The is_signed() macro below returns true if the passed data type is
+ * signed and false otherwise.
+ */
+#define is_signed(datatype) (((datatype)-1 / (datatype)2) == (datatype)0)
+
+#define TAINT_WARN 0
+#define test_taint(x) (0)
+#define add_taint(x,y) do { \
+ } while (0)
+
+static inline int
+_h2b(const char c)
+{
+
+ if (c >= '0' && c <= '9')
+ return (c - '0');
+ if (c >= 'a' && c <= 'f')
+ return (10 + c - 'a');
+ if (c >= 'A' && c <= 'F')
+ return (10 + c - 'A');
+ return (-EINVAL);
+}
+
+static inline int
+hex2bin(uint8_t *bindst, const char *hexsrc, size_t binlen)
+{
+ int hi4, lo4;
+
+ while (binlen > 0) {
+ hi4 = _h2b(*hexsrc++);
+ lo4 = _h2b(*hexsrc++);
+ if (hi4 < 0 || lo4 < 0)
+ return (-EINVAL);
+
+ *bindst++ = (hi4 << 4) | lo4;
+ binlen--;
+ }
+
+ return (0);
+}
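
hex2bin() consumes exactly 2 * binlen hex digits and fails on anything else; a minimal sketch with an illustrative key string:

    static int
    example_parse_key(void)
    {
    	uint8_t key[4];

    	/* Decodes "deadbeef" into { 0xde, 0xad, 0xbe, 0xef }; 0 on success. */
    	return (hex2bin(key, "deadbeef", sizeof(key)));
    }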
+
+static inline bool
+mac_pton(const char *macin, uint8_t *macout)
+{
+	const char *s, *d;
+	uint8_t mac[6];
+	int hx, lx, i;
+
+	if (strlen(macin) < (3 * 6 - 1))
+		return (false);
+
+	i = 0;
+	s = macin;
+	do {
+		/* A MAC address has at most six groups. */
+		if (i >= 6)
+			return (false);
+		/* Should we also support '-'-delimiters? */
+		d = strchrnul(s, ':');
+		hx = lx = 0;
+		while (s < d) {
+			/* Fail on abc:123:xxx:... */
+			if ((d - s) > 2)
+				return (false);
+			/* We do support non-well-formed strings: 3:45:6:... */
+			if ((d - s) > 1) {
+				hx = _h2b(*s);
+				if (hx < 0)
+					return (false);
+				s++;
+			}
+			lx = _h2b(*s);
+			if (lx < 0)
+				return (false);
+			s++;
+		}
+		mac[i] = (hx << 4) | lx;
+		i++;
+		/* Step over the ':' before parsing the next group. */
+		if (*d != '\0')
+			s = d + 1;
+	} while (d != NULL && *d != '\0');
+
+	if (i != 6)
+		return (false);
+
+	memcpy(macout, mac, 6);
+	return (true);
+}
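
A usage sketch for mac_pton(); the address literal is illustrative:

    static int
    example_set_lladdr(uint8_t lladdr[6])
    {
    	if (!mac_pton("00:11:22:33:44:55", lladdr))
    		return (-EINVAL);
    	return (0);
    }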
+
+#define DECLARE_FLEX_ARRAY(_t, _n) \
+ struct { struct { } __dummy_ ## _n; _t _n[0]; }
+
+#endif /* _LINUXKPI_LINUX_KERNEL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/kernel_stat.h b/sys/compat/linuxkpi/common/include/linux/kernel_stat.h
new file mode 100644
index 000000000000..c960b4ad2cff
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kernel_stat.h
@@ -0,0 +1,34 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_KERNEL_STAT_H_
+#define _LINUXKPI_LINUX_KERNEL_STAT_H_
+
+#include <linux/interrupt.h>
+
+#endif /* _LINUXKPI_LINUX_KERNEL_STAT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/kfifo.h b/sys/compat/linuxkpi/common/include/linux/kfifo.h
new file mode 100644
index 000000000000..d2f570781661
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kfifo.h
@@ -0,0 +1,122 @@
+/*-
+ * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_KFIFO_H_
+#define _LINUXKPI_LINUX_KFIFO_H_
+
+#include <sys/types.h>
+
+#include <linux/slab.h>
+#include <linux/gfp.h>
+
+#define INIT_KFIFO(x) 0
+#define DECLARE_KFIFO(x, y, z)
+
+#define DECLARE_KFIFO_PTR(_name, _type) \
+ struct kfifo_ ## _name { \
+ size_t total; \
+ size_t count; \
+ size_t first; \
+ size_t last; \
+ _type *head; \
+ } _name
+
+#define kfifo_len(_kf) \
+({ \
+ (_kf)->count; \
+})
+
+#define kfifo_is_empty(_kf) \
+({ \
+ ((_kf)->count == 0) ? true : false; \
+})
+
+#define kfifo_is_full(_kf) \
+({ \
+ ((_kf)->count == (_kf)->total) ? true : false; \
+})
+
+#define kfifo_put(_kf, _e) \
+({ \
+ bool _rc; \
+ \
+ /* Would overflow. */ \
+ if (kfifo_is_full(_kf)) { \
+ _rc = false; \
+ } else { \
+ (_kf)->head[(_kf)->last] = (_e); \
+ (_kf)->count++; \
+ (_kf)->last++; \
+		/* Wrap around: valid indices are 0 .. total - 1. */	\
+		if ((_kf)->last >= (_kf)->total)			\
+			(_kf)->last = 0;				\
+ _rc = true; \
+ } \
+ \
+ _rc; \
+})
+
+#define kfifo_get(_kf, _e) \
+({ \
+ bool _rc; \
+ \
+ if (kfifo_is_empty(_kf)) { \
+ _rc = false; \
+ } else { \
+ *(_e) = (_kf)->head[(_kf)->first]; \
+ (_kf)->count--; \
+ (_kf)->first++; \
+		/* Wrap around: valid indices are 0 .. total - 1. */	\
+		if ((_kf)->first >= (_kf)->total)			\
+			(_kf)->first = 0;				\
+ _rc = true; \
+ } \
+ \
+ _rc; \
+})
+
+#define kfifo_alloc(_kf, _s, _gfp) \
+({ \
+ int _error; \
+ \
+ (_kf)->head = kmalloc(sizeof(__typeof(*(_kf)->head)) * (_s), _gfp); \
+ if ((_kf)->head == NULL) \
+ _error = ENOMEM; \
+ else { \
+ (_kf)->total = (_s); \
+ _error = 0; \
+ } \
+ \
+ _error; \
+})
+
+#define kfifo_free(_kf) \
+({ \
+ kfree((_kf)->head); \
+ (_kf)->head = NULL; \
+ (_kf)->total = (_kf)->count = (_kf)->first = (_kf)->last = 0; \
+})
+
+#endif /* _LINUXKPI_LINUX_KFIFO_H_*/
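
A minimal sketch of the subset implemented above: embed a typed FIFO in a (hypothetical) softc, allocate its backing store, and push/pop elements. GFP_KERNEL and ENOMEM come from the headers included above.

    struct example_softc {
    	DECLARE_KFIFO_PTR(evtq, uint32_t);
    };

    static int
    example_attach(struct example_softc *sc)
    {
    	uint32_t ev;

    	if (kfifo_alloc(&sc->evtq, 16, GFP_KERNEL) != 0)
    		return (-ENOMEM);

    	(void) kfifo_put(&sc->evtq, 0xdeadbeef);	/* false when full */
    	if (kfifo_get(&sc->evtq, &ev))			/* ev == 0xdeadbeef */
    		printf("event %#x\n", ev);

    	kfifo_free(&sc->evtq);
    	return (0);
    }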
diff --git a/sys/compat/linuxkpi/common/include/linux/kmemleak.h b/sys/compat/linuxkpi/common/include/linux/kmemleak.h
new file mode 100644
index 000000000000..7007e72718c7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kmemleak.h
@@ -0,0 +1,8 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_KMEMLEAK_H_
+#define _LINUXKPI_LINUX_KMEMLEAK_H_
+
+#define kmemleak_update_trace(x)
+
+#endif /* _LINUXKPI_LINUX_KMEMLEAK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/kmod.h b/sys/compat/linuxkpi/common/include/linux/kmod.h
new file mode 100644
index 000000000000..8f9f034aabd8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kmod.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_KMOD_H_
+#define _LINUXKPI_LINUX_KMOD_H_
+
+#include <sys/types.h>
+#include <sys/syscallsubr.h>
+#include <sys/refcount.h>
+#include <sys/sbuf.h>
+#include <sys/stdarg.h>
+#include <sys/proc.h>
+
+#define request_module(...) \
+({\
+ char modname[128]; \
+ snprintf(modname, sizeof(modname), __VA_ARGS__); \
+ kern_kldload(curthread, modname, NULL); \
+})
+
+#define request_module_nowait request_module
+
+#endif /* _LINUXKPI_LINUX_KMOD_H_ */
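
request_module() formats the module name and loads it synchronously through kern_kldload(); note that the error returned here is a positive FreeBSD errno rather than a Linux-style negative value. The module name below is illustrative:

    static int
    example_load(void)
    {
    	/* Returns 0 on success or an errno from kern_kldload(). */
    	return (request_module("if_%s", "example"));
    }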
diff --git a/sys/compat/linuxkpi/common/include/linux/kobject.h b/sys/compat/linuxkpi/common/include/linux/kobject.h
new file mode 100644
index 000000000000..98f55d1234c4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kobject.h
@@ -0,0 +1,210 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_KOBJECT_H_
+#define _LINUXKPI_LINUX_KOBJECT_H_
+
+#include <sys/stdarg.h>
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct kobject;
+struct kset;
+struct sysctl_oid;
+
+#define KOBJ_CHANGE 0x01
+
+struct kobj_type {
+ void (*release)(struct kobject *kobj);
+ const struct sysfs_ops *sysfs_ops;
+ struct attribute **default_attrs;
+ const struct attribute_group **default_groups;
+};
+
+extern const struct kobj_type linux_kfree_type;
+
+struct kobject {
+ struct kobject *parent;
+ char *name;
+ struct kref kref;
+ const struct kobj_type *ktype;
+ struct list_head entry;
+ struct sysctl_oid *oidp;
+ struct kset *kset;
+};
+
+extern struct kobject *mm_kobj;
+
+struct attribute {
+ const char *name;
+ struct module *owner;
+ mode_t mode;
+};
+
+extern const struct sysfs_ops kobj_sysfs_ops;
+
+struct kobj_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+};
+
+struct kset_uevent_ops {
+ /* TODO */
+};
+
+struct kset {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct kobject kobj;
+ const struct kset_uevent_ops *uevent_ops;
+};
+
+static inline void
+kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
+{
+
+ kref_init(&kobj->kref);
+ INIT_LIST_HEAD(&kobj->entry);
+ kobj->ktype = ktype;
+ kobj->oidp = NULL;
+}
+
+void linux_kobject_release(struct kref *kref);
+
+static inline void
+kobject_put(struct kobject *kobj)
+{
+
+ if (kobj)
+ kref_put(&kobj->kref, linux_kobject_release);
+}
+
+static inline struct kobject *
+kobject_get(struct kobject *kobj)
+{
+
+ if (kobj)
+ kref_get(&kobj->kref);
+ return kobj;
+}
+
+struct kobject *kobject_create(void);
+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list);
+int kobject_add(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, ...);
+
+static inline struct kobject *
+kobject_create_and_add(const char *name, struct kobject *parent)
+{
+ struct kobject *kobj;
+
+ kobj = kobject_create();
+ if (kobj == NULL)
+ return (NULL);
+ if (kobject_add(kobj, parent, "%s", name) == 0)
+ return (kobj);
+ kobject_put(kobj);
+
+ return (NULL);
+}
+
+static inline void
+kobject_del(struct kobject *kobj __unused)
+{
+}
+
+static inline char *
+kobject_name(const struct kobject *kobj)
+{
+
+ return kobj->name;
+}
+
+int kobject_set_name(struct kobject *kobj, const char *fmt, ...);
+int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
+ struct kobject *parent, const char *fmt, ...);
+
+static __inline void
+kobject_uevent_env(struct kobject *kobj, int action, char *envp[])
+{
+
+	/*
+	 * iwlwifi(4) sends an INACCESSIBLE event when it detects that the card
+	 * (PCIe endpoint) is gone and it attempts a removal cleanup.
+	 * It is unclear whether we need to do anything udev/sysfs-related at
+	 * the moment, need a shortcut, or can simply ignore it (for now).
+	 */
+}
+
+void kset_init(struct kset *kset);
+int kset_register(struct kset *kset);
+void kset_unregister(struct kset *kset);
+struct kset * kset_create_and_add(const char *name,
+ const struct kset_uevent_ops *u, struct kobject *parent_kobj);
+
+static inline struct kset *
+to_kset(struct kobject *kobj)
+{
+ if (kobj != NULL)
+ return container_of(kobj, struct kset, kobj);
+ else
+ return NULL;
+}
+
+static inline struct kset *
+kset_get(struct kset *kset)
+{
+ if (kset != NULL) {
+ struct kobject *kobj;
+
+ kobj = kobject_get(&kset->kobj);
+ return to_kset(kobj);
+ } else {
+ return NULL;
+ }
+}
+
+static inline void
+kset_put(struct kset *kset)
+{
+ if (kset != NULL)
+ kobject_put(&kset->kobj);
+}
+
+void linux_kobject_kfree_name(struct kobject *kobj);
+
+#endif /* _LINUXKPI_LINUX_KOBJECT_H_ */
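
A sketch of the typical object lifecycle with the subset above: embed a struct kobject, give it a ktype whose release callback frees the container, and drop the final reference with kobject_put(). The structure and names are illustrative; kzalloc()/GFP_KERNEL are assumed to come from the slab/gfp headers included above.

    struct example_obj {
    	struct kobject kobj;
    	int value;
    };

    static void
    example_release(struct kobject *kobj)
    {
    	struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

    	kfree(obj);
    }

    static const struct kobj_type example_ktype = {
    	.release = example_release,
    };

    static int
    example_create(struct kobject *parent)
    {
    	struct example_obj *obj;

    	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    	if (obj == NULL)
    		return (-ENOMEM);
    	if (kobject_init_and_add(&obj->kobj, &example_ktype, parent,
    	    "example%d", 0) != 0) {
    		kobject_put(&obj->kobj);	/* release() frees obj */
    		return (-EINVAL);
    	}
    	return (0);
    }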
diff --git a/sys/compat/linuxkpi/common/include/linux/kref.h b/sys/compat/linuxkpi/common/include/linux/kref.h
new file mode 100644
index 000000000000..b2fba468f7df
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kref.h
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_KREF_H_
+#define _LINUXKPI_LINUX_KREF_H_
+
+#include <sys/types.h>
+#include <sys/refcount.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+
+#include <asm/atomic.h>
+
+struct kref {
+ refcount_t refcount;
+};
+
+static inline void
+kref_init(struct kref *kref)
+{
+
+ refcount_init((uint32_t *)&kref->refcount, 1);
+}
+
+static inline unsigned int
+kref_read(const struct kref *kref)
+{
+
+ return (refcount_load(__DECONST(u_int32_t *, &kref->refcount)));
+}
+
+static inline void
+kref_get(struct kref *kref)
+{
+
+ refcount_acquire((uint32_t *)&kref->refcount);
+}
+
+static inline int
+kref_put(struct kref *kref, void (*rel)(struct kref *kref))
+{
+
+ if (refcount_release((uint32_t *)&kref->refcount)) {
+ rel(kref);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+kref_put_lock(struct kref *kref, void (*rel)(struct kref *kref),
+ spinlock_t *lock)
+{
+
+ if (refcount_release((uint32_t *)&kref->refcount)) {
+ spin_lock(lock);
+ rel(kref);
+ return (1);
+ }
+ return (0);
+}
+
+static inline int
+kref_sub(struct kref *kref, unsigned int count,
+ void (*rel)(struct kref *kref))
+{
+
+ while (count--) {
+ if (refcount_release((uint32_t *)&kref->refcount)) {
+ rel(kref);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline int __must_check
+kref_get_unless_zero(struct kref *kref)
+{
+
+ return refcount_acquire_if_not_zero((uint32_t *)&kref->refcount);
+}
+
+static inline int kref_put_mutex(struct kref *kref,
+ void (*release)(struct kref *kref), struct mutex *lock)
+{
+ WARN_ON(release == NULL);
+ if (unlikely(!refcount_release_if_not_last((uint32_t *)&kref->refcount))) {
+ mutex_lock(lock);
+ if (unlikely(!refcount_release((uint32_t *)&kref->refcount))) {
+ mutex_unlock(lock);
+ return 0;
+ }
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
+#endif /* _LINUXKPI_LINUX_KREF_H_ */
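
A minimal reference-counting sketch on top of the refcount(9) mapping above; the wrapped structure is illustrative and assumes <linux/slab.h> for kfree():

    struct example_buf {
    	struct kref refs;
    	void *data;
    };

    static void
    example_buf_release(struct kref *kref)
    {
    	struct example_buf *buf = container_of(kref, struct example_buf, refs);

    	kfree(buf->data);
    	kfree(buf);
    }

    /*
     * kref_init() on creation, kref_get() per additional user; the final
     * kref_put() invokes example_buf_release().
     */
    static void
    example_buf_drop(struct example_buf *buf)
    {
    	kref_put(&buf->refs, example_buf_release);
    }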
diff --git a/sys/compat/linuxkpi/common/include/linux/kstrtox.h b/sys/compat/linuxkpi/common/include/linux/kstrtox.h
new file mode 100644
index 000000000000..5da99de24197
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kstrtox.h
@@ -0,0 +1,324 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2017-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2018 Johannes Lundberg <johalun0@gmail.com>
+ * Copyright (c) 2020-2022 The FreeBSD Foundation
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ * Copyright (c) 2023 Serenity Cyber Security, LLC
+ *
+ * Portions of this software were developed by Bjoern A. Zeeb and
+ * Emmanuel Vadot under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_KSTRTOX_H_
+#define _LINUXKPI_LINUX_KSTRTOX_H_
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/libkern.h>
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include <asm/uaccess.h>
+
+static inline unsigned long long
+simple_strtoull(const char *cp, char **endp, unsigned int base)
+{
+ return (strtouq(cp, endp, base));
+}
+
+static inline long long
+simple_strtoll(const char *cp, char **endp, unsigned int base)
+{
+ return (strtoq(cp, endp, base));
+}
+
+static inline unsigned long
+simple_strtoul(const char *cp, char **endp, unsigned int base)
+{
+ return (strtoul(cp, endp, base));
+}
+
+static inline long
+simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ return (strtol(cp, endp, base));
+}
+
+static inline int
+kstrtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+ char *end;
+
+ *res = strtoul(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ return (0);
+}
+
+static inline int
+kstrtol(const char *cp, unsigned int base, long *res)
+{
+ char *end;
+
+ *res = strtol(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ return (0);
+}
+
+static inline int
+kstrtoint(const char *cp, unsigned int base, int *res)
+{
+ char *end;
+ long temp;
+
+ *res = temp = strtol(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ if (temp != (int)temp)
+ return (-ERANGE);
+ return (0);
+}
+
+static inline int
+kstrtouint(const char *cp, unsigned int base, unsigned int *res)
+{
+ char *end;
+ unsigned long temp;
+
+ *res = temp = strtoul(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ if (temp != (unsigned int)temp)
+ return (-ERANGE);
+ return (0);
+}
+
+static inline int
+kstrtou8(const char *cp, unsigned int base, uint8_t *res)
+{
+ char *end;
+ unsigned long temp;
+
+ *res = temp = strtoul(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ if (temp != (uint8_t)temp)
+ return (-ERANGE);
+ return (0);
+}
+
+static inline int
+kstrtou16(const char *cp, unsigned int base, uint16_t *res)
+{
+ char *end;
+ unsigned long temp;
+
+ *res = temp = strtoul(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ if (temp != (uint16_t)temp)
+ return (-ERANGE);
+ return (0);
+}
+
+static inline int
+kstrtou32(const char *cp, unsigned int base, uint32_t *res)
+{
+
+ return (kstrtouint(cp, base, res));
+}
+
+static inline int
+kstrtos32(const char *cp, unsigned int base, int32_t *res)
+{
+
+ return (kstrtoint(cp, base, res));
+}
+
+static inline int
+kstrtos64(const char *cp, unsigned int base, int64_t *res)
+{
+ char *end;
+
+ *res = strtoq(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ return (0);
+}
+
+static inline int
+kstrtoll(const char *cp, unsigned int base, long long *res)
+{
+ return (kstrtos64(cp, base, (int64_t *)res));
+}
+
+static inline int
+kstrtou64(const char *cp, unsigned int base, u64 *res)
+{
+ char *end;
+
+ *res = strtouq(cp, &end, base);
+
+ /* skip newline character, if any */
+ if (*end == '\n')
+ end++;
+ if (*cp == 0 || *end != 0)
+ return (-EINVAL);
+ return (0);
+}
+
+static inline int
+kstrtoull(const char *cp, unsigned int base, unsigned long long *res)
+{
+ return (kstrtou64(cp, base, (uint64_t *)res));
+}
+
+static inline int
+kstrtobool(const char *s, bool *res)
+{
+ int len;
+
+ if (s == NULL || (len = strlen(s)) == 0 || res == NULL)
+ return (-EINVAL);
+
+ /* skip newline character, if any */
+ if (s[len - 1] == '\n')
+ len--;
+
+ if (len == 1 && strchr("yY1", s[0]) != NULL)
+ *res = true;
+ else if (len == 1 && strchr("nN0", s[0]) != NULL)
+ *res = false;
+ else if (strncasecmp("on", s, len) == 0)
+ *res = true;
+ else if (strncasecmp("off", s, len) == 0)
+ *res = false;
+ else
+ return (-EINVAL);
+
+ return (0);
+}
+
+static inline int
+kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+ char buf[8] = {};
+
+ if (count > (sizeof(buf) - 1))
+ count = (sizeof(buf) - 1);
+
+ if (copy_from_user(buf, s, count))
+ return (-EFAULT);
+
+ return (kstrtobool(buf, res));
+}
+
+static inline int
+kstrtoint_from_user(const char __user *s, size_t count, unsigned int base,
+ int *p)
+{
+ char buf[36] = {};
+
+ if (count > (sizeof(buf) - 1))
+ count = (sizeof(buf) - 1);
+
+ if (copy_from_user(buf, s, count))
+ return (-EFAULT);
+
+ return (kstrtoint(buf, base, p));
+}
+
+static inline int
+kstrtouint_from_user(const char __user *s, size_t count, unsigned int base,
+ unsigned int *p)
+{
+ char buf[36] = {};
+
+ if (count > (sizeof(buf) - 1))
+ count = (sizeof(buf) - 1);
+
+ if (copy_from_user(buf, s, count))
+ return (-EFAULT);
+
+ return (kstrtouint(buf, base, p));
+}
+
+static inline int
+kstrtou32_from_user(const char __user *s, size_t count, unsigned int base,
+ unsigned int *p)
+{
+
+ return (kstrtouint_from_user(s, count, base, p));
+}
+
+static inline int
+kstrtou8_from_user(const char __user *s, size_t count, unsigned int base,
+ uint8_t *p)
+{
+ char buf[8] = {};
+
+ if (count > (sizeof(buf) - 1))
+ count = (sizeof(buf) - 1);
+
+ if (copy_from_user(buf, s, count))
+ return (-EFAULT);
+
+ return (kstrtou8(buf, base, p));
+}
+
+#endif /* _LINUXKPI_LINUX_KSTRTOX_H_ */
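
Unlike the simple_strto*() wrappers, the kstrto*() helpers reject trailing garbage (tolerating a single trailing newline) and range-check the result; for instance:

    unsigned int v;
    uint8_t b;

    kstrtouint("42\n", 10, &v);	/* returns 0, v == 42 (newline tolerated) */
    kstrtouint("42x", 10, &v);	/* returns -EINVAL (trailing garbage)     */
    kstrtou8("300", 10, &b);	/* returns -ERANGE (does not fit in u8)   */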
diff --git a/sys/compat/linuxkpi/common/include/linux/kthread.h b/sys/compat/linuxkpi/common/include/linux/kthread.h
new file mode 100644
index 000000000000..1fde734fd767
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/kthread.h
@@ -0,0 +1,166 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_KTHREAD_H_
+#define _LINUXKPI_LINUX_KTHREAD_H_
+
+#include <linux/sched.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/unistd.h>
+
+struct task_struct;
+struct kthread_work;
+
+typedef void (*kthread_work_func_t)(struct kthread_work *work);
+
+struct kthread_worker {
+ struct task_struct *task;
+ struct taskqueue *tq;
+};
+
+struct kthread_work {
+ struct taskqueue *tq;
+ struct task task;
+ kthread_work_func_t func;
+};
+
+#define kthread_run(fn, data, fmt, ...) ({ \
+ struct task_struct *__task; \
+ struct thread *__td; \
+ \
+ if (kthread_add(linux_kthread_fn, NULL, NULL, &__td, \
+ RFSTOPPED, 0, fmt, ## __VA_ARGS__)) \
+ __task = NULL; \
+ else \
+ __task = linux_kthread_setup_and_run(__td, fn, data); \
+ __task; \
+})
+
+int linux_kthread_stop(struct task_struct *);
+bool linux_kthread_should_stop_task(struct task_struct *);
+bool linux_kthread_should_stop(void);
+int linux_kthread_park(struct task_struct *);
+void linux_kthread_parkme(void);
+bool linux_kthread_should_park(void);
+void linux_kthread_unpark(struct task_struct *);
+void linux_kthread_fn(void *);
+struct task_struct *linux_kthread_setup_and_run(struct thread *,
+ linux_task_fn_t *, void *arg);
+int linux_in_atomic(void);
+
+#define kthread_stop(task) linux_kthread_stop(task)
+#define kthread_should_stop() linux_kthread_should_stop()
+#define kthread_should_stop_task(task) linux_kthread_should_stop_task(task)
+#define kthread_park(task) linux_kthread_park(task)
+#define kthread_parkme() linux_kthread_parkme()
+#define kthread_should_park() linux_kthread_should_park()
+#define kthread_unpark(task) linux_kthread_unpark(task)
+
+#define in_atomic() linux_in_atomic()
+
+/* Only kthread_(create|destroy)_worker interface is allowed */
+#define kthread_init_worker(worker) \
+ _Static_assert(false, "pre-4.9 worker interface is not supported");
+
+task_fn_t lkpi_kthread_work_fn;
+task_fn_t lkpi_kthread_worker_init_fn;
+
+#define kthread_create_worker(flags, fmt, ...) ({ \
+ struct kthread_worker *__w; \
+ struct task __task; \
+ \
+ __w = malloc(sizeof(*__w), M_KMALLOC, M_WAITOK | M_ZERO); \
+ __w->tq = taskqueue_create("lkpi kthread taskq", M_WAITOK, \
+ taskqueue_thread_enqueue, &__w->tq); \
+ taskqueue_start_threads(&__w->tq, 1, PWAIT, fmt, ##__VA_ARGS__);\
+ TASK_INIT(&__task, 0, lkpi_kthread_worker_init_fn, __w); \
+ taskqueue_enqueue(__w->tq, &__task); \
+ taskqueue_drain(__w->tq, &__task); \
+ __w; \
+})
+
+static inline void
+kthread_destroy_worker(struct kthread_worker *worker)
+{
+ taskqueue_drain_all(worker->tq);
+ taskqueue_free(worker->tq);
+ free(worker, M_KMALLOC);
+}
+
+static inline void
+kthread_init_work(struct kthread_work *work, kthread_work_func_t func)
+{
+ work->tq = NULL;
+ work->func = func;
+ TASK_INIT(&work->task, 0, lkpi_kthread_work_fn, work);
+}
+
+static inline bool
+kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
+{
+ int error;
+
+ error = taskqueue_enqueue_flags(worker->tq, &work->task,
+ TASKQUEUE_FAIL_IF_CANCELING | TASKQUEUE_FAIL_IF_PENDING);
+ if (error == 0)
+ work->tq = worker->tq;
+ return (error == 0);
+}
+
+static inline bool
+kthread_cancel_work_sync(struct kthread_work *work)
+{
+ u_int pending = 0;
+
+ if (work->tq != NULL &&
+ taskqueue_cancel(work->tq, &work->task, &pending) != 0)
+ taskqueue_drain(work->tq, &work->task);
+
+ return (pending != 0);
+}
+
+static inline void
+kthread_flush_work(struct kthread_work *work)
+{
+ if (work->tq != NULL)
+ taskqueue_drain(work->tq, &work->task);
+}
+
+static inline void
+kthread_flush_worker(struct kthread_worker *worker)
+{
+ taskqueue_drain_all(worker->tq);
+}
+
+#endif /* _LINUXKPI_LINUX_KTHREAD_H_ */
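
A sketch of the taskqueue-backed worker interface above; the worker name and work handler are illustrative:

    static void
    example_work_fn(struct kthread_work *work)
    {
    	/* Runs in the worker's taskqueue thread. */
    }

    static struct kthread_worker *example_worker;
    static struct kthread_work example_work;

    static void
    example_start(void)
    {
    	example_worker = kthread_create_worker(0, "example_worker");
    	kthread_init_work(&example_work, example_work_fn);
    	(void) kthread_queue_work(example_worker, &example_work);
    }

    static void
    example_stop(void)
    {
    	kthread_flush_work(&example_work);
    	kthread_destroy_worker(example_worker);
    }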
diff --git a/sys/compat/linuxkpi/common/include/linux/ktime.h b/sys/compat/linuxkpi/common/include/linux/ktime.h
new file mode 100644
index 000000000000..6a2f04f3d789
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ktime.h
@@ -0,0 +1,287 @@
+/*-
+ * Copyright (c) 2018 Limelight Networks, Inc.
+ * Copyright (c) 2014-2018 Mellanox Technologies, Ltd.
+ * Copyright (c) 2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_KTIME_H
+#define _LINUXKPI_LINUX_KTIME_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+/* time values in nanoseconds */
+typedef s64 ktime_t;
+
+#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+
+static inline int64_t
+ktime_to_ns(ktime_t kt)
+{
+ return (kt);
+}
+
+static inline ktime_t
+ns_to_ktime(uint64_t nsec)
+{
+ return (nsec);
+}
+
+static inline int64_t
+ktime_divns(const ktime_t kt, int64_t div)
+{
+ return (kt / div);
+}
+
+static inline int64_t
+ktime_to_us(ktime_t kt)
+{
+ return (ktime_divns(kt, NSEC_PER_USEC));
+}
+
+static inline int64_t
+ktime_to_ms(ktime_t kt)
+{
+ return (ktime_divns(kt, NSEC_PER_MSEC));
+}
+
+static inline ktime_t
+ms_to_ktime(uint64_t ms)
+{
+ return (ms * NSEC_PER_MSEC);
+}
+
+static inline struct timeval
+ktime_to_timeval(ktime_t kt)
+{
+ return (ns_to_timeval(kt));
+}
+
+static inline ktime_t
+ktime_add_ns(ktime_t kt, int64_t ns)
+{
+ return (kt + ns);
+}
+
+static inline ktime_t
+ktime_add_ms(ktime_t kt, int64_t ms)
+{
+
+ return (ktime_add_ns(kt, ms * NSEC_PER_MSEC));
+}
+
+static inline ktime_t
+ktime_add_us(ktime_t kt, int64_t us)
+{
+
+ return (ktime_add_ns(kt, us * NSEC_PER_USEC));
+}
+
+static inline ktime_t
+ktime_sub_ns(ktime_t kt, int64_t ns)
+{
+ return (kt - ns);
+}
+
+static inline ktime_t
+ktime_set(const long secs, const unsigned long nsecs)
+{
+ ktime_t retval = {(s64) secs * NSEC_PER_SEC + (s64) nsecs};
+
+ return (retval);
+}
+
+static inline ktime_t
+ktime_sub(ktime_t lhs, ktime_t rhs)
+{
+ return (lhs - rhs);
+}
+
+static inline int64_t
+ktime_us_delta(ktime_t later, ktime_t earlier)
+{
+ ktime_t diff = ktime_sub(later, earlier);
+
+ return (ktime_to_us(diff));
+}
+
+static inline int64_t
+ktime_ms_delta(ktime_t later, ktime_t earlier)
+{
+ ktime_t diff = ktime_sub(later, earlier);
+
+ return (ktime_to_ms(diff));
+}
+
+static inline ktime_t
+ktime_add(ktime_t lhs, ktime_t rhs)
+{
+ return (lhs + rhs);
+}
+
+static inline int
+ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
+{
+
+ if (cmp1 > cmp2)
+ return (1);
+ else if (cmp1 < cmp2)
+ return (-1);
+ else
+ return (0);
+}
+
+static inline bool
+ktime_after(const ktime_t cmp1, const ktime_t cmp2)
+{
+
+ return (ktime_compare(cmp1, cmp2) > 0);
+}
+
+static inline bool
+ktime_before(const ktime_t cmp1, const ktime_t cmp2)
+{
+
+ return (ktime_compare(cmp1, cmp2) < 0);
+}
+
+static inline ktime_t
+timespec_to_ktime(struct timespec ts)
+{
+ return (ktime_set(ts.tv_sec, ts.tv_nsec));
+}
+
+static inline ktime_t
+timeval_to_ktime(struct timeval tv)
+{
+ return (ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC));
+}
+
+static inline int64_t
+timespec64_to_ns(struct timespec64 *ts)
+{
+ return (timespec_to_ns(ts));
+}
+
+#define ktime_to_timespec(kt) ns_to_timespec(kt)
+#define ktime_to_timespec64(kt) ns_to_timespec(kt)
+#define ktime_to_timeval(kt) ns_to_timeval(kt)
+#define ktime_to_ns(kt) (kt)
+#define ktime_get_ts(ts) getnanouptime(ts)
+#define ktime_get_ts64(ts) getnanouptime(ts)
+#define ktime_get_raw_ts64(ts) getnanouptime(ts)
+#define ktime_get_real_ts64(ts) getnanotime(ts)
+#define getrawmonotonic64(ts) getnanouptime(ts)
+
+static inline int64_t
+ktime_get_ns(void)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+
+ return (ktime_to_ns(timespec_to_ktime(ts)));
+}
+
+static inline ktime_t
+ktime_get(void)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ return (timespec_to_ktime(ts));
+}
+
+static inline ktime_t
+ktime_get_boottime(void)
+{
+ struct timespec ts;
+
+ nanouptime(&ts);
+ return (timespec_to_ktime(ts));
+}
+
+static inline uint64_t
+ktime_get_boottime_ns(void)
+{
+
+ return (ktime_to_ns(ktime_get_boottime()));
+}
+
+static inline uint64_t
+ktime_get_boottime_seconds(void)
+{
+
+ return (ktime_divns(ktime_get_boottime(), NSEC_PER_SEC));
+}
+
+static inline ktime_t
+ktime_get_real(void)
+{
+ struct timespec ts;
+
+ nanotime(&ts);
+ return (timespec_to_ktime(ts));
+}
+
+static inline ktime_t
+ktime_get_real_seconds(void)
+{
+ struct timespec ts;
+
+ nanotime(&ts);
+ return (ts.tv_sec);
+}
+
+static inline ktime_t
+ktime_get_raw(void)
+{
+ struct timespec ts;
+
+ nanouptime(&ts);
+ return (timespec_to_ktime(ts));
+}
+
+static inline u64
+ktime_get_raw_ns(void)
+{
+ struct timespec ts;
+
+ nanouptime(&ts);
+ return (ktime_to_ns(timespec_to_ktime(ts)));
+}
+
+static inline uint64_t
+ktime_get_raw_fast_ns(void)
+{
+ struct timespec ts;
+
+ getnanouptime(&ts);
+ return (ktime_to_ns(timespec_to_ktime(ts)));
+}
+
+#endif /* _LINUXKPI_LINUX_KTIME_H */
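
Since ktime_t is a plain signed nanosecond count here, interval measurement is simple integer arithmetic; a sketch:

    static int64_t
    example_measure_ms(void (*fn)(void))
    {
    	ktime_t start;

    	start = ktime_get();	/* monotonic, backed by getnanouptime() */
    	fn();
    	return (ktime_ms_delta(ktime_get(), start));
    }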
diff --git a/sys/compat/linuxkpi/common/include/linux/leds.h b/sys/compat/linuxkpi/common/include/linux/leds.h
new file mode 100644
index 000000000000..89f7286f6800
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/leds.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_LEDS_H
+#define _LINUXKPI_LINUX_LEDS_H
+
+enum led_brightness {
+ LED_OFF,
+};
+
+struct led_classdev {
+ const char *name;
+ const char *default_trigger;
+ int (*blink_set)(struct led_classdev *, unsigned long *, unsigned long *);
+ void (*brightness_set)(struct led_classdev *, enum led_brightness);
+ void (*led_set)(struct led_classdev *, enum led_brightness);
+};
+
+#endif /* _LINUXKPI_LINUX_LEDS_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/limits.h b/sys/compat/linuxkpi/common/include/linux/limits.h
new file mode 100644
index 000000000000..716366033bb3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/limits.h
@@ -0,0 +1,47 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_LIMITS_H
+#define _LINUXKPI_LINUX_LIMITS_H
+
+#include <sys/types.h>
+#include <sys/stdint.h>
+
+#define U8_MAX UINT8_MAX
+#define S8_MAX INT8_MAX
+#define S8_MIN INT8_MIN
+#define U16_MAX UINT16_MAX
+#define S16_MAX INT16_MAX
+#define S16_MIN INT16_MIN
+#define U32_MAX UINT32_MAX
+#define S32_MAX INT32_MAX
+#define S32_MIN INT32_MIN
+#define U64_MAX UINT64_MAX
+#define S64_MAX INT64_MAX
+#define S64_MIN INT64_MIN
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/list.h b/sys/compat/linuxkpi/common/include/linux/list.h
new file mode 100644
index 000000000000..a6c74a324dac
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/list.h
@@ -0,0 +1,529 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_LIST_H_
+#define _LINUXKPI_LINUX_LIST_H_
+
+#ifndef _STANDALONE
+/*
+ * Since LIST_HEAD conflicts with the Linux definition we must include any
+ * FreeBSD header which requires it here so it is resolved with the correct
+ * definition prior to the undef.
+ */
+#include <linux/types.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/cpuset.h>
+#include <sys/jail.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/vnode.h>
+#include <sys/conf.h>
+#include <sys/socket.h>
+#include <sys/mbuf.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/vnet.h>
+
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/tcp_lro.h>
+
+#include <netinet6/in6_var.h>
+#include <netinet6/nd6.h>
+
+#include <net80211/ieee80211.h>
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_node.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/pmap.h>
+#endif
+
+#ifndef prefetch
+#define prefetch(x)
+#endif
+
+#define LINUX_LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LINUX_LIST_HEAD(name) \
+ struct list_head name = LINUX_LIST_HEAD_INIT(name)
+
+static inline void
+INIT_LIST_HEAD(struct list_head *list)
+{
+
+ list->next = list->prev = list;
+}
+
+static inline int
+list_empty(const struct list_head *head)
+{
+
+ return (head->next == head);
+}
+
+static inline int
+list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+
+ return ((next == head) && (next == head->prev));
+}
+
+static inline void
+__list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ WRITE_ONCE(prev->next, next);
+}
+
+static inline void
+__list_del_entry(struct list_head *entry)
+{
+
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void
+list_del(struct list_head *entry)
+{
+
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void
+list_replace(struct list_head *old, struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void
+list_replace_init(struct list_head *old, struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+static inline void
+__list_add(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
+{
+
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+static inline void
+list_del_init(struct list_head *entry)
+{
+
+ list_del(entry);
+ INIT_LIST_HEAD(entry);
+}
+
+#define list_entry(ptr, type, field) container_of(ptr, type, field)
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
+#define list_next_entry(ptr, member) \
+ list_entry(((ptr)->member.next), typeof(*(ptr)), member)
+
+#define list_safe_reset_next(ptr, n, member) \
+ (n) = list_next_entry(ptr, member)
+
+#define list_prev_entry(ptr, member) \
+ list_entry(((ptr)->member.prev), typeof(*(ptr)), member)
+
+#define list_for_each(p, head) \
+ for (p = (head)->next; p != (head); p = (p)->next)
+
+#define list_for_each_safe(p, n, head) \
+ for (p = (head)->next, n = (p)->next; p != (head); p = n, n = (p)->next)
+
+#define list_for_each_entry(p, h, field) \
+ for (p = list_entry((h)->next, typeof(*p), field); &(p)->field != (h); \
+ p = list_entry((p)->field.next, typeof(*p), field))
+
+#define list_for_each_entry_safe(p, n, h, field) \
+ for (p = list_entry((h)->next, typeof(*p), field), \
+ n = list_entry((p)->field.next, typeof(*p), field); &(p)->field != (h);\
+ p = n, n = list_entry(n->field.next, typeof(*n), field))
+
+#define list_for_each_entry_from(p, h, field) \
+ for ( ; &(p)->field != (h); \
+ p = list_entry((p)->field.next, typeof(*p), field))
+
+#define list_for_each_entry_continue(p, h, field) \
+ for (p = list_next_entry((p), field); &(p)->field != (h); \
+ p = list_next_entry((p), field))
+
+#define list_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_entry((pos)->member.next, typeof(*pos), member); \
+ &(pos)->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+#define list_for_each_entry_reverse(p, h, field) \
+ for (p = list_entry((h)->prev, typeof(*p), field); &(p)->field != (h); \
+ p = list_entry((p)->field.prev, typeof(*p), field))
+
+#define list_for_each_entry_safe_reverse(p, n, h, field) \
+ for (p = list_entry((h)->prev, typeof(*p), field), \
+ n = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \
+ p = n, n = list_entry(n->field.prev, typeof(*n), field))
+
+#define list_for_each_entry_continue_reverse(p, h, field) \
+ for (p = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \
+ p = list_entry((p)->field.prev, typeof(*p), field))
+
+#define list_for_each_prev(p, h) for (p = (h)->prev; p != (h); p = (p)->prev)
+
+#define list_for_each_prev_safe(p, n, h) \
+ for (p = (h)->prev, n = (p)->prev; \
+ p != (h); \
+ p = n, n = (p)->prev)
+
+#define list_for_each_entry_from_reverse(p, h, field) \
+ for (; &p->field != (h); \
+ p = list_prev_entry(p, field))
+
+#define list_for_each_rcu(p, head) \
+ for (p = rcu_dereference((head)->next); \
+ p != (head); \
+ p = rcu_dereference((p)->next))
+
+static inline void
+list_add(struct list_head *new, struct list_head *head)
+{
+
+ __list_add(new, head, head->next);
+}
+
+static inline void
+list_add_tail(struct list_head *new, struct list_head *head)
+{
+
+ __list_add(new, head->prev, head);
+}
+
+static inline void
+list_move(struct list_head *list, struct list_head *head)
+{
+
+ list_del(list);
+ list_add(list, head);
+}
+
+static inline void
+list_move_tail(struct list_head *entry, struct list_head *head)
+{
+
+ list_del(entry);
+ list_add_tail(entry, head);
+}
+
+static inline void
+list_rotate_to_front(struct list_head *entry, struct list_head *head)
+{
+
+ list_move_tail(entry, head);
+}
+
+static inline void
+list_bulk_move_tail(struct list_head *head, struct list_head *first,
+ struct list_head *last)
+{
+ first->prev->next = last->next;
+ last->next->prev = first->prev;
+ head->prev->next = first;
+ first->prev = head->prev;
+ last->next = head;
+ head->prev = last;
+}
+
+static inline void
+linux_list_splice(const struct list_head *list, struct list_head *prev,
+ struct list_head *next)
+{
+ struct list_head *first;
+ struct list_head *last;
+
+ if (list_empty(list))
+ return;
+ first = list->next;
+ last = list->prev;
+ first->prev = prev;
+ prev->next = first;
+ last->next = next;
+ next->prev = last;
+}
+
+static inline void
+list_splice(const struct list_head *list, struct list_head *head)
+{
+
+ linux_list_splice(list, head, head->next);
+}
+
+static inline void
+list_splice_tail(struct list_head *list, struct list_head *head)
+{
+
+ linux_list_splice(list, head->prev, head);
+}
+
+static inline void
+list_splice_init(struct list_head *list, struct list_head *head)
+{
+
+ linux_list_splice(list, head, head->next);
+ INIT_LIST_HEAD(list);
+}
+
+static inline void
+list_splice_tail_init(struct list_head *list, struct list_head *head)
+{
+
+ linux_list_splice(list, head->prev, head);
+ INIT_LIST_HEAD(list);
+}
+
+#undef LIST_HEAD
+#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+#define HLIST_HEAD_INIT { }
+#define HLIST_HEAD(name) struct hlist_head name = HLIST_HEAD_INIT
+#define INIT_HLIST_HEAD(head) (head)->first = NULL
+#define INIT_HLIST_NODE(node) \
+do { \
+ (node)->next = NULL; \
+ (node)->pprev = NULL; \
+} while (0)
+
+static inline int
+hlist_unhashed(const struct hlist_node *h)
+{
+
+ return !h->pprev;
+}
+
+static inline int
+hlist_empty(const struct hlist_head *h)
+{
+
+ return !READ_ONCE(h->first);
+}
+
+static inline void
+hlist_del(struct hlist_node *n)
+{
+
+ WRITE_ONCE(*(n->pprev), n->next);
+ if (n->next != NULL)
+ n->next->pprev = n->pprev;
+}
+
+static inline void
+hlist_del_init(struct hlist_node *n)
+{
+
+ if (hlist_unhashed(n))
+ return;
+ hlist_del(n);
+ INIT_HLIST_NODE(n);
+}
+
+static inline void
+hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+
+ n->next = h->first;
+ if (h->first != NULL)
+ h->first->pprev = &n->next;
+ WRITE_ONCE(h->first, n);
+ n->pprev = &h->first;
+}
+
+static inline void
+hlist_add_before(struct hlist_node *n, struct hlist_node *next)
+{
+
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ WRITE_ONCE(*(n->pprev), n);
+}
+
+static inline void
+hlist_add_behind(struct hlist_node *n, struct hlist_node *prev)
+{
+
+ n->next = prev->next;
+ WRITE_ONCE(prev->next, n);
+ n->pprev = &prev->next;
+
+ if (n->next != NULL)
+ n->next->pprev = &n->next;
+}
+
+static inline void
+hlist_move_list(struct hlist_head *old, struct hlist_head *new)
+{
+
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ struct list_head *new_first = entry->next;
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry;
+ entry->next = list;
+ head->next = new_first;
+ new_first->prev = head;
+}
+
+static inline void list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(head))
+ return;
+ if (list_is_singular(head) &&
+ (head->next != entry && head != entry))
+ return;
+ if (entry == head)
+ INIT_LIST_HEAD(list);
+ else
+ __list_cut_position(list, head, entry);
+}
+
+static inline int list_is_first(const struct list_head *list,
+ const struct list_head *head)
+{
+
+ return (list->prev == head);
+}
+
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+static inline size_t
+list_count_nodes(const struct list_head *list)
+{
+ const struct list_head *lh;
+ size_t count;
+
+ count = 0;
+ list_for_each(lh, list) {
+ count++;
+ }
+
+ return (count);
+}
+
+#define hlist_entry(ptr, type, field) container_of(ptr, type, field)
+
+#define hlist_for_each(p, head) \
+ for (p = (head)->first; p; p = (p)->next)
+
+#define hlist_for_each_safe(p, n, head) \
+ for (p = (head)->first; p && ({ n = (p)->next; 1; }); p = n)
+
+#define hlist_entry_safe(ptr, type, member) \
+ ((ptr) ? hlist_entry(ptr, type, member) : NULL)
+
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member); \
+ (pos); \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#define hlist_for_each_entry_from(pos, member) \
+ for (; (pos); \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+ (pos) && ({ n = (pos)->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*(pos)), member))
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+extern void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ const struct list_head *a, const struct list_head *b));
+#else
+extern void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b));
+#endif
+
+#endif /* _LINUXKPI_LINUX_LIST_H_ */
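A minimal usage sketch of the list API above. struct demo_item, demo_add() and demo_drain() are hypothetical names, shown only to illustrate list_add_tail(), list_del() and the _safe teardown idiom.

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
        int value;
        struct list_head entry;         /* linkage member, passed as "field" */
};

static LINUX_LIST_HEAD(demo_list);      /* statically initialized head */

static void
demo_add(int v)
{
        struct demo_item *it;

        it = kzalloc(sizeof(*it), GFP_KERNEL);
        if (it == NULL)
                return;
        it->value = v;
        list_add_tail(&it->entry, &demo_list);  /* append at the tail */
}

static void
demo_drain(void)
{
        struct demo_item *it, *tmp;

        /* The _safe variant allows freeing the current element. */
        list_for_each_entry_safe(it, tmp, &demo_list, entry) {
                list_del(&it->entry);
                kfree(it);
        }
}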
diff --git a/sys/compat/linuxkpi/common/include/linux/llist.h b/sys/compat/linuxkpi/common/include/linux/llist.h
new file mode 100644
index 000000000000..fd842f05e9eb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/llist.h
@@ -0,0 +1,101 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_LLIST_H
+#define _LINUXKPI_LINUX_LLIST_H
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+
+struct llist_node {
+ struct llist_node *next;
+};
+
+struct llist_head {
+ struct llist_node *first;
+};
+
+#define LLIST_HEAD_INIT(name) { NULL }
+#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+
+#define llist_entry(ptr, type, member) \
+ ((ptr) ? container_of(ptr, type, member) : NULL)
+
+static inline struct llist_node *
+llist_del_all(struct llist_head *head)
+{
+ return ((void *)atomic_readandclear_ptr((uintptr_t *)&head->first));
+}
+
+static inline struct llist_node *
+llist_del_first(struct llist_head *head)
+{
+ struct llist_node *first, *next;
+
+ do {
+ first = head->first;
+ if (first == NULL)
+ return NULL;
+ next = first->next;
+ } while (atomic_cmpset_ptr((uintptr_t *)&head->first,
+ (uintptr_t)first, (uintptr_t)next) == 0);
+
+ return (first);
+}
+
+static inline bool
+llist_add(struct llist_node *new, struct llist_head *head)
+{
+ struct llist_node *first;
+
+ do {
+ new->next = first = head->first;
+ } while (atomic_cmpset_ptr((uintptr_t *)&head->first,
+ (uintptr_t)first, (uintptr_t)new) == 0);
+
+ return (first == NULL);
+}
+
+static inline bool
+llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *first;
+
+ do {
+ new_last->next = first = head->first;
+ } while (atomic_cmpset_ptr((uintptr_t *)&head->first,
+ (uintptr_t)first, (uintptr_t)new_first) == 0);
+
+ return (first == NULL);
+}
+
+static inline void
+init_llist_head(struct llist_head *head)
+{
+ head->first = NULL;
+}
+
+static inline bool
+llist_empty(struct llist_head *head)
+{
+ return (head->first == NULL);
+}
+
+#define llist_for_each_safe(pos, n, node) \
+ for ((pos) = (node); \
+ (pos) != NULL && \
+ ((n) = (pos)->next, pos); \
+ (pos) = (n))
+
+#define llist_for_each_entry_safe(pos, n, node, member) \
+ for (pos = llist_entry((node), __typeof(*pos), member); \
+ pos != NULL && \
+ (n = llist_entry(pos->member.next, __typeof(*pos), member), pos); \
+ pos = n)
+
+#define llist_for_each_entry(pos, node, member) \
+ for ((pos) = llist_entry((node), __typeof(*(pos)), member); \
+ (pos) != NULL; \
+ (pos) = llist_entry((pos)->member.next, __typeof(*(pos)), member))
+
+#endif
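An illustrative producer/consumer sketch for the lock-free llist above (all names are hypothetical): producers push with llist_add(), and the consumer detaches the whole list at once with llist_del_all() and walks it with llist_for_each_entry_safe(). The detached batch comes back in reverse (LIFO) insertion order.

#include <linux/llist.h>
#include <linux/slab.h>

struct demo_work {
        int payload;
        struct llist_node node;
};

static LLIST_HEAD(demo_queue);

/* Producer: safe against concurrent producers and the consumer. */
static bool
demo_post(int payload)
{
        struct demo_work *w;

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (w == NULL)
                return (false);
        w->payload = payload;
        /* Returns true if the list was empty before this add. */
        return (llist_add(&w->node, &demo_queue));
}

/* Consumer: detach everything atomically, then process without atomics. */
static void
demo_process(void)
{
        struct demo_work *w, *tmp;
        struct llist_node *batch;

        batch = llist_del_all(&demo_queue);
        llist_for_each_entry_safe(w, tmp, batch, node) {
                /* ... consume w->payload ... */
                kfree(w);
        }
}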
diff --git a/sys/compat/linuxkpi/common/include/linux/lockdep.h b/sys/compat/linuxkpi/common/include/linux/lockdep.h
new file mode 100644
index 000000000000..93fe445f7057
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/lockdep.h
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_LOCKDEP_H_
+#define _LINUXKPI_LINUX_LOCKDEP_H_
+
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/lock.h>
+
+struct lock_class_key {
+};
+struct lockdep_map {
+};
+struct pin_cookie {
+};
+
+#define lockdep_set_class(lock, key)
+#define lockdep_set_subclass(lock, sub)
+#define lockdep_set_class_and_name(lock, key, name)
+#define lockdep_set_current_reclaim_state(g) do { } while (0)
+#define lockdep_clear_current_reclaim_state() do { } while (0)
+#define lockdep_init_map(_map, _name, _key, _x) do { } while (0)
+#define lockdep_register_key(key) do { } while (0)
+#define lockdep_unregister_key(key) do { } while (0)
+
+#ifdef INVARIANTS
+#define lockdep_assert(cond) do { WARN_ON(!(cond)); } while (0)
+#define lockdep_assert_once(cond) do { WARN_ON_ONCE(!(cond)); } while (0)
+
+#define lockdep_assert_not_held(m) do { \
+ struct lock_object *__lock = (struct lock_object *)(m); \
+ LOCK_CLASS(__lock)->lc_assert(__lock, LA_UNLOCKED); \
+} while (0)
+
+#define lockdep_assert_held(m) do { \
+ struct lock_object *__lock = (struct lock_object *)(m); \
+ LOCK_CLASS(__lock)->lc_assert(__lock, LA_LOCKED); \
+} while (0)
+
+#define lockdep_assert_held_once(m) do { \
+ struct lock_object *__lock = (struct lock_object *)(m); \
+ LOCK_CLASS(__lock)->lc_assert(__lock, LA_LOCKED | LA_NOTRECURSED); \
+} while (0)
+
+#define lockdep_assert_none_held_once() do { } while (0)
+
+#else
+#define lockdep_assert(cond) do { } while (0)
+#define lockdep_assert_once(cond) do { } while (0)
+
+#define lockdep_assert_not_held(m) do { (void)(m); } while (0)
+#define lockdep_assert_held(m) do { (void)(m); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
+
+#define lockdep_assert_held_once(m) do { (void)(m); } while (0)
+
+#endif
+
+static __inline bool
+lockdep_is_held(void *__m __diagused)
+{
+#ifdef INVARIANTS
+ struct lock_object *__lock;
+ struct thread *__td;
+
+ __lock = __m;
+ return (LOCK_CLASS(__lock)->lc_owner(__lock, &__td) != 0);
+#else
+ return (true);
+#endif
+}
+#define lockdep_is_held_type(_m, _t) lockdep_is_held(_m)
+
+#define might_lock(m) do { } while (0)
+#define might_lock_read(m) do { } while (0)
+#define might_lock_nested(m, n) do { } while (0)
+
+#define lock_acquire(...) do { } while (0)
+#define lock_release(...) do { } while (0)
+#define lock_acquire_shared_recursive(...) do { } while (0)
+
+#define mutex_acquire(...) do { } while (0)
+#define mutex_release(...) do { } while (0)
+
+#define lock_map_acquire(_map) do { } while (0)
+#define lock_map_acquire_read(_map) do { } while (0)
+#define lock_map_release(_map) do { } while (0)
+
+#define lockdep_pin_lock(l) ({ struct pin_cookie __pc = { }; __pc; })
+#define lockdep_repin_lock(l,c) do { (void)(l); (void)(c); } while (0)
+#define lockdep_unpin_lock(l,c) do { (void)(l); (void)(c); } while (0)
+
+#endif /* _LINUXKPI_LINUX_LOCKDEP_H_ */
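A hedged sketch of the typical use of the assertion macros above with a LinuxKPI mutex (the lock, counter and functions are hypothetical). With INVARIANTS the macro maps onto the FreeBSD lock class assertion; without it, the check compiles away.

#include <linux/lockdep.h>
#include <linux/mutex.h>

static struct mutex demo_lock;
static int demo_counter;

static void
demo_init(void)
{
        mutex_init(&demo_lock);
}

static void
demo_update(int delta)
{
        /* Document (and, under INVARIANTS, enforce) the locking contract. */
        lockdep_assert_held(&demo_lock);
        demo_counter += delta;
}

static void
demo_caller(void)
{
        mutex_lock(&demo_lock);
        demo_update(1);
        mutex_unlock(&demo_lock);
}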
diff --git a/sys/compat/linuxkpi/common/include/linux/log2.h b/sys/compat/linuxkpi/common/include/linux/log2.h
new file mode 100644
index 000000000000..660e9adb6fa9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/log2.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_LOG2_H_
+#define _LINUXKPI_LINUX_LOG2_H_
+
+#include <linux/types.h>
+
+#include <sys/libkern.h>
+
+#define is_power_of_2(n) ({ \
+ __typeof(n) _n = (n); \
+ _n != 0 && (_n & (_n - 1)) == 0; \
+})
+
+#endif /* _LINUXKPI_LINUX_LOG2_H_ */
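For illustration, is_power_of_2() evaluates its argument once and already rejects zero; a hypothetical ring-size check:

#include <linux/errno.h>
#include <linux/log2.h>

/* Hypothetical validation helper: ring sizes must be powers of two. */
static int
demo_check_ring_size(unsigned int nentries)
{
        if (!is_power_of_2(nentries))   /* also false for nentries == 0 */
                return (-EINVAL);
        return (0);
}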
diff --git a/sys/compat/linuxkpi/common/include/linux/math.h b/sys/compat/linuxkpi/common/include/linux/math.h
new file mode 100644
index 000000000000..5a348a57747b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/math.h
@@ -0,0 +1,76 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2014-2015 François Tigeot
+ * Copyright (c) 2016 Matt Macy <mmacy@FreeBSD.org>
+ * Copyright (c) 2019 Johannes Lundberg <johalun@FreeBSD.org>
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MATH_H_
+#define _LINUXKPI_LINUX_MATH_H_
+
+#include <linux/types.h>
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(x, n) howmany(x, n)
+#define DIV_ROUND_UP_ULL(x, n) DIV_ROUND_UP((unsigned long long)(x), (n))
+#define DIV_ROUND_DOWN_ULL(x, n) ((unsigned long long)(x) / (n))
+
+#define DIV_ROUND_CLOSEST(x, divisor) (((x) + ((divisor) / 2)) / (divisor))
+#define DIV_ROUND_CLOSEST_ULL(x, divisor) ({ \
+ __typeof(divisor) __d = (divisor); \
+ unsigned long long __ret = (x) + (__d) / 2; \
+ __ret /= __d; \
+ __ret; \
+})
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60600
+#define abs_diff(x, y) ({ \
+ __typeof(x) _x = (x); \
+ __typeof(y) _y = (y); \
+ _x > _y ? _x - _y : _y - _x; \
+})
+#endif
+
+static inline uintmax_t
+mult_frac(uintmax_t x, uintmax_t multiplier, uintmax_t divisor)
+{
+ uintmax_t q = (x / divisor);
+ uintmax_t r = (x % divisor);
+
+ return ((q * multiplier) + ((r * multiplier) / divisor));
+}
+
+#endif /* _LINUXKPI_LINUX_MATH_H_ */
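A few worked values for the rounding helpers above, wrapped in a hypothetical helper: round_up(100, 64) == 128, round_down(100, 64) == 64, DIV_ROUND_UP(100, 64) == 2, and mult_frac(100, 3, 8) == 37 while avoiding overflow of the intermediate product.

#include <linux/math.h>

/*
 * Hypothetical helper: size a buffer in whole 64-byte cache lines.
 *   round_up(100, 64) == 128, round_down(100, 64) == 64,
 *   DIV_ROUND_UP(100, 64) == 2, mult_frac(100, 3, 8) == 37.
 */
static unsigned long
demo_cacheline_bytes(unsigned long payload)
{
        return (round_up(payload, 64));
}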
diff --git a/sys/compat/linuxkpi/common/include/linux/math64.h b/sys/compat/linuxkpi/common/include/linux/math64.h
new file mode 100644
index 000000000000..a216d350570f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/math64.h
@@ -0,0 +1,171 @@
+/*-
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2014-2015 Mellanox Technologies, Ltd. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MATH64_H
+#define _LINUXKPI_LINUX_MATH64_H
+
+#include <sys/stdint.h>
+#include <sys/systm.h>
+
+#define do_div(n, base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+})
+
+static inline uint64_t
+div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
+{
+
+ *remainder = dividend % divisor;
+ return (dividend / divisor);
+}
+
+static inline int64_t
+div64_s64(int64_t dividend, int64_t divisor)
+{
+
+ return (dividend / divisor);
+}
+
+static inline uint64_t
+div64_u64(uint64_t dividend, uint64_t divisor)
+{
+
+ return (dividend / divisor);
+}
+
+#define div64_ul(x, y) div64_u64((x), (y))
+
+static inline uint64_t
+div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
+{
+
+ *remainder = dividend % divisor;
+ return (dividend / divisor);
+}
+
+static inline int64_t
+div_s64(int64_t dividend, int32_t divisor)
+{
+
+ return (dividend / divisor);
+}
+
+static inline uint64_t
+div_u64(uint64_t dividend, uint32_t divisor)
+{
+
+ return (dividend / divisor);
+}
+
+static inline uint64_t
+mul_u32_u32(uint32_t a, uint32_t b)
+{
+
+ return ((uint64_t)a * b);
+}
+
+static inline uint64_t
+div64_u64_round_up(uint64_t dividend, uint64_t divisor)
+{
+ return ((dividend + divisor - 1) / divisor);
+}
+
+#define DIV64_U64_ROUND_UP(...) \
+ div64_u64_round_up(__VA_ARGS__)
+
+static inline uint64_t
+mul_u64_u32_div(uint64_t x, uint32_t y, uint32_t div)
+{
+ const uint64_t rem = x % div;
+
+ return ((x / div) * y + (rem * y) / div);
+}
+
+static inline uint64_t
+mul_u64_u64_div_u64(uint64_t x, uint64_t y, uint64_t z)
+{
+ uint64_t res, rem;
+ uint64_t x1, y1, y1z;
+
+ res = rem = 0;
+ x1 = x;
+ y1z = y / z;
+ y1 = y - y1z * z;
+
+ /*
+ * INVARIANT: x * y = res * z + rem + (y1 + y1z * z) * x1
+ * INVARIANT: y1 < z
+ * INVARIANT: rem < z
+ */
+ while (x1 > 0) {
+ /* Handle low bit. */
+ if (x1 & 1) {
+ x1 &= ~1;
+ res += y1z;
+ rem += y1;
+ if ((rem < y1) || (rem >= z)) {
+ res += 1;
+ rem -= z;
+ }
+ }
+
+ /* Shift x1 right and (y1 + y1z * z) left */
+ x1 >>= 1;
+ if ((y1 * 2 < y1) || (y1 * 2 >= z)) {
+ y1z = y1z * 2 + 1;
+ y1 = y1 * 2 - z;
+ } else {
+ y1z *= 2;
+ y1 *= 2;
+ }
+ }
+
+ KASSERT(res * z + rem == x * y, ("%s: res %ju * z %ju + rem %ju != "
+ "x %ju * y %ju", __func__, (uintmax_t)res, (uintmax_t)z,
+ (uintmax_t)rem, (uintmax_t)x, (uintmax_t)y));
+ KASSERT(rem < z, ("%s: rem %ju >= z %ju\n", __func__,
+ (uintmax_t)rem, (uintmax_t)z));
+
+ return (res);
+}
+
+static inline uint64_t
+mul_u64_u32_shr(uint64_t x, uint32_t y, unsigned int shift)
+{
+ uint32_t hi, lo;
+ hi = x >> 32;
+ lo = x & 0xffffffff;
+
+ return (mul_u32_u32(lo, y) >> shift) +
+ (mul_u32_u32(hi, y) << (32 - shift));
+}
+
+#endif /* _LINUXKPI_LINUX_MATH64_H */
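An illustrative rate computation with the 64-bit division helpers above (both functions are hypothetical). Note that do_div() divides its first argument in place and yields the remainder.

#include <linux/math64.h>

/* Hypothetical: convert a byte count and elapsed time to bytes per second. */
static uint64_t
demo_bytes_per_second(uint64_t bytes, uint64_t elapsed_ns)
{
        if (elapsed_ns == 0)
                return (0);
        /* bytes * 1e9 / elapsed_ns without losing 64-bit precision. */
        return (mul_u64_u64_div_u64(bytes, 1000000000ULL, elapsed_ns));
}

/* Split nanoseconds into whole seconds (left in *ns) and the remainder. */
static uint32_t
demo_split_seconds(uint64_t *ns)
{
        return (do_div(*ns, 1000000000U));
}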
diff --git a/sys/compat/linuxkpi/common/include/linux/media-bus-format.h b/sys/compat/linuxkpi/common/include/linux/media-bus-format.h
new file mode 100644
index 000000000000..1e1954d45409
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/media-bus-format.h
@@ -0,0 +1,8 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_MEDIA_BUS_FORMAT_H_
+#define _LINUXKPI_LINUX_MEDIA_BUS_FORMAT_H_
+
+#define MEDIA_BUS_FMT_FIXED 1
+
+#endif /* _LINUXKPI_LINUX_MEDIA_BUS_FORMAT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/mhi.h b/sys/compat/linuxkpi/common/include/linux/mhi.h
new file mode 100644
index 000000000000..24b3205d6f5a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mhi.h
@@ -0,0 +1,222 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MHI_H
+#define _LINUXKPI_LINUX_MHI_H
+
+#include <linux/types.h>
+
+/* Modem Host Interface (MHI) */
+
+/* XXX FIXME */
+#define MHI_DB_BRST_DISABLE 0
+#define MHI_ER_CTRL 0
+
+enum mhi_callback {
+ MHI_CB_SYS_ERROR,
+ MHI_CB_BW_REQ,
+ MHI_CB_EE_MISSION_MODE,
+ MHI_CB_EE_RDDM,
+ MHI_CB_FATAL_ERROR,
+ MHI_CB_IDLE,
+ MHI_CB_LPM_ENTER,
+ MHI_CB_LPM_EXIT,
+ MHI_CB_PENDING_DATA,
+};
+
+struct mhi_channel_config {
+ const char *name;
+ int auto_queue, dir, doorbell, doorbell_mode_switch, ee_mask,
+     event_ring, lpm_notify, num, num_elements, offload_channel, pollcfg;
+};
+
+struct mhi_event_config {
+ int client_managed, data_type, hardware_event, irq, irq_moderation_ms,
+     mode, num_elements, offload_channel, priority;
+};
+
+struct mhi_device {
+};
+
+struct mhi_controller_config {
+ const struct mhi_channel_config *ch_cfg;
+ struct mhi_event_config *event_cfg;
+
+ int buf_len, max_channels, num_channels, num_events, use_bounce_buf;
+
+ uint32_t timeout_ms;
+};
+
+struct mhi_controller {
+ struct device *cntrl_dev;
+ struct mhi_device *mhi_dev;
+ void *regs;
+ int *irq;
+ const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
+
+ bool fbc_download;
+ size_t rddm_size;
+ size_t sbl_size;
+ size_t seg_len;
+ size_t reg_len;
+ int nr_irqs;
+ unsigned long irq_flags;
+ uint32_t timeout_ms;
+
+ dma_addr_t iova_start;
+ dma_addr_t iova_stop;
+
+ int (*runtime_get)(struct mhi_controller *);
+ void (*runtime_put)(struct mhi_controller *);
+ void (*status_cb)(struct mhi_controller *, enum mhi_callback);
+ int (*read_reg)(struct mhi_controller *, void __iomem *, uint32_t *);
+ void (*write_reg)(struct mhi_controller *, void __iomem *, uint32_t);
+};
+
+/* -------------------------------------------------------------------------- */
+
+struct mhi_controller *linuxkpi_mhi_alloc_controller(void);
+void linuxkpi_mhi_free_controller(struct mhi_controller *);
+int linuxkpi_mhi_register_controller(struct mhi_controller *,
+ const struct mhi_controller_config *);
+void linuxkpi_mhi_unregister_controller(struct mhi_controller *);
+
+/* -------------------------------------------------------------------------- */
+
+static inline struct mhi_controller *
+mhi_alloc_controller(void)
+{
+
+ /* Keep allocations internal to our implementation. */
+ return (linuxkpi_mhi_alloc_controller());
+}
+
+static inline void
+mhi_free_controller(struct mhi_controller *mhi_ctrl)
+{
+
+ linuxkpi_mhi_free_controller(mhi_ctrl);
+}
+
+static inline int
+mhi_register_controller(struct mhi_controller *mhi_ctrl,
+ const struct mhi_controller_config *cfg)
+{
+
+ return (linuxkpi_mhi_register_controller(mhi_ctrl, cfg));
+}
+
+static inline void
+mhi_unregister_controller(struct mhi_controller *mhi_ctrl)
+{
+
+ linuxkpi_mhi_unregister_controller(mhi_ctrl);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline int
+mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+ /* XXX TODO */
+ return (-1);
+}
+
+static __inline void
+mhi_device_put(struct mhi_device *mhi_dev)
+{
+ /* XXX TODO */
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline int
+mhi_prepare_for_power_up(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+static __inline int
+mhi_sync_power_up(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+static __inline int
+mhi_async_power_up(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+static __inline void
+mhi_power_down(struct mhi_controller *mhi_ctrl, bool x)
+{
+ /* XXX TODO */
+}
+
+static __inline void
+mhi_unprepare_after_power_down(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline int
+mhi_pm_suspend(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+static __inline int
+mhi_pm_resume(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+static __inline int
+mhi_pm_resume_force(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline int
+mhi_force_rddm_mode(struct mhi_controller *mhi_ctrl)
+{
+ /* XXX TODO */
+ return (0);
+}
+
+#endif /* _LINUXKPI_LINUX_MHI_H */
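A heavily hedged sketch of wiring up the controller structure above; the device, register mapping, IRQ table, callback body and all values are hypothetical, and several of the entry points in this header are still stubs.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mhi.h>

static void
demo_mhi_status_cb(struct mhi_controller *ctrl, enum mhi_callback cb)
{
        /* React to MHI_CB_SYS_ERROR, MHI_CB_EE_RDDM, ... as needed. */
}

static int
demo_mhi_attach(struct device *dev, void *regs, int *irqs, int nirqs)
{
        static const struct mhi_controller_config cfg = {
                .max_channels = 128,
                .timeout_ms = 2000,
        };
        struct mhi_controller *ctrl;
        int error;

        ctrl = mhi_alloc_controller();
        if (ctrl == NULL)
                return (-ENOMEM);

        ctrl->cntrl_dev = dev;
        ctrl->regs = regs;
        ctrl->irq = irqs;
        ctrl->nr_irqs = nirqs;
        ctrl->timeout_ms = 2000;
        ctrl->status_cb = demo_mhi_status_cb;

        error = mhi_register_controller(ctrl, &cfg);
        if (error != 0)
                mhi_free_controller(ctrl);
        return (error);
}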
diff --git a/sys/compat/linuxkpi/common/include/linux/minmax.h b/sys/compat/linuxkpi/common/include/linux/minmax.h
new file mode 100644
index 000000000000..d48958f0899f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/minmax.h
@@ -0,0 +1,74 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2014-2015 François Tigeot
+ * Copyright (c) 2015 Hans Petter Selasky <hselasky@FreeBSD.org>
+ * Copyright (c) 2016 Matt Macy <mmacy@FreeBSD.org>
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MINMAX_H_
+#define _LINUXKPI_LINUX_MINMAX_H_
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define min(x, y) ((x) < (y) ? (x) : (y))
+#define max(x, y) ((x) > (y) ? (x) : (y))
+
+#define min3(a, b, c) min(a, min(b, c))
+#define max3(a, b, c) max(a, max(b, c))
+
+#define min_not_zero(x, y) ({ \
+ __typeof(x) __min1 = (x); \
+ __typeof(y) __min2 = (y); \
+ __min1 == 0 ? __min2 : ((__min2 == 0) ? __min1 : min(__min1, __min2));\
+})
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
+#define clamp(x, lo, hi) min(max(x, lo), hi)
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+
+/* Swap values of a and b */
+#define swap(a, b) do { \
+ __typeof(a) _swap_tmp = a; \
+ a = b; \
+ b = _swap_tmp; \
+} while (0)
+
+#endif /* _LINUXKPI_LINUX_MINMAX_H_ */
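Illustrative uses of the helpers above (the function and values are hypothetical). min()/max() evaluate their arguments more than once and perform no type checking, so the typed variants are preferred when operand types differ.

#include <linux/minmax.h>

static unsigned int
demo_pick_queue_len(unsigned int requested, size_t hw_limit)
{
        /* Force both operands to one type before comparing. */
        unsigned int len = min_t(unsigned int, requested, hw_limit);

        /* Keep the result in [16, 1024]: clamp_t(..., 8, 16, 1024) == 16. */
        return (clamp_t(unsigned int, len, 16, 1024));
}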
diff --git a/sys/compat/linuxkpi/common/include/linux/miscdevice.h b/sys/compat/linuxkpi/common/include/linux/miscdevice.h
new file mode 100644
index 000000000000..c66006a6b78e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/miscdevice.h
@@ -0,0 +1,74 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_MISCDEVICE_H_
+#define _LINUXKPI_LINUX_MISCDEVICE_H_
+
+#define MISC_DYNAMIC_MINOR -1
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+
+struct miscdevice {
+ const char *name;
+ struct device *this_device;
+ const struct file_operations *fops;
+ struct cdev *cdev;
+ int minor;
+ const char *nodename;
+ umode_t mode;
+};
+
+extern struct class linux_class_misc;
+
+static inline int
+misc_register(struct miscdevice *misc)
+{
+ misc->this_device = device_create(&linux_class_misc,
+ &linux_root_device, 0, misc, misc->name);
+ misc->cdev = cdev_alloc();
+ if (misc->cdev == NULL)
+ return -ENOMEM;
+ misc->cdev->owner = THIS_MODULE;
+ misc->cdev->ops = misc->fops;
+ kobject_set_name(&misc->cdev->kobj, misc->name);
+ if (cdev_add(misc->cdev, misc->this_device->devt, 1))
+ return -EINVAL;
+ return (0);
+}
+
+static inline int
+misc_deregister(struct miscdevice *misc)
+{
+ device_destroy(&linux_class_misc, misc->this_device->devt);
+ cdev_del(misc->cdev);
+
+ return (0);
+}
+
+#endif /* _LINUXKPI_LINUX_MISCDEVICE_H_ */
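A hedged sketch of registering a misc character device with the helpers above; the device name and fops table are hypothetical, and only the fields consumed by misc_register() are filled in.

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
};

static struct miscdevice demo_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "demo",
        .fops = &demo_fops,
};

static int
demo_attach(void)
{
        /* Creates the device node and the backing cdev. */
        return (misc_register(&demo_misc));
}

static void
demo_detach(void)
{
        misc_deregister(&demo_misc);
}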
diff --git a/sys/compat/linuxkpi/common/include/linux/mm.h b/sys/compat/linuxkpi/common/include/linux/mm.h
new file mode 100644
index 000000000000..156b00a0c0f0
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mm.h
@@ -0,0 +1,479 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * Copyright (c) 2015 François Tigeot
+ * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_MM_H_
+#define _LINUXKPI_LINUX_MM_H_
+
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm_types.h>
+#include <linux/mmzone.h>
+#include <linux/pfn.h>
+#include <linux/list.h>
+#include <linux/mmap_lock.h>
+#include <linux/overflow.h>
+#include <linux/shrinker.h>
+#include <linux/page.h>
+
+#include <asm/pgtable.h>
+
+#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
+
+/*
+ * Make sure our LinuxKPI defined virtual memory flags don't conflict
+ * with the ones defined by FreeBSD:
+ */
+CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);
+
+#define VM_READ VM_PROT_READ
+#define VM_WRITE VM_PROT_WRITE
+#define VM_EXEC VM_PROT_EXECUTE
+
+#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+
+#define VM_PFNINTERNAL (1 << 8) /* FreeBSD private flag to vm_insert_pfn() */
+#define VM_MIXEDMAP (1 << 9)
+#define VM_NORESERVE (1 << 10)
+#define VM_PFNMAP (1 << 11)
+#define VM_IO (1 << 12)
+#define VM_MAYWRITE (1 << 13)
+#define VM_DONTCOPY (1 << 14)
+#define VM_DONTEXPAND (1 << 15)
+#define VM_DONTDUMP (1 << 16)
+#define VM_SHARED (1 << 17)
+
+#define VMA_MAX_PREFAULT_RECORD 1
+
+#define FOLL_WRITE (1 << 0)
+#define FOLL_FORCE (1 << 1)
+
+#define VM_FAULT_OOM (1 << 0)
+#define VM_FAULT_SIGBUS (1 << 1)
+#define VM_FAULT_MAJOR (1 << 2)
+#define VM_FAULT_WRITE (1 << 3)
+#define VM_FAULT_HWPOISON (1 << 4)
+#define VM_FAULT_HWPOISON_LARGE (1 << 5)
+#define VM_FAULT_SIGSEGV (1 << 6)
+#define VM_FAULT_NOPAGE (1 << 7)
+#define VM_FAULT_LOCKED (1 << 8)
+#define VM_FAULT_RETRY (1 << 9)
+#define VM_FAULT_FALLBACK (1 << 10)
+
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
+
+#define FAULT_FLAG_WRITE (1 << 0)
+#define FAULT_FLAG_MKWRITE (1 << 1)
+#define FAULT_FLAG_ALLOW_RETRY (1 << 2)
+#define FAULT_FLAG_RETRY_NOWAIT (1 << 3)
+#define FAULT_FLAG_KILLABLE (1 << 4)
+#define FAULT_FLAG_TRIED (1 << 5)
+#define FAULT_FLAG_USER (1 << 6)
+#define FAULT_FLAG_REMOTE (1 << 7)
+#define FAULT_FLAG_INSTRUCTION (1 << 8)
+
+#define fault_flag_allow_retry_first(flags) \
+ (((flags) & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_TRIED)) == FAULT_FLAG_ALLOW_RETRY)
+
+typedef int (*pte_fn_t)(linux_pte_t *, unsigned long addr, void *data);
+
+struct vm_area_struct {
+ vm_offset_t vm_start;
+ vm_offset_t vm_end;
+ vm_offset_t vm_pgoff;
+ pgprot_t vm_page_prot;
+ unsigned long vm_flags;
+ struct mm_struct *vm_mm;
+ void *vm_private_data;
+ const struct vm_operations_struct *vm_ops;
+ struct linux_file *vm_file;
+
+ /* internal operation */
+ vm_paddr_t vm_pfn; /* PFN for memory map */
+ vm_size_t vm_len; /* length for memory map */
+ vm_pindex_t vm_pfn_first;
+ int vm_pfn_count;
+ int *vm_pfn_pcount;
+ vm_object_t vm_obj;
+ vm_map_t vm_cached_map;
+ TAILQ_ENTRY(vm_area_struct) vm_entry;
+};
+
+struct vm_fault {
+ unsigned int flags;
+ pgoff_t pgoff;
+ union {
+ /* user-space address */
+ void *virtual_address; /* < 4.11 */
+ unsigned long address; /* >= 4.11 */
+ };
+ struct page *page;
+ struct vm_area_struct *vma;
+};
+
+struct vm_operations_struct {
+ void (*open) (struct vm_area_struct *);
+ void (*close) (struct vm_area_struct *);
+ int (*fault) (struct vm_fault *);
+ int (*access) (struct vm_area_struct *, unsigned long, void *, int, int);
+};
+
+struct sysinfo {
+ uint64_t totalram; /* Total usable main memory size */
+ uint64_t freeram; /* Available memory size */
+ uint64_t totalhigh; /* Total high memory size */
+ uint64_t freehigh; /* Available high memory size */
+ uint32_t mem_unit; /* Memory unit size in bytes */
+};
+
+static inline struct page *
+virt_to_head_page(const void *p)
+{
+
+ return (virt_to_page(p));
+}
+
+static inline struct folio *
+virt_to_folio(const void *p)
+{
+ struct page *page = virt_to_page(p);
+
+ return (page_folio(page));
+}
+
+/*
+ * Compute the base-2 logarithm of the power-of-two page count (rounded
+ * up) needed to hold size bytes.
+ */
+static inline int
+get_order(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> PAGE_SHIFT;
+ order = 0;
+ while (size) {
+ order++;
+ size >>= 1;
+ }
+ return (order);
+}
+
+/*
+ * Resolve a page into a virtual address:
+ *
+ * NOTE: This function only works for pages allocated by the kernel.
+ */
+void *linux_page_address(const struct page *);
+#define page_address(page) linux_page_address(page)
+
+static inline void *
+lowmem_page_address(struct page *page)
+{
+ return (page_address(page));
+}
+
+/*
+ * This only works from within a memory-map (mmap) operation: it merely
+ * records the mapping parameters on the VMA.
+ */
+static inline int
+io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ vm_memattr_t prot)
+{
+ vma->vm_page_prot = prot;
+ vma->vm_pfn = pfn;
+ vma->vm_len = size;
+
+ return (0);
+}
+
+vm_fault_t
+lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot);
+
+static inline vm_fault_t
+vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot)
+{
+ vm_fault_t ret;
+
+ VM_OBJECT_WLOCK(vma->vm_obj);
+ ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);
+ VM_OBJECT_WUNLOCK(vma->vm_obj);
+
+ return (ret);
+}
+#define vmf_insert_pfn_prot(...) \
+ _Static_assert(false, \
+"This function is always called in a loop. Consider using the locked version")
+
+static inline int
+apply_to_page_range(struct mm_struct *mm, unsigned long address,
+ unsigned long size, pte_fn_t fn, void *data)
+{
+ return (-ENOTSUP);
+}
+
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+
+int lkpi_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long start_addr, unsigned long start_pfn, unsigned long size,
+ pgprot_t prot);
+
+static inline int
+remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ return (lkpi_remap_pfn_range(vma, addr, pfn, size, prot));
+}
+
+static inline unsigned long
+vma_pages(struct vm_area_struct *vma)
+{
+ return ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+}
+
+#define offset_in_page(off) ((unsigned long)(off) & (PAGE_SIZE - 1))
+
+static inline void
+set_page_dirty(struct page *page)
+{
+ vm_page_dirty(page);
+}
+
+static inline void
+mark_page_accessed(struct page *page)
+{
+ vm_page_reference(page);
+}
+
+static inline void
+get_page(struct page *page)
+{
+ vm_page_wire(page);
+}
+
+static inline void
+put_page(struct page *page)
+{
+ /* `__free_page()` takes care of the refcounting (unwire). */
+ __free_page(page);
+}
+
+static inline void
+folio_get(struct folio *folio)
+{
+ get_page(&folio->page);
+}
+
+static inline void
+folio_put(struct folio *folio)
+{
+ put_page(&folio->page);
+}
+
+/*
+ * Linux uses the following "transparent" union so that `release_pages()`
+ * accepts both a list of `struct page` or a list of `struct folio`. This
+ * relies on the fact that a `struct folio` can be cast to a `struct page`.
+ */
+typedef union {
+ struct page **pages;
+ struct folio **folios;
+} release_pages_arg __attribute__ ((__transparent_union__));
+
+void linux_release_pages(release_pages_arg arg, int nr);
+#define release_pages(arg, nr) linux_release_pages((arg), (nr))
+
+extern long
+lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **);
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60500
+#define get_user_pages(start, nr_pages, gup_flags, pages) \
+ lkpi_get_user_pages(start, nr_pages, gup_flags, pages)
+#else
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+ lkpi_get_user_pages(start, nr_pages, gup_flags, pages)
+#endif
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60500
+static inline long
+pin_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages)
+{
+ return (get_user_pages(start, nr_pages, gup_flags, pages));
+}
+#else
+static inline long
+pin_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ return (get_user_pages(start, nr_pages, gup_flags, pages, vmas));
+}
+#endif
+
+extern int
+__get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **);
+
+static inline int
+pin_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages)
+{
+ return __get_user_pages_fast(
+ start, nr_pages, !!(gup_flags & FOLL_WRITE), pages);
+}
+
+extern long
+get_user_pages_remote(struct task_struct *, struct mm_struct *,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **,
+ struct vm_area_struct **);
+
+static inline long
+pin_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ return get_user_pages_remote(
+ task, mm, start, nr_pages, gup_flags, pages, vmas);
+}
+
+#define unpin_user_page(page) put_page(page)
+#define unpin_user_pages(pages, npages) release_pages(pages, npages)
+
+#define copy_highpage(to, from) pmap_copy_page(from, to)
+
+static inline pgprot_t
+vm_get_page_prot(unsigned long vm_flags)
+{
+ return (vm_flags & VM_PROT_ALL);
+}
+
+static inline void
+vm_flags_set(struct vm_area_struct *vma, unsigned long flags)
+{
+ vma->vm_flags |= flags;
+}
+
+static inline void
+vm_flags_clear(struct vm_area_struct *vma, unsigned long flags)
+{
+ vma->vm_flags &= ~flags;
+}
+
+static inline struct page *
+vmalloc_to_page(const void *addr)
+{
+ vm_paddr_t paddr;
+
+ paddr = pmap_kextract((vm_offset_t)addr);
+ return (PHYS_TO_VM_PAGE(paddr));
+}
+
+static inline int
+trylock_page(struct page *page)
+{
+ return (vm_page_tryxbusy(page));
+}
+
+static inline void
+unlock_page(struct page *page)
+{
+
+ vm_page_xunbusy(page);
+}
+
+extern int is_vmalloc_addr(const void *addr);
+void si_meminfo(struct sysinfo *si);
+
+static inline unsigned long
+totalram_pages(void)
+{
+ return ((unsigned long)physmem);
+}
+
+#define unmap_mapping_range(...) lkpi_unmap_mapping_range(__VA_ARGS__)
+void lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
+ loff_t const holelen, int even_cows __unused);
+
+#define PAGE_ALIGNED(p) __is_aligned(p, PAGE_SIZE)
+
+void vma_set_file(struct vm_area_struct *vma, struct linux_file *file);
+
+static inline void
+might_alloc(gfp_t gfp_mask __unused)
+{
+}
+
+#define is_cow_mapping(flags) (false)
+
+static inline bool
+want_init_on_free(void)
+{
+ return (false);
+}
+
+static inline unsigned long
+folio_pfn(struct folio *folio)
+{
+ return (page_to_pfn(&folio->page));
+}
+
+static inline long
+folio_nr_pages(struct folio *folio)
+{
+ return (1);
+}
+
+static inline size_t
+folio_size(struct folio *folio)
+{
+ return (PAGE_SIZE);
+}
+
+static inline void
+folio_mark_dirty(struct folio *folio)
+{
+ set_page_dirty(&folio->page);
+}
+
+static inline void *
+folio_address(const struct folio *folio)
+{
+ return (page_address(&folio->page));
+}
+
+#endif /* _LINUXKPI_LINUX_MM_H_ */
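Two small worked examples for the helpers above, wrapped in hypothetical functions. With 4 KiB pages, get_order(PAGE_SIZE) == 0, get_order(3 * PAGE_SIZE) == 2 (four pages), and offset_in_page(0x12345) == 0x345.

#include <linux/mm.h>

/* Hypothetical: allocation order whose 2^order pages cover "bytes". */
static int
demo_order_for(size_t bytes)
{
        return (get_order(bytes));
}

/* Hypothetical: sub-page offset of an arbitrary pointer. */
static unsigned long
demo_subpage_offset(const void *p)
{
        return (offset_in_page(p));
}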
diff --git a/sys/compat/linuxkpi/common/include/linux/mm_types.h b/sys/compat/linuxkpi/common/include/linux/mm_types.h
new file mode 100644
index 000000000000..3ea68e97004c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mm_types.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MM_TYPES_H_
+#define _LINUXKPI_LINUX_MM_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/page.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+
+#include <asm/atomic.h>
+
+typedef int vm_fault_t;
+
+struct vm_area_struct;
+struct task_struct;
+
+struct mm_struct {
+ struct vm_area_struct *mmap;
+ atomic_t mm_count;
+ atomic_t mm_users;
+ size_t pinned_vm;
+ /* Renamed to mmap_lock in v5.8 */
+ struct rw_semaphore mmap_sem;
+};
+
+extern void linux_mm_dtor(struct mm_struct *mm);
+
+static inline void
+mmdrop(struct mm_struct *mm)
+{
+ if (__predict_false(atomic_dec_and_test(&mm->mm_count)))
+ linux_mm_dtor(mm);
+}
+
+static inline bool
+mmget_not_zero(struct mm_struct *mm)
+{
+ return (atomic_inc_not_zero(&mm->mm_users));
+}
+
+static inline void
+mmput(struct mm_struct *mm)
+{
+ if (__predict_false(atomic_dec_and_test(&mm->mm_users)))
+ mmdrop(mm);
+}
+
+static inline void
+mmgrab(struct mm_struct *mm)
+{
+ atomic_inc(&mm->mm_count);
+}
+
+extern struct mm_struct *linux_get_task_mm(struct task_struct *);
+#define get_task_mm(task) linux_get_task_mm(task)
+
+struct folio {
+ /*
+ * The page member must be at the beginning because `page_folio(p)`
+ * casts from a `struct page` to a `struct folio`.
+ *
+ * `release_pages()` also relies on this to be able to accept either a
+ * list of `struct page` or a list of `struct folio`.
+ */
+ struct page page;
+};
+
+#endif /* _LINUXKPI_LINUX_MM_TYPES_H_ */
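An illustrative reference-holding pattern for the counters above (the function is hypothetical): mmget_not_zero()/mmput() pin the users count so the address space stays usable, while mmgrab()/mmdrop() pin only the structure itself.

#include <linux/errno.h>
#include <linux/mm_types.h>

/* Hypothetical: run a callback only while the address space is still live. */
static int
demo_with_mm(struct mm_struct *mm, void (*cb)(struct mm_struct *))
{
        if (!mmget_not_zero(mm))
                return (-ESRCH);        /* all users are gone */
        cb(mm);
        mmput(mm);
        return (0);
}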
diff --git a/sys/compat/linuxkpi/common/include/linux/mman.h b/sys/compat/linuxkpi/common/include/linux/mman.h
new file mode 100644
index 000000000000..eff80759b4cd
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mman.h
@@ -0,0 +1,38 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_MMAN_H
+#define _LINUX_MMAN_H
+
+/*
+ * In Linux, <linux/mman.h> includes <linux/percpu_counter.h>, which includes
+ * <linux/smp.h>.
+ */
+#include <linux/smp.h>
+
+#endif /* _LINUX_MMAN_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/mmap_lock.h b/sys/compat/linuxkpi/common/include/linux/mmap_lock.h
new file mode 100644
index 000000000000..de6b2a029b1f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mmap_lock.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_MMAP_LOCK_H_
+#define _LINUXKPI_LINUX_MMAP_LOCK_H_
+
+#include <linux/mm_types.h>
+#include <linux/rwsem.h>
+
+static inline void
+mmap_read_lock(struct mm_struct *mm)
+{
+
+ down_read(&mm->mmap_sem);
+}
+
+static inline void
+mmap_read_unlock(struct mm_struct *mm)
+{
+
+ up_read(&mm->mmap_sem);
+}
+
+static inline void
+mmap_write_lock_killable(struct mm_struct *mm)
+{
+
+ down_write_killable(&mm->mmap_sem);
+}
+
+#endif /* _LINUXKPI_LINUX_MMAP_LOCK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/mmu_context.h b/sys/compat/linuxkpi/common/include/linux/mmu_context.h
new file mode 100644
index 000000000000..4c1bc61b3edb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mmu_context.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2018 Johannes Lundberg <johalun0@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MMU_CONTEXT_H_
+#define _LINUXKPI_LINUX_MMU_CONTEXT_H_
+
+struct mm_struct;
+
+static inline void
+use_mm(struct mm_struct *mm)
+{
+ /* NOP is deliberate */
+}
+
+static inline void
+unuse_mm(struct mm_struct *mm)
+{
+ /* NOP is deliberate */
+}
+
+#endif /* _LINUXKPI_LINUX_MMU_CONTEXT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/mmu_notifier.h b/sys/compat/linuxkpi/common/include/linux/mmu_notifier.h
new file mode 100644
index 000000000000..2492a6a3bd4f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mmu_notifier.h
@@ -0,0 +1,33 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_MMU_NOTIFIER_H_
+#define _LINUXKPI_LINUX_MMU_NOTIFIER_H_
+
+struct mmu_notifier {
+};
+
+#endif /* _LINUXKPI_LINUX_MMU_NOTIFIER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/mmzone.h b/sys/compat/linuxkpi/common/include/linux/mmzone.h
new file mode 100644
index 000000000000..57d3dcac9597
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mmzone.h
@@ -0,0 +1,15 @@
+/* Public domain. */
+
+#ifndef _LINUX_MMZONE_H
+#define _LINUX_MMZONE_H
+
+#include <linux/mm_types.h>
+#include <linux/numa.h>
+#include <linux/page-flags.h>
+
+#define MAX_ORDER 11
+
+#define MAX_PAGE_ORDER 10
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/mod_devicetable.h b/sys/compat/linuxkpi/common/include/linux/mod_devicetable.h
new file mode 100644
index 000000000000..87bd6ec24bce
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mod_devicetable.h
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __LINUXKPI_LINUX_MOD_DEVICETABLE_H__
+#define __LINUXKPI_LINUX_MOD_DEVICETABLE_H__
+
+#include <linux/types.h>
+
+enum dmi_field {
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_BIOS_RELEASE,
+ DMI_EC_FIRMWARE_RELEASE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_PRODUCT_SKU,
+ DMI_PRODUCT_FAMILY,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
+ DMI_OEM_STRING,
+};
+
+struct dmi_strmatch {
+ unsigned char slot : 7;
+ unsigned char exact_match : 1;
+ char substr[79];
+};
+
+struct dmi_system_id {
+ int (*callback)(const struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
+};
+
+#define DMI_MATCH(a, b) { .slot = a, .substr = b }
+#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
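+
+/*
+ * Usage sketch (hypothetical quirk table, not part of this header), in the
+ * shape consumed by DMI helpers such as dmi_check_system(); the table is
+ * terminated by an empty entry.
+ *
+ *	static const struct dmi_system_id mydrv_quirks[] = {
+ *		{
+ *			.ident = "Example Board",
+ *			.matches = {
+ *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
+ *				DMI_EXACT_MATCH(DMI_BOARD_NAME, "EX-1"),
+ *			},
+ *		},
+ *		{ }
+ *	};
+ */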
+
+#define I2C_NAME_SIZE 20
+#define I2C_MODULE_PREFIX "i2c:"
+
+#define ACPI_ID_LEN 16
+
+#endif /* __LINUXKPI_LINUX_MOD_DEVICETABLE_H__ */
diff --git a/sys/compat/linuxkpi/common/include/linux/module.h b/sys/compat/linuxkpi/common/include/linux/module.h
new file mode 100644
index 000000000000..079dacf8df6c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/module.h
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_MODULE_H_
+#define _LINUXKPI_LINUX_MODULE_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/module.h>
+#include <sys/queue.h>
+#include <sys/linker.h>
+
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <linux/kmod.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#define MODULE_AUTHOR(name)
+#define MODULE_DESCRIPTION(name)
+#define MODULE_LICENSE(name)
+#define MODULE_INFO(tag, info)
+#define MODULE_FIRMWARE(firmware)
+#define MODULE_SUPPORTED_DEVICE(name)
+#define MODULE_IMPORT_NS(_name)
+
+/*
+ * THIS_MODULE is used to differentiate modules on Linux. We currently
+ * completely stub out any Linux struct module usage, but THIS_MODULE is still
+ * used to populate the "owner" fields of various drivers. Even though we
+ * don't actually dereference these "owner" fields, they are still used by
+ * drivers to check whether devices/dmabufs/etc. come from different modules.
+ * For example, during DRM GEM import some drivers check whether the dmabuf's
+ * owner matches the dev's owner. If both are NULL they match, and drivers
+ * may incorrectly conclude that two resources come from the same module.
+ *
+ * To handle this, we specify an undefined symbol, __this_linker_file, which
+ * gets special treatment from the linker when resolving. This populates the
+ * usages of __this_linker_file with the linker_file_t of the module.
+ */
+#ifdef KLD_MODULE
+#define THIS_MODULE ((struct module *)&__this_linker_file)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
+#define __MODULE_STRING(x) __stringify(x)
+
+/* OFED pre-module initialization */
+#define SI_SUB_OFED_PREINIT (SI_SUB_ROOT_CONF - 2)
+/* OFED default module initialization */
+#define SI_SUB_OFED_MODINIT (SI_SUB_ROOT_CONF - 1)
+
+#include <sys/linker.h>
+
+static inline void
+_module_run(void *arg)
+{
+ void (*fn)(void);
+#ifdef OFED_DEBUG_INIT
+ char name[1024];
+ caddr_t pc;
+ long offset;
+
+ pc = (caddr_t)arg;
+ if (linker_search_symbol_name(pc, name, sizeof(name), &offset) != 0)
+ printf("Running ??? (%p)\n", pc);
+ else
+ printf("Running %s (%p)\n", name, pc);
+#endif
+ fn = arg;
+ fn();
+}
+
+#define module_init(fn) \
+ SYSINIT(fn, SI_SUB_OFED_MODINIT, SI_ORDER_FIRST, _module_run, (fn))
+
+#define module_exit(fn) \
+ SYSUNINIT(fn, SI_SUB_OFED_MODINIT, SI_ORDER_SECOND, _module_run, (fn))
+
+/*
+ * The following two macros are a workaround for not having a module
+ * load and unload order resolver:
+ */
+#define module_init_order(fn, order) \
+ SYSINIT(fn, SI_SUB_OFED_MODINIT, (order), _module_run, (fn))
+
+#define module_exit_order(fn, order) \
+ SYSUNINIT(fn, SI_SUB_OFED_MODINIT, (order), _module_run, (fn))
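+
+/*
+ * Usage sketch (hypothetical driver, not part of this header): the Linux
+ * init/exit pair below expands to SYSINIT()/SYSUNINIT() entries that invoke
+ * _module_run() with the given function at SI_SUB_OFED_MODINIT time.
+ *
+ *	static int mydrv_init(void) { return (0); }
+ *	static void mydrv_exit(void) { }
+ *
+ *	module_init(mydrv_init);
+ *	module_exit(mydrv_exit);
+ */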
+
+#define module_get(module)
+#define module_put(module)
+#define try_module_get(module) 1
+
+#define postcore_initcall(fn) module_init(fn)
+
+#endif /* _LINUXKPI_LINUX_MODULE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/moduleparam.h b/sys/compat/linuxkpi/common/include/linux/moduleparam.h
new file mode 100644
index 000000000000..b61bbce495ea
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/moduleparam.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_MODULEPARAM_H_
+#define _LINUXKPI_LINUX_MODULEPARAM_H_
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#include <linux/types.h>
+
+#ifndef LINUXKPI_PARAM_PARENT
+#define LINUXKPI_PARAM_PARENT _compat_linuxkpi
+#endif
+
+#ifndef LINUXKPI_PARAM_PREFIX
+#define LINUXKPI_PARAM_PREFIX /* empty prefix is the default */
+#endif
+
+#ifndef LINUXKPI_PARAM_PERM
+#define LINUXKPI_PARAM_PERM(perm) (((perm) & 0222) ? CTLFLAG_RWTUN : CTLFLAG_RDTUN)
+#endif
+
+#define LINUXKPI_PARAM_CONCAT_SUB(a,b,c,d) a##b##c##d
+#define LINUXKPI_PARAM_CONCAT(...) LINUXKPI_PARAM_CONCAT_SUB(__VA_ARGS__)
+#define LINUXKPI_PARAM_PASS(...) __VA_ARGS__
+#define LINUXKPI_PARAM_DESC(name) LINUXKPI_PARAM_CONCAT(linuxkpi_,LINUXKPI_PARAM_PREFIX,name,_desc)
+#define LINUXKPI_PARAM_NAME(name) LINUXKPI_PARAM_CONCAT(LINUXKPI_PARAM_PREFIX,name,,)
+
+#define LINUXKPI_PARAM_bool(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_BOOL(LINUXKPI_PARAM_PARENT, OID_AUTO,\
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_byte(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_U8(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_short(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_S16(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_ushort(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_U16(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_int(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_INT(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0,\
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_uint(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_UINT(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_bint(name, var, perm) \
+ LINUXKPI_PARAM_int(name, var, perm)
+
+#define LINUXKPI_PARAM_hexint(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_UINT(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_long(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_LONG(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_ulong(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_ULONG(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define LINUXKPI_PARAM_charp(name, var, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_STRING(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), &(var), 0, \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define module_param_string(name, str, len, perm) \
+ extern const char LINUXKPI_PARAM_DESC(name)[]; \
+ LINUXKPI_PARAM_PASS(SYSCTL_STRING(LINUXKPI_PARAM_PARENT, OID_AUTO, \
+ LINUXKPI_PARAM_NAME(name), LINUXKPI_PARAM_PERM(perm), (str), (len), \
+ LINUXKPI_PARAM_DESC(name)))
+
+#define module_param_named(name, var, type, mode) \
+ LINUXKPI_PARAM_##type(name, var, mode)
+
+#define module_param(var, type, mode) \
+ LINUXKPI_PARAM_##type(var, var, mode)
+
+#define module_param_named_unsafe(name, var, type, mode) \
+ LINUXKPI_PARAM_##type(name, var, mode)
+
+#define module_param_unsafe(var, type, mode) \
+ LINUXKPI_PARAM_##type(var, var, mode)
+
+#define module_param_array(var, type, addr_argc, mode)
+
+#define MODULE_PARM_DESC(name, desc) \
+ const char LINUXKPI_PARAM_DESC(name)[] = { desc }
+
+#define kernel_param_lock(...) do {} while (0)
+#define kernel_param_unlock(...) do {} while (0)
+
+SYSCTL_DECL(_compat_linuxkpi);
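+
+/*
+ * Usage sketch (hypothetical driver, not part of this header): the Linux
+ * module parameter below becomes a sysctl/tunable under LINUXKPI_PARAM_PARENT
+ * (compat.linuxkpi by default); mode 0644 has a write bit set, so it maps to
+ * CTLFLAG_RWTUN.
+ *
+ *	static int debug = 0;
+ *	module_param(debug, int, 0644);
+ *	MODULE_PARM_DESC(debug, "Enable debug output");
+ */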
+
+#endif /* _LINUXKPI_LINUX_MODULEPARAM_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/mutex.h b/sys/compat/linuxkpi/common/include/linux/mutex.h
new file mode 100644
index 000000000000..6fb6a7744a89
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/mutex.h
@@ -0,0 +1,177 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_MUTEX_H_
+#define _LINUXKPI_LINUX_MUTEX_H_
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/sx.h>
+
+#include <linux/kernel.h>
+#include <linux/cleanup.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+typedef struct mutex {
+ struct sx sx;
+} mutex_t;
+
+/*
+ * If CONFIG_NO_MUTEX_SKIP is defined, LinuxKPI mutexes and assertions
+ * will not be skipped during panic().
+ */
+#ifdef CONFIG_NO_MUTEX_SKIP
+#define MUTEX_SKIP(void) 0
+#else
+#define MUTEX_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
+#endif
+
+#define mutex_lock(_m) do { \
+ if (MUTEX_SKIP()) \
+ break; \
+ sx_xlock(&(_m)->sx); \
+} while (0)
+
+#define mutex_lock_nested(_m, _s) mutex_lock(_m)
+#define mutex_lock_nest_lock(_m, _s) mutex_lock(_m)
+
+#define mutex_lock_interruptible(_m) ({ \
+ MUTEX_SKIP() ? 0 : \
+ linux_mutex_lock_interruptible(_m); \
+})
+
+#define mutex_lock_interruptible_nested(m, c) mutex_lock_interruptible(m)
+
+/*
+ * Reuse the interruptible method since the SX
+ * lock handles both signals and interrupts:
+ */
+#define mutex_lock_killable(_m) ({ \
+ MUTEX_SKIP() ? 0 : \
+ linux_mutex_lock_interruptible(_m); \
+})
+
+#define mutex_lock_killable_nested(_m, _sub) \
+ mutex_lock_killable(_m)
+
+#define mutex_unlock(_m) do { \
+ if (MUTEX_SKIP()) \
+ break; \
+ sx_xunlock(&(_m)->sx); \
+} while (0)
+
+#define mutex_trylock(_m) ({ \
+ MUTEX_SKIP() ? 1 : \
+ !!sx_try_xlock(&(_m)->sx); \
+})
+
+enum mutex_trylock_recursive_enum {
+ MUTEX_TRYLOCK_FAILED = 0,
+ MUTEX_TRYLOCK_SUCCESS = 1,
+ MUTEX_TRYLOCK_RECURSIVE = 2,
+};
+
+static inline __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+ if (unlikely(sx_xholder(&lock->sx) == curthread))
+ return (MUTEX_TRYLOCK_RECURSIVE);
+
+ return (mutex_trylock(lock));
+}
+
+#define mutex_init(_m) \
+ linux_mutex_init(_m, mutex_name(#_m), SX_NOWITNESS)
+
+#define __mutex_init(_m, _n, _l) \
+ linux_mutex_init(_m, _n, SX_NOWITNESS)
+
+#define mutex_init_witness(_m) \
+ linux_mutex_init(_m, mutex_name(#_m), SX_DUPOK)
+
+#define mutex_destroy(_m) \
+ linux_mutex_destroy(_m)
+
+static inline bool
+mutex_is_locked(mutex_t *m)
+{
+ return ((struct thread *)SX_OWNER(m->sx.sx_lock) != NULL);
+}
+
+static inline bool
+mutex_is_owned(mutex_t *m)
+{
+ return (sx_xlocked(&m->sx));
+}
+
+static inline int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *m)
+{
+ if (atomic_dec_and_test(cnt)) {
+ mutex_lock(m);
+ return (1);
+ }
+
+ return (0);
+}
+
+#ifdef WITNESS_ALL
+/* NOTE: the maximum WITNESS name is 64 chars */
+#define __mutex_name(name, file, line) \
+ (((const char *){file ":" #line "-" name}) + \
+ (sizeof(file) > 16 ? sizeof(file) - 16 : 0))
+#else
+#define __mutex_name(name, file, line) name
+#endif
+#define _mutex_name(...) __mutex_name(__VA_ARGS__)
+#define mutex_name(name) _mutex_name(name, __FILE__, __LINE__)
+
+#define DEFINE_MUTEX(lock) \
+ mutex_t lock; \
+ SX_SYSINIT_FLAGS(lock, &(lock).sx, mutex_name(#lock), SX_DUPOK)
+
+static inline void
+linux_mutex_init(mutex_t *m, const char *name, int flags)
+{
+ memset(m, 0, sizeof(*m));
+ sx_init_flags(&m->sx, name, flags);
+}
+
+static inline void
+linux_mutex_destroy(mutex_t *m)
+{
+ if (mutex_is_owned(m))
+ mutex_unlock(m);
+ sx_destroy(&m->sx);
+}
+
+extern int linux_mutex_lock_interruptible(mutex_t *m);
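+
+/*
+ * Usage sketch (hypothetical lock, not part of this header): a LinuxKPI
+ * mutex is a thin wrapper around an sx(9) exclusive lock, so the usual
+ * Linux pattern maps to sx_xlock()/sx_xunlock() underneath.
+ *
+ *	DEFINE_MUTEX(mydrv_lock);
+ *
+ *	mutex_lock(&mydrv_lock);
+ *	mutex_unlock(&mydrv_lock);
+ *
+ *	if (mutex_trylock(&mydrv_lock))
+ *		mutex_unlock(&mydrv_lock);
+ */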
+
+#endif /* _LINUXKPI_LINUX_MUTEX_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/net.h b/sys/compat/linuxkpi/common/include/linux/net.h
new file mode 100644
index 000000000000..a5172f3f31eb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/net.h
@@ -0,0 +1,88 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_NET_H_
+#define _LINUXKPI_LINUX_NET_H_
+
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/errno.h>
+
+static inline int
+sock_create_kern(int family, int type, int proto, struct socket **res)
+{
+ return -socreate(family, res, type, proto, curthread->td_ucred,
+ curthread);
+}
+
+static inline int
+sock_getname(struct socket *so, struct sockaddr *sa, int *sockaddr_len,
+ int peer)
+{
+ int error;
+
+ /*
+	 * XXXGL: we can't use sopeeraddr()/sosockaddr() here since, with
+	 * INVARIANTS, they would check whether the supplied sockaddr has
+	 * enough length.  Such a notion doesn't even exist in the Linux KPI.
+ */
+ if (peer) {
+ if ((so->so_state & SS_ISCONNECTED) == 0)
+ return (-ENOTCONN);
+
+ error = so->so_proto->pr_peeraddr(so, sa);
+ } else
+ error = so->so_proto->pr_sockaddr(so, sa);
+ if (error)
+ return (-error);
+ *sockaddr_len = sa->sa_len;
+
+ return (0);
+}
+
+static inline void
+sock_release(struct socket *so)
+{
+ soclose(so);
+}
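+
+/*
+ * Usage sketch (hypothetical caller, not part of this header): these helpers
+ * follow the Linux convention of returning 0 or a negative errno.
+ *
+ *	struct socket *so;
+ *	struct sockaddr_storage ss;
+ *	int error, len;
+ *
+ *	error = sock_create_kern(AF_INET, SOCK_STREAM, 0, &so);
+ *	if (error == 0) {
+ *		error = sock_getname(so, (struct sockaddr *)&ss, &len, 0);
+ *		sock_release(so);
+ *	}
+ */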
+
+
+int linuxkpi_net_ratelimit(void);
+
+static inline int
+net_ratelimit(void)
+{
+
+ return (linuxkpi_net_ratelimit());
+}
+
+#endif /* _LINUXKPI_LINUX_NET_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/net_dim.h b/sys/compat/linuxkpi/common/include/linux/net_dim.h
new file mode 100644
index 000000000000..4fe3e39210e7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/net_dim.h
@@ -0,0 +1,408 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
+ *
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file implements Dynamic Interrupt Moderation (DIM). */
+
+#ifndef _LINUXKPI_LINUX_NET_DIM_H
+#define _LINUXKPI_LINUX_NET_DIM_H
+
+#include <asm/types.h>
+
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+
+struct net_dim_cq_moder {
+ u16 usec;
+ u16 pkts;
+ u8 cq_period_mode;
+};
+
+struct net_dim_sample {
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
+};
+
+struct net_dim_stats {
+ int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
+ int epms; /* events per msec */
+};
+
+struct net_dim { /* Adaptive Moderation */
+ u8 state;
+ struct net_dim_stats prev_stats;
+ struct net_dim_sample start_sample;
+ struct work_struct work;
+ u16 event_ctr;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
+enum {
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+ NET_DIM_CQ_PERIOD_NUM_MODES = 0x2,
+ NET_DIM_CQ_PERIOD_MODE_DISABLED = 0xFF,
+};
+
+/* Adaptive moderation logic */
+enum {
+ NET_DIM_START_MEASURE,
+ NET_DIM_MEASURE_IN_PROGRESS,
+ NET_DIM_APPLY_NEW_PROFILE,
+};
+
+enum {
+ NET_DIM_PARKING_ON_TOP,
+ NET_DIM_PARKING_TIRED,
+ NET_DIM_GOING_RIGHT,
+ NET_DIM_GOING_LEFT,
+};
+
+enum {
+ NET_DIM_STATS_WORSE,
+ NET_DIM_STATS_SAME,
+ NET_DIM_STATS_BETTER,
+};
+
+enum {
+ NET_DIM_STEPPED,
+ NET_DIM_TOO_TIRED,
+ NET_DIM_ON_EDGE,
+};
+
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+/* Adaptive moderation profiles */
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES. */
+#define NET_DIM_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+static const struct net_dim_cq_moder
+ net_dim_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_EQE_PROFILES,
+ NET_DIM_CQE_PROFILES,
+};
+
+static inline struct net_dim_cq_moder
+net_dim_get_profile(u8 cq_period_mode,
+ int ix)
+{
+ struct net_dim_cq_moder cq_moder;
+
+ cq_moder = net_dim_profile[cq_period_mode][ix];
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+
+static inline struct net_dim_cq_moder
+net_dim_get_def_profile(u8 rx_cq_period_mode)
+{
+ int default_profile_ix;
+
+ if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
+ default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
+ else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
+ default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
+}
+
+static inline bool
+net_dim_on_top(struct net_dim *dim)
+{
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ return true;
+ case NET_DIM_GOING_RIGHT:
+ return (dim->steps_left > 1) && (dim->steps_right == 1);
+ default: /* NET_DIM_GOING_LEFT */
+ return (dim->steps_right > 1) && (dim->steps_left == 1);
+ }
+}
+
+static inline void
+net_dim_turn(struct net_dim *dim)
+{
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ break;
+ case NET_DIM_GOING_RIGHT:
+ dim->tune_state = NET_DIM_GOING_LEFT;
+ dim->steps_left = 0;
+ break;
+ case NET_DIM_GOING_LEFT:
+ dim->tune_state = NET_DIM_GOING_RIGHT;
+ dim->steps_right = 0;
+ break;
+ }
+}
+
+static inline int
+net_dim_step(struct net_dim *dim)
+{
+ if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+ return NET_DIM_TOO_TIRED;
+
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ break;
+ case NET_DIM_GOING_RIGHT:
+ if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+ return NET_DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ break;
+ case NET_DIM_GOING_LEFT:
+ if (dim->profile_ix == 0)
+ return NET_DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ break;
+ }
+
+ dim->tired++;
+ return NET_DIM_STEPPED;
+}
+
+static inline void
+net_dim_park_on_top(struct net_dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tired = 0;
+ dim->tune_state = NET_DIM_PARKING_ON_TOP;
+}
+
+static inline void
+net_dim_park_tired(struct net_dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tune_state = NET_DIM_PARKING_TIRED;
+}
+
+static inline void
+net_dim_exit_parking(struct net_dim *dim)
+{
+ dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
+ NET_DIM_GOING_RIGHT;
+ net_dim_step(dim);
+}
+
+/* More than 10% difference. */
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+	(((100UL * abs((val) - (ref))) / (ref)) > 10)
+
+static inline int
+net_dim_stats_compare(struct net_dim_stats *curr,
+ struct net_dim_stats *prev)
+{
+ if (!prev->bpms)
+ return curr->bpms ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ if (!prev->ppms)
+ return curr->ppms ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ if (!prev->epms)
+ return NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ return NET_DIM_STATS_SAME;
+}
+
+static inline bool
+net_dim_decision(struct net_dim_stats *curr_stats,
+ struct net_dim *dim)
+{
+ int prev_state = dim->tune_state;
+ int prev_ix = dim->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+ if (stats_res != NET_DIM_STATS_SAME)
+ net_dim_exit_parking(dim);
+ break;
+
+ case NET_DIM_PARKING_TIRED:
+ dim->tired--;
+ if (!dim->tired)
+ net_dim_exit_parking(dim);
+ break;
+
+ case NET_DIM_GOING_RIGHT:
+ case NET_DIM_GOING_LEFT:
+ stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+ if (stats_res != NET_DIM_STATS_BETTER)
+ net_dim_turn(dim);
+
+ if (net_dim_on_top(dim)) {
+ net_dim_park_on_top(dim);
+ break;
+ }
+ step_res = net_dim_step(dim);
+ switch (step_res) {
+ case NET_DIM_ON_EDGE:
+ net_dim_park_on_top(dim);
+ break;
+ case NET_DIM_TOO_TIRED:
+ net_dim_park_tired(dim);
+ break;
+ }
+
+ break;
+ }
+
+ if ((prev_state != NET_DIM_PARKING_ON_TOP) ||
+ (dim->tune_state != NET_DIM_PARKING_ON_TOP))
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+static inline void
+net_dim_sample(u16 event_ctr,
+ u64 packets,
+ u64 bytes,
+ struct net_dim_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = packets;
+ s->byte_ctr = bytes;
+ s->event_ctr = event_ctr;
+}
+
+#define NET_DIM_NEVENTS 64
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
+
+static inline void
+net_dim_calc_stats(struct net_dim_sample *start,
+ struct net_dim_sample *end,
+ struct net_dim_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
+
+ if (!delta_us)
+ return;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
+}
+
+static inline void
+net_dim(struct net_dim *dim,
+ u64 packets, u64 bytes)
+{
+ struct net_dim_stats curr_stats;
+ struct net_dim_sample end_sample;
+ u16 nevents;
+
+ dim->event_ctr++;
+
+ switch (dim->state) {
+ case NET_DIM_MEASURE_IN_PROGRESS:
+ nevents = BIT_GAP(BITS_PER_TYPE(u16),
+ dim->event_ctr,
+ dim->start_sample.event_ctr);
+ if (nevents < NET_DIM_NEVENTS)
+ break;
+ net_dim_sample(dim->event_ctr, packets, bytes, &end_sample);
+ net_dim_calc_stats(&dim->start_sample, &end_sample,
+ &curr_stats);
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = NET_DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ /* FALLTHROUGH */
+ case NET_DIM_START_MEASURE:
+ net_dim_sample(dim->event_ctr, packets, bytes, &dim->start_sample);
+ dim->state = NET_DIM_MEASURE_IN_PROGRESS;
+ break;
+ case NET_DIM_APPLY_NEW_PROFILE:
+ break;
+ default:
+ break;
+ }
+}
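+
+/*
+ * Usage sketch (hypothetical driver, not part of this header): the driver
+ * provides a work handler that applies the newly selected profile and then
+ * re-arms the measurement, and calls net_dim() from its interrupt/poll
+ * completion path.  mydrv_set_moderation() is a made-up hardware hook.
+ *
+ *	static void
+ *	mydrv_dim_work(struct work_struct *work)
+ *	{
+ *		struct net_dim *dim = container_of(work, struct net_dim, work);
+ *		struct net_dim_cq_moder m;
+ *
+ *		m = net_dim_get_profile(dim->mode, dim->profile_ix);
+ *		mydrv_set_moderation(m.usec, m.pkts);
+ *		dim->state = NET_DIM_START_MEASURE;
+ *	}
+ *
+ *	INIT_WORK(&dim->work, mydrv_dim_work);
+ *	dim->mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ *
+ *	From the completion path:
+ *	net_dim(dim, total_rx_packets, total_rx_bytes);
+ */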
+
+#endif /* _LINUXKPI_LINUX_NET_DIM_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/netdev_features.h b/sys/compat/linuxkpi/common/include/linux/netdev_features.h
new file mode 100644
index 000000000000..fae82776b071
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/netdev_features.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_NETDEV_FEATURES_H_
+#define _LINUXKPI_LINUX_NETDEV_FEATURES_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+typedef uint32_t netdev_features_t;
+
+#define NETIF_F_HIGHDMA BIT(0) /* Can DMA to high memory. */
+#define NETIF_F_SG BIT(1) /* Can do scatter/gather I/O. */
+#define NETIF_F_IP_CSUM BIT(2) /* Can csum TCP/UDP on IPv4. */
+#define NETIF_F_IPV6_CSUM BIT(3) /* Can csum TCP/UDP on IPv6. */
+#define NETIF_F_TSO BIT(4) /* Can do TCP over IPv4 segmentation. */
+#define NETIF_F_TSO6 BIT(5) /* Can do TCP over IPv6 segmentation. */
+#define NETIF_F_RXCSUM BIT(6) /* Can do receive csum offload. */
+#define NETIF_F_HW_CSUM BIT(7) /* Can csum packets (which?). */
+#define NETIF_F_HW_TC BIT(8) /* Can offload TC. */
+
+#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)
+
+#define NETIF_F_BITS \
+ "\20\1HIGHDMA\2SG\3IP_CSUM\4IPV6_CSUM\5TSO\6TSO6\7RXCSUM" \
+ "\10HW_CSUM\11HW_TC"
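+
+/*
+ * NETIF_F_BITS is a printf(9) "%b" bit-format string; a hypothetical debug
+ * print of a feature set could look like:
+ *
+ *	netdev_features_t f = NETIF_F_SG | NETIF_F_TSO;
+ *	printf("features=%b\n", (int)f, NETIF_F_BITS);
+ */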
+
+#endif /* _LINUXKPI_LINUX_NETDEV_FEATURES_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/netdevice.h b/sys/compat/linuxkpi/common/include/linux/netdevice.h
new file mode 100644
index 000000000000..cd7d23077a62
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/netdevice.h
@@ -0,0 +1,488 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2019 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ * Copyright (c) 2020-2022 Bjoern A. Zeeb
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_NETDEVICE_H
+#define _LINUXKPI_LINUX_NETDEVICE_H
+
+#include <linux/types.h>
+#include <linux/netdev_features.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/taskqueue.h>
+
+#include <net/if_types.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_dl.h>
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/net.h>
+#include <linux/if_ether.h>
+#include <linux/notifier.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+
+#ifdef VIMAGE
+#define init_net *vnet0
+#else
+#define init_net *((struct vnet *)0)
+#endif
+
+struct sk_buff;
+struct net_device;
+struct wireless_dev; /* net/cfg80211.h */
+
+#define MAX_ADDR_LEN 20
+
+#define NET_NAME_UNKNOWN 0
+
+enum net_addr_assign_type {
+ NET_ADDR_RANDOM,
+};
+
+enum netdev_tx {
+ NETDEV_TX_OK = 0,
+};
+typedef enum netdev_tx netdev_tx_t;
+
+struct netdev_hw_addr {
+ struct list_head addr_list;
+ uint8_t addr[MAX_ADDR_LEN];
+};
+
+struct netdev_hw_addr_list {
+ struct list_head addr_list;
+ int count;
+};
+
+enum net_device_reg_state {
+ NETREG_DUMMY = 1,
+ NETREG_REGISTERED,
+};
+
+enum tc_setup_type {
+ TC_SETUP_MAX_DUMMY,
+};
+
+struct net_device_ops {
+ int (*ndo_open)(struct net_device *);
+ int (*ndo_stop)(struct net_device *);
+ int (*ndo_set_mac_address)(struct net_device *, void *);
+ netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *);
+ void (*ndo_set_rx_mode)(struct net_device *);
+};
+
+struct net_device {
+ /* net_device fields seen publicly. */
+ /* XXX can we later make some aliases to ifnet? */
+ char name[IFNAMSIZ];
+ struct wireless_dev *ieee80211_ptr;
+ uint8_t dev_addr[ETH_ALEN];
+ struct netdev_hw_addr_list mc;
+ netdev_features_t features;
+ struct {
+ unsigned long multicast;
+
+ unsigned long rx_bytes;
+ unsigned long rx_errors;
+ unsigned long rx_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+ unsigned long tx_errors;
+ unsigned long tx_packets;
+ } stats;
+ enum net_addr_assign_type addr_assign_type;
+ enum net_device_reg_state reg_state;
+ const struct ethtool_ops *ethtool_ops;
+ const struct net_device_ops *netdev_ops;
+
+ bool needs_free_netdev;
+	/* Not properly typed as of now. */
+ int flags, type;
+ int name_assign_type, needed_headroom;
+ int threaded;
+
+ void (*priv_destructor)(struct net_device *);
+
+ /* net_device internal. */
+ struct device dev;
+
+ /*
+ * In case we delete the net_device we need to be able to clear all
+ * NAPI consumers.
+ */
+ struct mtx napi_mtx;
+ TAILQ_HEAD(, napi_struct) napi_head;
+ struct taskqueue *napi_tq;
+
+ /* Must stay last. */
+ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE);
+};
+
+#define SET_NETDEV_DEV(_ndev, _dev) (_ndev)->dev.parent = _dev;
+
+/* -------------------------------------------------------------------------- */
+/* According to linux::ipoib_main.c. */
+struct netdev_notifier_info {
+ struct net_device *dev;
+ struct ifnet *ifp;
+};
+
+static inline struct net_device *
+netdev_notifier_info_to_dev(struct netdev_notifier_info *ni)
+{
+ return (ni->dev);
+}
+
+static inline struct ifnet *
+netdev_notifier_info_to_ifp(struct netdev_notifier_info *ni)
+{
+ return (ni->ifp);
+}
+
+int register_netdevice_notifier(struct notifier_block *);
+int register_inetaddr_notifier(struct notifier_block *);
+int unregister_netdevice_notifier(struct notifier_block *);
+int unregister_inetaddr_notifier(struct notifier_block *);
+
+/* -------------------------------------------------------------------------- */
+
+#define NAPI_POLL_WEIGHT 64 /* budget */
+
+/*
+ * There are drivers directly testing napi state bits, so we need to publicly
+ * expose them. If you ask me, those accesses should be hidden behind an
+ * inline function and the bit flags should not be directly exposed.
+ */
+enum napi_state_bits {
+ /*
+ * Official Linux flags encountered.
+ */
+ NAPI_STATE_SCHED = 1,
+
+ /*
+ * Our internal versions (for now).
+ */
+ /* Do not schedule new things while we are waiting to clear things. */
+ LKPI_NAPI_FLAG_DISABLE_PENDING = 0,
+ /* To synchronise that only one poll is ever running. */
+ LKPI_NAPI_FLAG_IS_SCHEDULED = 1,
+	/* If we tried to schedule while poll was running, we need to re-schedule. */
+	LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN = 2,
+	/* When shutting down, forcefully prevent anything from running task/poll. */
+ LKPI_NAPI_FLAG_SHUTDOWN = 3,
+};
+
+struct napi_struct {
+ TAILQ_ENTRY(napi_struct) entry;
+
+ struct list_head rx_list;
+ struct net_device *dev;
+ int (*poll)(struct napi_struct *, int);
+ int budget;
+ int rx_count;
+
+
+ /*
+ * These flags mostly need to be checked/changed atomically
+ * (multiple together in some cases).
+ */
+ volatile unsigned long state;
+
+ /* FreeBSD internal. */
+ /* Use task for now, so we can easily switch between direct and task. */
+ struct task napi_task;
+};
+
+void linuxkpi_init_dummy_netdev(struct net_device *);
+void linuxkpi_netif_napi_add(struct net_device *, struct napi_struct *,
+ int(*napi_poll)(struct napi_struct *, int));
+void linuxkpi_netif_napi_del(struct napi_struct *);
+bool linuxkpi_napi_schedule_prep(struct napi_struct *);
+void linuxkpi___napi_schedule(struct napi_struct *);
+bool linuxkpi_napi_schedule(struct napi_struct *);
+void linuxkpi_napi_reschedule(struct napi_struct *);
+bool linuxkpi_napi_complete_done(struct napi_struct *, int);
+bool linuxkpi_napi_complete(struct napi_struct *);
+void linuxkpi_napi_disable(struct napi_struct *);
+void linuxkpi_napi_enable(struct napi_struct *);
+void linuxkpi_napi_synchronize(struct napi_struct *);
+
+#define init_dummy_netdev(_n) \
+ linuxkpi_init_dummy_netdev(_n)
+#define netif_napi_add(_nd, _ns, _p) \
+ linuxkpi_netif_napi_add(_nd, _ns, _p)
+#define netif_napi_del(_n) \
+ linuxkpi_netif_napi_del(_n)
+#define napi_schedule_prep(_n) \
+ linuxkpi_napi_schedule_prep(_n)
+#define __napi_schedule(_n) \
+ linuxkpi___napi_schedule(_n)
+#define napi_schedule(_n) \
+ linuxkpi_napi_schedule(_n)
+#define napi_reschedule(_n) \
+ linuxkpi_napi_reschedule(_n)
+#define napi_complete_done(_n, _r) \
+ linuxkpi_napi_complete_done(_n, _r)
+#define napi_complete(_n) \
+ linuxkpi_napi_complete(_n)
+#define napi_disable(_n) \
+ linuxkpi_napi_disable(_n)
+#define napi_enable(_n) \
+ linuxkpi_napi_enable(_n)
+#define napi_synchronize(_n) \
+ linuxkpi_napi_synchronize(_n)
+
+
+static inline void
+netif_napi_add_tx(struct net_device *dev, struct napi_struct *napi,
+ int(*napi_poll)(struct napi_struct *, int))
+{
+
+ netif_napi_add(dev, napi, napi_poll);
+}
+
+static inline bool
+napi_is_scheduled(struct napi_struct *napi)
+{
+
+ return (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state));
+}
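+
+/*
+ * Usage sketch (hypothetical driver, not part of this header): register a
+ * poll routine, schedule it from the interrupt path and complete it from the
+ * poll callback.  mydrv_rx_clean(), priv and ndev are made-up driver names.
+ *
+ *	static int
+ *	mydrv_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int work_done;
+ *
+ *		work_done = mydrv_rx_clean(napi, budget);
+ *		if (work_done < budget)
+ *			napi_complete_done(napi, work_done);
+ *		return (work_done);
+ *	}
+ *
+ *	netif_napi_add(ndev, &priv->napi, mydrv_poll);
+ *	napi_enable(&priv->napi);
+ *
+ *	From the interrupt handler:
+ *	napi_schedule(&priv->napi);
+ */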
+
+/* -------------------------------------------------------------------------- */
+
+static inline void
+netdev_rss_key_fill(uint32_t *buf, size_t len)
+{
+
+ /*
+	 * Remembering from a previous life, there were discussions about what
+	 * makes a good RSS hash key.  See the end of rss_init() in
+	 * net/rss_config.c.  iwlwifi is looking for a 10-byte "secret", so
+	 * stay with random for now.
+ */
+ get_random_bytes(buf, len);
+}
+
+static inline int
+netdev_hw_addr_list_count(struct netdev_hw_addr_list *list)
+{
+
+ return (list->count);
+}
+
+static inline int
+netdev_mc_count(struct net_device *ndev)
+{
+
+ return (netdev_hw_addr_list_count(&ndev->mc));
+}
+
+#define netdev_hw_addr_list_for_each(_addr, _list) \
+ list_for_each_entry((_addr), &(_list)->addr_list, addr_list)
+
+#define netdev_for_each_mc_addr(na, ndev) \
+ netdev_hw_addr_list_for_each(na, &(ndev)->mc)
+
+static __inline void
+synchronize_net(void)
+{
+
+	/* At some point we probably cannot do this unconditionally anymore. */
+ synchronize_rcu();
+}
+
+static __inline void
+netif_receive_skb_list(struct list_head *head)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline int
+napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (-1);
+}
+
+static __inline void
+ether_setup(struct net_device *ndev)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline void
+dev_net_set(struct net_device *ndev, void *p)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline int
+dev_set_threaded(struct net_device *ndev, bool threaded)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENODEV);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline bool
+netif_carrier_ok(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (false);
+}
+
+static __inline void
+netif_carrier_off(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline void
+netif_carrier_on(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline bool
+netif_queue_stopped(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (false);
+}
+
+static __inline void
+netif_stop_queue(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline void
+netif_wake_queue(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline int
+register_netdevice(struct net_device *ndev)
+{
+
+ /* assert rtnl_locked? */
+ pr_debug("%s: TODO\n", __func__);
+ return (0);
+}
+
+static __inline int
+register_netdev(struct net_device *ndev)
+{
+ int error;
+
+ /* lock */
+ error = register_netdevice(ndev);
+ /* unlock */
+ pr_debug("%s: TODO\n", __func__);
+ return (error);
+}
+
+static __inline void
+unregister_netdev(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline void
+unregister_netdevice(struct net_device *ndev)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline void
+netif_rx(struct sk_buff *skb)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static __inline void
+netif_rx_ni(struct sk_buff *skb)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct net_device *linuxkpi_alloc_netdev(size_t, const char *, uint32_t,
+ void(*)(struct net_device *));
+void linuxkpi_free_netdev(struct net_device *);
+
+#define alloc_netdev(_l, _n, _f, _func) \
+ linuxkpi_alloc_netdev(_l, _n, _f, _func)
+#define alloc_netdev_dummy(_l) \
+ linuxkpi_alloc_netdev(_l, "dummy", NET_NAME_UNKNOWN, NULL)
+#define free_netdev(_n) \
+ linuxkpi_free_netdev(_n)
+
+static inline void *
+netdev_priv(const struct net_device *ndev)
+{
+
+ return (__DECONST(void *, ndev->drv_priv));
+}
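+
+/*
+ * Usage sketch (hypothetical driver, not part of this header): the private
+ * area requested at allocation time lives in drv_priv[] directly behind the
+ * net_device and is reached through netdev_priv().  mydrv_softc and
+ * mydrv_setup are made-up names.
+ *
+ *	struct mydrv_softc *sc;
+ *	struct net_device *ndev;
+ *
+ *	ndev = alloc_netdev(sizeof(*sc), "mydrv%d", NET_NAME_UNKNOWN,
+ *	    mydrv_setup);
+ *	sc = netdev_priv(ndev);
+ *
+ *	free_netdev(ndev);
+ */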
+
+/* -------------------------------------------------------------------------- */
+/* This is really rtnetlink and probably belongs elsewhere. */
+
+#define rtnl_lock() do { } while(0)
+#define rtnl_unlock() do { } while(0)
+#define rcu_dereference_rtnl(x) READ_ONCE(x)
+
+#endif /* _LINUXKPI_LINUX_NETDEVICE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/nl80211.h b/sys/compat/linuxkpi/common/include/linux/nl80211.h
new file mode 100644
index 000000000000..f3979d3a2abc
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/nl80211.h
@@ -0,0 +1,445 @@
+/*-
+ * Copyright (c) 2020-2024 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_NL80211_H
+#define _LINUXKPI_LINUX_NL80211_H
+
+#include <linux/bitops.h>
+
+enum nl80211_feature_flags {
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = BIT(0),
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = BIT(1),
+ NL80211_FEATURE_HT_IBSS = BIT(2),
+ NL80211_FEATURE_LOW_PRIORITY_SCAN = BIT(3),
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR = BIT(4),
+ NL80211_FEATURE_P2P_GO_CTWIN = BIT(5),
+ NL80211_FEATURE_P2P_GO_OPPPS = BIT(6),
+ NL80211_FEATURE_QUIET = BIT(7),
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = BIT(8),
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = BIT(9),
+ NL80211_FEATURE_DYNAMIC_SMPS = BIT(10),
+ NL80211_FEATURE_STATIC_SMPS = BIT(11),
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = BIT(12),
+ NL80211_FEATURE_TDLS_CHANNEL_SWITCH = BIT(13),
+ NL80211_FEATURE_TX_POWER_INSERTION = BIT(14),
+ NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = BIT(15),
+ NL80211_FEATURE_AP_SCAN = BIT(16),
+ NL80211_FEATURE_ACTIVE_MONITOR = BIT(17),
+};
+
+enum nl80211_pmsr_ftm_failure_flags {
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE = BIT(0),
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY = BIT(1),
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = BIT(2),
+};
+
+enum nl80211_pmsr_status_flags {
+ NL80211_PMSR_STATUS_FAILURE = BIT(0),
+ NL80211_PMSR_STATUS_SUCCESS = BIT(1),
+ NL80211_PMSR_STATUS_TIMEOUT = BIT(2),
+};
+
+#define NL80211_PMSR_TYPE_FTM 1
+
+enum nl80211_reg_rule_flags {
+ NL80211_RRF_AUTO_BW = BIT(0),
+ NL80211_RRF_DFS = BIT(1),
+ NL80211_RRF_GO_CONCURRENT = BIT(2),
+ NL80211_RRF_NO_IR = BIT(3),
+ NL80211_RRF_NO_OUTDOOR = BIT(4),
+ NL80211_RRF_NO_HT40MINUS = BIT(5),
+ NL80211_RRF_NO_HT40PLUS = BIT(6),
+ NL80211_RRF_NO_80MHZ = BIT(7),
+ NL80211_RRF_NO_160MHZ = BIT(8),
+ NL80211_RRF_NO_HE = BIT(9),
+ NL80211_RRF_NO_OFDM = BIT(10),
+ NL80211_RRF_NO_320MHZ = BIT(11),
+ NL80211_RRF_NO_EHT = BIT(12),
+ NL80211_RRF_DFS_CONCURRENT = BIT(13),
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = BIT(14),
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = BIT(15),
+ NL80211_RRF_PSD = BIT(16),
+ NL80211_RRF_ALLOW_6GHZ_VLP_AP = BIT(17),
+};
+#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS|NL80211_RRF_NO_HT40PLUS)
+
+enum nl80211_scan_flags {
+ NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = BIT(0),
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = BIT(1),
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = BIT(2),
+ NL80211_SCAN_FLAG_RANDOM_ADDR = BIT(3),
+ NL80211_SCAN_FLAG_COLOCATED_6GHZ = BIT(4),
+ NL80211_SCAN_FLAG_RANDOM_SN = BIT(5),
+ NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = BIT(6),
+};
+
+#define NL80211_MAX_SUPP_REG_RULES 512 /* TODO FIXME, random */
+
+#define NL80211_BSS_CHAN_WIDTH_20 __LINE__ /* TODO FIXME, brcmfmac */
+
+enum nl80211_wpa_versions {
+ NL80211_WPA_VERSION_1 = 1,
+ NL80211_WPA_VERSION_2,
+ NL80211_WPA_VERSION_3,
+};
+
+enum nl80211_bss_select_attr {
+ __NL80211_BSS_SELECT_ATTR_INVALID = 0,
+ NL80211_BSS_SELECT_ATTR_BAND_PREF,
+ NL80211_BSS_SELECT_ATTR_RSSI,
+ NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
+};
+
+enum nl80211_sta_flag {
+ /* XXX TODO */
+ NL80211_STA_FLAG_ASSOCIATED,
+ NL80211_STA_FLAG_AUTHENTICATED,
+ NL80211_STA_FLAG_AUTHORIZED,
+ NL80211_STA_FLAG_TDLS_PEER,
+ NL80211_STA_FLAG_WME,
+};
+
+enum nl80211_band {
+ /* XXX TODO */
+ NL80211_BAND_2GHZ = 0,
+ NL80211_BAND_5GHZ,
+ NL80211_BAND_60GHZ,
+ NL80211_BAND_6GHZ,
+
+ /* Keep this last. */
+ NUM_NL80211_BANDS
+} __packed;
+
+enum nl80211_channel_type {
+ NL80211_CHAN_NO_HT,
+ NL80211_CHAN_HT20,
+ NL80211_CHAN_HT40PLUS,
+ NL80211_CHAN_HT40MINUS,
+};
+
+enum nl80211_chan_width {
+ /* XXX TODO */
+ NL80211_CHAN_WIDTH_20_NOHT,
+ NL80211_CHAN_WIDTH_20,
+ NL80211_CHAN_WIDTH_40,
+ NL80211_CHAN_WIDTH_80,
+ NL80211_CHAN_WIDTH_80P80,
+ NL80211_CHAN_WIDTH_160,
+ NL80211_CHAN_WIDTH_5,
+ NL80211_CHAN_WIDTH_10,
+ NL80211_CHAN_WIDTH_320,
+};
+
+enum nl80211_iftype {
+ /* XXX TODO */
+ NL80211_IFTYPE_UNSPECIFIED,
+ NL80211_IFTYPE_ADHOC,
+ NL80211_IFTYPE_STATION,
+ NL80211_IFTYPE_AP,
+ NL80211_IFTYPE_AP_VLAN,
+ NL80211_IFTYPE_MONITOR,
+ NL80211_IFTYPE_P2P_CLIENT,
+ NL80211_IFTYPE_P2P_DEVICE,
+ NL80211_IFTYPE_P2P_GO,
+ NL80211_IFTYPE_MESH_POINT,
+ NL80211_IFTYPE_WDS,
+ NL80211_IFTYPE_OCB,
+ NL80211_IFTYPE_NAN,
+
+ /* Keep this last. */
+ NUM_NL80211_IFTYPES
+};
+
+enum nl80211_preamble {
+ /* XXX TODO */
+ NL80211_PREAMBLE_LEGACY,
+ NL80211_PREAMBLE_HT,
+ NL80211_PREAMBLE_VHT,
+ NL80211_PREAMBLE_HE,
+};
+
+enum nl80211_tdls_operation {
+ /* XXX TODO */
+ NL80211_TDLS_SETUP,
+ NL80211_TDLS_TEARDOWN,
+ NL80211_TDLS_ENABLE_LINK,
+ NL80211_TDLS_DISABLE_LINK,
+ NL80211_TDLS_DISCOVERY_REQ,
+};
+
+enum nl80211_cqm_rssi_threshold_event {
+ /* XXX TODO */
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+};
+
+enum nl80211_ext_feature {
+ /* XXX TODO */
+ NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF,
+ NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
+ NL80211_EXT_FEATURE_EXT_KEY_ID,
+ NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE,
+ NL80211_EXT_FEATURE_PROTECTED_TWT,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD,
+ NL80211_EXT_FEATURE_SCAN_START_TIME,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL,
+ NL80211_EXT_FEATURE_VHT_IBSS,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
+ NL80211_EXT_FEATURE_STA_TX_PWR,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+ NL80211_EXT_FEATURE_AQL,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+ NL80211_EXT_FEATURE_BEACON_RATE_HE,
+ NL80211_EXT_FEATURE_BSS_COLOR,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
+ NL80211_EXT_FEATURE_PUNCT,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
+ NL80211_EXT_FEATURE_SECURE_LTF,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP,
+
+ /* Keep this last. */
+ NUM_NL80211_EXT_FEATURES
+};
+
+/* Keep in order with lkpi_nl80211_sta_info_to_str() */
+enum nl80211_sta_info {
+ /* XXX TODO */
+ NL80211_STA_INFO_BEACON_RX,
+ NL80211_STA_INFO_BEACON_SIGNAL_AVG,
+ NL80211_STA_INFO_BSS_PARAM,
+ NL80211_STA_INFO_CHAIN_SIGNAL,
+ NL80211_STA_INFO_CHAIN_SIGNAL_AVG,
+ NL80211_STA_INFO_CONNECTED_TIME,
+ NL80211_STA_INFO_INACTIVE_TIME,
+ NL80211_STA_INFO_SIGNAL,
+ NL80211_STA_INFO_SIGNAL_AVG,
+ NL80211_STA_INFO_STA_FLAGS,
+ NL80211_STA_INFO_RX_BITRATE,
+ NL80211_STA_INFO_RX_PACKETS,
+ NL80211_STA_INFO_RX_BYTES,
+ NL80211_STA_INFO_RX_DROP_MISC,
+ NL80211_STA_INFO_TX_BITRATE,
+ NL80211_STA_INFO_TX_PACKETS,
+ NL80211_STA_INFO_TX_BYTES,
+ NL80211_STA_INFO_TX_BYTES64,
+ NL80211_STA_INFO_RX_BYTES64,
+ NL80211_STA_INFO_TX_FAILED,
+ NL80211_STA_INFO_TX_RETRIES,
+ NL80211_STA_INFO_RX_DURATION,
+ NL80211_STA_INFO_TX_DURATION,
+ NL80211_STA_INFO_ACK_SIGNAL,
+ NL80211_STA_INFO_ACK_SIGNAL_AVG,
+};
+
+enum nl80211_ftm_stats {
+ /* XXX TODO */
+ NL80211_FTM_STATS_ASAP_NUM,
+ NL80211_FTM_STATS_FAILED_NUM,
+ NL80211_FTM_STATS_NON_ASAP_NUM,
+ NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM,
+ NL80211_FTM_STATS_PARTIAL_NUM,
+ NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM,
+ NL80211_FTM_STATS_SUCCESS_NUM,
+ NL80211_FTM_STATS_TOTAL_DURATION_MSEC,
+ NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM,
+};
+
+enum nl80211_reg_initiator {
+ /* XXX TODO */
+ NL80211_REGDOM_SET_BY_USER,
+ NL80211_REGDOM_SET_BY_DRIVER,
+ NL80211_REGDOM_SET_BY_CORE,
+ NL80211_REGDOM_SET_BY_COUNTRY_IE,
+};
+
+struct nl80211_sta_flag_update {
+ /* XXX TODO */
+ int mask, set;
+};
+
+enum nl80211_tx_power_setting {
+ /* XXX TODO */
+ NL80211_TX_POWER_AUTOMATIC,
+ NL80211_TX_POWER_FIXED,
+ NL80211_TX_POWER_LIMITED,
+};
+
+enum nl80211_crit_proto_id {
+ /* XXX TODO */
+ NL80211_CRIT_PROTO_DHCP,
+};
+
+enum nl80211_auth_type {
+ NL80211_AUTHTYPE_AUTOMATIC,
+ NL80211_AUTHTYPE_OPEN_SYSTEM,
+ NL80211_AUTHTYPE_SHARED_KEY,
+ NL80211_AUTHTYPE_SAE,
+};
+
+enum nl80211_key_type {
+ NL80211_KEYTYPE_GROUP,
+ NL80211_KEYTYPE_PAIRWISE,
+};
+
+enum nl80211_he_ru_alloc {
+ NL80211_RATE_INFO_HE_RU_ALLOC_26,
+ NL80211_RATE_INFO_HE_RU_ALLOC_52,
+ NL80211_RATE_INFO_HE_RU_ALLOC_106,
+ NL80211_RATE_INFO_HE_RU_ALLOC_242,
+ NL80211_RATE_INFO_HE_RU_ALLOC_484,
+ NL80211_RATE_INFO_HE_RU_ALLOC_996,
+ NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
+};
+
+enum nl80211_he_gi {
+ NL80211_RATE_INFO_HE_GI_0_8,
+ NL80211_RATE_INFO_HE_GI_1_6,
+ NL80211_RATE_INFO_HE_GI_3_2,
+};
+
+enum nl80211_he_ltf {
+ NL80211_RATE_INFO_HE_1XLTF,
+ NL80211_RATE_INFO_HE_2XLTF,
+ NL80211_RATE_INFO_HE_4XLTF,
+};
+
+enum nl80211_eht_gi {
+ NL80211_RATE_INFO_EHT_GI_0_8,
+ NL80211_RATE_INFO_EHT_GI_1_6,
+ NL80211_RATE_INFO_EHT_GI_3_2,
+};
+
+enum nl80211_eht_ru_alloc {
+ NL80211_RATE_INFO_EHT_RU_ALLOC_26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_52,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_52P26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_106,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_106P26,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_484P242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_2x996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_3x996,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484,
+ NL80211_RATE_INFO_EHT_RU_ALLOC_4x996,
+};
+
+enum nl80211_dfs_regions {
+ NL80211_DFS_UNSET,
+ NL80211_DFS_FCC,
+ NL80211_DFS_ETSI,
+ NL80211_DFS_JP,
+};
+
+enum nl80211_dfs_state {
+ NL80211_DFS_USABLE,
+};
+
+enum nl80211_sar_type {
+ NL80211_SAR_TYPE_POWER,
+};
+
+#define NL80211_VHT_NSS_MAX 8
+#define NL80211_HE_NSS_MAX 8
+
+enum nl80211_tid_cfg_attr {
+ NL80211_TID_CONFIG_ATTR_NOACK,
+ NL80211_TID_CONFIG_ATTR_RETRY_SHORT,
+ NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE,
+ NL80211_TID_CONFIG_ATTR_TX_RATE,
+ NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL,
+ NL80211_TID_CONFIG_ATTR_RETRY_LONG,
+ NL80211_TID_CONFIG_ATTR_AMPDU_CTRL,
+ NL80211_TID_CONFIG_ATTR_AMSDU_CTRL,
+};
+
+enum nl80211_tid_config {
+ NL80211_TID_CONFIG_ENABLE,
+};
+
+enum nl80211_tx_rate_setting {
+ NL80211_TX_RATE_AUTOMATIC,
+ NL80211_TX_RATE_FIXED,
+ NL80211_TX_RATE_LIMITED,
+};
+
+enum nl80211_txrate_gi {
+ NL80211_TXRATE_DEFAULT_GI,
+ NL80211_TXRATE_FORCE_SGI,
+ NL80211_TXRATE_FORCE_LGI,
+};
+
+enum nl80211_probe_resp_offload_support {
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2,
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS,
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P,
+};
+
+enum nl80211_user_reg_hint_type {
+ NL80211_USER_REG_HINT_USER,
+};
+
+enum nl80211_hidden_ssid {
+ NL80211_HIDDEN_SSID_NOT_IN_USE,
+};
+
+#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
+#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
+
+#define NL80211_KCK_LEN 16
+#define NL80211_KCK_EXT_LEN 24
+#define NL80211_KEK_LEN 16
+#define NL80211_KEK_EXT_LEN 32
+#define NL80211_REPLAY_CTR_LEN 8
+#endif /* _LINUXKPI_LINUX_NL80211_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/nodemask.h b/sys/compat/linuxkpi/common/include/linux/nodemask.h
new file mode 100644
index 000000000000..7a245cc6f256
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/nodemask.h
@@ -0,0 +1,46 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_NODEMASK_H_
+#define _LINUXKPI_LINUX_NODEMASK_H_
+
+#include <linux/kernel.h> /* pr_debug */
+
+static inline int
+num_online_nodes(void)
+{
+ return (1);
+}
+
+static inline int
+num_possible_nodes(void)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (1);
+}
+
+#endif /* _LINUXKPI_LINUX_NODEMASK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/nospec.h b/sys/compat/linuxkpi/common/include/linux/nospec.h
new file mode 100644
index 000000000000..e8458ae8b371
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/nospec.h
@@ -0,0 +1,8 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_NOSPEC_H_
+#define _LINUXKPI_LINUX_NOSPEC_H_
+
+#define array_index_nospec(a, b) (a)
+
+#endif /* _LINUXKPI_LINUX_NOSPEC_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/notifier.h b/sys/compat/linuxkpi/common/include/linux/notifier.h
new file mode 100644
index 000000000000..9302a1ce4606
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/notifier.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_NOTIFIER_H_
+#define _LINUXKPI_LINUX_NOTIFIER_H_
+
+#include <sys/types.h>
+#include <sys/eventhandler.h>
+
+#define NOTIFY_DONE 0
+#define NOTIFY_OK 0x0001
+#define NOTIFY_STOP_MASK 0x8000
+#define NOTIFY_BAD (NOTIFY_STOP_MASK | 0x0002)
+
+enum {
+ NETDEV_CHANGE,
+ NETDEV_UP,
+ NETDEV_DOWN,
+ NETDEV_REGISTER,
+ NETDEV_UNREGISTER,
+ NETDEV_CHANGEADDR,
+ NETDEV_CHANGEIFADDR,
+ LINUX_NOTIFY_TAGS /* must be last */
+};
+
+struct notifier_block {
+ int (*notifier_call) (struct notifier_block *, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+ eventhandler_tag tags[LINUX_NOTIFY_TAGS];
+};
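+
+/*
+ * Sketch of a typical consumer-side declaration; "mydrv_netdev_event" is
+ * a hypothetical callback, not something this header provides:
+ *
+ *	static int mydrv_netdev_event(struct notifier_block *nb,
+ *	    unsigned long event, void *ptr);
+ *
+ *	static struct notifier_block mydrv_nb = {
+ *		.notifier_call = mydrv_netdev_event,
+ *	};
+ */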
+
+#endif /* _LINUXKPI_LINUX_NOTIFIER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/numa.h b/sys/compat/linuxkpi/common/include/linux/numa.h
new file mode 100644
index 000000000000..6b227e177a64
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/numa.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_NUMA_H_
+#define _LINUXKPI_LINUX_NUMA_H_
+
+#define NUMA_NO_NODE -1
+
+#endif /* _LINUXKPI_LINUX_NUMA_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/of.h b/sys/compat/linuxkpi/common/include/linux/of.h
new file mode 100644
index 000000000000..fb4554a8ddbc
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/of.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_OF_H
+#define _LINUXKPI_LINUX_OF_H
+
+#include <linux/kobject.h>
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/overflow.h b/sys/compat/linuxkpi/common/include/linux/overflow.h
new file mode 100644
index 000000000000..9ba9b9500f11
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/overflow.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef _LINUXKPI_LINUX_OVERFLOW_H
+#define _LINUXKPI_LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+#include <linux/limits.h>
+#ifdef __linux__
+#include <linux/const.h>
+#endif
+
+/*
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
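+
+/*
+ * For example, type_max(u8) evaluates to 255 and type_min(s8) to -128,
+ * without the intermediate expressions ever shifting into the sign bit.
+ */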
+
+/*
+ * Avoids triggering -Wtype-limits compilation warning,
+ * while using unsigned data types to check a < 0.
+ */
+#define is_non_negative(a) ((a) > 0 || (a) == 0)
+#define is_negative(a) (!(is_non_negative(a)))
+
+/*
+ * Allows for effectively applying __must_check to a macro so we can have
+ * both the type-agnostic benefits of the macros while also being able to
+ * enforce that the return value is, in fact, checked.
+ */
+static inline bool __must_check __must_check_overflow(bool overflow)
+{
+ return unlikely(overflow);
+}
+
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted addition, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * sum has overflowed or been truncated.
+ */
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
+
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted subtraction, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * difference has underflowed or been truncated.
+ */
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
+
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted multiplication, but is not
+ * considered "safe for use" on a non-zero return value, which indicates
+ * that the product has overflowed or been truncated.
+ */
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
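+
+/*
+ * Usage sketch for the check_*_overflow() family; "nmemb", "size" and
+ * "bytes" are arbitrary local names:
+ *
+ *	size_t bytes;
+ *	void *buf;
+ *
+ *	if (check_mul_overflow(nmemb, size, &bytes))
+ *		return (-EOVERFLOW);
+ *	buf = kmalloc(bytes, GFP_KERNEL);
+ */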
+
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
+ * @a: Value to be shifted
+ * @s: How many bits left to shift
+ * @d: Pointer to where to store the result
+ *
+ * Computes *@d = (@a << @s)
+ *
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
+ * make sense. Example conditions:
+ *
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ * '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if true is returned.
+ */
+#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
+ typeof(a) _a = a; \
+ typeof(s) _s = s; \
+ typeof(d) _d = d; \
+ u64 _a_full = _a; \
+ unsigned int _to_shift = \
+ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
+ *_d = (_a_full << _to_shift); \
+ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
+ (*_d >> _to_shift) != _a); \
+}))
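+
+/*
+ * Sketch: converting a block count to a byte count without silently
+ * dropping high bits ("nblocks" and "blkshift" are arbitrary names):
+ *
+ *	u64 bytes;
+ *
+ *	if (check_shl_overflow(nblocks, blkshift, &bytes))
+ *		return (-ERANGE);
+ */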
+
+#define __overflows_type_constexpr(x, T) ( \
+ is_unsigned_type(typeof(x)) ? \
+ (x) > type_max(typeof(T)) : \
+ is_unsigned_type(typeof(T)) ? \
+ (x) < 0 || (x) > type_max(typeof(T)) : \
+ (x) < type_min(typeof(T)) || (x) > type_max(typeof(T)))
+
+#define __overflows_type(x, T) ({ \
+ typeof(T) v = 0; \
+ check_add_overflow((x), v, &v); \
+})
+
+/**
+ * overflows_type - helper for checking whether a value or variable
+ * overflows a destination variable or data type
+ *
+ * @n: source constant value or variable to be checked
+ * @T: destination variable or data type proposed to store @n
+ *
+ * Compares the @n expression for whether or not it can safely fit in
+ * the storage of the type in @T. @n and @T can have different types.
+ * If @n is a constant expression, this will also resolve to a constant
+ * expression.
+ *
+ * Returns: true if overflow can occur, false otherwise.
+ */
+#define overflows_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ __overflows_type_constexpr(n, T), \
+ __overflows_type(n, T))
+
+/**
+ * castable_to_type - like __same_type(), but also allows for casted literals
+ *
+ * @n: variable or constant value
+ * @T: variable or data type
+ *
+ * Unlike the __same_type() macro, this allows a constant value as the
+ * first argument. If this value would not overflow into an assignment
+ * of the second argument's type, it returns true. Otherwise, this falls
+ * back to __same_type().
+ */
+#define castable_to_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ !__overflows_type_constexpr(n, T), \
+ __same_type(n, T))
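+
+/*
+ * For instance, overflows_type(300, u8) is true at compile time (300 does
+ * not fit in 8 bits), while castable_to_type(255, u8) is true because the
+ * int literal still fits in the destination type.
+ */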
+
+/**
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ * @factor1: first factor
+ * @factor2: second factor
+ *
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(factor1, factor2, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ * @addend1: first addend
+ * @addend2: second addend
+ *
+ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
+{
+ size_t bytes;
+
+ if (check_add_overflow(addend1, addend2, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+{
+ size_t bytes;
+
+ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+ check_sub_overflow(minuend, subtrahend, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
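+
+/*
+ * Sketch of composing the saturating helpers: any overflow in the chain
+ * yields SIZE_MAX, which the allocator then refuses.  "struct hdr",
+ * "nelem" and "elem_size" are placeholders.
+ *
+ *	void *p = kzalloc(size_add(sizeof(struct hdr),
+ *	    size_mul(nelem, elem_size)), GFP_KERNEL);
+ */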
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array_size(a, b) size_mul(a, b)
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
+
+/**
+ * flex_array_size() - Calculate size of a flexible array member
+ * within an enclosing structure.
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define flex_array_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
+ size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
+
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure of @p followed by an
+ * array of @count number of @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ sizeof(*(p)) + flex_array_size(p, member, count), \
+ size_add(sizeof(*(p)), flex_array_size(p, member, count)))
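+
+/*
+ * Typical use with a hypothetical structure carrying a trailing flexible
+ * array member, for some element count "count":
+ *
+ *	struct entries { size_t count; u32 item[]; };
+ *
+ *	struct entries *e;
+ *
+ *	e = kzalloc(struct_size(e, item, count), GFP_KERNEL);
+ */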
+
+/**
+ * struct_size_t() - Calculate size of structure with trailing flexible array
+ * @type: structure type name.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @type followed by an
+ * array of @count number of @member elements. Prefer using struct_size()
+ * when possible instead, to keep calculations associated with a specific
+ * instance variable of type @type.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size_t(type, member, count) \
+ struct_size((type *)NULL, member, count)
+
+/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: initializer expression (could be empty for no init).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer) \
+ _Static_assert(__builtin_constant_p(count), \
+ "onstack flex array members require compile-time const count"); \
+ union { \
+ u8 bytes[struct_size_t(type, member, count)]; \
+ type obj; \
+ } name##_u initializer; \
+ type *name = (type *)&name##_u
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack instance of @type structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@name) to get compile-time size of it afterwards.
+ */
+#define DEFINE_FLEX(type, name, member, count) \
+ _DEFINE_FLEX(type, name, member, count, = {})
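+
+/*
+ * On-stack sketch ("struct mydesc" and "frame" are hypothetical): "frame"
+ * points at a zeroed struct mydesc with room for 4 trailing "slot"
+ * elements.
+ *
+ *	struct mydesc { u8 n; u8 slot[]; };
+ *
+ *	DEFINE_FLEX(struct mydesc, frame, slot, 4);
+ *	frame->n = 4;
+ */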
+
+#endif /* _LINUXKPI_LINUX_OVERFLOW_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/page-flags.h b/sys/compat/linuxkpi/common/include/linux/page-flags.h
new file mode 100644
index 000000000000..a22b3a24c330
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/page-flags.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PAGEFLAGS_H_
+#define _LINUXKPI_LINUX_PAGEFLAGS_H_
+
+#include <linux/mm_types.h>
+
+#define PageHighMem(p) (0)
+
+#define page_folio(p) \
+ (_Generic((p), \
+ const struct page *: (const struct folio *)(p), \
+ struct page *: (struct folio *)(p)))
+
+#endif /* _LINUXKPI_LINUX_PAGEFLAGS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/page.h b/sys/compat/linuxkpi/common/include/linux/page.h
new file mode 100644
index 000000000000..37ab593a64e9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/page.h
@@ -0,0 +1,130 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_PAGE_H_
+#define _LINUXKPI_LINUX_PAGE_H_
+
+#include <linux/types.h>
+
+#include <sys/param.h>
+#include <sys/vmmeter.h>
+
+#include <machine/atomic.h>
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <machine/md_var.h>
+#endif
+
+typedef unsigned long linux_pte_t;
+typedef unsigned long linux_pmd_t;
+typedef unsigned long linux_pgd_t;
+typedef unsigned long pgprot_t;
+
+#define page vm_page
+
+#define LINUXKPI_PROT_VALID (1 << 3)
+#define LINUXKPI_CACHE_MODE_SHIFT 4
+
+CTASSERT((VM_PROT_ALL & -LINUXKPI_PROT_VALID) == 0);
+
+#define PAGE_KERNEL_IO 0x0000
+
+static inline pgprot_t
+cachemode2protval(vm_memattr_t attr)
+{
+ return ((attr << LINUXKPI_CACHE_MODE_SHIFT) | LINUXKPI_PROT_VALID);
+}
+
+static inline vm_memattr_t
+pgprot2cachemode(pgprot_t prot)
+{
+ if (prot & LINUXKPI_PROT_VALID)
+ return (prot >> LINUXKPI_CACHE_MODE_SHIFT);
+ else
+ return (VM_MEMATTR_DEFAULT);
+}
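+
+/*
+ * The two helpers are inverses for encoded attributes, e.g.
+ * pgprot2cachemode(cachemode2protval(VM_MEMATTR_UNCACHEABLE)) yields
+ * VM_MEMATTR_UNCACHEABLE again, while a plain VM_PROT_* value lacking
+ * LINUXKPI_PROT_VALID decodes to VM_MEMATTR_DEFAULT.
+ */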
+
+#define page_to_virt(page) linux_page_address(page)
+#define virt_to_page(x) PHYS_TO_VM_PAGE(vtophys(x))
+#define page_to_pfn(pp) (VM_PAGE_TO_PHYS(pp) >> PAGE_SHIFT)
+#define pfn_to_page(pfn) (PHYS_TO_VM_PAGE((pfn) << PAGE_SHIFT))
+#define nth_page(page,n) pfn_to_page(page_to_pfn(page) + (n))
+#define page_to_phys(page) VM_PAGE_TO_PHYS(page)
+
+#define clear_page(page) memset(page, 0, PAGE_SIZE)
+#define pgprot_noncached(prot) \
+ (((prot) & VM_PROT_ALL) | cachemode2protval(VM_MEMATTR_UNCACHEABLE))
+#ifdef VM_MEMATTR_WRITE_COMBINING
+#define pgprot_writecombine(prot) \
+ (((prot) & VM_PROT_ALL) | cachemode2protval(VM_MEMATTR_WRITE_COMBINING))
+#else
+#define pgprot_writecombine(prot) pgprot_noncached(prot)
+#endif
+
+#undef PAGE_MASK
+#define PAGE_MASK (~(PAGE_SIZE-1))
+/*
+ * Modifying PAGE_MASK in the above way breaks trunc_page, round_page,
+ * and btoc macros. Therefore, redefine them in a way that makes sense
+ * so the LinuxKPI consumers don't get totally broken behavior.
+ */
+#undef btoc
+#define btoc(x) (((vm_offset_t)(x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#undef round_page
+#define round_page(x) ((((uintptr_t)(x)) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#undef trunc_page
+#define trunc_page(x) ((uintptr_t)(x) & ~(PAGE_SIZE - 1))
+
+#if defined(__i386__) || defined(__amd64__)
+#undef clflush
+#undef clflushopt
+static inline void
+lkpi_clflushopt(unsigned long addr)
+{
+ if (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT)
+ clflushopt(addr);
+ else if (cpu_feature & CPUID_CLFSH)
+ clflush(addr);
+ else
+ pmap_invalidate_cache();
+}
+#define clflush(x) clflush((unsigned long)(x))
+#define clflushopt(x) lkpi_clflushopt((unsigned long)(x))
+
+static inline void
+clflush_cache_range(void *addr, unsigned int size)
+{
+ pmap_force_invalidate_cache_range((vm_offset_t)addr,
+ (vm_offset_t)addr + size);
+}
+#endif
+
+#endif /* _LINUXKPI_LINUX_PAGE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pagemap.h b/sys/compat/linuxkpi/common/include/linux/pagemap.h
new file mode 100644
index 000000000000..cb6a1820ea8b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pagemap.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PAGEMAP_H_
+#define _LINUXKPI_LINUX_PAGEMAP_H_
+
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+
+struct folio_batch;
+
+#define invalidate_mapping_pages(...) \
+ linux_invalidate_mapping_pages(__VA_ARGS__)
+
+unsigned long linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start,
+ pgoff_t end);
+
+static inline void
+mapping_clear_unevictable(vm_object_t mapping)
+{
+}
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/pagevec.h b/sys/compat/linuxkpi/common/include/linux/pagevec.h
new file mode 100644
index 000000000000..0a952e965b5a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pagevec.h
@@ -0,0 +1,137 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_PAGEVEC_H_
+#define _LINUXKPI_LINUX_PAGEVEC_H_
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+
+#include <linux/pagemap.h>
+
+#define PAGEVEC_SIZE 15
+
+struct pagevec {
+ uint8_t nr;
+ struct page *pages[PAGEVEC_SIZE];
+};
+
+static inline unsigned int
+pagevec_space(struct pagevec *pvec)
+{
+ return PAGEVEC_SIZE - pvec->nr;
+}
+
+static inline void
+pagevec_init(struct pagevec *pvec)
+{
+ pvec->nr = 0;
+}
+
+static inline void
+pagevec_reinit(struct pagevec *pvec)
+{
+ pvec->nr = 0;
+}
+
+static inline unsigned int
+pagevec_count(struct pagevec *pvec)
+{
+ return pvec->nr;
+}
+
+static inline unsigned int
+pagevec_add(struct pagevec *pvec, struct page *page)
+{
+ pvec->pages[pvec->nr++] = page;
+ return PAGEVEC_SIZE - pvec->nr;
+}
+
+static inline void
+__pagevec_release(struct pagevec *pvec)
+{
+ release_pages(pvec->pages, pagevec_count(pvec));
+ pagevec_reinit(pvec);
+}
+
+static inline void
+pagevec_release(struct pagevec *pvec)
+{
+ if (pagevec_count(pvec))
+ __pagevec_release(pvec);
+}
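+
+/*
+ * Typical batching pattern (sketch); "next_page()" stands in for whatever
+ * produces the pages.  pagevec_add() returns the remaining space, so a
+ * return value of zero means the pagevec is full and must be released.
+ *
+ *	struct pagevec pvec;
+ *	struct page *p;
+ *
+ *	pagevec_init(&pvec);
+ *	while ((p = next_page()) != NULL) {
+ *		if (pagevec_add(&pvec, p) == 0)
+ *			pagevec_release(&pvec);
+ *	}
+ *	pagevec_release(&pvec);
+ */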
+
+static inline void
+check_move_unevictable_pages(struct pagevec *pvec)
+{
+}
+
+/*
+ * struct folio
+ *
+ * On Linux, `struct folio` replaces `struct page`. To manage a list of folios,
+ * there is `struct folio_batch` on top of this, which replaces `struct
+ * pagevec` above.
+ *
+ * Here is the original description when `struct folio` was added to the Linux
+ * kernel:
+ * "A struct folio is a new abstraction to replace the venerable struct page.
+ * A function which takes a struct folio argument declares that it will
+ * operate on the entire (possibly compound) page, not just PAGE_SIZE bytes.
+ * In return, the caller guarantees that the pointer it is passing does not
+ * point to a tail page. No change to generated code."
+ */
+
+struct folio;
+
+struct folio_batch {
+ uint8_t nr;
+ struct folio *folios[PAGEVEC_SIZE];
+};
+
+static inline void
+folio_batch_init(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+}
+
+static inline void
+folio_batch_reinit(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+}
+
+static inline unsigned int
+folio_batch_count(struct folio_batch *fbatch)
+{
+ return (fbatch->nr);
+}
+
+static inline unsigned int
+folio_batch_space(struct folio_batch *fbatch)
+{
+ return (PAGEVEC_SIZE - fbatch->nr);
+}
+
+static inline unsigned int
+folio_batch_add(struct folio_batch *fbatch, struct folio *folio)
+{
+ KASSERT(
+ fbatch->nr < PAGEVEC_SIZE,
+ ("struct folio_batch %p is full", fbatch));
+
+ fbatch->folios[fbatch->nr++] = folio;
+
+ return (folio_batch_space(fbatch));
+}
+
+void __folio_batch_release(struct folio_batch *fbatch);
+
+static inline void
+folio_batch_release(struct folio_batch *fbatch)
+{
+ if (folio_batch_count(fbatch))
+ __folio_batch_release(fbatch);
+}
+
+#endif /* _LINUXKPI_LINUX_PAGEVEC_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h
new file mode 100644
index 000000000000..af19829f1cbb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -0,0 +1,1537 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2020-2022 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_PCI_H_
+#define _LINUXKPI_LINUX_PCI_H_
+
+#define CONFIG_PCI_MSI
+
+#include <linux/types.h>
+#include <linux/device/driver.h>
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/nv.h>
+#include <sys/pciio.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pci_private.h>
+
+#include <machine/resource.h>
+
+#include <linux/list.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <asm/atomic.h>
+#include <asm/memtype.h>
+#include <linux/device.h>
+#include <linux/pci_ids.h>
+#include <linux/pm.h>
+
+#include <linux/kernel.h> /* pr_debug */
+
+struct pci_device_id {
+ uint32_t vendor;
+ uint32_t device;
+ uint32_t subvendor;
+ uint32_t subdevice;
+ uint32_t class;
+ uint32_t class_mask;
+ uintptr_t driver_data;
+};
+
+#define MODULE_DEVICE_TABLE_BUS_pci(_bus, _table) \
+MODULE_PNP_INFO("U32:vendor;U32:device;V32:subvendor;V32:subdevice", \
+ _bus, lkpi_ ## _table, _table, nitems(_table) - 1)
+
+/* Linux has an empty element at the end of the ID table -> nitems() - 1. */
+#define MODULE_DEVICE_TABLE(_bus, _table) \
+ \
+static device_method_t _ ## _bus ## _ ## _table ## _methods[] = { \
+ DEVMETHOD_END \
+}; \
+ \
+static driver_t _ ## _bus ## _ ## _table ## _driver = { \
+ "lkpi_" #_bus #_table, \
+ _ ## _bus ## _ ## _table ## _methods, \
+ 0 \
+}; \
+ \
+DRIVER_MODULE(lkpi_ ## _table, _bus, _ ## _bus ## _ ## _table ## _driver,\
+ 0, 0); \
+ \
+MODULE_DEVICE_TABLE_BUS_ ## _bus(_bus, _table)
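+
+/*
+ * Sketch of how a driver declares its table, including the empty
+ * terminating element the comment above accounts for; the vendor/device
+ * values are placeholders:
+ *
+ *	static const struct pci_device_id mydrv_ids[] = {
+ *		{ PCI_DEVICE(0x1234, 0x5678) },
+ *		{ 0, }
+ *	};
+ *	MODULE_DEVICE_TABLE(pci, mydrv_ids);
+ */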
+
+#define PCI_ANY_ID -1U
+
+#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+#define PCI_BUS_NUM(devfn) (((devfn) >> 8) & 0xff)
+#define PCI_DEVID(bus, devfn) ((((uint16_t)(bus)) << 8) | (devfn))
+
+#define PCI_VDEVICE(_vendor, _device) \
+ .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#define PCI_DEVICE(_vendor, _device) \
+ .vendor = (_vendor), .device = (_device), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
+
+#define PCI_STD_NUM_BARS 6
+#define PCI_BASE_ADDRESS_0 PCIR_BARS
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 PCIM_BAR_MEM_64
+#define PCI_VENDOR_ID PCIR_VENDOR
+#define PCI_DEVICE_ID PCIR_DEVICE
+#define PCI_COMMAND PCIR_COMMAND
+#define PCI_COMMAND_INTX_DISABLE PCIM_CMD_INTxDIS
+#define PCI_COMMAND_MEMORY PCIM_CMD_MEMEN
+#define PCI_PRIMARY_BUS PCIR_PRIBUS_1
+#define PCI_SECONDARY_BUS PCIR_SECBUS_1
+#define PCI_SUBORDINATE_BUS PCIR_SUBBUS_1
+#define PCI_SEC_LATENCY_TIMER PCIR_SECLAT_1
+#define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */
+#define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */
+#define PCI_EXP_LNKCTL_ASPM_L0S PCIEM_LINK_CTL_ASPMC_L0S
+#define PCI_EXP_LNKCTL_ASPM_L1 PCIEM_LINK_CTL_ASPMC_L1
+#define PCI_EXP_LNKCTL_ASPMC PCIEM_LINK_CTL_ASPMC
+#define PCI_EXP_LNKCTL_CLKREQ_EN PCIEM_LINK_CTL_ECPM /* Enable clock PM */
+#define PCI_EXP_LNKCTL_HAWD PCIEM_LINK_CTL_HAWD
+#define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */
+#define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */
+#define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */
+#define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */
+#define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */
+#define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */
+#define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */
+#define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */
+#define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */
+#define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */
+#define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */
+#define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2 /* Device Control 2 */
+#define PCI_EXP_DEVCTL2_LTR_EN PCIEM_CTL2_LTR_ENABLE
+#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS PCIEM_CTL2_COMP_TIMO_DISABLE
+#define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */
+#define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */
+#define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */
+#define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */
+#define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */
+#define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */
+#define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */
+#define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */
+#define PCI_EXP_LNKSTA_CLS PCIEM_LINK_STA_SPEED
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */
+#define PCI_EXP_LNKCAP_SLS_8_0GB 0x03 /* Supported Link Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x04 /* Supported Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP_SLS_32_0GB 0x05 /* Supported Link Speed 32.0GT/s */
+#define PCI_EXP_LNKCAP_SLS_64_0GB 0x06 /* Supported Link Speed 64.0GT/s */
+#define PCI_EXP_LNKCAP_MLW 0x03f0 /* Maximum Link Width */
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x10 /* Supported Link Speed 16.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x20 /* Supported Link Speed 32.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_64_0GB 0x40 /* Supported Link Speed 64.0GT/s */
+#define PCI_EXP_LNKCTL2_TLS 0x000f
+#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
+#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
+#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
+#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
+
+#define PCI_MSI_ADDRESS_LO PCIR_MSI_ADDR
+#define PCI_MSI_ADDRESS_HI PCIR_MSI_ADDR_HIGH
+#define PCI_MSI_FLAGS PCIR_MSI_CTRL
+#define PCI_MSI_FLAGS_ENABLE PCIM_MSICTRL_MSI_ENABLE
+#define PCI_MSIX_FLAGS PCIR_MSIX_CTRL
+#define PCI_MSIX_FLAGS_ENABLE PCIM_MSIXCTRL_MSIX_ENABLE
+
+#define PCI_EXP_LNKCAP_CLKPM 0x00040000
+#define PCI_EXP_DEVSTA_TRPND 0x0020
+
+#define IORESOURCE_MEM (1 << SYS_RES_MEMORY)
+#define IORESOURCE_IO (1 << SYS_RES_IOPORT)
+#define IORESOURCE_IRQ (1 << SYS_RES_IRQ)
+
+enum pci_bus_speed {
+ PCI_SPEED_UNKNOWN = -1,
+ PCIE_SPEED_2_5GT,
+ PCIE_SPEED_5_0GT,
+ PCIE_SPEED_8_0GT,
+ PCIE_SPEED_16_0GT,
+ PCIE_SPEED_32_0GT,
+ PCIE_SPEED_64_0GT,
+};
+
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0x00,
+ PCIE_LNK_X1 = 0x01,
+ PCIE_LNK_X2 = 0x02,
+ PCIE_LNK_X4 = 0x04,
+ PCIE_LNK_X8 = 0x08,
+ PCIE_LNK_X12 = 0x0c,
+ PCIE_LNK_X16 = 0x10,
+ PCIE_LNK_X32 = 0x20,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xff,
+};
+
+#define PCIE_LINK_STATE_L0S 0x00000001
+#define PCIE_LINK_STATE_L1 0x00000002
+#define PCIE_LINK_STATE_CLKPM 0x00000004
+
+typedef int pci_power_t;
+
+#define PCI_D0 PCI_POWERSTATE_D0
+#define PCI_D1 PCI_POWERSTATE_D1
+#define PCI_D2 PCI_POWERSTATE_D2
+#define PCI_D3hot PCI_POWERSTATE_D3
+#define PCI_D3cold 4
+
+#define PCI_POWER_ERROR PCI_POWERSTATE_UNKNOWN
+
+extern const char *pci_power_names[6];
+
+#define PCI_ERR_UNCOR_STATUS PCIR_AER_UC_STATUS
+#define PCI_ERR_COR_STATUS PCIR_AER_COR_STATUS
+#define PCI_ERR_ROOT_COMMAND PCIR_AER_ROOTERR_CMD
+#define PCI_ERR_ROOT_ERR_SRC PCIR_AER_COR_SOURCE_ID
+
+#define PCI_EXT_CAP_ID_ERR PCIZ_AER
+#define PCI_EXT_CAP_ID_L1SS PCIZ_L1PM
+
+#define PCI_L1SS_CTL1 0x8
+#define PCI_L1SS_CTL1_L1SS_MASK 0xf
+
+#define PCI_IRQ_INTX 0x01
+#define PCI_IRQ_MSI 0x02
+#define PCI_IRQ_MSIX 0x04
+#define PCI_IRQ_ALL_TYPES (PCI_IRQ_MSIX|PCI_IRQ_MSI|PCI_IRQ_INTX)
+
+#if defined(LINUXKPI_VERSION) && (LINUXKPI_VERSION <= 61000)
+#define PCI_IRQ_LEGACY PCI_IRQ_INTX
+#endif
+
+struct pci_dev;
+
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table;
+ int (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *dev);
+ int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
+ int (*resume) (struct pci_dev *dev); /* Device woken up */
+ void (*shutdown) (struct pci_dev *dev); /* Device shutdown */
+ driver_t bsddriver;
+ devclass_t bsdclass;
+ struct device_driver driver;
+ const struct pci_error_handlers *err_handler;
+ bool isdrm;
+ int bsd_probe_return;
+ int (*bsd_iov_init)(device_t dev, uint16_t num_vfs,
+ const nvlist_t *pf_config);
+ void (*bsd_iov_uninit)(device_t dev);
+ int (*bsd_iov_add_vf)(device_t dev, uint16_t vfnum,
+ const nvlist_t *vf_config);
+};
+
+struct pci_bus {
+ struct pci_dev *self;
+ /* struct pci_bus *parent */
+ int domain;
+ int number;
+};
+
+extern struct list_head pci_drivers;
+extern struct list_head pci_devices;
+extern spinlock_t pci_lock;
+
+#define __devexit_p(x) x
+
+#define module_pci_driver(_drv) \
+ module_driver(_drv, linux_pci_register_driver, linux_pci_unregister_driver)
+
+struct msi_msg {
+ uint32_t data;
+};
+
+struct pci_msi_desc {
+ struct {
+ bool is_64;
+ } msi_attrib;
+};
+
+struct msi_desc {
+ struct msi_msg msg;
+ struct pci_msi_desc pci;
+};
+
+struct msix_entry {
+ int entry;
+ int vector;
+};
+
+/*
+ * If we find drivers accessing this from multiple KPIs we may have to
+ * refcount objects of this structure.
+ */
+struct resource;
+struct pci_mmio_region {
+ TAILQ_ENTRY(pci_mmio_region) next;
+ struct resource *res;
+ int rid;
+ int type;
+};
+
+struct pci_dev {
+ struct device dev;
+ struct list_head links;
+ struct pci_driver *pdrv;
+ struct pci_bus *bus;
+ struct pci_dev *root;
+ pci_power_t current_state;
+ uint16_t device;
+ uint16_t vendor;
+ uint16_t subsystem_vendor;
+ uint16_t subsystem_device;
+ unsigned int irq;
+ unsigned int devfn;
+ uint32_t class;
+ uint8_t revision;
+ uint8_t msi_cap;
+ uint8_t msix_cap;
+ bool managed; /* devres "pcim_*()". */
+ bool want_iomap_res;
+ bool msi_enabled;
+ bool msix_enabled;
+ phys_addr_t rom;
+ size_t romlen;
+ struct msi_desc **msi_desc;
+ char *path_name;
+ spinlock_t pcie_cap_lock;
+
+ TAILQ_HEAD(, pci_mmio_region) mmio;
+};
+
+int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name);
+int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
+ unsigned int flags);
+bool pci_device_is_present(struct pci_dev *pdev);
+
+int linuxkpi_pcim_enable_device(struct pci_dev *pdev);
+void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev);
+void *linuxkpi_pci_iomap_range(struct pci_dev *pdev, int mmio_bar,
+ unsigned long mmio_off, unsigned long mmio_size);
+void *linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size);
+void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res);
+int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask,
+ const char *name);
+int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name);
+void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar);
+void linuxkpi_pci_release_regions(struct pci_dev *pdev);
+int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
+ int nreq);
+
+/* Internal helper function(s). */
+struct pci_dev *lkpinew_pci_dev(device_t);
+void lkpi_pci_devres_release(struct device *, void *);
+struct pci_dev *lkpi_pci_get_device(uint16_t, uint16_t, struct pci_dev *);
+struct msi_desc *lkpi_pci_msi_desc_alloc(int);
+struct device *lkpi_pci_find_irq_dev(unsigned int irq);
+int _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec);
+
+#define pci_err(pdev, fmt, ...) \
+ dev_err(&(pdev)->dev, fmt, ##__VA_ARGS__)
+#define pci_info(pdev, fmt, ...) \
+ dev_info(&(pdev)->dev, fmt, ##__VA_ARGS__)
+
+static inline bool
+dev_is_pci(struct device *dev)
+{
+
+ return (device_get_devclass(dev->bsddev) == devclass_find("pci"));
+}
+
+static inline uint16_t
+pci_dev_id(struct pci_dev *pdev)
+{
+ return (PCI_DEVID(pdev->bus->number, pdev->devfn));
+}
+
+static inline int
+pci_resource_type(struct pci_dev *pdev, int bar)
+{
+ struct pci_map *pm;
+
+ pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar));
+ if (!pm)
+ return (-1);
+
+ if (PCI_BAR_IO(pm->pm_value))
+ return (SYS_RES_IOPORT);
+ else
+ return (SYS_RES_MEMORY);
+}
+
+/*
+ * All drivers just seem to want to inspect the type not flags.
+ */
+static inline int
+pci_resource_flags(struct pci_dev *pdev, int bar)
+{
+ int type;
+
+ type = pci_resource_type(pdev, bar);
+ if (type < 0)
+ return (0);
+ return (1 << type);
+}
+
+static inline const char *
+pci_name(struct pci_dev *d)
+{
+ return d->path_name;
+}
+
+static inline void *
+pci_get_drvdata(struct pci_dev *pdev)
+{
+
+ return dev_get_drvdata(&pdev->dev);
+}
+
+static inline void
+pci_set_drvdata(struct pci_dev *pdev, void *data)
+{
+
+ dev_set_drvdata(&pdev->dev, data);
+}
+
+static inline struct pci_dev *
+pci_dev_get(struct pci_dev *pdev)
+{
+
+ if (pdev != NULL)
+ get_device(&pdev->dev);
+ return (pdev);
+}
+
+static __inline void
+pci_dev_put(struct pci_dev *pdev)
+{
+
+ if (pdev != NULL)
+ put_device(&pdev->dev);
+}
+
+static inline int
+pci_enable_device(struct pci_dev *pdev)
+{
+
+ pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
+ pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
+ return (0);
+}
+
+static inline void
+pci_disable_device(struct pci_dev *pdev)
+{
+
+ pci_disable_busmaster(pdev->dev.bsddev);
+}
+
+static inline int
+pci_set_master(struct pci_dev *pdev)
+{
+
+ pci_enable_busmaster(pdev->dev.bsddev);
+ return (0);
+}
+
+static inline int
+pci_set_power_state(struct pci_dev *pdev, int state)
+{
+
+ pci_set_powerstate(pdev->dev.bsddev, state);
+ return (0);
+}
+
+static inline int
+pci_clear_master(struct pci_dev *pdev)
+{
+
+ pci_disable_busmaster(pdev->dev.bsddev);
+ return (0);
+}
+
+static inline bool
+pci_is_root_bus(struct pci_bus *pbus)
+{
+
+ return (pbus->self == NULL);
+}
+
+static inline struct pci_dev *
+pci_upstream_bridge(struct pci_dev *pdev)
+{
+
+ if (pci_is_root_bus(pdev->bus))
+ return (NULL);
+
+ /*
+ * If we do not have a (proper) "upstream bridge" set, e.g., we point
+ * to ourselves, try to handle this case on the fly like we do
+ * for pcie_find_root_port().
+ */
+ if (pdev == pdev->bus->self) {
+ device_t bridge;
+
+ /*
+ * In the case of DRM drivers, the passed device is a child of
+ * `vgapci`. We want to start the lookup from `vgapci`, i.e., the
+ * parent of the passed `drmn` device.
+ *
+ * We can use the `isdrm` flag to determine this.
+ */
+ bridge = pdev->dev.bsddev;
+ if (pdev->pdrv != NULL && pdev->pdrv->isdrm)
+ bridge = device_get_parent(bridge);
+ if (bridge == NULL)
+ goto done;
+
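+ /*
+ * Walk up the newbus tree: the parent of the PCI device is its
+ * pciX bus and the parent of that bus is the bridge (pcib).
+ * The devclass check below ensures the bridge itself sits on a
+ * PCI bus, i.e., that it is a PCI-to-PCI bridge and not a host
+ * bridge.
+ */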
+ bridge = device_get_parent(bridge);
+ if (bridge == NULL)
+ goto done;
+ bridge = device_get_parent(bridge);
+ if (bridge == NULL)
+ goto done;
+ if (device_get_devclass(device_get_parent(bridge)) !=
+ devclass_find("pci"))
+ goto done;
+
+ /*
+ * "bridge" is a PCI-to-PCI bridge. Create a Linux pci_dev
+ * for it so it can be returned.
+ */
+ pdev->bus->self = lkpinew_pci_dev(bridge);
+ }
+done:
+ return (pdev->bus->self);
+}
+
+#define pci_release_region(pdev, bar) linuxkpi_pci_release_region(pdev, bar)
+#define pci_release_regions(pdev) linuxkpi_pci_release_regions(pdev)
+#define pci_request_regions(pdev, res_name) \
+ linuxkpi_pci_request_regions(pdev, res_name)
+
+static inline void
+lkpi_pci_disable_msix(struct pci_dev *pdev)
+{
+
+ pci_release_msi(pdev->dev.bsddev);
+
+ /*
+ * The MSI-X IRQ numbers associated with this PCI device are no
+ * longer valid and might be re-assigned. Make sure
+ * lkpi_pci_find_irq_dev() no longer sees them by resetting
+ * their references to zero:
+ */
+ pdev->dev.irq_start = 0;
+ pdev->dev.irq_end = 0;
+ pdev->msix_enabled = false;
+}
+/* The define exists only for consistency; there is no conflict with a native name here. */
+#define pci_disable_msix(pdev) lkpi_pci_disable_msix(pdev)
+
+static inline void
+lkpi_pci_disable_msi(struct pci_dev *pdev)
+{
+
+ pci_release_msi(pdev->dev.bsddev);
+
+ pdev->dev.irq_start = 0;
+ pdev->dev.irq_end = 0;
+ pdev->irq = pdev->dev.irq;
+ pdev->msi_enabled = false;
+}
+#define pci_disable_msi(pdev) lkpi_pci_disable_msi(pdev)
+#define pci_free_irq_vectors(pdev) lkpi_pci_disable_msi(pdev)
+
+unsigned long pci_resource_start(struct pci_dev *pdev, int bar);
+unsigned long pci_resource_len(struct pci_dev *pdev, int bar);
+
+static inline bus_addr_t
+pci_bus_address(struct pci_dev *pdev, int bar)
+{
+
+ return (pci_resource_start(pdev, bar));
+}
+
+#define PCI_CAP_ID_EXP PCIY_EXPRESS
+#define PCI_CAP_ID_PCIX PCIY_PCIX
+#define PCI_CAP_ID_AGP PCIY_AGP
+#define PCI_CAP_ID_PM PCIY_PMG
+
+#define PCI_EXP_DEVCTL PCIER_DEVICE_CTL
+#define PCI_EXP_DEVCTL_PAYLOAD PCIEM_CTL_MAX_PAYLOAD
+#define PCI_EXP_DEVCTL_READRQ PCIEM_CTL_MAX_READ_REQUEST
+#define PCI_EXP_LNKCTL PCIER_LINK_CTL
+#define PCI_EXP_LNKSTA PCIER_LINK_STA
+
+static inline int
+pci_find_capability(struct pci_dev *pdev, int capid)
+{
+ int reg;
+
+ if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
+ return (0);
+ return (reg);
+}
+
+static inline int pci_pcie_cap(struct pci_dev *dev)
+{
+ return pci_find_capability(dev, PCI_CAP_ID_EXP);
+}
+
+static inline int
+pci_find_ext_capability(struct pci_dev *pdev, int capid)
+{
+ int reg;
+
+ if (pci_find_extcap(pdev->dev.bsddev, capid, &reg))
+ return (0);
+ return (reg);
+}
+
+#define PCIM_PCAP_PME_SHIFT 11
+static __inline bool
+pci_pme_capable(struct pci_dev *pdev, uint32_t flag)
+{
+ struct pci_devinfo *dinfo;
+ pcicfgregs *cfg;
+
+ if (flag > (PCIM_PCAP_D3PME_COLD >> PCIM_PCAP_PME_SHIFT))
+ return (false);
+
+ dinfo = device_get_ivars(pdev->dev.bsddev);
+ cfg = &dinfo->cfg;
+
+ if (cfg->pp.pp_cap == 0)
+ return (false);
+
+ if ((cfg->pp.pp_cap & (1 << (PCIM_PCAP_PME_SHIFT + flag))) != 0)
+ return (true);
+
+ return (false);
+}
+
+static inline int
+pci_disable_link_state(struct pci_dev *pdev, uint32_t flags)
+{
+
+ if (!pci_enable_aspm)
+ return (-EPERM);
+
+ return (-ENXIO);
+}
+
+static inline int
+pci_read_config_byte(const struct pci_dev *pdev, int where, u8 *val)
+{
+
+ *val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
+ return (0);
+}
+
+static inline int
+pci_read_config_word(const struct pci_dev *pdev, int where, u16 *val)
+{
+
+ *val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
+ return (0);
+}
+
+static inline int
+pci_read_config_dword(const struct pci_dev *pdev, int where, u32 *val)
+{
+
+ *val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
+ return (0);
+}
+
+static inline int
+pci_write_config_byte(const struct pci_dev *pdev, int where, u8 val)
+{
+
+ pci_write_config(pdev->dev.bsddev, where, val, 1);
+ return (0);
+}
+
+static inline int
+pci_write_config_word(const struct pci_dev *pdev, int where, u16 val)
+{
+
+ pci_write_config(pdev->dev.bsddev, where, val, 2);
+ return (0);
+}
+
+static inline int
+pci_write_config_dword(const struct pci_dev *pdev, int where, u32 val)
+{
+
+ pci_write_config(pdev->dev.bsddev, where, val, 4);
+ return (0);
+}
+
+int linux_pci_register_driver(struct pci_driver *pdrv);
+int linux_pci_register_drm_driver(struct pci_driver *pdrv);
+void linux_pci_unregister_driver(struct pci_driver *pdrv);
+void linux_pci_unregister_drm_driver(struct pci_driver *pdrv);
+
+#define pci_register_driver(pdrv) linux_pci_register_driver(pdrv)
+#define pci_unregister_driver(pdrv) linux_pci_unregister_driver(pdrv)
+
+/*
+ * Enable MSI-X.  A positive return value indicates the actual number of
+ * available vectors; negative return values are errors.
+ *
+ * NB: define added to prevent this definition of pci_enable_msix from
+ * clashing with the native FreeBSD version.
+ */
+#define pci_enable_msix(...) linuxkpi_pci_enable_msix(__VA_ARGS__)
+
+#define pci_enable_msix_range(...) \
+ linux_pci_enable_msix_range(__VA_ARGS__)
+
+static inline int
+pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+ int minvec, int maxvec)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return (-ERANGE);
+
+ do {
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc < 0) {
+ return (rc);
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return (-ENOSPC);
+ nvec = rc;
+ }
+ } while (rc);
+ return (nvec);
+}
+
+#define pci_enable_msi(pdev) \
+ linux_pci_enable_msi(pdev)
+
+static inline int
+pci_enable_msi(struct pci_dev *pdev)
+{
+
+ return (_lkpi_pci_enable_msi_range(pdev, 1, 1));
+}
+
+static inline int
+pci_channel_offline(struct pci_dev *pdev)
+{
+
+ return (pci_read_config(pdev->dev.bsddev, PCIR_VENDOR, 2) == PCIV_INVALID);
+}
+
+static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
+{
+ return -ENODEV;
+}
+
+static inline void pci_disable_sriov(struct pci_dev *dev)
+{
+}
+
+#define pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size) \
+ linuxkpi_pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size)
+#define pci_iomap(pdev, mmio_bar, mmio_size) \
+ linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size)
+#define pci_iounmap(pdev, res) linuxkpi_pci_iounmap(pdev, res)
+
+static inline void
+lkpi_pci_save_state(struct pci_dev *pdev)
+{
+
+ pci_save_state(pdev->dev.bsddev);
+}
+
+static inline void
+lkpi_pci_restore_state(struct pci_dev *pdev)
+{
+
+ pci_restore_state(pdev->dev.bsddev);
+}
+
+#define pci_save_state(dev) lkpi_pci_save_state(dev)
+#define pci_restore_state(dev) lkpi_pci_restore_state(dev)
+
+static inline int
+pci_reset_function(struct pci_dev *pdev)
+{
+
+ return (-ENOSYS);
+}
+
+#define DEFINE_PCI_DEVICE_TABLE(_table) \
+ const struct pci_device_id _table[] __devinitdata
+
+/* XXX This should not be necessary. */
+#define pcix_set_mmrbc(d, v) 0
+#define pcix_get_max_mmrbc(d) 0
+#define pcie_set_readrq(d, v) pci_set_max_read_req((d)->dev.bsddev, (v))
+
+#define PCI_DMA_BIDIRECTIONAL 0
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#define PCI_DMA_NONE 3
+
+#define pci_pool dma_pool
+#define pci_pool_destroy(...) dma_pool_destroy(__VA_ARGS__)
+#define pci_pool_alloc(...) dma_pool_alloc(__VA_ARGS__)
+#define pci_pool_free(...) dma_pool_free(__VA_ARGS__)
+#define pci_pool_create(_name, _pdev, _size, _align, _alloc) \
+ dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
+#define pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle) \
+ dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
+ _size, _vaddr, _dma_handle)
+#define pci_map_sg(_hwdev, _sg, _nents, _dir) \
+ dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
+ _sg, _nents, (enum dma_data_direction)_dir)
+#define pci_map_single(_hwdev, _ptr, _size, _dir) \
+ dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
+ (_ptr), (_size), (enum dma_data_direction)_dir)
+#define pci_unmap_single(_hwdev, _addr, _size, _dir) \
+ dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
+ _addr, _size, (enum dma_data_direction)_dir)
+#define pci_unmap_sg(_hwdev, _sg, _nents, _dir) \
+ dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
+ _sg, _nents, (enum dma_data_direction)_dir)
+#define pci_map_page(_hwdev, _page, _offset, _size, _dir) \
+ dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\
+ _offset, _size, (enum dma_data_direction)_dir)
+#define pci_unmap_page(_hwdev, _dma_address, _size, _dir) \
+ dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
+ _dma_address, _size, (enum dma_data_direction)_dir)
+#define pci_set_dma_mask(_pdev, mask) dma_set_mask(&(_pdev)->dev, (mask))
+#define pci_dma_mapping_error(_pdev, _dma_addr) \
+ dma_mapping_error(&(_pdev)->dev, _dma_addr)
+#define pci_set_consistent_dma_mask(_pdev, _mask) \
+ dma_set_coherent_mask(&(_pdev)->dev, (_mask))
+#define DECLARE_PCI_UNMAP_ADDR(x) DEFINE_DMA_UNMAP_ADDR(x);
+#define DECLARE_PCI_UNMAP_LEN(x) DEFINE_DMA_UNMAP_LEN(x);
+#define pci_unmap_addr dma_unmap_addr
+#define pci_unmap_addr_set dma_unmap_addr_set
+#define pci_unmap_len dma_unmap_len
+#define pci_unmap_len_set dma_unmap_len_set
+
+typedef unsigned int __bitwise pci_channel_state_t;
+typedef unsigned int __bitwise pci_ers_result_t;
+
+enum pci_channel_state {
+ pci_channel_io_normal = 1,
+ pci_channel_io_frozen = 2,
+ pci_channel_io_perm_failure = 3,
+};
+
+enum pci_ers_result {
+ PCI_ERS_RESULT_NONE = 1,
+ PCI_ERS_RESULT_CAN_RECOVER = 2,
+ PCI_ERS_RESULT_NEED_RESET = 3,
+ PCI_ERS_RESULT_DISCONNECT = 4,
+ PCI_ERS_RESULT_RECOVERED = 5,
+};
+
+/* PCI bus error event callbacks */
+struct pci_error_handlers {
+ pci_ers_result_t (*error_detected)(struct pci_dev *dev,
+ enum pci_channel_state error);
+ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
+ pci_ers_result_t (*link_reset)(struct pci_dev *dev);
+ pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
+ void (*resume)(struct pci_dev *dev);
+};
+
+/* FreeBSD does not support SRIOV - yet */
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+ return dev;
+}
+
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return !!pci_pcie_cap(dev);
+}
+
+static inline u16 pcie_flags_reg(struct pci_dev *dev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+
+ return reg16;
+}
+
+static inline int pci_pcie_type(struct pci_dev *dev)
+{
+ return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+
+static inline int pcie_cap_version(struct pci_dev *dev)
+{
+ return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
+{
+ return true;
+}
+
+static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
+ (type == PCI_EXP_TYPE_DOWNSTREAM &&
+ pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC;
+}
+
+static inline bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+ if (!pci_is_pcie(dev))
+ return false;
+
+ switch (pos) {
+ case PCI_EXP_FLAGS_TYPE:
+ return true;
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVSTA:
+ return pcie_cap_has_devctl(dev);
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKSTA:
+ return pcie_cap_has_lnkctl(dev);
+ case PCI_EXP_SLTCAP:
+ case PCI_EXP_SLTCTL:
+ case PCI_EXP_SLTSTA:
+ return pcie_cap_has_sltctl(dev);
+ case PCI_EXP_RTCTL:
+ case PCI_EXP_RTCAP:
+ case PCI_EXP_RTSTA:
+ return pcie_cap_has_rtctl(dev);
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_LNKSTA2:
+ return pcie_cap_version(dev) > 1;
+ default:
+ return false;
+ }
+}
+
+static inline int
+pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst)
+{
+ *dst = 0;
+ if (pos & 3)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return -EINVAL;
+
+ return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst);
+}
+
+static inline int
+pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst)
+{
+ *dst = 0;
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return -EINVAL;
+
+ return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst);
+}
+
+static inline int
+pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+static inline int
+pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ uint16_t clear, uint16_t set)
+{
+ int error;
+ uint16_t v;
+
+ if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL)
+ spin_lock(&dev->pcie_cap_lock);
+
+ error = pcie_capability_read_word(dev, pos, &v);
+ if (error == 0) {
+ v &= ~clear;
+ v |= set;
+ error = pcie_capability_write_word(dev, pos, v);
+ }
+
+ if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL)
+ spin_unlock(&dev->pcie_cap_lock);
+
+ return (error);
+}
+
+static inline int
+pcie_capability_set_word(struct pci_dev *dev, int pos, uint16_t val)
+{
+ return (pcie_capability_clear_and_set_word(dev, pos, 0, val));
+}
+
+static inline int
+pcie_capability_clear_word(struct pci_dev *dev, int pos, uint16_t val)
+{
+ return (pcie_capability_clear_and_set_word(dev, pos, val, 0));
+}
+
+static inline int pcie_get_minimum_link(struct pci_dev *dev,
+ enum pci_bus_speed *speed, enum pcie_link_width *width)
+{
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+ return (0);
+}
+
+static inline int
+pci_num_vf(struct pci_dev *dev)
+{
+ return (0);
+}
+
+static inline enum pci_bus_speed
+pcie_get_speed_cap(struct pci_dev *dev)
+{
+ device_t root;
+ uint32_t lnkcap, lnkcap2;
+ int error, pos;
+
+ root = device_get_parent(dev->dev.bsddev);
+ if (root == NULL)
+ return (PCI_SPEED_UNKNOWN);
+ root = device_get_parent(root);
+ if (root == NULL)
+ return (PCI_SPEED_UNKNOWN);
+ root = device_get_parent(root);
+ if (root == NULL)
+ return (PCI_SPEED_UNKNOWN);
+
+ if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
+ pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
+ return (PCI_SPEED_UNKNOWN);
+
+ if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0)
+ return (PCI_SPEED_UNKNOWN);
+
+ lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);
+
+ if (lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ return (PCIE_SPEED_2_5GT);
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ return (PCIE_SPEED_5_0GT);
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ return (PCIE_SPEED_8_0GT);
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
+ return (PCIE_SPEED_16_0GT);
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
+ return (PCIE_SPEED_32_0GT);
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_64_0GB)
+ return (PCIE_SPEED_64_0GT);
+ } else { /* pre-r3.0 */
+ lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+ return (PCIE_SPEED_2_5GT);
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+ return (PCIE_SPEED_5_0GT);
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
+ return (PCIE_SPEED_8_0GT);
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
+ return (PCIE_SPEED_16_0GT);
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_32_0GB)
+ return (PCIE_SPEED_32_0GT);
+ if (lnkcap & PCI_EXP_LNKCAP_SLS_64_0GB)
+ return (PCIE_SPEED_64_0GT);
+ }
+ return (PCI_SPEED_UNKNOWN);
+}
+
+static inline enum pcie_link_width
+pcie_get_width_cap(struct pci_dev *dev)
+{
+ uint32_t lnkcap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap)
+ return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
+
+ return (PCIE_LNK_WIDTH_UNKNOWN);
+}
+
+static inline int
+pcie_get_mps(struct pci_dev *dev)
+{
+ return (pci_get_max_payload(dev->dev.bsddev));
+}
+
+static inline uint32_t
+PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)
+{
+
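+ /*
+ * Scale the raw signalling rate by the line-code efficiency:
+ * 8b/10b for 2.5/5.0 GT/s, 128b/130b for 8.0 GT/s and faster.
+ */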
+ switch(spd) {
+ case PCIE_SPEED_64_0GT:
+ return (64000 * 128 / 130);
+ case PCIE_SPEED_32_0GT:
+ return (32000 * 128 / 130);
+ case PCIE_SPEED_16_0GT:
+ return (16000 * 128 / 130);
+ case PCIE_SPEED_8_0GT:
+ return (8000 * 128 / 130);
+ case PCIE_SPEED_5_0GT:
+ return (5000 * 8 / 10);
+ case PCIE_SPEED_2_5GT:
+ return (2500 * 8 / 10);
+ default:
+ return (0);
+ }
+}
+
+static inline uint32_t
+pcie_bandwidth_available(struct pci_dev *pdev,
+ struct pci_dev **limiting,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
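+ /*
+ * Note: unlike Linux we do not walk the chain of upstream links;
+ * only the device's own speed/width capabilities are used and
+ * *limiting is left untouched.
+ */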
+ enum pci_bus_speed nspeed = pcie_get_speed_cap(pdev);
+ enum pcie_link_width nwidth = pcie_get_width_cap(pdev);
+
+ if (speed)
+ *speed = nspeed;
+ if (width)
+ *width = nwidth;
+
+ return (nwidth * PCIE_SPEED2MBS_ENC(nspeed));
+}
+
+static inline bool
+pcie_aspm_enabled(struct pci_dev *pdev)
+{
+ return (false);
+}
+
+static inline struct pci_dev *
+pcie_find_root_port(struct pci_dev *pdev)
+{
+ device_t root;
+
+ if (pdev->root != NULL)
+ return (pdev->root);
+
+ root = pci_find_pcie_root_port(pdev->dev.bsddev);
+ if (root == NULL)
+ return (NULL);
+
+ pdev->root = lkpinew_pci_dev(root);
+ return (pdev->root);
+}
+
+/* This is needed when someone "hot-unplugs" (physically rips out) the device. */
+static inline void
+pci_lock_rescan_remove(void)
+{
+}
+
+static inline void
+pci_unlock_rescan_remove(void)
+{
+}
+
+static __inline void
+pci_stop_and_remove_bus_device(struct pci_dev *pdev)
+{
+}
+
+static inline int
+pci_rescan_bus(struct pci_bus *pbus)
+{
+ device_t *devlist, parent;
+ int devcount, error;
+
+ if (!device_is_attached(pbus->self->dev.bsddev))
+ return (0);
+ /* pci_rescan_method() will work on the pcib (parent). */
+ error = BUS_RESCAN(pbus->self->dev.bsddev);
+ if (error != 0)
+ return (0);
+
+ parent = device_get_parent(pbus->self->dev.bsddev);
+ error = device_get_children(parent, &devlist, &devcount);
+ if (error != 0)
+ return (0);
+ if (devcount != 0)
+ free(devlist, M_TEMP);
+
+ return (devcount);
+}
+
+/*
+ * The following functions can be used to attach/detach the LinuxKPI's
+ * PCI device runtime. The pci_driver and pci_device_id pointers are
+ * allowed to be NULL. All other pointers must be valid.
+ * The pci_dev structure should be zero-initialized before being passed
+ * to linux_pci_attach_device().
+ */
+extern int linux_pci_attach_device(device_t, struct pci_driver *,
+ const struct pci_device_id *, struct pci_dev *);
+extern int linux_pci_detach_device(struct pci_dev *);
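+
+/*
+ * A minimal usage sketch (the softc member and error handling are
+ * illustrative only):
+ *
+ *	memset(&sc->pdev, 0, sizeof(sc->pdev));
+ *	error = linux_pci_attach_device(dev, NULL, NULL, &sc->pdev);
+ *	if (error != 0)
+ *		return (error);
+ *	...
+ *	linux_pci_detach_device(&sc->pdev);
+ */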
+
+static inline int
+pci_dev_present(const struct pci_device_id *cur)
+{
+ while (cur != NULL && (cur->vendor || cur->device)) {
+ if (pci_find_device(cur->vendor, cur->device) != NULL) {
+ return (1);
+ }
+ cur++;
+ }
+ return (0);
+}
+
+static inline const struct pci_device_id *
+pci_match_id(const struct pci_device_id *ids, struct pci_dev *pdev)
+{
+ if (ids == NULL)
+ return (NULL);
+
+ for (;
+ ids->vendor != 0 || ids->subvendor != 0 || ids->class_mask != 0;
+ ids++)
+ if ((ids->vendor == PCI_ANY_ID ||
+ ids->vendor == pdev->vendor) &&
+ (ids->device == PCI_ANY_ID ||
+ ids->device == pdev->device) &&
+ (ids->subvendor == PCI_ANY_ID ||
+ ids->subvendor == pdev->subsystem_vendor) &&
+ (ids->subdevice == PCI_ANY_ID ||
+ ids->subdevice == pdev->subsystem_device) &&
+ ((ids->class ^ pdev->class) & ids->class_mask) == 0)
+ return (ids);
+
+ return (NULL);
+}
+
+struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain,
+ unsigned int bus, unsigned int devfn);
+#define pci_get_domain_bus_and_slot(domain, bus, devfn) \
+ lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn)
+
+static inline int
+pci_domain_nr(struct pci_bus *pbus)
+{
+
+ return (pbus->domain);
+}
+
+static inline int
+pci_bus_read_config(struct pci_bus *bus, unsigned int devfn,
+ int pos, uint32_t *val, int len)
+{
+
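+ /* XXX devfn is ignored; the access goes to bus->self's config space. */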
+ *val = pci_read_config(bus->self->dev.bsddev, pos, len);
+ return (0);
+}
+
+static inline int
+pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, int pos, u16 *val)
+{
+ uint32_t tmp;
+ int ret;
+
+ ret = pci_bus_read_config(bus, devfn, pos, &tmp, 2);
+ *val = (u16)tmp;
+ return (ret);
+}
+
+static inline int
+pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, int pos, u8 *val)
+{
+ uint32_t tmp;
+ int ret;
+
+ ret = pci_bus_read_config(bus, devfn, pos, &tmp, 1);
+ *val = (u8)tmp;
+ return (ret);
+}
+
+static inline int
+pci_bus_write_config(struct pci_bus *bus, unsigned int devfn, int pos,
+ uint32_t val, int size)
+{
+
+ pci_write_config(bus->self->dev.bsddev, pos, val, size);
+ return (0);
+}
+
+static inline int
+pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, int pos,
+ uint8_t val)
+{
+ return (pci_bus_write_config(bus, devfn, pos, val, 1));
+}
+
+static inline int
+pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, int pos,
+ uint16_t val)
+{
+ return (pci_bus_write_config(bus, devfn, pos, val, 2));
+}
+
+struct pci_dev *lkpi_pci_get_class(unsigned int class, struct pci_dev *from);
+#define pci_get_class(class, from) lkpi_pci_get_class(class, from)
+struct pci_dev *lkpi_pci_get_base_class(unsigned int class,
+ struct pci_dev *from);
+#define pci_get_base_class(class, from) lkpi_pci_get_base_class(class, from)
+
+/* -------------------------------------------------------------------------- */
+
+#define pcim_enable_device(pdev) linuxkpi_pcim_enable_device(pdev)
+#define pcim_iomap_table(pdev) linuxkpi_pcim_iomap_table(pdev)
+#define pcim_iomap_regions(pdev, mask, name) \
+ linuxkpi_pcim_iomap_regions(pdev, mask, name)
+
+static inline int
+pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name)
+{
+ uint32_t requests, req_mask;
+ int bar, error;
+
+ /* Request all the BARs ("regions") we do not iomap. */
+ req_mask = ((1 << (PCIR_MAX_BAR_0 + 1)) - 1) & ~mask;
+ for (bar = requests = 0; requests != req_mask; bar++) {
+ if ((req_mask & (1 << bar)) == 0)
+ continue;
+ error = pci_request_region(pdev, bar, name);
+ if (error != 0 && error != -ENODEV)
+ goto err;
+ requests |= (1 << bar);
+ }
+
+ error = pcim_iomap_regions(pdev, mask, name);
+ if (error != 0)
+ goto err;
+
+ return (0);
+
+err:
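+ /* Unwind: release every BAR we managed to request above. */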
+ for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
+ if ((requests & (1 << bar)) != 0)
+ pci_release_region(pdev, bar);
+ }
+
+ return (-EINVAL);
+}
+
+/*
+ * We cannot simply re-define pci_get_device() as we normally would and
+ * then hide it in linux_pci.c, because too many semi-native drivers still
+ * include linux/pci.h and would clash with the native PCI KPI. Linux drivers
+ * using pci_get_device() need to be changed to call linuxkpi_pci_get_device().
+ */
+static inline struct pci_dev *
+linuxkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
+{
+
+ return (lkpi_pci_get_device(vendor, device, odev));
+}
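+
+/*
+ * I.e., instead of pci_get_device(vendor, device, prev) a converted
+ * driver calls:
+ *
+ *	pdev = linuxkpi_pci_get_device(vendor, device, prev);
+ */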
+
+/* This is a FreeBSD extension so we can use bus_*(). */
+static inline void
+linuxkpi_pcim_want_to_use_bus_functions(struct pci_dev *pdev)
+{
+ pdev->want_iomap_res = true;
+}
+
+static inline bool
+pci_is_thunderbolt_attached(struct pci_dev *pdev)
+{
+
+ return (false);
+}
+
+static inline void *
+pci_platform_rom(struct pci_dev *pdev, size_t *size)
+{
+
+ return (NULL);
+}
+
+static inline void
+pci_ignore_hotplug(struct pci_dev *pdev)
+{
+}
+
+static inline const char *
+pci_power_name(pci_power_t state)
+{
+ int pstate = state + 1;
+
+ if (pstate >= 0 && pstate < nitems(pci_power_names))
+ return (pci_power_names[pstate]);
+ else
+ return (pci_power_names[0]);
+}
+
+static inline int
+pcie_get_readrq(struct pci_dev *dev)
+{
+ u16 ctl;
+
+ if (pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl))
+ return (-EINVAL);
+
+ return (128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12));
+}
+
+static inline bool
+pci_is_enabled(struct pci_dev *pdev)
+{
+
+ return ((pci_read_config(pdev->dev.bsddev, PCIR_COMMAND, 2) &
+ PCIM_CMD_BUSMASTEREN) != 0);
+}
+
+static inline int
+pci_wait_for_pending_transaction(struct pci_dev *pdev)
+{
+
+ return (0);
+}
+
+static inline int
+pci_assign_resource(struct pci_dev *pdev, int bar)
+{
+
+ return (0);
+}
+
+static inline int
+pci_irq_vector(struct pci_dev *pdev, unsigned int vector)
+{
+
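+ /*
+ * Without MSI/MSI-X only vector 0 (the INTx line) is valid;
+ * otherwise 'vector' indexes into the allocated MSI/MSI-X range.
+ */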
+ if (!pdev->msix_enabled && !pdev->msi_enabled) {
+ if (vector != 0)
+ return (-EINVAL);
+ return (pdev->irq);
+ }
+
+ if (pdev->msix_enabled || pdev->msi_enabled) {
+ if ((pdev->dev.irq_start + vector) >= pdev->dev.irq_end)
+ return (-EINVAL);
+ return (pdev->dev.irq_start + vector);
+ }
+
+ return (-ENXIO);
+}
+
+static inline int
+pci_wake_from_d3(struct pci_dev *pdev, bool enable)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (0);
+}
+
+#endif /* _LINUXKPI_LINUX_PCI_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pci_ids.h b/sys/compat/linuxkpi/common/include/linux/pci_ids.h
new file mode 100644
index 000000000000..e318f6f75ce7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pci_ids.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PCI_IDS_H
+#define _LINUXKPI_LINUX_PCI_IDS_H
+
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+
+#define PCI_CLASS_ACCELERATOR_PROCESSING 0x1200
+
+
+/* XXX We should really generate these and use them throughout the tree. */
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_VENDOR_ID_ASUSTEK 0x1043
+#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+#define PCI_VENDOR_ID_ATHEROS 0x168c
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_VENDOR_ID_BROADCOM 0x14e4
+#define PCI_VENDOR_ID_DELL 0x1028
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_VENDOR_ID_ITTIM 0x0b48
+#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+#define PCI_VENDOR_ID_MELLANOX 0x15b3
+#define PCI_VENDOR_ID_QCOM 0x17cb
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
+#define PCI_VENDOR_ID_SERVERWORKS 0x1166
+#define PCI_VENDOR_ID_SONY 0x104d
+#define PCI_VENDOR_ID_TOPSPIN 0x1867
+#define PCI_VENDOR_ID_UBIQUITI 0x0777
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
+#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
+#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
+#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+#define PCI_SUBDEVICE_ID_QEMU 0x1100
+
+#endif /* _LINUXKPI_LINUX_PCI_IDS_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/perf_event.h b/sys/compat/linuxkpi/common/include/linux/perf_event.h
new file mode 100644
index 000000000000..86b0d06cdc1f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/perf_event.h
@@ -0,0 +1,34 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PERF_EVENT_H_
+#define _LINUXKPI_LINUX_PERF_EVENT_H_
+
+#include <linux/cgroup.h>
+
+#endif /* _LINUXKPI_LINUX_PERF_EVENT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pfn.h b/sys/compat/linuxkpi/common/include/linux/pfn.h
new file mode 100644
index 000000000000..26d47b9bc3b1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pfn.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PFN_H_
+#define _LINUXKPI_LINUX_PFN_H_
+
+#include <linux/types.h>
+
+typedef struct {
+ u64 val;
+} pfn_t;
+
+#define PFN_ALIGN(x) (((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
+
+#endif /* _LINUXKPI_LINUX_PFN_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pfn_t.h b/sys/compat/linuxkpi/common/include/linux/pfn_t.h
new file mode 100644
index 000000000000..f22289802cb8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pfn_t.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PFN_T_H_
+#define _LINUXKPI_LINUX_PFN_T_H_
+
+#include <linux/mm.h>
+
+CTASSERT(PAGE_SHIFT > 4);
+
+#define PFN_FLAGS_MASK (((u64)(PAGE_SIZE - 1)) << (64 - PAGE_SHIFT))
+#define PFN_SG_CHAIN (1ULL << (64 - 1))
+#define PFN_SG_LAST (1ULL << (64 - 2))
+#define PFN_DEV (1ULL << (64 - 3))
+#define PFN_MAP (1ULL << (64 - 4))
+
+static inline pfn_t
+__pfn_to_pfn_t(unsigned long pfn, u64 flags)
+{
+ pfn_t pfn_t = { pfn | (flags & PFN_FLAGS_MASK) };
+
+ return (pfn_t);
+}
+
+static inline pfn_t
+pfn_to_pfn_t(unsigned long pfn)
+{
+ return (__pfn_to_pfn_t(pfn, 0));
+}
+
+#endif /* _LINUXKPI_LINUX_PFN_T_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/pid.h b/sys/compat/linuxkpi/common/include/linux/pid.h
new file mode 100644
index 000000000000..60cb9f725b21
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pid.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PID_H_
+#define _LINUXKPI_LINUX_PID_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+
+enum pid_type {
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX
+};
+
+#define pid_nr(n) (n)
+#define pid_vnr(n) (n)
+#define from_kuid_munged(a, uid) (uid)
+
+#define pid_task(pid, type) ({ \
+ struct task_struct *__ts; \
+ CTASSERT((type) == PIDTYPE_PID); \
+ __ts = linux_pid_task(pid); \
+ __ts; \
+})
+
+#define get_pid_task(pid, type) ({ \
+ struct task_struct *__ts; \
+ CTASSERT((type) == PIDTYPE_PID); \
+ __ts = linux_get_pid_task(pid); \
+ __ts; \
+})
+
+#define get_task_pid(task, type) ({ \
+ CTASSERT((type) == PIDTYPE_PID); \
+ (task)->task_thread->td_tid; \
+})
+
+struct task_struct;
+extern struct task_struct *linux_pid_task(pid_t);
+extern struct task_struct *linux_get_pid_task(pid_t);
+
+#endif /* _LINUXKPI_LINUX_PID_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/platform_device.h b/sys/compat/linuxkpi/common/include/linux/platform_device.h
new file mode 100644
index 000000000000..6853e709cb70
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/platform_device.h
@@ -0,0 +1,97 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PLATFORM_DEVICE_H
+#define _LINUXKPI_LINUX_PLATFORM_DEVICE_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+struct platform_device {
+ const char *name;
+ int id;
+ bool id_auto;
+ struct device dev;
+};
+
+struct platform_driver {
+ int (*remove)(struct platform_device *);
+ struct device_driver driver;
+};
+
+#define dev_is_platform(dev) (false)
+#define to_platform_device(dev) (NULL)
+
+static __inline int
+platform_driver_register(struct platform_driver *pdrv)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENXIO);
+}
+
+static __inline void *
+dev_get_platdata(struct device *dev)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static __inline int
+platform_driver_probe(struct platform_driver *pdrv,
+ int(*pd_probe_f)(struct platform_device *))
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENODEV);
+}
+
+static __inline void
+platform_driver_unregister(struct platform_driver *pdrv)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return;
+}
+
+static __inline int
+platform_device_register(struct platform_device *pdev)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (0);
+}
+
+static __inline void
+platform_device_unregister(struct platform_device *pdev)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return;
+}
+
+#endif /* _LINUXKPI_LINUX_PLATFORM_DEVICE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/pm.h b/sys/compat/linuxkpi/common/include/linux/pm.h
new file mode 100644
index 000000000000..c8d943027909
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pm.h
@@ -0,0 +1,100 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2024 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PM_H
+#define _LINUXKPI_LINUX_PM_H
+
+#include <linux/kernel.h> /* pr_debug */
+#include <asm/atomic.h>
+
+/* Needed but breaks linux_usb.c */
+/* #include <linux/completion.h> */
+/* #include <linux/wait.h> */
+
+struct device;
+
+typedef struct pm_message {
+ int event;
+} pm_message_t;
+
+struct dev_pm_domain {
+};
+
+struct dev_pm_info {
+ atomic_t usage_count;
+};
+
+#define PM_EVENT_FREEZE 0x0001
+#define PM_EVENT_SUSPEND 0x0002
+
+#define pm_sleep_ptr(_p) \
+ (IS_ENABLED(CONFIG_PM_SLEEP) ? (_p) : NULL)
+
+#ifdef CONFIG_PM_SLEEP
+#define __SET_PM_OPS(_suspendfunc, _resumefunc) \
+ .suspend = _suspendfunc, \
+ .resume = _resumefunc, \
+ .freeze = _suspendfunc, \
+ .thaw = _resumefunc, \
+ .poweroff = _suspendfunc, \
+ .restore = _resumefunc, \
+
+#define SIMPLE_DEV_PM_OPS(_name, _suspendfunc, _resumefunc) \
+const struct dev_pm_ops _name = { \
+ __SET_PM_OPS(_suspendfunc, _resumefunc) \
+}
+
+#define DEFINE_SIMPLE_DEV_PM_OPS(_name, _suspendfunc, _resumefunc) \
+const struct dev_pm_ops _name = { \
+ __SET_PM_OPS(_suspendfunc, _resumefunc) \
+}
+
+#define SET_SYSTEM_SLEEP_PM_OPS(_suspendfunc, _resumefunc) \
+ __SET_PM_OPS(_suspendfunc, _resumefunc)
+#else
+#define SIMPLE_DEV_PM_OPS(_name, _suspendfunc, _resumefunc) \
+const struct dev_pm_ops _name = { \
+}
+#define DEFINE_SIMPLE_DEV_PM_OPS(_name, _suspendfunc, _resumefunc) \
+const struct dev_pm_ops _name = { \
+}
+#endif
+
+bool linuxkpi_device_can_wakeup(struct device *);
+#define device_can_wakeup(_dev) linuxkpi_device_can_wakeup(_dev)
+
+static inline void
+pm_wakeup_event(struct device *dev __unused, unsigned int x __unused)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
+
+#endif /* _LINUXKPI_LINUX_PM_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/pm_qos.h b/sys/compat/linuxkpi/common/include/linux/pm_qos.h
new file mode 100644
index 000000000000..47c41a819ba8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pm_qos.h
@@ -0,0 +1,57 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PM_QOS_H
+#define _LINUXKPI_LINUX_PM_QOS_H
+
+#define PM_QOS_DEFAULT_VALUE (-1)
+
+struct pm_qos_request {
+};
+
+static inline void
+cpu_latency_qos_add_request(struct pm_qos_request *qos, int x)
+{
+}
+
+static inline void
+cpu_latency_qos_update_request(struct pm_qos_request *qos, int x)
+{
+}
+
+static inline void
+cpu_latency_qos_remove_request(struct pm_qos_request *qos)
+{
+}
+
+static inline bool
+cpu_latency_qos_request_active(struct pm_qos_request *qos)
+{
+ return (false);
+}
+
+#endif /* _LINUXKPI_LINUX_PM_QOS_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/pm_runtime.h b/sys/compat/linuxkpi/common/include/linux/pm_runtime.h
new file mode 100644
index 000000000000..6114b7b159d7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pm_runtime.h
@@ -0,0 +1,54 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_PM_RUNTIME_H_
+#define _LINUXKPI_LINUX_PM_RUNTIME_H_
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+#define pm_runtime_mark_last_busy(x) (void)(x)
+#define pm_runtime_use_autosuspend(x) (void)(x)
+#define pm_runtime_dont_use_autosuspend(x) (void)(x)
+#define pm_runtime_put_autosuspend(x) (void)(x)
+#define pm_runtime_set_autosuspend_delay(x, y) do { (void)(x); (void)(y); } while (0)
+#define pm_runtime_set_active(x) (void)(x)
+#define pm_runtime_allow(x) (void)(x)
+#define pm_runtime_put_noidle(x) (void)(x)
+#define pm_runtime_forbid(x) (void)(x)
+#define pm_runtime_get_noresume(x) (void)(x)
+#define pm_runtime_put(x) (void)(x)
+#define pm_runtime_enable(x) (void)(x)
+#define pm_runtime_disable(x) (void)(x)
+#define pm_runtime_autosuspend(x) (void)(x)
+#define pm_runtime_resume(x) (void)(x)
+
+static inline int
+pm_runtime_get_sync(struct device *dev)
+{
+ return 0;
+}
+
+static inline int
+pm_runtime_get_if_in_use(struct device *dev)
+{
+ return 1;
+}
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION < 60900
+static inline int
+pm_runtime_get_if_active(struct device *dev, bool x)
+#else
+static inline int
+pm_runtime_get_if_active(struct device *dev)
+#endif
+{
+ return 1;
+}
+
+static inline int
+pm_runtime_suspended(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* _LINUXKPI_LINUX_PM_RUNTIME_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/poison.h b/sys/compat/linuxkpi/common/include/linux/poison.h
new file mode 100644
index 000000000000..f1594c6dd1dc
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/poison.h
@@ -0,0 +1,9 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_POISON_H
+#define _LINUXKPI_LINUX_POISON_H
+
+#define POISON_INUSE 0xdb
+#define POISON_FREE 0xdf
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/poll.h b/sys/compat/linuxkpi/common/include/linux/poll.h
new file mode 100644
index 000000000000..3acb3c740954
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/poll.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_POLL_H_
+#define _LINUXKPI_LINUX_POLL_H_
+
+#include <sys/poll.h>
+#include <sys/fcntl.h>
+
+#include <linux/eventpoll.h>
+#include <linux/wait.h>
+#include <linux/file.h>
+
+typedef struct poll_table_struct {
+} poll_table;
+
+extern void linux_poll_wait(struct linux_file *, wait_queue_head_t *, poll_table *);
+#define poll_wait(...) linux_poll_wait(__VA_ARGS__)
+
+extern void linux_poll_wakeup(struct linux_file *);
+
+#endif /* _LINUXKPI_LINUX_POLL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/power_supply.h b/sys/compat/linuxkpi/common/include/linux/power_supply.h
new file mode 100644
index 000000000000..8855cfff0539
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/power_supply.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_POWER_SUPPLY_H_
+#define _LINUXKPI_LINUX_POWER_SUPPLY_H_
+
+#include <sys/types.h>
+#include <sys/power.h>
+
+static inline int
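+/*
+ * The performance power profile is selected while running on AC power,
+ * so use it as a proxy for "system (mains) supplied".
+ */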
+power_supply_is_system_supplied(void)
+{
+
+ return (power_profile_get_state() == POWER_PROFILE_PERFORMANCE);
+}
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/preempt.h b/sys/compat/linuxkpi/common/include/linux/preempt.h
new file mode 100644
index 000000000000..32177d4a980c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/preempt.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PREEMPT_H_
+#define _LINUXKPI_LINUX_PREEMPT_H_
+
+#include <linux/hardirq.h>
+#include <linux/list.h>
+
+#define in_interrupt() \
+ (curthread->td_intr_nesting_level || curthread->td_critnest)
+
+#define in_task() (curthread->td_priority >= PI_SOFT)
+
+#define preempt_disable() critical_enter()
+#define preempt_enable() critical_exit()
+
+#endif /* _LINUXKPI_LINUX_PREEMPT_H_ */
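
preempt_disable()/preempt_enable() map straight onto FreeBSD critical sections; a brief sketch of the usual pairing (the per-CPU update is hypothetical):

        /* Hypothetical illustration only. */
        preempt_disable();                      /* critical_enter() underneath */
        update_per_cpu_counter();               /* hypothetical per-CPU work */
        preempt_enable();                       /* critical_exit() */
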
diff --git a/sys/compat/linuxkpi/common/include/linux/prefetch.h b/sys/compat/linuxkpi/common/include/linux/prefetch.h
new file mode 100644
index 000000000000..71839f0ca191
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/prefetch.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PREFETCH_H_
+#define _LINUXKPI_LINUX_PREFETCH_H_
+
+#define prefetchw(x) __builtin_prefetch(x, 1)
+
+#endif /* _LINUXKPI_LINUX_PREFETCH_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/printk.h b/sys/compat/linuxkpi/common/include/linux/printk.h
new file mode 100644
index 000000000000..da9d45122d4d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/printk.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_PRINTK_H_
+#define _LINUXKPI_LINUX_PRINTK_H_
+
+#include <linux/kernel.h>
+
+/* GID printing macros */
+#define GID_PRINT_FMT "%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x"
+#define GID_PRINT_ARGS(gid_raw) htons(((u16 *)gid_raw)[0]), htons(((u16 *)gid_raw)[1]),\
+ htons(((u16 *)gid_raw)[2]), htons(((u16 *)gid_raw)[3]),\
+ htons(((u16 *)gid_raw)[4]), htons(((u16 *)gid_raw)[5]),\
+ htons(((u16 *)gid_raw)[6]), htons(((u16 *)gid_raw)[7])
+
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+
+int __lkpi_hexdump_printf(void *, const char *, ...) __printflike(2, 3);
+
+void lkpi_hex_dump(int(*)(void *, const char *, ...), void *arg1,
+ const char *, const char *, const int, const int, const int,
+ const void *, size_t, const bool);
+
+static inline void
+print_hex_dump(const char *level, const char *prefix_str,
+ const int prefix_type, const int rowsize, const int groupsize,
+ const void *buf, size_t len, const bool ascii)
+{
+ lkpi_hex_dump(__lkpi_hexdump_printf, NULL, level, prefix_str, prefix_type,
+ rowsize, groupsize, buf, len, ascii);
+}
+
+static inline void
+print_hex_dump_bytes(const char *prefix_str, const int prefix_type,
+ const void *buf, size_t len)
+{
+ print_hex_dump(NULL, prefix_str, prefix_type, 16, 1, buf, len, 0);
+}
+
+#define printk_ratelimit() ({ \
+ static linux_ratelimit_t __ratelimited; \
+ linux_ratelimited(&__ratelimited); \
+})
+
+#define printk_ratelimited(...) ({ \
+ bool __retval = printk_ratelimit(); \
+ if (__retval) \
+ printk(__VA_ARGS__); \
+ __retval; \
+})
+
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+
+#define print_hex_dump_debug(...) \
+ print_hex_dump(KERN_DEBUG, ##__VA_ARGS__)
+
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+
+#define no_printk(fmt, ...) \
+({ \
+ if (0) \
+ printk(pr_fmt(fmt), ##__VA_ARGS__); \
+ 0; \
+})
+
+#endif /* _LINUXKPI_LINUX_PRINTK_H_ */
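
A hedged sketch of the hex-dump and ratelimit helpers above; the buffer and counter are hypothetical:

        /* Hypothetical illustration only. */
        uint8_t hdr[32];                        /* filled in elsewhere */
        int dropped = 0;

        print_hex_dump(KERN_DEBUG, "rxhdr: ", DUMP_PREFIX_OFFSET,
            16, 1, hdr, sizeof(hdr), true);
        pr_err_ratelimited("dropped %d frames\n", dropped);
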
diff --git a/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h b/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h
new file mode 100644
index 000000000000..aad46cc25b1b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ptp_clock_kernel.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2023 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PTP_CLOCK_KERNEL_H
+#define _LINUXKPI_LINUX_PTP_CLOCK_KERNEL_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kernel.h> /* pr_debug */
+#include <linux/ktime.h> /* system_device_crosststamp */
+
+/* This very likely belongs elsewhere. */
+struct system_device_crosststamp {
+ ktime_t device;
+ ktime_t sys_realtime;
+ ktime_t sys_monotonic_raw; /* name guessed based on comment */
+};
+
+struct ptp_clock_info {
+ char name[32];
+ int max_adj;
+ void *owner; /* THIS_MODULE */
+ int (*adjfine)(struct ptp_clock_info *, long);
+ int (*adjtime)(struct ptp_clock_info *, s64);
+ int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *);
+ int (*gettime64)(struct ptp_clock_info *, struct timespec *);
+};
+
+static inline struct ptp_clock *
+ptp_clock_register(struct ptp_clock_info *ptpci, struct device *dev)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static inline void
+ptp_clock_unregister(struct ptp_clock *ptpc)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static inline int
+ptp_clock_index(struct ptp_clock *ptpc)
+{
+ pr_debug("%s: TODO\n", __func__);
+ return (0);
+}
+
+#endif /* _LINUXKPI_LINUX_PTP_CLOCK_KERNEL_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/pwm.h b/sys/compat/linuxkpi/common/include/linux/pwm.h
new file mode 100644
index 000000000000..c0740db675e8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/pwm.h
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (c) 2022 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_PWM_H_
+#define _LINUXKPI_LINUX_PWM_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+struct pwm_state {
+ uint64_t period;
+ bool enabled;
+};
+
+struct pwm_device {
+ struct pwm_state state;
+};
+
+static inline struct pwm_device *
+pwm_get(struct device *dev, const char *consumer)
+{
+ return (ERR_PTR(-ENODEV));
+}
+
+static inline void
+pwm_put(struct pwm_device *pwm)
+{
+}
+
+static inline int
+pwm_enable(struct pwm_device *pwm)
+{
+ return (-EINVAL);
+}
+
+static inline void
+pwm_disable(struct pwm_device *pwm)
+{
+}
+
+static inline bool
+pwm_is_enabled(const struct pwm_device *pwm)
+{
+ return (false);
+}
+
+static inline unsigned int
+pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
+{
+ return (0);
+}
+
+static inline int
+pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
+ unsigned int scale)
+{
+ return (0);
+}
+
+static inline void
+pwm_get_state(const struct pwm_device *pwm, struct pwm_state *state)
+{
+ *state = pwm->state;
+}
+
+static inline int
+pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
+{
+ return (-ENOTSUPP);
+}
+
+static inline int
+pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state)
+{
+ return (0);
+}
+
+#endif /* _LINUXKPI_LINUX_PWM_H_ */
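
These are consumer-side stubs: pwm_get() always returns ERR_PTR(-ENODEV) and pwm_apply_state() reports -ENOTSUPP, so Linux code probing for a PWM (e.g. a backlight) fails over cleanly. For reference, a hedged sketch of the consumer pattern the header satisfies; dev is a hypothetical struct device pointer:

        /* Hypothetical illustration only. */
        struct pwm_device *pwm;
        struct pwm_state state;

        pwm = pwm_get(dev, NULL);               /* always ERR_PTR(-ENODEV) here */
        if (IS_ERR(pwm))
                return (PTR_ERR(pwm));
        pwm_get_state(pwm, &state);
        state.enabled = true;
        (void)pwm_apply_state(pwm, &state);     /* -ENOTSUPP in this shim */
        pwm_put(pwm);
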
diff --git a/sys/compat/linuxkpi/common/include/linux/qrtr.h b/sys/compat/linuxkpi/common/include/linux/qrtr.h
new file mode 100644
index 000000000000..1d2af0efdce2
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/qrtr.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_QRTR_H
+#define _LINUXKPI_LINUX_QRTR_H
+
+/* Qualcomm IPC Router (QRTR) */
+
+#include <sys/socket.h>
+
+struct sockaddr_qrtr {
+ sa_family_t sq_family;
+ uint32_t sq_node;
+ uint32_t sq_port;
+};
+
+#endif /* _LINUXKPI_LINUX_QRTR_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/radix-tree.h b/sys/compat/linuxkpi/common/include/linux/radix-tree.h
new file mode 100644
index 000000000000..ea75836c26fb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/radix-tree.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_RADIX_TREE_H_
+#define _LINUXKPI_LINUX_RADIX_TREE_H_
+
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#define RADIX_TREE_MAP_SHIFT 6
+#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
+#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE - 1UL)
+#define RADIX_TREE_MAX_HEIGHT \
+ howmany(sizeof(long) * NBBY, RADIX_TREE_MAP_SHIFT)
+
+#define RADIX_TREE_ENTRY_MASK 3UL
+#define RADIX_TREE_EXCEPTIONAL_ENTRY 2UL
+#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
+
+struct radix_tree_node {
+ void *slots[RADIX_TREE_MAP_SIZE];
+ int count;
+};
+
+struct radix_tree_root {
+ struct radix_tree_node *rnode;
+ gfp_t gfp_mask;
+ int height;
+};
+
+struct radix_tree_iter {
+ unsigned long index;
+};
+
+#define RADIX_TREE_INIT(mask) \
+ { .rnode = NULL, .gfp_mask = mask, .height = 0 };
+#define INIT_RADIX_TREE(root, mask) \
+ { (root)->rnode = NULL; (root)->gfp_mask = mask; (root)->height = 0; }
+#define RADIX_TREE(name, mask) \
+ struct radix_tree_root name = RADIX_TREE_INIT(mask)
+
+#define radix_tree_for_each_slot(slot, root, iter, start) \
+ for ((iter)->index = (start); \
+ radix_tree_iter_find(root, iter, &(slot)); (iter)->index++)
+
+static inline int
+radix_tree_exception(void *arg)
+{
+ return ((uintptr_t)arg & RADIX_TREE_ENTRY_MASK);
+}
+
+void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
+void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+int radix_tree_store(struct radix_tree_root *, unsigned long, void **);
+bool radix_tree_iter_find(struct radix_tree_root *, struct radix_tree_iter *, void ***);
+void radix_tree_iter_delete(struct radix_tree_root *, struct radix_tree_iter *, void **);
+
+#endif /* _LINUXKPI_LINUX_RADIX_TREE_H_ */
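
A hedged sketch of the insert/lookup/iterate pattern; the tree name, item pointer and consumer are hypothetical (GFP_KERNEL comes from <linux/gfp.h>):

        /* Hypothetical illustration only. */
        RADIX_TREE(idmap, GFP_KERNEL);          /* hypothetical tree */
        struct radix_tree_iter iter;
        void **slot;
        int error;

        error = radix_tree_insert(&idmap, 10, item);
        if (error == 0) {
                item = radix_tree_lookup(&idmap, 10);
                radix_tree_for_each_slot(slot, &idmap, &iter, 0)
                        handle_item(*slot);     /* hypothetical consumer */
                radix_tree_delete(&idmap, 10);
        }
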
diff --git a/sys/compat/linuxkpi/common/include/linux/random.h b/sys/compat/linuxkpi/common/include/linux/random.h
new file mode 100644
index 000000000000..893ee2b7b728
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/random.h
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright 2023 The FreeBSD Foundation
+ *
+ * Portions of this software was developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_RANDOM_H_
+#define _LINUXKPI_LINUX_RANDOM_H_
+
+#include <linux/types.h>
+#include <sys/random.h>
+#include <sys/libkern.h>
+
+static inline void
+get_random_bytes(void *buf, int nbytes)
+{
+
+ arc4random_buf(buf, nbytes);
+}
+
+static inline u_int
+get_random_int(void)
+{
+ u_int val;
+
+ get_random_bytes(&val, sizeof(val));
+ return (val);
+}
+
+static inline uint8_t
+get_random_u8(void)
+{
+ uint8_t val;
+
+ get_random_bytes(&val, sizeof(val));
+ return (val);
+}
+
+#define get_random_u32() get_random_int()
+
+/*
+ * See "Fast Random Integer Generation in an Interval" by Daniel Lemire
+ * [https://arxiv.org/pdf/1805.10941.pdf] for implementation insights.
+ */
+static inline uint32_t
+get_random_u32_inclusive(uint32_t floor, uint32_t ceil)
+{
+ uint64_t x;
+ uint32_t t, v;
+
+ MPASS(ceil >= floor);
+
+ v = get_random_u32();
+ t = ceil - floor + 1;
+ x = (uint64_t)t * v;
+ while (x < t)
+ x = (uint64_t)t * get_random_u32();
+ v = x >> 32;
+
+ return (floor + v);
+}
+
+static inline u_long
+get_random_long(void)
+{
+ u_long val;
+
+ get_random_bytes(&val, sizeof(val));
+ return (val);
+}
+
+static inline uint64_t
+get_random_u64(void)
+{
+ uint64_t val;
+
+ get_random_bytes(&val, sizeof(val));
+ return (val);
+}
+
+static inline uint32_t
+get_random_u32_below(uint32_t max)
+{
+ return (arc4random_uniform(max));
+}
+
+static inline uint32_t
+prandom_u32(void)
+{
+ uint32_t val;
+
+ get_random_bytes(&val, sizeof(val));
+ return (val);
+}
+
+static inline u32
+prandom_u32_max(u32 max)
+{
+ return (arc4random_uniform(max));
+}
+
+#endif /* _LINUXKPI_LINUX_RANDOM_H_ */
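
Everything here is backed by arc4random(9). A hedged sketch of the bounded helpers:

        /* Hypothetical illustration only. */
        uint32_t slot, jitter_ms;

        slot = get_random_u32_below(8);                 /* 0..7 */
        jitter_ms = get_random_u32_inclusive(10, 50);   /* 10..50, inclusive */
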
diff --git a/sys/compat/linuxkpi/common/include/linux/ratelimit.h b/sys/compat/linuxkpi/common/include/linux/ratelimit.h
new file mode 100644
index 000000000000..9585b4b994d7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ratelimit.h
@@ -0,0 +1,17 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_RATELIMIT_H
+#define _LINUXKPI_LINUX_RATELIMIT_H
+
+struct ratelimit_state {
+};
+
+#define DEFINE_RATELIMIT_STATE(name, interval, burst) \
+ int name __used = 1;
+
+#define __ratelimit(x) (1)
+
+#define ratelimit_state_init(x, y, z)
+#define ratelimit_set_flags(x, y)
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/rbtree.h b/sys/compat/linuxkpi/common/include/linux/rbtree.h
new file mode 100644
index 000000000000..e6033cfd760d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/rbtree.h
@@ -0,0 +1,206 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_RBTREE_H_
+#define _LINUXKPI_LINUX_RBTREE_H_
+
+#ifndef _STANDALONE
+#include <sys/stddef.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/tree.h>
+
+struct rb_node {
+ RB_ENTRY(rb_node) __entry;
+};
+#define rb_left __entry.rbe_link[_RB_L]
+#define rb_right __entry.rbe_link[_RB_R]
+
+/*
+ * We provide a shadow structure with the same bit pattern as the one tree.h
+ * presents, so that the member names match what Linux code expects.
+ */
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+/*
+ * In Linux, all of the comparisons are done by the caller.
+ */
+int panic_cmp(struct rb_node *one, struct rb_node *two);
+
+RB_HEAD(linux_root, rb_node);
+RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);
+
+#define rb_parent(r) RB_PARENT(r, __entry)
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+#define rb_entry_safe(ptr, type, member) \
+ ((ptr) != NULL ? rb_entry(ptr, type, member) : NULL)
+
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_NODE(node) (RB_PARENT(node, __entry) == node)
+#define RB_CLEAR_NODE(node) RB_SET_PARENT(node, node, __entry)
+
+#define rb_insert_color(node, root) do { \
+ if (rb_parent(node)) \
+ linux_root_RB_INSERT_COLOR((struct linux_root *)(root), \
+ rb_parent(node), (node)); \
+} while (0)
+#define rb_erase(node, root) \
+ linux_root_RB_REMOVE((struct linux_root *)(root), (node))
+#define rb_next(node) RB_NEXT(linux_root, NULL, (node))
+#define rb_prev(node) RB_PREV(linux_root, NULL, (node))
+#define rb_first(root) RB_MIN(linux_root, (struct linux_root *)(root))
+#define rb_last(root) RB_MAX(linux_root, (struct linux_root *)(root))
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline struct rb_node *
+__rb_deepest_left(struct rb_node *node)
+{
+ struct rb_node *parent = NULL;
+ while (node != NULL) {
+ parent = node;
+ if (RB_LEFT(node, __entry))
+ node = RB_LEFT(node, __entry);
+ else
+ node = RB_RIGHT(node, __entry);
+ }
+ return (parent);
+}
+
+static inline struct rb_node *
+rb_next_postorder(const struct rb_node *node)
+{
+ struct rb_node *parent =
+ RB_PARENT(__DECONST(struct rb_node *, node), __entry);
+ /* left -> right, right -> root */
+ if (parent != NULL &&
+ (node == RB_LEFT(parent, __entry)) &&
+ (RB_RIGHT(parent, __entry)))
+ return (__rb_deepest_left(RB_RIGHT(parent, __entry)));
+ else
+ return (parent);
+}
+
+#define rbtree_postorder_for_each_entry_safe(x, y, head, member) \
+ for ((x) = rb_entry_safe(__rb_deepest_left((head)->rb_node), \
+ __typeof(*x), member); \
+ ((x) != NULL) && ((y) = \
+ rb_entry_safe(rb_next_postorder(&x->member), typeof(*x), member), 1); \
+ (x) = (y))
+
+static inline void
+rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ RB_SET(node, parent, __entry);
+ *rb_link = node;
+}
+
+static inline void
+rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root)
+{
+
+ RB_SWAP_CHILD((struct linux_root *)root, rb_parent(victim),
+ victim, new, __entry);
+ if (RB_LEFT(victim, __entry))
+ RB_SET_PARENT(RB_LEFT(victim, __entry), new, __entry);
+ if (RB_RIGHT(victim, __entry))
+ RB_SET_PARENT(RB_RIGHT(victim, __entry), new, __entry);
+ *new = *victim;
+}
+
+static inline void
+rb_insert_color_cached(struct rb_node *node, struct rb_root_cached *root,
+ bool leftmost)
+{
+ if (rb_parent(node))
+ linux_root_RB_INSERT_COLOR((struct linux_root *)&root->rb_root,
+ rb_parent(node), node);
+ if (leftmost)
+ root->rb_leftmost = node;
+}
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+ struct rb_node *retval;
+
+ if (node == root->rb_leftmost)
+ retval = root->rb_leftmost = linux_root_RB_NEXT(node);
+ else
+ retval = NULL;
+ linux_root_RB_REMOVE((struct linux_root *)&root->rb_root, node);
+ return (retval);
+}
+
+static inline void
+rb_replace_node_cached(struct rb_node *old, struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ rb_replace_node(old, new, &root->rb_root);
+ if (root->rb_leftmost == old)
+ root->rb_leftmost = new;
+}
+
+static inline struct rb_node *
+rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link != NULL) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &RB_LEFT(parent, __entry);
+ } else {
+ link = &RB_RIGHT(parent, __entry);
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+
+ return (leftmost ? node : NULL);
+}
+
+#undef RB_ROOT
+#define RB_ROOT (struct rb_root) { NULL }
+#define RB_ROOT_CACHED (struct rb_root_cached) { RB_ROOT, NULL }
+
+#endif /* _LINUXKPI_LINUX_RBTREE_H_ */
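
Because the comparisons are supplied by the caller (see the note above), a cached-tree insert typically looks like the hedged sketch below; the node type, key field and function names are hypothetical:

        /* Hypothetical illustration only. */
        struct mynode {
                struct rb_node  entry;
                uint64_t        key;
        };

        static bool
        mynode_less(struct rb_node *a, const struct rb_node *b)
        {
                return (rb_entry(a, struct mynode, entry)->key <
                    rb_entry(b, struct mynode, entry)->key);
        }

        static void
        mynode_insert(struct rb_root_cached *tree, struct mynode *n)
        {
                rb_add_cached(&n->entry, tree, mynode_less);
        }
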
diff --git a/sys/compat/linuxkpi/common/include/linux/rculist.h b/sys/compat/linuxkpi/common/include/linux/rculist.h
new file mode 100644
index 000000000000..066ed92b7996
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/rculist.h
@@ -0,0 +1,147 @@
+/*-
+ * Copyright (c) 2015 François Tigeot
+ * Copyright (c) 2016-2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_RCULIST_H_
+#define _LINUXKPI_LINUX_RCULIST_H_
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+
+#define list_entry_rcu(ptr, type, member) \
+ container_of(READ_ONCE(ptr), type, member)
+
+#define list_next_rcu(head) (*((struct list_head **)(&(head)->next)))
+#define list_prev_rcu(head) (*((struct list_head **)(&(head)->prev)))
+
+#define list_for_each_entry_rcu(pos, head, member) \
+ for (pos = list_entry_rcu((head)->next, typeof(*(pos)), member); \
+ &(pos)->member != (head); \
+ pos = list_entry_rcu((pos)->member.next, typeof(*(pos)), member))
+
+#define list_for_each_entry_from_rcu(pos, head, member) \
+ for (; \
+ &(pos)->member != (head); \
+ pos = list_entry_rcu((pos)->member.next, typeof(*(pos)), member))
+
+#define list_for_each_entry_lockless(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member)
+
+static inline void
+linux_list_add_rcu(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
+{
+ new->next = next;
+ new->prev = prev;
+ rcu_assign_pointer(list_next_rcu(prev), new);
+ next->prev = new;
+}
+
+static inline void
+list_add_rcu(struct list_head *new, struct list_head *head)
+{
+ linux_list_add_rcu(new, head, head->next);
+}
+
+static inline void
+list_add_tail_rcu(struct list_head *new, struct list_head *head)
+{
+ linux_list_add_rcu(new, head->prev, head);
+}
+
+static inline void
+__list_del_rcu(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ rcu_assign_pointer(list_next_rcu(prev), next);
+}
+
+static inline void
+__list_del_entry_rcu(struct list_head *entry)
+{
+ __list_del_rcu(entry->prev, entry->next);
+}
+
+static inline void
+list_del_rcu(struct list_head *entry)
+{
+ __list_del_rcu(entry->prev, entry->next);
+}
+
+#define hlist_first_rcu(head) (*((struct hlist_node **)(&(head)->first)))
+#define hlist_next_rcu(node) (*((struct hlist_node **)(&(node)->next)))
+#define hlist_pprev_rcu(node) (*((struct hlist_node **)((node)->pprev)))
+
+static inline void
+hlist_add_behind_rcu(struct hlist_node *n, struct hlist_node *prev)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ rcu_assign_pointer(hlist_next_rcu(prev), n);
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+#define hlist_for_each_entry_rcu(pos, head, member) \
+ for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ (pos); \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
+
+static inline void
+hlist_del_rcu(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+
+ WRITE_ONCE(*pprev, next);
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void
+hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ rcu_assign_pointer(hlist_first_rcu(h), n);
+ if (first)
+ first->pprev = &n->next;
+}
+
+static inline void
+hlist_del_init_rcu(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ hlist_del_rcu(n);
+ n->pprev = NULL;
+ }
+}
+
+#endif /* _LINUXKPI_LINUX_RCULIST_H_ */
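
A hedged sketch of the usual writer/reader split these helpers support; the list head, entry type, external writer lock and consumer are all hypothetical:

        /* Hypothetical illustration only. */
        struct myentry {
                struct list_head        link;
                int                     value;
        };
        struct list_head mylist;        /* INIT_LIST_HEAD() at attach time */
        struct myentry *e;              /* allocated elsewhere */

        /* Writer side, serialized by an external lock (elided). */
        list_add_rcu(&e->link, &mylist);

        /* Reader side. */
        rcu_read_lock();
        list_for_each_entry_rcu(e, &mylist, link)
                consume(e->value);      /* hypothetical consumer */
        rcu_read_unlock();
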
diff --git a/sys/compat/linuxkpi/common/include/linux/rcupdate.h b/sys/compat/linuxkpi/common/include/linux/rcupdate.h
new file mode 100644
index 000000000000..85d766c8dbc9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/rcupdate.h
@@ -0,0 +1,165 @@
+/*-
+ * Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_RCUPDATE_H_
+#define _LINUXKPI_LINUX_RCUPDATE_H_
+
+#include <sys/cdefs.h>
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <machine/atomic.h>
+
+extern int linuxkpi_rcu_debug;
+#define RCU_WARN_ONCE(c, ...) do { \
+ if (unlikely(linuxkpi_rcu_debug > 0)) \
+ WARN_ONCE((c), ##__VA_ARGS__); \
+} while(0)
+
+#define LINUX_KFREE_RCU_OFFSET_MAX 4096 /* exclusive */
+
+/* BSD specific defines */
+#define RCU_TYPE_REGULAR 0
+#define RCU_TYPE_SLEEPABLE 1
+#define RCU_TYPE_MAX 2
+
+#define RCU_INITIALIZER(v) \
+ ((__typeof(*(v)) *)(v))
+
+#define RCU_INIT_POINTER(p, v) do { \
+ (p) = (v); \
+} while (0)
+
+#define call_rcu(ptr, func) do { \
+ linux_call_rcu(RCU_TYPE_REGULAR, ptr, func); \
+} while (0)
+
+#define rcu_barrier(void) do { \
+ linux_rcu_barrier(RCU_TYPE_REGULAR); \
+} while (0)
+
+#define rcu_read_lock(void) do { \
+ linux_rcu_read_lock(RCU_TYPE_REGULAR); \
+} while (0)
+
+#define rcu_read_unlock(void) do { \
+ linux_rcu_read_unlock(RCU_TYPE_REGULAR);\
+} while (0)
+
+#define rcu_read_lock_held(void) \
+ linux_rcu_read_lock_held(RCU_TYPE_REGULAR)
+
+#define synchronize_rcu(void) do { \
+ linux_synchronize_rcu(RCU_TYPE_REGULAR); \
+} while (0)
+
+#define synchronize_rcu_expedited(void) do { \
+ linux_synchronize_rcu(RCU_TYPE_REGULAR); \
+} while (0)
+
+#define kfree_rcu(ptr, rcu_head) do { \
+ CTASSERT(offsetof(__typeof(*(ptr)), rcu_head) < \
+ LINUX_KFREE_RCU_OFFSET_MAX); \
+ call_rcu(&(ptr)->rcu_head, (rcu_callback_t)(uintptr_t) \
+ offsetof(__typeof(*(ptr)), rcu_head)); \
+} while (0)
+
+#define rcu_access_pointer(p) \
+ ((__typeof(*p) *)READ_ONCE(p))
+
+#define rcu_dereference(p) \
+ ((__typeof(*p) *)READ_ONCE(p))
+
+#define __rcu_var_name(n, f, l) \
+ __CONCAT(__CONCAT(__CONCAT(rcu_, n), _), __COUNTER__)
+
+#define __rcu_dereference_protected(p, c, n) \
+({ \
+ RCU_WARN_ONCE(!(c), "%s:%d: condition for %s failed\n", \
+ __func__, __LINE__, __XSTRING(n)); \
+ rcu_dereference(p); \
+})
+
+#define rcu_dereference_protected(p, c) \
+ __rcu_dereference_protected((p), (c), \
+ __rcu_var_name(protected, __func__, __LINE__))
+
+#define __rcu_dereference_check(p, c, n) \
+({ \
+ __typeof(*p) *n = rcu_dereference(p); \
+ RCU_WARN_ONCE(!(c), "%s:%d: condition for %s failed\n", \
+ __func__, __LINE__, __XSTRING(n)); \
+ n; \
+})
+
+#define rcu_dereference_check(p, c) \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_held(), \
+ __rcu_var_name(check, __func__, __LINE__))
+
+#define rcu_dereference_raw(p) \
+ ((__typeof(*p) *)READ_ONCE(p))
+
+#define rcu_pointer_handoff(p) (p)
+
+#define rcu_assign_pointer(p, v) do { \
+ atomic_store_rel_ptr((volatile uintptr_t *)&(p), \
+ (uintptr_t)(v)); \
+} while (0)
+
+#define rcu_replace_pointer(rcu, ptr, c) \
+({ \
+ typeof(ptr) __tmp = rcu_dereference_protected(rcu, c); \
+ rcu_assign_pointer(rcu, ptr); \
+ __tmp; \
+})
+
+#define rcu_swap_protected(rcu, ptr, c) do { \
+ typeof(ptr) p = rcu_dereference_protected(rcu, c); \
+ rcu_assign_pointer(rcu, ptr); \
+ (ptr) = p; \
+} while (0)
+
+/* prototypes */
+
+void linux_call_rcu(unsigned type, struct rcu_head *ptr, rcu_callback_t func);
+void linux_rcu_barrier(unsigned type);
+void linux_rcu_read_lock(unsigned type);
+void linux_rcu_read_unlock(unsigned type);
+bool linux_rcu_read_lock_held(unsigned);
+void linux_synchronize_rcu(unsigned type);
+
+/* Empty implementation for !DEBUG */
+#define init_rcu_head(...)
+#define destroy_rcu_head(...)
+#define init_rcu_head_on_stack(...)
+#define destroy_rcu_head_on_stack(...)
+
+#endif /* _LINUXKPI_LINUX_RCUPDATE_H_ */
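
A hedged sketch of the publish/retire pattern built from these macros; the object type, global pointer and (elided) update lock are hypothetical:

        /* Hypothetical illustration only. */
        struct cfg {
                int             value;
                struct rcu_head rcu;
        };
        struct cfg __rcu *active_cfg;           /* hypothetical global */
        struct cfg *oldcfg, *newcfg, *c;

        /* Writer, with the (elided) update lock held. */
        oldcfg = rcu_replace_pointer(active_cfg, newcfg, 1 /* lock held */);
        if (oldcfg != NULL)
                kfree_rcu(oldcfg, rcu);

        /* Reader. */
        rcu_read_lock();
        c = rcu_dereference(active_cfg);
        if (c != NULL)
                use_value(c->value);            /* hypothetical consumer */
        rcu_read_unlock();
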
diff --git a/sys/compat/linuxkpi/common/include/linux/reboot.h b/sys/compat/linuxkpi/common/include/linux/reboot.h
new file mode 100644
index 000000000000..eb696d7b9d2e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/reboot.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_REBOOT_H_
+#define _LINUXKPI_LINUX_REBOOT_H_
+
+#include <sys/reboot.h>
+
+static inline void
+orderly_poweroff(bool force)
+{
+
+ shutdown_nice(RB_POWEROFF);
+}
+
+#endif
+
diff --git a/sys/compat/linuxkpi/common/include/linux/ref_tracker.h b/sys/compat/linuxkpi/common/include/linux/ref_tracker.h
new file mode 100644
index 000000000000..fa510b2498e1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ref_tracker.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_REF_TRACKER_H_
+#define _LINUXKPI_LINUX_REF_TRACKER_H_
+
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
+
+struct ref_tracker;
+
+struct ref_tracker_dir {
+};
+
+/*
+ * The following functions currently have dummy implementations that, on Linux,
+ * are used when CONFIG_REF_TRACKER is not set at compile time.
+ *
+ * The ref tracker is a tool to associate a refcount increase with its
+ * corresponding decrease. This helps developers track, document and debug
+ * refcounts. We don't need this feature in linuxkpi for now.
+ */
+
+static inline void
+ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count, const char *name)
+{
+}
+
+static inline void
+ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+}
+
+static inline void
+ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline void
+ref_tracker_dir_print(struct ref_tracker_dir *dir, unsigned int display_limit)
+{
+}
+
+static inline int
+ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
+{
+ return (0);
+}
+
+static inline int
+ref_tracker_alloc(struct ref_tracker_dir *dir, struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ return (0);
+}
+
+static inline int
+ref_tracker_free(struct ref_tracker_dir *dir, struct ref_tracker **trackerp)
+{
+ return (0);
+}
+
+#endif /* !defined(_LINUXKPI_LINUX_REF_TRACKER_H_) */
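
Even with these no-op bodies, the intended pairing looks like the hedged sketch below (directory name and quarantine count are hypothetical; GFP_KERNEL comes from <linux/gfp.h>):

        /* Hypothetical illustration only; every call is a no-op in this shim. */
        struct ref_tracker_dir dir;
        struct ref_tracker *tracker;

        ref_tracker_dir_init(&dir, 16, "mydir");
        ref_tracker_alloc(&dir, &tracker, GFP_KERNEL);  /* paired with a refcount get */
        ref_tracker_free(&dir, &tracker);               /* paired with the refcount put */
        ref_tracker_dir_exit(&dir);
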
diff --git a/sys/compat/linuxkpi/common/include/linux/refcount.h b/sys/compat/linuxkpi/common/include/linux/refcount.h
new file mode 100644
index 000000000000..02a7eda3f4a9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/refcount.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_REFCOUNT_H
+#define _LINUXKPI_LINUX_REFCOUNT_H
+
+#include <linux/atomic.h>
+
+typedef atomic_t refcount_t;
+
+static inline void
+refcount_set(refcount_t *ref, unsigned int i)
+{
+ atomic_set(ref, i);
+}
+
+static inline void
+refcount_inc(refcount_t *ref)
+{
+ atomic_inc(ref);
+}
+
+static inline bool
+refcount_inc_not_zero(refcount_t *ref)
+{
+ return (atomic_inc_not_zero(ref));
+}
+
+static inline void
+refcount_dec(refcount_t *ref)
+{
+ atomic_dec(ref);
+}
+
+static inline unsigned int
+refcount_read(refcount_t *ref)
+{
+ return atomic_read(ref);
+}
+
+static inline bool
+refcount_dec_and_lock_irqsave(refcount_t *ref, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (atomic_dec_and_test(ref) == true) {
+ spin_lock_irqsave(lock, flags);
+ return (true);
+ }
+ return (false);
+}
+
+static inline bool
+refcount_dec_and_test(refcount_t *r)
+{
+
+ return (atomic_dec_and_test(r));
+}
+
+#endif /* _LINUXKPI_LINUX_REFCOUNT_H */
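
A hedged sketch of the lookup-with-reference pattern these wrappers support; the object type and destructor are hypothetical:

        /* Hypothetical illustration only. */
        struct obj {
                refcount_t      refs;
        };
        struct obj *o;                          /* allocated elsewhere */

        refcount_set(&o->refs, 1);              /* initial reference at creation */

        if (refcount_inc_not_zero(&o->refs)) {  /* lookup path: skip dying objects */
                /* ... use o ... */
                if (refcount_dec_and_test(&o->refs))
                        obj_destroy(o);         /* hypothetical destructor */
        }
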
diff --git a/sys/compat/linuxkpi/common/include/linux/rhashtable.h b/sys/compat/linuxkpi/common/include/linux/rhashtable.h
new file mode 100644
index 000000000000..c6958b6ee5f3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/rhashtable.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2023 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_RHASHTABLE_H
+#define _LINUXKPI_LINUX_RHASHTABLE_H
+
+#include <linux/kernel.h> /* pr_debug */
+
+struct rhash_head {
+};
+
+struct rhashtable_params {
+ uint16_t head_offset;
+ uint16_t key_len;
+ uint16_t key_offset;
+ uint16_t nelem_hint;
+ bool automatic_shrinking;
+};
+
+struct rhashtable {
+};
+
+static inline int
+rhashtable_init(struct rhashtable *rht,
+ const struct rhashtable_params *params)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (-1);
+}
+
+static inline void
+rhashtable_destroy(struct rhashtable *rht)
+{
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static inline void *
+rhashtable_lookup_fast(struct rhashtable *rht, const void *key,
+ const struct rhashtable_params params)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static inline void *
+rhashtable_lookup_get_insert_fast(struct rhashtable *rht,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static inline int
+rhashtable_remove_fast(struct rhashtable *rht,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (-ENOENT);
+}
+
+#endif /* _LINUXKPI_LINUX_RHASHTABLE_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/rwlock.h b/sys/compat/linuxkpi/common/include/linux/rwlock.h
new file mode 100644
index 000000000000..3030ec89ff1e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/rwlock.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_RWLOCK_H_
+#define _LINUXKPI_LINUX_RWLOCK_H_
+
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+#include <sys/libkern.h>
+
+typedef struct rwlock rwlock_t;
+
+#define read_lock(_l) rw_rlock(_l)
+#define write_lock(_l) rw_wlock(_l)
+#define read_unlock(_l) rw_runlock(_l)
+#define write_unlock(_l) rw_wunlock(_l)
+#define read_lock_irq(lock) read_lock((lock))
+#define read_unlock_irq(lock) read_unlock((lock))
+#define write_lock_irq(lock) write_lock((lock))
+#define write_unlock_irq(lock) write_unlock((lock))
+#define read_lock_irqsave(lock, flags) \
+ do {(flags) = 0; read_lock(lock); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do {(flags) = 0; write_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { read_unlock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); } while (0)
+#define rwlock_init(_l) rw_init_flags(_l, "lnxrw", RW_NOWITNESS | RW_NEW)
+
+#endif /* _LINUXKPI_LINUX_RWLOCK_H_ */
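
The *_irqsave variants above simply zero the flags and take the plain lock. A hedged sketch, with hypothetical lock and data:

        /* Hypothetical illustration only. */
        rwlock_t stats_lock;                    /* rwlock_init() at attach time */
        unsigned long flags;
        uint64_t snapshot;

        read_lock_irqsave(&stats_lock, flags);
        snapshot = stats_value;                 /* hypothetical shared data */
        read_unlock_irqrestore(&stats_lock, flags);

        write_lock(&stats_lock);
        stats_value++;
        write_unlock(&stats_lock);
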
diff --git a/sys/compat/linuxkpi/common/include/linux/rwsem.h b/sys/compat/linuxkpi/common/include/linux/rwsem.h
new file mode 100644
index 000000000000..b7a800b12e18
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/rwsem.h
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_RWSEM_H_
+#define _LINUXKPI_LINUX_RWSEM_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/sx.h>
+#include <sys/libkern.h>
+#include <sys/kernel.h>
+
+struct rw_semaphore {
+ struct sx sx;
+};
+
+#define down_write(_rw) sx_xlock(&(_rw)->sx)
+#define up_write(_rw) sx_xunlock(&(_rw)->sx)
+#define down_read(_rw) sx_slock(&(_rw)->sx)
+#define up_read(_rw) sx_sunlock(&(_rw)->sx)
+#define down_read_trylock(_rw) !!sx_try_slock(&(_rw)->sx)
+#define down_read_killable(_rw) linux_down_read_killable(_rw)
+#define down_write_trylock(_rw) !!sx_try_xlock(&(_rw)->sx)
+#define down_write_killable(_rw) linux_down_write_killable(_rw)
+#define downgrade_write(_rw) sx_downgrade(&(_rw)->sx)
+#define down_read_nested(_rw, _sc) down_read(_rw)
+#define init_rwsem(_rw) linux_init_rwsem(_rw, rwsem_name("lnxrwsem"))
+#define down_write_nest_lock(sem, _rw) down_write(_rw)
+
+#ifdef WITNESS_ALL
+/* NOTE: the maximum WITNESS name is 64 chars */
+#define __rwsem_name(name, file, line) \
+ (((const char *){file ":" #line "-" name}) + \
+ (sizeof(file) > 16 ? sizeof(file) - 16 : 0))
+#else
+#define __rwsem_name(name, file, line) name
+#endif
+#define _rwsem_name(...) __rwsem_name(__VA_ARGS__)
+#define rwsem_name(name) _rwsem_name(name, __FILE__, __LINE__)
+
+#define DECLARE_RWSEM(name) \
+struct rw_semaphore name; \
+static void name##_rwsem_init(void *arg) \
+{ \
+ linux_init_rwsem(&name, rwsem_name(#name)); \
+} \
+SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_rwsem_init, NULL)
+
+static inline void
+linux_init_rwsem(struct rw_semaphore *rw, const char *name)
+{
+
+ memset(rw, 0, sizeof(*rw));
+ sx_init_flags(&rw->sx, name, SX_NOWITNESS);
+}
+
+extern int linux_down_read_killable(struct rw_semaphore *);
+extern int linux_down_write_killable(struct rw_semaphore *);
+
+#endif /* _LINUXKPI_LINUX_RWSEM_H_ */
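
A hedged sketch of the reader/writer pairing and the static declaration helper; the semaphore name and the table operations are hypothetical:

        /* Hypothetical illustration only. */
        DECLARE_RWSEM(table_rwsem);             /* initialized via SYSINIT */

        down_read(&table_rwsem);
        lookup_entries();                       /* hypothetical concurrent readers */
        up_read(&table_rwsem);

        down_write(&table_rwsem);
        insert_entry();                         /* hypothetical exclusive writer */
        up_write(&table_rwsem);
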
diff --git a/sys/compat/linuxkpi/common/include/linux/scatterlist.h b/sys/compat/linuxkpi/common/include/linux/scatterlist.h
new file mode 100644
index 000000000000..537f5bebc5aa
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/scatterlist.h
@@ -0,0 +1,684 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
+ * Copyright (c) 2016 Matthew Macy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SCATTERLIST_H_
+#define _LINUXKPI_LINUX_SCATTERLIST_H_
+
+#include <sys/types.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
+
+#include <linux/err.h>
+#include <linux/page.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+struct bus_dmamap;
+struct scatterlist {
+ unsigned long page_link;
+#define SG_PAGE_LINK_CHAIN 0x1UL
+#define SG_PAGE_LINK_LAST 0x2UL
+#define SG_PAGE_LINK_MASK 0x3UL
+ unsigned int offset;
+ unsigned int length;
+ dma_addr_t dma_address;
+ struct bus_dmamap *dma_map; /* FreeBSD specific */
+};
+
+CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
+
+struct sg_table {
+ struct scatterlist *sgl;
+ unsigned int nents;
+ unsigned int orig_nents;
+};
+
+struct sg_page_iter {
+ struct scatterlist *sg;
+ unsigned int sg_pgoffset;
+ unsigned int maxents;
+ struct {
+ unsigned int nents;
+ int pg_advance;
+ } internal;
+};
+
+struct sg_dma_page_iter {
+ struct sg_page_iter base;
+};
+
+#define SCATTERLIST_MAX_SEGMENT (-1U & ~(PAGE_SIZE - 1))
+
+#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
+
+#define SG_MAGIC 0x87654321UL
+#define SG_CHAIN SG_PAGE_LINK_CHAIN
+#define SG_END SG_PAGE_LINK_LAST
+
+#define sg_is_chain(sg) ((sg)->page_link & SG_PAGE_LINK_CHAIN)
+#define sg_is_last(sg) ((sg)->page_link & SG_PAGE_LINK_LAST)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))
+
+#define sg_dma_address(sg) (sg)->dma_address
+#define sg_dma_len(sg) (sg)->length
+
+#define for_each_sg_page(sgl, iter, nents, pgoffset) \
+ for (_sg_iter_init(sgl, iter, nents, pgoffset); \
+ (iter)->sg; _sg_iter_next(iter))
+#define for_each_sg_dma_page(sgl, iter, nents, pgoffset) \
+ for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)
+
+#define for_each_sg(sglist, sg, sgmax, iter) \
+ for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))
+
+#define for_each_sgtable_sg(sgt, sg, i) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
+
+#define for_each_sgtable_page(sgt, iter, pgoffset) \
+ for_each_sg_page((sgt)->sgl, iter, (sgt)->orig_nents, pgoffset)
+
+#define for_each_sgtable_dma_sg(sgt, sg, iter) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->nents, iter)
+
+#define for_each_sgtable_dma_page(sgt, iter, pgoffset) \
+ for_each_sg_dma_page((sgt)->sgl, iter, (sgt)->nents, pgoffset)
+
+typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
+typedef void (sg_free_fn) (struct scatterlist *, unsigned int);
+
+static inline void
+sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+ unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;
+
+ sg->page_link = page_link | (unsigned long)page;
+}
+
+static inline void
+sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
+ unsigned int offset)
+{
+ sg_assign_page(sg, page);
+ sg->offset = offset;
+ sg->length = len;
+}
+
+static inline struct page *
+sg_page(struct scatterlist *sg)
+{
+ return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
+}
+
+static inline void
+sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
+{
+ sg_set_page(sg, virt_to_page(buf), buflen,
+ ((uintptr_t)buf) & (PAGE_SIZE - 1));
+}
+
+static inline struct scatterlist *
+sg_next(struct scatterlist *sg)
+{
+ if (sg_is_last(sg))
+ return (NULL);
+ sg++;
+ if (sg_is_chain(sg))
+ sg = sg_chain_ptr(sg);
+ return (sg);
+}
+
+static inline vm_paddr_t
+sg_phys(struct scatterlist *sg)
+{
+ return (page_to_phys(sg_page(sg)) + sg->offset);
+}
+
+static inline void *
+sg_virt(struct scatterlist *sg)
+{
+
+ return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
+}
+
+static inline void
+sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+ struct scatterlist *sgl)
+{
+ struct scatterlist *sg = &prv[prv_nents - 1];
+
+ sg->offset = 0;
+ sg->length = 0;
+ sg->page_link = ((unsigned long)sgl |
+ SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
+}
+
+static inline void
+sg_mark_end(struct scatterlist *sg)
+{
+ sg->page_link |= SG_PAGE_LINK_LAST;
+ sg->page_link &= ~SG_PAGE_LINK_CHAIN;
+}
+
+static inline void
+sg_init_table(struct scatterlist *sg, unsigned int nents)
+{
+ bzero(sg, sizeof(*sg) * nents);
+ sg_mark_end(&sg[nents - 1]);
+}
+
+static inline void
+sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, buflen);
+}
+
+static inline struct scatterlist *
+sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC) {
+ return ((void *)__get_free_page(gfp_mask));
+ } else
+ return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
+}
+
+static inline void
+sg_kfree(struct scatterlist *sg, unsigned int nents)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC) {
+ free_page((unsigned long)sg);
+ } else
+ kfree(sg);
+}
+
+static inline void
+__sg_free_table(struct sg_table *table, unsigned int max_ents,
+ bool skip_first_chunk, sg_free_fn * free_fn)
+{
+ struct scatterlist *sgl, *next;
+
+ if (unlikely(!table->sgl))
+ return;
+
+ sgl = table->sgl;
+ while (table->orig_nents) {
+ unsigned int alloc_size = table->orig_nents;
+ unsigned int sg_size;
+
+ if (alloc_size > max_ents) {
+ next = sg_chain_ptr(&sgl[max_ents - 1]);
+ alloc_size = max_ents;
+ sg_size = alloc_size - 1;
+ } else {
+ sg_size = alloc_size;
+ next = NULL;
+ }
+
+ table->orig_nents -= sg_size;
+ if (skip_first_chunk)
+ skip_first_chunk = 0;
+ else
+ free_fn(sgl, alloc_size);
+ sgl = next;
+ }
+
+ table->sgl = NULL;
+}
+
+static inline void
+sg_free_table(struct sg_table *table)
+{
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
+}
+
+static inline int
+__sg_alloc_table(struct sg_table *table, unsigned int nents,
+ unsigned int max_ents, struct scatterlist *first_chunk,
+ gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+{
+ struct scatterlist *sg, *prv;
+ unsigned int left;
+
+ memset(table, 0, sizeof(*table));
+
+ if (nents == 0)
+ return (-EINVAL);
+ left = nents;
+ prv = NULL;
+ do {
+ unsigned int sg_size;
+ unsigned int alloc_size = left;
+
+ if (alloc_size > max_ents) {
+ alloc_size = max_ents;
+ sg_size = alloc_size - 1;
+ } else
+ sg_size = alloc_size;
+
+ left -= sg_size;
+
+ if (first_chunk) {
+ sg = first_chunk;
+ first_chunk = NULL;
+ } else {
+ sg = alloc_fn(alloc_size, gfp_mask);
+ }
+ if (unlikely(!sg)) {
+ if (prv)
+ table->nents = ++table->orig_nents;
+
+ return (-ENOMEM);
+ }
+ sg_init_table(sg, alloc_size);
+ table->nents = table->orig_nents += sg_size;
+
+ if (prv)
+ sg_chain(prv, max_ents, sg);
+ else
+ table->sgl = sg;
+
+ if (!left)
+ sg_mark_end(&sg[sg_size - 1]);
+
+ prv = sg;
+ } while (left);
+
+ return (0);
+}
+
+static inline int
+sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+ int ret;
+
+ ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+ NULL, gfp_mask, sg_kmalloc);
+ if (unlikely(ret))
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
+
+ return (ret);
+}
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+static inline struct scatterlist *
+__sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int count,
+ unsigned long off, unsigned long size,
+ unsigned int max_segment,
+ struct scatterlist *prv, unsigned int left_pages,
+ gfp_t gfp_mask)
+#else
+static inline int
+__sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int count,
+ unsigned long off, unsigned long size,
+ unsigned int max_segment, gfp_t gfp_mask)
+#endif
+{
+ unsigned int i, segs, cur, len;
+ int rc;
+ struct scatterlist *s, *sg_iter;
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+ if (prv != NULL) {
+ panic(
+ "Support for prv != NULL not implemented in "
+ "__sg_alloc_table_from_pages()");
+ }
+#endif
+
+ if (__predict_false(!max_segment || offset_in_page(max_segment)))
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+ return (ERR_PTR(-EINVAL));
+#else
+ return (-EINVAL);
+#endif
+
+ len = 0;
+ for (segs = i = 1; i < count; ++i) {
+ len += PAGE_SIZE;
+ if (len >= max_segment ||
+ page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+ ++segs;
+ len = 0;
+ }
+ }
+ if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+ return (ERR_PTR(rc));
+#else
+ return (rc);
+#endif
+
+ cur = 0;
+ for_each_sg(sgt->sgl, sg_iter, sgt->orig_nents, i) {
+ unsigned long seg_size;
+ unsigned int j;
+
+ /*
+ * We need to make sure that when we exit this loop "s" has the
+ * last sg in the chain so we can call sg_mark_end() on it.
+ * Only set this inside the loop since sg_iter will be iterated
+ * until it is NULL.
+ */
+ s = sg_iter;
+
+ len = 0;
+ for (j = cur + 1; j < count; ++j) {
+ len += PAGE_SIZE;
+ if (len >= max_segment || page_to_pfn(pages[j]) !=
+ page_to_pfn(pages[j - 1]) + 1)
+ break;
+ }
+
+ seg_size = ((j - cur) << PAGE_SHIFT) - off;
+ sg_set_page(s, pages[cur], MIN(size, seg_size), off);
+ size -= seg_size;
+ off = 0;
+ cur = j;
+ }
+ KASSERT(s != NULL, ("s is NULL after loop in __sg_alloc_table_from_pages()"));
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+ if (left_pages == 0)
+ sg_mark_end(s);
+
+ return (s);
+#else
+ return (0);
+#endif
+}
+
+static inline int
+sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int count,
+ unsigned long off, unsigned long size,
+ gfp_t gfp_mask)
+{
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+ return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count, off, size,
+ SCATTERLIST_MAX_SEGMENT, NULL, 0, gfp_mask)));
+#else
+ return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
+ SCATTERLIST_MAX_SEGMENT, gfp_mask));
+#endif
+}
+
+static inline int
+sg_alloc_table_from_pages_segment(struct sg_table *sgt,
+ struct page **pages, unsigned int count, unsigned int off,
+ unsigned long size, unsigned int max_segment, gfp_t gfp_mask)
+{
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
+ return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count, off, size,
+ max_segment, NULL, 0, gfp_mask)));
+#else
+ return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
+ max_segment, gfp_mask));
+#endif
+}
+
+static inline int
+sg_nents(struct scatterlist *sg)
+{
+ int nents;
+
+ for (nents = 0; sg; sg = sg_next(sg))
+ nents++;
+ return (nents);
+}
+
+static inline void
+__sg_page_iter_start(struct sg_page_iter *piter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgoffset)
+{
+ piter->internal.pg_advance = 0;
+ piter->internal.nents = nents;
+
+ piter->sg = sglist;
+ piter->sg_pgoffset = pgoffset;
+}
+
+static inline void
+_sg_iter_next(struct sg_page_iter *iter)
+{
+ struct scatterlist *sg;
+ unsigned int pgcount;
+
+ sg = iter->sg;
+ pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ++iter->sg_pgoffset;
+ while (iter->sg_pgoffset >= pgcount) {
+ iter->sg_pgoffset -= pgcount;
+ sg = sg_next(sg);
+ --iter->maxents;
+ if (sg == NULL || iter->maxents == 0)
+ break;
+ pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ }
+ iter->sg = sg;
+}
+
+static inline int
+sg_page_count(struct scatterlist *sg)
+{
+ return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
+}
+#define sg_dma_page_count(sg) \
+ sg_page_count(sg)
+
+static inline bool
+__sg_page_iter_next(struct sg_page_iter *piter)
+{
+ unsigned int pgcount;
+
+ if (piter->internal.nents == 0)
+ return (0);
+ if (piter->sg == NULL)
+ return (0);
+
+ piter->sg_pgoffset += piter->internal.pg_advance;
+ piter->internal.pg_advance = 1;
+
+ while (1) {
+ pgcount = sg_page_count(piter->sg);
+ if (likely(piter->sg_pgoffset < pgcount))
+ break;
+ piter->sg_pgoffset -= pgcount;
+ piter->sg = sg_next(piter->sg);
+ if (--piter->internal.nents == 0)
+ return (0);
+ if (piter->sg == NULL)
+ return (0);
+ }
+ return (1);
+}
+#define __sg_page_iter_dma_next(itr) \
+ __sg_page_iter_next(&(itr)->base)
+
+static inline void
+_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
+ unsigned int nents, unsigned long pgoffset)
+{
+ if (nents) {
+ iter->sg = sgl;
+ iter->sg_pgoffset = pgoffset - 1;
+ iter->maxents = nents;
+ _sg_iter_next(iter);
+ } else {
+ iter->sg = NULL;
+ iter->sg_pgoffset = 0;
+ iter->maxents = 0;
+ }
+}
+
+/*
+ * sg_page_iter_dma_address() is implemented as a macro because it
+ * needs to accept two distinct structure types that share the same
+ * layout.  This allows both old and new code to coexist.  The
+ * compile-time assert adds some safety by checking that the structure
+ * sizes match.
+ */
+#define sg_page_iter_dma_address(spi) ({ \
+ struct sg_page_iter *__spi = (void *)(spi); \
+ dma_addr_t __dma_address; \
+ CTASSERT(sizeof(*(spi)) == sizeof(*__spi)); \
+ __dma_address = __spi->sg->dma_address + \
+ (__spi->sg_pgoffset << PAGE_SHIFT); \
+ __dma_address; \
+})
+
+static inline struct page *
+sg_page_iter_page(struct sg_page_iter *piter)
+{
+ return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
+}
+
+static __inline size_t
+sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ const void *buf, size_t buflen, off_t skip)
+{
+ struct sg_page_iter piter;
+ struct page *page;
+ struct sf_buf *sf;
+ size_t len, copied;
+ char *p, *b;
+
+ if (buflen == 0)
+ return (0);
+
+ b = __DECONST(char *, buf);
+ copied = 0;
+ sched_pin();
+ for_each_sg_page(sgl, &piter, nents, 0) {
+
+ /* Skip to the start. */
+ if (piter.sg->length <= skip) {
+ skip -= piter.sg->length;
+ continue;
+ }
+
+ /* See how much to copy. */
+ KASSERT(((piter.sg->length - skip) != 0 && (buflen != 0)),
+ ("%s: sg len %u - skip %ju || buflen %zu is 0\n",
+ __func__, piter.sg->length, (uintmax_t)skip, buflen));
+ len = min(piter.sg->length - skip, buflen);
+
+ page = sg_page_iter_page(&piter);
+ sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
+ if (sf == NULL)
+ break;
+ p = (char *)sf_buf_kva(sf) + piter.sg_pgoffset + skip;
+ memcpy(p, b, len);
+ sf_buf_free(sf);
+
+ /* We copied so nothing more to skip. */
+ skip = 0;
+ copied += len;
+ /* Either we exactly filled the page, or we are done. */
+ buflen -= len;
+ if (buflen == 0)
+ break;
+ b += len;
+ }
+ sched_unpin();
+
+ return (copied);
+}
+
+static inline size_t
+sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ const void *buf, size_t buflen)
+{
+ return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
+}
+
+static inline size_t
+sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, off_t offset)
+{
+ struct sg_page_iter iter;
+ struct scatterlist *sg;
+ struct page *page;
+ struct sf_buf *sf;
+ char *vaddr;
+ size_t total = 0;
+ size_t len;
+
+ if (!PMAP_HAS_DMAP)
+ sched_pin();
+ for_each_sg_page(sgl, &iter, nents, 0) {
+ sg = iter.sg;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ }
+ len = ulmin(buflen, sg->length - offset);
+ if (len == 0)
+ break;
+
+ page = sg_page_iter_page(&iter);
+ if (!PMAP_HAS_DMAP) {
+ sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
+ if (sf == NULL)
+ break;
+ vaddr = (char *)sf_buf_kva(sf);
+ } else
+ vaddr = (char *)PHYS_TO_DMAP(page_to_phys(page));
+ memcpy(buf, vaddr + sg->offset + offset, len);
+ if (!PMAP_HAS_DMAP)
+ sf_buf_free(sf);
+
+ /* start at beginning of next page */
+ offset = 0;
+
+ /* advance buffer */
+ buf = (char *)buf + len;
+ buflen -= len;
+ total += len;
+ }
+ if (!PMAP_HAS_DMAP)
+ sched_unpin();
+ return (total);
+}
+
+static inline void
+sg_set_folio(struct scatterlist *sg, struct folio *folio, size_t len,
+ size_t offset)
+{
+ sg_set_page(sg, &folio->page, len, offset);
+}
+
+#endif /* _LINUXKPI_LINUX_SCATTERLIST_H_ */
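A minimal sketch of allocating and walking an sg_table with the API above; the four-entry table size and the function name are illustrative only.

#include <linux/scatterlist.h>

static int
example_sg_walk(void)
{
	struct sg_table table;
	struct scatterlist *sg;
	int error, i;

	error = sg_alloc_table(&table, 4, GFP_KERNEL);
	if (error != 0)
		return (error);

	for_each_sgtable_sg(&table, sg, i) {
		/* Real code would fill each entry with sg_set_page() here. */
	}

	sg_free_table(&table);
	return (0);
}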
diff --git a/sys/compat/linuxkpi/common/include/linux/sched.h b/sys/compat/linuxkpi/common/include/linux/sched.h
new file mode 100644
index 000000000000..3ad2f8e4ce8b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/sched.h
@@ -0,0 +1,243 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SCHED_H_
+#define _LINUXKPI_LINUX_SCHED_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/rtprio.h>
+#include <sys/sched.h>
+#include <sys/sleepqueue.h>
+#include <sys/time.h>
+
+#include <linux/bitmap.h>
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/hrtimer.h>
+#include <linux/mm_types.h>
+#include <linux/nodemask.h>
+#include <linux/pid.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+
+#include <linux/sched/mm.h>
+
+#include <asm/atomic.h>
+
+#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+
+#define TASK_RUNNING 0x0000
+#define TASK_INTERRUPTIBLE 0x0001
+#define TASK_UNINTERRUPTIBLE 0x0002
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_WAKING 0x0100
+#define TASK_PARKED 0x0200
+
+#define TASK_COMM_LEN (MAXCOMLEN + 1)
+
+struct seq_file;
+
+struct work_struct;
+struct task_struct {
+ struct thread *task_thread;
+ struct mm_struct *mm;
+ linux_task_fn_t *task_fn;
+ void *task_data;
+ int task_ret;
+ atomic_t usage;
+ atomic_t state;
+ atomic_t kthread_flags;
+ pid_t pid; /* BSD thread ID */
+ const char *comm;
+ void *bsd_ioctl_data;
+ unsigned bsd_ioctl_len;
+ struct completion parked;
+ struct completion exited;
+#define TS_RCU_TYPE_MAX 2
+ TAILQ_ENTRY(task_struct) rcu_entry[TS_RCU_TYPE_MAX];
+ int rcu_recurse[TS_RCU_TYPE_MAX];
+ int bsd_interrupt_value;
+ struct work_struct *work; /* current work struct, if set */
+ struct task_struct *group_leader;
+ unsigned rcu_section[TS_RCU_TYPE_MAX];
+ unsigned int fpu_ctx_level;
+};
+
+#define current ({ \
+ struct thread *__td = curthread; \
+ linux_set_current(__td); \
+ ((struct task_struct *)__td->td_lkpi_task); \
+})
+
+#define task_pid_group_leader(task) (task)->task_thread->td_proc->p_pid
+#define task_pid(task) ((task)->pid)
+#define task_pid_nr(task) ((task)->pid)
+#define task_pid_vnr(task) ((task)->pid)
+#define get_pid(x) (x)
+#define put_pid(x) do { } while (0)
+#define current_euid() (curthread->td_ucred->cr_uid)
+#define task_euid(task) ((task)->task_thread->td_ucred->cr_uid)
+
+#define get_task_state(task) atomic_read(&(task)->state)
+#define set_task_state(task, x) atomic_set(&(task)->state, (x))
+#define __set_task_state(task, x) ((task)->state.counter = (x))
+#define set_current_state(x) set_task_state(current, x)
+#define __set_current_state(x) __set_task_state(current, x)
+
+static inline void
+get_task_struct(struct task_struct *task)
+{
+ atomic_inc(&task->usage);
+}
+
+static inline void
+put_task_struct(struct task_struct *task)
+{
+ if (atomic_dec_and_test(&task->usage))
+ linux_free_current(task);
+}
+
+#define cond_resched() do { if (!cold) sched_relinquish(curthread); } while (0)
+
+#define yield() kern_yield(PRI_UNCHANGED)
+#define sched_yield() sched_relinquish(curthread)
+
+#define need_resched() (curthread->td_owepreempt || \
+ td_ast_pending(curthread, TDA_SCHED))
+
+static inline int
+cond_resched_lock(spinlock_t *lock)
+{
+
+ if (need_resched() == 0)
+ return (0);
+ spin_unlock(lock);
+ cond_resched();
+ spin_lock(lock);
+ return (1);
+}
+
+bool linux_signal_pending(struct task_struct *task);
+bool linux_fatal_signal_pending(struct task_struct *task);
+bool linux_signal_pending_state(long state, struct task_struct *task);
+void linux_send_sig(int signo, struct task_struct *task);
+
+#define signal_pending(task) linux_signal_pending(task)
+#define fatal_signal_pending(task) linux_fatal_signal_pending(task)
+#define signal_pending_state(state, task) \
+ linux_signal_pending_state(state, task)
+#define send_sig(signo, task, priv) do { \
+ CTASSERT((priv) == 0); \
+ linux_send_sig(signo, task); \
+} while (0)
+
+long linux_schedule_timeout(long timeout);
+
+static inline void
+linux_schedule_save_interrupt_value(struct task_struct *task, int value)
+{
+ task->bsd_interrupt_value = value;
+}
+
+bool linux_task_exiting(struct task_struct *task);
+
+#define current_exiting() \
+ linux_task_exiting(current)
+
+static inline int
+linux_schedule_get_interrupt_value(struct task_struct *task)
+{
+ int value = task->bsd_interrupt_value;
+ task->bsd_interrupt_value = 0;
+ return (value);
+}
+
+static inline void
+schedule(void)
+{
+ (void)linux_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
+#define schedule_timeout(timeout) \
+ linux_schedule_timeout(timeout)
+#define schedule_timeout_killable(timeout) \
+ schedule_timeout_interruptible(timeout)
+#define schedule_timeout_interruptible(timeout) ({ \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout(timeout); \
+})
+#define schedule_timeout_uninterruptible(timeout) ({ \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(timeout); \
+})
+
+#define io_schedule() schedule()
+#define io_schedule_timeout(timeout) schedule_timeout(timeout)
+
+static inline uint64_t
+local_clock(void)
+{
+ struct timespec ts;
+
+ nanotime(&ts);
+ return ((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec);
+}
+
+static inline const char *
+get_task_comm(char *buf, struct task_struct *task)
+{
+
+ buf[0] = 0; /* buffer is too small */
+ return (task->comm);
+}
+
+static inline void
+sched_set_fifo(struct task_struct *t)
+{
+ struct rtprio rtp;
+
+ rtp.prio = (RTP_PRIO_MIN + RTP_PRIO_MAX) / 2;
+ rtp.type = RTP_PRIO_FIFO;
+ rtp_to_pri(&rtp, t->task_thread);
+}
+
+static inline void
+sched_set_fifo_low(struct task_struct *t)
+{
+ struct rtprio rtp;
+
+ rtp.prio = RTP_PRIO_MAX; /* lowest priority */
+ rtp.type = RTP_PRIO_FIFO;
+ rtp_to_pri(&rtp, t->task_thread);
+}
+
+#endif /* _LINUXKPI_LINUX_SCHED_H_ */
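A minimal sketch of the interruptible-sleep pattern built from the primitives above; the 100-jiffy timeout and the function name are illustrative only.

#include <linux/errno.h>
#include <linux/sched.h>

static int
example_wait(void)
{
	long left;

	/* Marks the task interruptible, then sleeps for up to 100 jiffies. */
	left = schedule_timeout_interruptible(100);
	if (signal_pending(current))
		return (-EINTR);
	return (left > 0);
}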
diff --git a/sys/compat/linuxkpi/common/include/linux/sched/mm.h b/sys/compat/linuxkpi/common/include/linux/sched/mm.h
new file mode 100644
index 000000000000..c26d99378974
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/sched/mm.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_SCHED_MM_H_
+#define _LINUXKPI_LINUX_SCHED_MM_H_
+
+#include <linux/gfp.h>
+
+#define fs_reclaim_acquire(x) do { \
+ } while (0)
+#define fs_reclaim_release(x) do { \
+ } while (0)
+#define memalloc_nofs_save(x) 0
+#define memalloc_nofs_restore(x) do { \
+ } while (0)
+#define memalloc_noreclaim_save(x) 0
+#define memalloc_noreclaim_restore(x) do { \
+ } while (0)
+
+#endif /* _LINUXKPI_LINUX_SCHED_MM_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/semaphore.h b/sys/compat/linuxkpi/common/include/linux/semaphore.h
new file mode 100644
index 000000000000..4b1a1502e589
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/semaphore.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SEMAPHORE_H_
+#define _LINUXKPI_LINUX_SEMAPHORE_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/sema.h>
+#include <sys/libkern.h>
+
+/*
+ * XXX BSD semaphores are disused and slow. They also do not provide a
+ * sema_wait_sig method. This must be resolved eventually.
+ */
+struct semaphore {
+ struct sema sema;
+};
+
+#define down(_sem) sema_wait(&(_sem)->sema)
+#define down_interruptible(_sem) (sema_wait(&(_sem)->sema), 0)
+#define down_trylock(_sem) !sema_trywait(&(_sem)->sema)
+#define up(_sem) sema_post(&(_sem)->sema)
+
+static inline void
+linux_sema_init(struct semaphore *sem, int val)
+{
+
+ memset(&sem->sema, 0, sizeof(sem->sema));
+ sema_init(&sem->sema, val, "lnxsema");
+}
+
+static inline void
+init_MUTEX(struct semaphore *sem)
+{
+
+ memset(&sem->sema, 0, sizeof(sem->sema));
+ sema_init(&sem->sema, 1, "lnxsema");
+}
+
+#define sema_init(...) linux_sema_init(__VA_ARGS__)
+
+#endif /* _LINUXKPI_LINUX_SEMAPHORE_H_ */
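A minimal sketch of the semaphore shim above; the initial count of 1 and the names are illustrative only.

#include <linux/semaphore.h>

static struct semaphore example_sem;

static void
example_sema(void)
{
	sema_init(&example_sem, 1);	/* remapped to linux_sema_init() */
	down(&example_sem);
	/* ... exclusive section ... */
	up(&example_sem);
}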
diff --git a/sys/compat/linuxkpi/common/include/linux/seq_file.h b/sys/compat/linuxkpi/common/include/linux/seq_file.h
new file mode 100644
index 000000000000..876ef9e8dfe5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/seq_file.h
@@ -0,0 +1,108 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016-2018, Matthew Macy <mmacy@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SEQ_FILE_H_
+#define _LINUXKPI_LINUX_SEQ_FILE_H_
+
+#include <sys/types.h>
+#include <sys/sbuf.h>
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/string_helpers.h>
+#include <linux/printk.h>
+
+#undef file
+#define inode vnode
+
+MALLOC_DECLARE(M_LSEQ);
+
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct linux_file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+struct seq_file {
+ struct sbuf *buf;
+ size_t size;
+ const struct seq_operations *op;
+ const struct linux_file *file;
+ void *private;
+};
+
+struct seq_operations {
+ void * (*start) (struct seq_file *m, off_t *pos);
+ void (*stop) (struct seq_file *m, void *v);
+ void * (*next) (struct seq_file *m, void *v, off_t *pos);
+ int (*show) (struct seq_file *m, void *v);
+};
+
+ssize_t seq_read(struct linux_file *, char *, size_t, off_t *);
+int seq_write(struct seq_file *seq, const void *data, size_t len);
+void seq_putc(struct seq_file *m, char c);
+void seq_puts(struct seq_file *m, const char *str);
+bool seq_has_overflowed(struct seq_file *m);
+
+void *__seq_open_private(struct linux_file *, const struct seq_operations *, int);
+int seq_release_private(struct inode *, struct linux_file *);
+
+int seq_open(struct linux_file *f, const struct seq_operations *op);
+int seq_release(struct inode *inode, struct linux_file *file);
+
+off_t seq_lseek(struct linux_file *file, off_t offset, int whence);
+int single_open(struct linux_file *, int (*)(struct seq_file *, void *), void *);
+int single_open_size(struct linux_file *, int (*)(struct seq_file *, void *), void *, size_t);
+int single_release(struct inode *, struct linux_file *);
+
+void lkpi_seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
+void lkpi_seq_printf(struct seq_file *m, const char *fmt, ...);
+
+#define seq_vprintf(...) lkpi_seq_vprintf(__VA_ARGS__)
+#define seq_printf(...) lkpi_seq_printf(__VA_ARGS__)
+
+int __lkpi_hexdump_sbuf_printf(void *, const char *, ...) __printflike(2, 3);
+
+static inline void
+seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
+{
+ lkpi_hex_dump(__lkpi_hexdump_sbuf_printf, m->buf, NULL, prefix_str, prefix_type,
+ rowsize, groupsize, buf, len, ascii);
+}
+
+#define file linux_file
+
+#endif /* _LINUXKPI_LINUX_SEQ_FILE_H_ */
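A minimal sketch of a show-only consumer of the seq_file shim above, assuming <linux/module.h> for THIS_MODULE; the "example" name and printed value are illustrative only.

#include <linux/module.h>
#include <linux/seq_file.h>

static int
example_show(struct seq_file *m, void *data __unused)
{
	seq_printf(m, "value: %d\n", 42);
	return (0);
}
/* Generates example_open() and example_fops for registration with debugfs. */
DEFINE_SHOW_ATTRIBUTE(example);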
diff --git a/sys/compat/linuxkpi/common/include/linux/seqlock.h b/sys/compat/linuxkpi/common/include/linux/seqlock.h
new file mode 100644
index 000000000000..554fdfd6e202
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/seqlock.h
@@ -0,0 +1,184 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SEQLOCK_H__
+#define _LINUXKPI_LINUX_SEQLOCK_H__
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/cdefs.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/seqc.h>
+
+struct lock_class_key;
+
+struct seqcount {
+ seqc_t seqc;
+};
+typedef struct seqcount seqcount_t;
+
+struct seqlock {
+ struct mtx seql_lock;
+ struct seqcount seql_count;
+};
+typedef struct seqlock seqlock_t;
+
+struct seqcount_mutex {
+ seqc_t seqc;
+};
+typedef struct seqcount_mutex seqcount_mutex_t;
+typedef struct seqcount_mutex seqcount_ww_mutex_t;
+
+static inline void
+__seqcount_init(struct seqcount *seqcount, const char *name __unused,
+ struct lock_class_key *key __unused)
+{
+ seqcount->seqc = 0;
+}
+#define seqcount_init(seqcount) __seqcount_init(seqcount, NULL, NULL)
+
+static inline void
+seqcount_mutex_init(struct seqcount_mutex *seqcount, void *mutex __unused)
+{
+ seqcount->seqc = 0;
+}
+
+#define seqcount_ww_mutex_init(seqcount, ww_mutex) \
+ seqcount_mutex_init((seqcount), (ww_mutex))
+
+#define write_seqcount_begin(s) \
+ _Generic(*(s), \
+ struct seqcount: seqc_sleepable_write_begin, \
+ struct seqcount_mutex: seqc_write_begin \
+ )(&(s)->seqc)
+
+#define write_seqcount_end(s) \
+ _Generic(*(s), \
+ struct seqcount: seqc_sleepable_write_end, \
+ struct seqcount_mutex: seqc_write_end \
+ )(&(s)->seqc)
+
+static inline void
+lkpi_write_seqcount_invalidate(seqc_t *seqcp)
+{
+ atomic_thread_fence_rel();
+ *seqcp += SEQC_MOD * 2;
+}
+#define write_seqcount_invalidate(s) lkpi_write_seqcount_invalidate(&(s)->seqc)
+
+#define read_seqcount_begin(s) seqc_read(&(s)->seqc)
+#define raw_read_seqcount(s) seqc_read_any(&(s)->seqc)
+
+static inline seqc_t
+lkpi_seqprop_sequence(const seqc_t *seqcp)
+{
+ return (atomic_load_int(seqcp));
+}
+#define seqprop_sequence(s) lkpi_seqprop_sequence(&(s)->seqc)
+
+/*
+ * XXX: Are branch-prediction hints from inline functions still not
+ * honored by clang?
+ */
+#define __read_seqcount_retry(seqcount, gen) \
+ (!seqc_consistent_no_fence(&(seqcount)->seqc, gen))
+#define read_seqcount_retry(seqcount, gen) \
+ (!seqc_consistent(&(seqcount)->seqc, gen))
+
+static inline void
+seqlock_init(struct seqlock *seqlock)
+{
+ /*
+	 * Don't enroll in witness(4), to avoid orphaned references after a
+	 * struct seqlock has been freed.  No seqlock destructor exists, so we
+	 * cannot expect mtx_destroy() to run automatically before free().
+ */
+ mtx_init(&seqlock->seql_lock, "seqlock", NULL, MTX_DEF|MTX_NOWITNESS);
+ seqcount_init(&seqlock->seql_count);
+}
+
+static inline void
+lkpi_write_seqlock(struct seqlock *seqlock, const bool irqsave)
+{
+ mtx_lock(&seqlock->seql_lock);
+ if (irqsave)
+ critical_enter();
+ write_seqcount_begin(&seqlock->seql_count);
+}
+
+static inline void
+write_seqlock(struct seqlock *seqlock)
+{
+ lkpi_write_seqlock(seqlock, false);
+}
+
+static inline void
+lkpi_write_sequnlock(struct seqlock *seqlock, const bool irqsave)
+{
+ write_seqcount_end(&seqlock->seql_count);
+ if (irqsave)
+ critical_exit();
+ mtx_unlock(&seqlock->seql_lock);
+}
+
+static inline void
+write_sequnlock(struct seqlock *seqlock)
+{
+ lkpi_write_sequnlock(seqlock, false);
+}
+
+/*
+ * Disable preemption when the consumer wants to disable interrupts. This
+ * ensures that the caller won't be starved if it is preempted by a
+ * higher-priority reader, but assumes that the caller won't perform any
+ * blocking operations while holding the write lock; probably a safe
+ * assumption.
+ */
+#define write_seqlock_irqsave(seqlock, flags) do { \
+ (flags) = 0; \
+ lkpi_write_seqlock(seqlock, true); \
+} while (0)
+
+static inline void
+write_sequnlock_irqrestore(struct seqlock *seqlock,
+ unsigned long flags __unused)
+{
+ lkpi_write_sequnlock(seqlock, true);
+}
+
+static inline unsigned
+read_seqbegin(const struct seqlock *seqlock)
+{
+ return (read_seqcount_begin(&seqlock->seql_count));
+}
+
+#define read_seqretry(seqlock, gen) \
+ read_seqcount_retry(&(seqlock)->seql_count, gen)
+
+#endif /* _LINUXKPI_LINUX_SEQLOCK_H__ */
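A minimal reader/writer sketch using the seqlock shim above; the variable names are illustrative, and seqlock_init() is assumed to run once before first use.

#include <linux/seqlock.h>

static seqlock_t example_lock;	/* seqlock_init(&example_lock) runs at attach */
static int example_val;

static int
example_read(void)
{
	unsigned gen;
	int v;

	do {
		gen = read_seqbegin(&example_lock);
		v = example_val;
	} while (read_seqretry(&example_lock, gen));
	return (v);
}

static void
example_write(int v)
{
	write_seqlock(&example_lock);
	example_val = v;
	write_sequnlock(&example_lock);
}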
diff --git a/sys/compat/linuxkpi/common/include/linux/shmem_fs.h b/sys/compat/linuxkpi/common/include/linux/shmem_fs.h
new file mode 100644
index 000000000000..5e91725d4a1c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/shmem_fs.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SHMEM_FS_H_
+#define _LINUXKPI_LINUX_SHMEM_FS_H_
+
+#include <linux/file.h>
+#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+/* Shared memory support */
+struct page *linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex,
+ gfp_t gfp);
+struct linux_file *linux_shmem_file_setup(const char *name, loff_t size,
+ unsigned long flags);
+void linux_shmem_truncate_range(vm_object_t obj, loff_t lstart,
+ loff_t lend);
+
+#define shmem_read_mapping_page(...) \
+ linux_shmem_read_mapping_page_gfp(__VA_ARGS__, 0)
+
+#define shmem_read_mapping_page_gfp(...) \
+ linux_shmem_read_mapping_page_gfp(__VA_ARGS__)
+
+#define shmem_file_setup(...) \
+ linux_shmem_file_setup(__VA_ARGS__)
+
+#define shmem_truncate_range(...) \
+ linux_shmem_truncate_range(__VA_ARGS__)
+
+static inline struct folio *
+shmem_read_folio_gfp(vm_object_t obj, int pindex, gfp_t gfp)
+{
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(obj, pindex, gfp);
+
+ return (page_folio(page));
+}
+
+#endif /* _LINUXKPI_LINUX_SHMEM_FS_H_ */
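A minimal sketch of the shmem wrappers above; the 64 KB size and the object name are illustrative only.

#include <linux/shmem_fs.h>

static struct linux_file *
example_shmem_create(void)
{
	/* Anonymous shared-memory backing object of 64 KB. */
	return (shmem_file_setup("example", 64 * 1024, 0));
}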
diff --git a/sys/compat/linuxkpi/common/include/linux/shrinker.h b/sys/compat/linuxkpi/common/include/linux/shrinker.h
new file mode 100644
index 000000000000..eb95dafb83ce
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/shrinker.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2020 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SHRINKER_H_
+#define _LINUXKPI_LINUX_SHRINKER_H_
+
+#include <sys/queue.h>
+
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+struct shrink_control {
+ gfp_t gfp_mask;
+ unsigned long nr_to_scan;
+ unsigned long nr_scanned;
+};
+
+struct shrinker {
+ unsigned long (*count_objects)(struct shrinker *, struct shrink_control *);
+ unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *);
+ int seeks;
+ unsigned int flags;
+	void *private_data;
+ long batch;
+ TAILQ_ENTRY(shrinker) next;
+};
+
+#define SHRINK_STOP (~0UL)
+
+#define DEFAULT_SEEKS 2
+
+#define SHRINKER_REGISTERED BIT(0)
+#define SHRINKER_ALLOCATED BIT(1)
+
+struct shrinker *linuxkpi_shrinker_alloc(
+ unsigned int flags, const char *fmt, ...);
+int linuxkpi_register_shrinker(struct shrinker *s);
+void linuxkpi_unregister_shrinker(struct shrinker *s);
+void linuxkpi_shrinker_free(struct shrinker *shrinker);
+void linuxkpi_synchronize_shrinkers(void);
+
+#define shrinker_alloc(flags, fmt, ...) \
+ linuxkpi_shrinker_alloc(flags, fmt __VA_OPT__(,) __VA_ARGS__)
+#define shrinker_register(shrinker) \
+ linuxkpi_register_shrinker(shrinker)
+#define shrinker_free(shrinker) \
+ linuxkpi_shrinker_free(shrinker)
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60000
+#define register_shrinker(s, ...) linuxkpi_register_shrinker(s)
+#else
+#define register_shrinker(s) linuxkpi_register_shrinker(s)
+#endif
+#define unregister_shrinker(s) linuxkpi_unregister_shrinker(s)
+#define synchronize_shrinkers() linuxkpi_synchronize_shrinkers()
+
+#endif /* _LINUXKPI_LINUX_SHRINKER_H_ */
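A minimal sketch of allocating and registering a shrinker with the API above; the callbacks and the "example" name are illustrative and reclaim nothing.

#include <linux/shrinker.h>

static unsigned long
example_count(struct shrinker *s __unused, struct shrink_control *sc __unused)
{
	return (0);		/* nothing to reclaim in this sketch */
}

static unsigned long
example_scan(struct shrinker *s __unused, struct shrink_control *sc __unused)
{
	return (SHRINK_STOP);
}

static struct shrinker *example_shrinker;

static void
example_shrinker_attach(void)
{
	example_shrinker = shrinker_alloc(0, "example");
	if (example_shrinker == NULL)
		return;
	example_shrinker->count_objects = example_count;
	example_shrinker->scan_objects = example_scan;
	example_shrinker->seeks = DEFAULT_SEEKS;
	shrinker_register(example_shrinker);
}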
diff --git a/sys/compat/linuxkpi/common/include/linux/sizes.h b/sys/compat/linuxkpi/common/include/linux/sizes.h
new file mode 100644
index 000000000000..d8a6e75192f6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/sizes.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SIZES_H_
+#define _LINUXKPI_LINUX_SIZES_H_
+
+#define SZ_1K (1024 * 1)
+#define SZ_2K (1024 * 2)
+#define SZ_4K (1024 * 4)
+#define SZ_8K (1024 * 8)
+#define SZ_16K (1024 * 16)
+#define SZ_32K (1024 * 32)
+#define SZ_64K (1024 * 64)
+#define SZ_128K (1024 * 128)
+#define SZ_256K (1024 * 256)
+#define SZ_512K (1024 * 512)
+
+#define SZ_1M (1024 * 1024 * 1)
+#define SZ_2M (1024 * 1024 * 2)
+#define SZ_4M (1024 * 1024 * 4)
+#define SZ_8M (1024 * 1024 * 8)
+#define SZ_16M (1024 * 1024 * 16)
+#define SZ_32M (1024 * 1024 * 32)
+#define SZ_64M (1024 * 1024 * 64)
+#define SZ_128M (1024 * 1024 * 128)
+#define SZ_256M (1024 * 1024 * 256)
+#define SZ_512M (1024 * 1024 * 512)
+
+/* Values of 2G and above need 64-bit arithmetic to avoid int overflow. */
+#define SZ_1G (1024 * 1024 * 1024 * 1)
+#define SZ_2G (1024ULL * 1024 * 1024 * 2)
+#define SZ_4G (1024ULL * 1024 * 1024 * 4)
+#define SZ_8G (1024ULL * 1024 * 1024 * 8)
+#define SZ_16G (1024ULL * 1024 * 1024 * 16)
+#define SZ_32G (1024ULL * 1024 * 1024 * 32)
+
+#define SZ_64T (1024ULL * 1024 * 1024 * 1024 * 64)
+
+#endif /* _LINUXKPI_LINUX_SIZES_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/skbuff.h b/sys/compat/linuxkpi/common/include/linux/skbuff.h
new file mode 100644
index 000000000000..c8ad90281e34
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/skbuff.h
@@ -0,0 +1,1167 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2021-2023 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
+ * Do not rely on the internals of this implementation. They are highly
+ * likely to change as we improve the integration with FreeBSD mbufs.
+ */
+
+#ifndef _LINUXKPI_LINUX_SKBUFF_H
+#define _LINUXKPI_LINUX_SKBUFF_H
+
+#include <linux/kernel.h>
+#include <linux/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/compiler.h>
+
+#include "opt_wlan.h"
+
+/* Currently this is only used for wlan so we can depend on that. */
+#if defined(IEEE80211_DEBUG) && !defined(SKB_DEBUG)
+#define SKB_DEBUG
+#endif
+
+/* #define SKB_DEBUG */
+
+#ifdef SKB_DEBUG
+#define DSKB_TODO 0x01
+#define DSKB_IMPROVE 0x02
+#define DSKB_TRACE 0x10
+#define DSKB_TRACEX 0x20
+extern int linuxkpi_debug_skb;
+
+#define SKB_TODO() \
+ if (linuxkpi_debug_skb & DSKB_TODO) \
+ printf("SKB_TODO %s:%d\n", __func__, __LINE__)
+#define SKB_IMPROVE(...) \
+ if (linuxkpi_debug_skb & DSKB_IMPROVE) \
+ printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
+#define SKB_TRACE(_s) \
+ if (linuxkpi_debug_skb & DSKB_TRACE) \
+ printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
+#define SKB_TRACE2(_s, _p) \
+ if (linuxkpi_debug_skb & DSKB_TRACE) \
+ printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
+#define SKB_TRACE_FMT(_s, _fmt, ...) \
+ if (linuxkpi_debug_skb & DSKB_TRACE) \
+ printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s, \
+ __VA_ARGS__)
+#else
+#define SKB_TODO() do { } while(0)
+#define SKB_IMPROVE(...) do { } while(0)
+#define SKB_TRACE(_s) do { } while(0)
+#define SKB_TRACE2(_s, _p) do { } while(0)
+#define SKB_TRACE_FMT(_s, ...) do { } while(0)
+#endif
+
+enum sk_buff_pkt_type {
+ PACKET_BROADCAST,
+ PACKET_MULTICAST,
+ PACKET_OTHERHOST,
+};
+
+struct skb_shared_hwtstamps {
+ ktime_t hwtstamp;
+};
+
+#define NET_SKB_PAD max(CACHE_LINE_SIZE, 32)
+#define SKB_DATA_ALIGN(_x) roundup2(_x, CACHE_LINE_SIZE)
+
+struct sk_buff_head {
+ /* XXX TODO */
+ union {
+ struct {
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ };
+ struct sk_buff_head_l {
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ } list;
+ };
+ size_t qlen;
+ spinlock_t lock;
+};
+
+enum sk_checksum_flags {
+ CHECKSUM_NONE = 0x00,
+ CHECKSUM_UNNECESSARY = 0x01,
+ CHECKSUM_PARTIAL = 0x02,
+ CHECKSUM_COMPLETE = 0x04,
+};
+
+struct skb_frag {
+ /* XXX TODO */
+ struct page *page; /* XXX-BZ These three are a wild guess so far! */
+ off_t offset;
+ size_t size;
+};
+typedef struct skb_frag skb_frag_t;
+
+enum skb_shared_info_gso_type {
+ SKB_GSO_TCPV4,
+ SKB_GSO_TCPV6,
+};
+
+struct skb_shared_info {
+ enum skb_shared_info_gso_type gso_type;
+ uint16_t gso_size;
+ uint16_t nr_frags;
+ struct sk_buff *frag_list;
+ skb_frag_t frags[64]; /* XXX TODO, 16xpage? */
+};
+
+struct sk_buff {
+ /* XXX TODO */
+ union {
+ /* struct sk_buff_head */
+ struct {
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ };
+ struct list_head list;
+ };
+
+ uint8_t *head; /* Head of buffer. */
+ uint8_t *data; /* Head of data. */
+ uint8_t *tail; /* End of data. */
+ uint8_t *end; /* End of buffer. */
+
+ uint32_t len; /* ? */
+ uint32_t data_len; /* ? If we have frags? */
+ union {
+ __wsum csum;
+ struct {
+ uint16_t csum_offset;
+ uint16_t csum_start;
+ };
+ };
+ uint16_t protocol;
+ uint8_t ip_summed;
+ /* uint8_t */
+
+ /* "Scratch" area for layers to store metadata. */
+ /* ??? I see sizeof() operations so probably an array. */
+ uint8_t cb[64] __aligned(CACHE_LINE_SIZE);
+
+ struct skb_shared_info *shinfo __aligned(CACHE_LINE_SIZE);
+
+ uint32_t truesize; /* The total size of all buffers, incl. frags. */
+ uint32_t priority;
+ uint16_t qmap; /* queue mapping */
+ uint16_t _flags; /* Internal flags. */
+#define _SKB_FLAGS_SKBEXTFRAG 0x0001
+ uint16_t l3hdroff; /* network header offset from *head */
+ uint16_t l4hdroff; /* transport header offset from *head */
+ uint16_t mac_header; /* offset of mac_header */
+ uint16_t mac_len; /* Link-layer header length. */
+ enum sk_buff_pkt_type pkt_type;
+ refcount_t refcnt;
+
+ struct net_device *dev;
+ void *sk; /* XXX net/sock.h? */
+
+ /* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
+ void *m;
+ void(*m_free_func)(void *);
+
+ /* Force padding to CACHE_LINE_SIZE. */
+ uint8_t __scratch[0] __aligned(CACHE_LINE_SIZE);
+};
+
+/* -------------------------------------------------------------------------- */
+
+struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
+struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
+struct sk_buff *linuxkpi_build_skb(void *, size_t);
+void linuxkpi_kfree_skb(struct sk_buff *);
+
+struct sk_buff *linuxkpi_skb_copy(const struct sk_buff *, gfp_t);
+
+/* -------------------------------------------------------------------------- */
+
+static inline struct sk_buff *
+alloc_skb(size_t size, gfp_t gfp)
+{
+ struct sk_buff *skb;
+
+ skb = linuxkpi_alloc_skb(size, gfp);
+ SKB_TRACE(skb);
+ return (skb);
+}
+
+static inline struct sk_buff *
+__dev_alloc_skb(size_t len, gfp_t gfp)
+{
+ struct sk_buff *skb;
+
+ skb = linuxkpi_dev_alloc_skb(len, gfp);
+ SKB_IMPROVE();
+ SKB_TRACE(skb);
+ return (skb);
+}
+
+static inline struct sk_buff *
+dev_alloc_skb(size_t len)
+{
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(len, GFP_NOWAIT);
+ SKB_IMPROVE();
+ SKB_TRACE(skb);
+ return (skb);
+}
+
+static inline void
+kfree_skb(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ linuxkpi_kfree_skb(skb);
+}
+
+static inline void
+consume_skb(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ kfree_skb(skb);
+}
+
+static inline void
+dev_kfree_skb(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ kfree_skb(skb);
+}
+
+static inline void
+dev_kfree_skb_any(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ dev_kfree_skb(skb);
+}
+
+static inline void
+dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_IMPROVE("Do we have to defer this?");
+ dev_kfree_skb(skb);
+}
+
+static inline struct sk_buff *
+build_skb(void *data, unsigned int fragsz)
+{
+ struct sk_buff *skb;
+
+ skb = linuxkpi_build_skb(data, fragsz);
+ SKB_TRACE(skb);
+ return (skb);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline bool
+skb_is_nonlinear(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ return ((skb->data_len > 0) ? true : false);
+}
+
+/* Add headroom; this cannot be done once there is data in the buffer. */
+static inline void
+skb_reserve(struct sk_buff *skb, size_t len)
+{
+ SKB_TRACE(skb);
+#if 0
+ /* Apparently it is allowed to call skb_reserve multiple times in a row. */
+ KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
+ "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
+#else
+ KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
+ "empty head %p data %p tail %p len %u\n", __func__, skb,
+ skb->head, skb->data, skb->tail, skb->len));
+#endif
+ skb->data += len;
+ skb->tail += len;
+}
+
+/*
+ * Remove headroom; return new data pointer; basically make space at the
+ * front to copy data in (manually).
+ */
+static inline void *
+__skb_push(struct sk_buff *skb, size_t len)
+{
+ SKB_TRACE(skb);
+	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
+	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
+ skb->len += len;
+ skb->data -= len;
+ return (skb->data);
+}
+
+static inline void *
+skb_push(struct sk_buff *skb, size_t len)
+{
+
+ SKB_TRACE(skb);
+ return (__skb_push(skb, len));
+}
+
+/*
+ * Length of the linear data in the skb, i.e. excluding any fragments.
+ */
+static inline size_t
+skb_headlen(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ return (skb->len - skb->data_len);
+}
+
+
+/* Return the end of data (tail pointer). */
+static inline uint8_t *
+skb_tail_pointer(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ return (skb->tail);
+}
+
+/* Return number of bytes available at end of buffer. */
+static inline unsigned int
+skb_tailroom(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
+ "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
+ if (unlikely(skb_is_nonlinear(skb)))
+ return (0);
+ return (skb->end - skb->tail);
+}
+
+/* Return number of bytes available at the beginning of buffer. */
+static inline unsigned int
+skb_headroom(const struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
+ "data %p head %p\n", __func__, skb, skb->data, skb->head));
+ return (skb->data - skb->head);
+}
+
+
+/*
+ * Remove tailroom; return the old tail pointer; basically make space at
+ * the end to copy data in (manually). See also skb_put_data() below.
+ */
+static inline void *
+__skb_put(struct sk_buff *skb, size_t len)
+{
+ void *s;
+
+ SKB_TRACE(skb);
+ KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
+ "len %zu) > end %p, head %p data %p len %u\n", __func__,
+ skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));
+
+ s = skb_tail_pointer(skb);
+ if (len == 0)
+ return (s);
+ skb->tail += len;
+ skb->len += len;
+#ifdef SKB_DEBUG
+ if (linuxkpi_debug_skb & DSKB_TRACEX)
+ printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
+ __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
+ s, len);
+#endif
+ return (s);
+}
+
+static inline void *
+skb_put(struct sk_buff *skb, size_t len)
+{
+
+ SKB_TRACE(skb);
+ return (__skb_put(skb, len));
+}
+
+/* skb_put() + copying data in. */
+static inline void *
+skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
+{
+ void *s;
+
+ SKB_TRACE2(skb, buf);
+ s = skb_put(skb, len);
+ if (len == 0)
+ return (s);
+ memcpy(s, buf, len);
+ return (s);
+}
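A minimal sketch of the reserve/put/push pattern implemented by the helpers above; the function and parameter names are illustrative only:

    static struct sk_buff *
    example_build_frame(const void *payload, size_t len, size_t hdrlen)
    {
        struct sk_buff *skb;
        void *hdr;

        skb = alloc_skb(hdrlen + len, GFP_KERNEL);
        if (skb == NULL)
            return (NULL);
        skb_reserve(skb, hdrlen);           /* Headroom; skb must still be empty. */
        skb_put_data(skb, payload, len);    /* Copy the payload in at the tail. */
        hdr = skb_push(skb, hdrlen);        /* Reclaim headroom for a header. */
        memset(hdr, 0, hdrlen);             /* A real caller would fill the header. */
        return (skb);
    }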
+
+/* skb_put() + filling with zeros. */
+static inline void *
+skb_put_zero(struct sk_buff *skb, size_t len)
+{
+ void *s;
+
+ SKB_TRACE(skb);
+ s = skb_put(skb, len);
+ memset(s, '\0', len);
+ return (s);
+}
+
+/*
+ * Remove len bytes from beginning of data.
+ *
+ * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
+ * we return the advanced data pointer so we don't have to keep a temp, correct?
+ */
+static inline void *
+skb_pull(struct sk_buff *skb, size_t len)
+{
+
+ SKB_TRACE(skb);
+#if 0 /* Apparently this doesn't barf... */
+ KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
+ __func__, skb, skb->len, len, skb->data));
+#endif
+ if (skb->len < len)
+ return (NULL);
+ skb->len -= len;
+ skb->data += len;
+ return (skb->data);
+}
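A sketch of consuming a hypothetical fixed-size header on receive using skb_pull(); the helper name is illustrative:

    static const void *
    example_strip_header(struct sk_buff *skb, size_t hdrlen)
    {
        const void *hdr;

        if (skb_headlen(skb) < hdrlen)      /* Header must be in the linear area. */
            return (NULL);
        hdr = skb->data;                    /* Points at the header being consumed. */
        skb_pull(skb, hdrlen);              /* Advance data, shrink len. */
        return (hdr);
    }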
+
+/* Reduce skb data to given length or do nothing if smaller already. */
+static inline void
+__skb_trim(struct sk_buff *skb, unsigned int len)
+{
+
+ SKB_TRACE(skb);
+ if (skb->len < len)
+ return;
+
+ skb->len = len;
+ skb->tail = skb->data + skb->len;
+}
+
+static inline void
+skb_trim(struct sk_buff *skb, unsigned int len)
+{
+
+ __skb_trim(skb, len);
+}
+
+static inline struct skb_shared_info *
+skb_shinfo(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ return (skb->shinfo);
+}
+
+static inline void
+skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
+ off_t offset, size_t size, unsigned int truesize)
+{
+ struct skb_shared_info *shinfo;
+
+ SKB_TRACE(skb);
+#ifdef SKB_DEBUG
+ if (linuxkpi_debug_skb & DSKB_TRACEX)
+ printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
+ "page %#jx offset %ju size %zu truesize %u\n", __func__,
+ skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
+ (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
+ size, truesize);
+#endif
+
+ shinfo = skb_shinfo(skb);
+ KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
+ "fragno %d too big\n", __func__, skb, fragno));
+ shinfo->frags[fragno].page = page;
+ shinfo->frags[fragno].offset = offset;
+ shinfo->frags[fragno].size = size;
+ shinfo->nr_frags = fragno + 1;
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+}
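A minimal RX-path sketch for skb_add_rx_frag(), assuming the caller owns a page already filled with data; names are illustrative:

    static void
    example_attach_frag(struct sk_buff *skb, struct page *page, size_t size)
    {
        /* Append one page-backed fragment; len, data_len and truesize grow. */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, size,
            PAGE_SIZE);
    }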
+
+/* -------------------------------------------------------------------------- */
+
+#define skb_queue_walk(_q, skb) \
+ for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q); \
+ (skb) = (skb)->next)
+
+#define skb_queue_walk_safe(_q, skb, tmp) \
+ for ((skb) = (_q)->next, (tmp) = (skb)->next; \
+ (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
+
+#define skb_list_walk_safe(_q, skb, tmp) \
+ for ((skb) = (_q), (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
+ ((skb) != NULL); \
+ (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
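A sketch of walking a queue with the macros above; the caller is assumed to hold the queue lock while iterating:

    static unsigned int
    example_count_queue(struct sk_buff_head *q)
    {
        struct sk_buff *skb;
        unsigned long flags;
        unsigned int n;

        n = 0;
        spin_lock_irqsave(&q->lock, flags);
        skb_queue_walk(q, skb)
            n++;
        spin_unlock_irqrestore(&q->lock, flags);
        return (n);
    }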
+
+static inline bool
+skb_queue_empty(const struct sk_buff_head *q)
+{
+ SKB_TRACE(q);
+ return (q->next == (const struct sk_buff *)q);
+}
+
+static inline bool
+skb_queue_empty_lockless(const struct sk_buff_head *q)
+{
+ SKB_TRACE(q);
+ return (READ_ONCE(q->next) == (const struct sk_buff *)q);
+}
+
+static inline void
+__skb_queue_head_init(struct sk_buff_head *q)
+{
+ SKB_TRACE(q);
+ q->prev = q->next = (struct sk_buff *)q;
+ q->qlen = 0;
+}
+
+static inline void
+skb_queue_head_init(struct sk_buff_head *q)
+{
+ SKB_TRACE(q);
+ __skb_queue_head_init(q);
+ spin_lock_init(&q->lock);
+}
+
+static inline void
+__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_head *q)
+{
+
+ SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
+ WRITE_ONCE(new->prev, prev);
+ WRITE_ONCE(new->next, next);
+ WRITE_ONCE(((struct sk_buff_head_l *)next)->prev, new);
+ WRITE_ONCE(((struct sk_buff_head_l *)prev)->next, new);
+ WRITE_ONCE(q->qlen, q->qlen + 1);
+}
+
+static inline void
+__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
+ struct sk_buff *new)
+{
+
+ SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
+ __skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
+}
+
+static inline void
+__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
+ struct sk_buff *new)
+{
+
+ SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
+ __skb_insert(new, skb->prev, skb, q);
+}
+
+static inline void
+__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
+{
+
+ SKB_TRACE2(q, new);
+ __skb_queue_before(q, (struct sk_buff *)q, new);
+}
+
+static inline void
+skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
+{
+ unsigned long flags;
+
+ SKB_TRACE2(q, new);
+ spin_lock_irqsave(&q->lock, flags);
+ __skb_queue_tail(q, new);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static inline struct sk_buff *
+skb_peek(const struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ skb = q->next;
+ SKB_TRACE2(q, skb);
+ if (skb == (const struct sk_buff *)q)
+ return (NULL);
+ return (skb);
+}
+
+static inline struct sk_buff *
+skb_peek_tail(const struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ skb = READ_ONCE(q->prev);
+ SKB_TRACE2(q, skb);
+ if (skb == (const struct sk_buff *)q)
+ return (NULL);
+ return (skb);
+}
+
+static inline void
+__skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
+{
+ struct sk_buff *p, *n;
+
+ SKB_TRACE2(skb, q);
+
+ WRITE_ONCE(q->qlen, q->qlen - 1);
+ p = skb->prev;
+ n = skb->next;
+ WRITE_ONCE(n->prev, p);
+ WRITE_ONCE(p->next, n);
+ skb->prev = skb->next = NULL;
+}
+
+static inline void
+skb_unlink(struct sk_buff *skb, struct sk_buff_head *q)
+{
+ unsigned long flags;
+
+ SKB_TRACE2(skb, q);
+ spin_lock_irqsave(&q->lock, flags);
+ __skb_unlink(skb, q);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static inline struct sk_buff *
+__skb_dequeue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ skb = skb_peek(q);
+ if (skb != NULL)
+ __skb_unlink(skb, q);
+ SKB_TRACE2(q, skb);
+ return (skb);
+}
+
+static inline struct sk_buff *
+skb_dequeue(struct sk_buff_head *q)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb = __skb_dequeue(q);
+ spin_unlock_irqrestore(&q->lock, flags);
+ SKB_TRACE2(q, skb);
+ return (skb);
+}
+
+static inline struct sk_buff *
+__skb_dequeue_tail(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ skb = skb_peek_tail(q);
+ if (skb != NULL)
+ __skb_unlink(skb, q);
+ SKB_TRACE2(q, skb);
+ return (skb);
+}
+
+static inline struct sk_buff *
+skb_dequeue_tail(struct sk_buff_head *q)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb = __skb_dequeue_tail(q);
+ spin_unlock_irqrestore(&q->lock, flags);
+ SKB_TRACE2(q, skb);
+ return (skb);
+}
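A sketch of the usual consumer side of the locked queue helpers; a producer would call skb_queue_tail(q, skb) and the drain helper below is illustrative:

    static void
    example_drain_queue(struct sk_buff_head *q)
    {
        struct sk_buff *skb;

        while ((skb = skb_dequeue(q)) != NULL) {
            /* Process the skb here ... */
            kfree_skb(skb);
        }
    }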
+
+static inline void
+__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
+{
+
+ SKB_TRACE2(q, skb);
+ __skb_queue_after(q, (struct sk_buff *)q, skb);
+}
+
+static inline void
+skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ SKB_TRACE2(q, skb);
+ spin_lock_irqsave(&q->lock, flags);
+ __skb_queue_head(q, skb);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static inline uint32_t
+skb_queue_len(const struct sk_buff_head *q)
+{
+
+ SKB_TRACE(q);
+ return (q->qlen);
+}
+
+static inline uint32_t
+skb_queue_len_lockless(const struct sk_buff_head *q)
+{
+
+ SKB_TRACE(q);
+ return (READ_ONCE(q->qlen));
+}
+
+static inline void
+___skb_queue_splice(const struct sk_buff_head *from,
+ struct sk_buff *p, struct sk_buff *n)
+{
+ struct sk_buff *b, *e;
+
+ b = from->next;
+ e = from->prev;
+
+ WRITE_ONCE(b->prev, p);
+ WRITE_ONCE(((struct sk_buff_head_l *)p)->next, b);
+ WRITE_ONCE(e->next, n);
+ WRITE_ONCE(((struct sk_buff_head_l *)n)->prev, e);
+}
+
+static inline void
+skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
+{
+
+ SKB_TRACE2(from, to);
+
+ if (skb_queue_empty(from))
+ return;
+
+ ___skb_queue_splice(from, (struct sk_buff *)to, to->next);
+ to->qlen += from->qlen;
+ __skb_queue_head_init(from);
+}
+
+static inline void
+skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
+{
+
+ SKB_TRACE2(from, to);
+
+ if (skb_queue_empty(from))
+ return;
+
+ ___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
+ to->qlen += from->qlen;
+ __skb_queue_head_init(from);
+}
+
+
+static inline void
+__skb_queue_purge(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ SKB_TRACE(q);
+ while ((skb = __skb_dequeue(q)) != NULL)
+ kfree_skb(skb);
+ WARN_ONCE(skb_queue_len(q) != 0, "%s: queue %p not empty: %u",
+ __func__, q, skb_queue_len(q));
+}
+
+static inline void
+skb_queue_purge(struct sk_buff_head *q)
+{
+ struct sk_buff_head _q;
+ unsigned long flags;
+
+ SKB_TRACE(q);
+
+ if (skb_queue_empty_lockless(q))
+ return;
+
+ __skb_queue_head_init(&_q);
+ spin_lock_irqsave(&q->lock, flags);
+ skb_queue_splice_init(q, &_q);
+ spin_unlock_irqrestore(&q->lock, flags);
+ __skb_queue_purge(&_q);
+}
+
+static inline struct sk_buff *
+skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
+{
+
+ SKB_TRACE2(q, skb);
+ /* XXX what is the q argument good for? */
+ return (skb->prev);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline struct sk_buff *
+skb_copy(const struct sk_buff *skb, gfp_t gfp)
+{
+ struct sk_buff *new;
+
+ new = linuxkpi_skb_copy(skb, gfp);
+ SKB_TRACE2(skb, new);
+ return (new);
+}
+
+static inline uint16_t
+skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (0xffff);
+}
+
+static inline int
+skb_checksum_start_offset(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (-1);
+}
+
+static inline dma_addr_t
+skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
+ size_t fragsz, enum dma_data_direction dir)
+{
+ SKB_TRACE2(frag, dev);
+ SKB_TODO();
+ return (-1);
+}
+
+static inline size_t
+skb_frag_size(const skb_frag_t *frag)
+{
+ SKB_TRACE(frag);
+ return (frag->size);
+}
+
+#define skb_walk_frags(_skb, _frag) \
+ for ((_frag) = (_skb); false; (_frag)++)
+
+static inline void
+skb_checksum_help(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+}
+
+static inline bool
+skb_ensure_writable(struct sk_buff *skb, size_t off)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (false);
+}
+
+static inline void *
+skb_frag_address(const skb_frag_t *frag)
+{
+ SKB_TRACE(frag);
+ return ((uint8_t *)page_address(frag->page) + frag->offset);
+}
+
+static inline void
+skb_free_frag(void *frag)
+{
+
+ page_frag_free(frag);
+}
+
+static inline struct sk_buff *
+skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (NULL);
+}
+
+static inline bool
+skb_is_gso(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_IMPROVE("Really a TODO but get it away from logging");
+ return (false);
+}
+
+static inline void
+skb_mark_not_on_list(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ skb->next = NULL;
+}
+
+static inline void
+skb_reset_transport_header(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ skb->l4hdroff = skb->data - skb->head;
+}
+
+static inline uint8_t *
+skb_transport_header(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ return (skb->head + skb->l4hdroff);
+}
+
+static inline uint8_t *
+skb_network_header(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ return (skb->head + skb->l3hdroff);
+}
+
+static inline int
+__skb_linearize(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (-ENXIO);
+}
+
+static inline int
+skb_linearize(struct sk_buff *skb)
+{
+ return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
+}
+
+static inline int
+pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (-ENXIO);
+}
+
+/* Rarely seen, but needed as the symmetric accessor to skb_get_queue_mapping(). */
+static inline void
+skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
+{
+
+ SKB_TRACE_FMT(skb, "qmap %u", qmap);
+ skb->qmap = qmap;
+}
+
+static inline uint16_t
+skb_get_queue_mapping(struct sk_buff *skb)
+{
+
+ SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
+ return (skb->qmap);
+}
+
+static inline void
+skb_copy_header(struct sk_buff *to, const struct sk_buff *from)
+{
+ SKB_TRACE2(to, from);
+ SKB_TODO();
+}
+
+static inline bool
+skb_header_cloned(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (true);
+}
+
+static inline uint8_t *
+skb_mac_header(const struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ return (skb->head + skb->mac_header);
+}
+
+static inline void
+skb_reset_mac_header(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ skb->mac_header = skb->data - skb->head;
+}
+
+static inline void
+skb_set_mac_header(struct sk_buff *skb, const size_t len)
+{
+ SKB_TRACE(skb);
+ skb_reset_mac_header(skb);
+ skb->mac_header += len;
+}
+
+static inline struct skb_shared_hwtstamps *
+skb_hwtstamps(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (NULL);
+}
+
+static inline void
+skb_orphan(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+}
+
+static inline __wsum
+csum_unfold(__sum16 sum)
+{
+ return (sum);
+}
+
+static __inline void
+skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
+{
+ SKB_TODO();
+}
+
+static inline void
+skb_reset_tail_pointer(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+#ifdef SKB_DOING_OFFSETS_US_NOT
+ skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
+#endif
+ skb->tail = skb->data;
+ SKB_TRACE(skb);
+}
+
+static inline struct sk_buff *
+skb_get(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ refcount_inc(&skb->refcnt);
+ return (skb);
+}
+
+static inline struct sk_buff *
+skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+{
+
+ SKB_TODO();
+ return (NULL);
+}
+
+static inline void
+skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
+{
+
+ SKB_TRACE(skb);
+ /* Let us just hope the destination has len space ... */
+ memcpy(dst, skb->data, len);
+}
+
+static inline int
+skb_pad(struct sk_buff *skb, int pad)
+{
+
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (-1);
+}
+
+static inline void
+skb_list_del_init(struct sk_buff *skb)
+{
+
+ SKB_TRACE(skb);
+ __list_del_entry(&skb->list);
+ skb_mark_not_on_list(skb);
+}
+
+static inline void
+napi_consume_skb(struct sk_buff *skb, int budget)
+{
+
+ SKB_TRACE(skb);
+ SKB_TODO();
+}
+
+static inline struct sk_buff *
+napi_build_skb(void *data, size_t len)
+{
+
+ SKB_TODO();
+ return (NULL);
+}
+
+static inline uint32_t
+skb_get_hash(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (0);
+}
+
+static inline void
+skb_mark_for_recycle(struct sk_buff *skb)
+{
+ SKB_TRACE(skb);
+ /* page_pool */
+ SKB_TODO();
+}
+
+static inline int
+skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+ SKB_TRACE(skb);
+ SKB_TODO();
+ return (-1);
+}
+
+#define SKB_WITH_OVERHEAD(_s) \
+ ((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
+
+#endif /* _LINUXKPI_LINUX_SKBUFF_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
new file mode 100644
index 000000000000..efa5c8cb67b3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -0,0 +1,284 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SLAB_H_
+#define _LINUXKPI_LINUX_SLAB_H_
+
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/limits.h>
+
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/llist.h>
+#include <linux/overflow.h>
+
+MALLOC_DECLARE(M_KMALLOC);
+
+#define kvzalloc(size, flags) kvmalloc(size, (flags) | __GFP_ZERO)
+#define kvcalloc(n, size, flags) kvmalloc_array(n, size, (flags) | __GFP_ZERO)
+#define kzalloc(size, flags) kmalloc(size, (flags) | __GFP_ZERO)
+#define kzalloc_node(size, flags, node) kmalloc_node(size, (flags) | __GFP_ZERO, node)
+#define kfree_const(ptr) kfree(ptr)
+#define kfree_async(ptr) kfree(ptr) /* drm-kmod 5.4 compat */
+#define vzalloc(size) __vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
+#define vfree(arg) kfree(arg)
+#define kvfree(arg) kfree(arg)
+#define vmalloc_node(size, node) __vmalloc_node(size, GFP_KERNEL, node)
+#define vmalloc_user(size) __vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
+#define vmalloc(size) __vmalloc(size, GFP_KERNEL, 0)
+
+/*
+ * Prefix some functions with linux_ to avoid namespace conflict
+ * with the OpenSolaris code in the kernel.
+ */
+#define kmem_cache linux_kmem_cache
+#define kmem_cache_create(...) linux_kmem_cache_create(__VA_ARGS__)
+#define kmem_cache_alloc(...) lkpi_kmem_cache_alloc(__VA_ARGS__)
+#define kmem_cache_zalloc(...) lkpi_kmem_cache_zalloc(__VA_ARGS__)
+#define kmem_cache_free(...) lkpi_kmem_cache_free(__VA_ARGS__)
+#define kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__)
+#define kmem_cache_shrink(x) (0)
+
+#define KMEM_CACHE(__struct, flags) \
+ linux_kmem_cache_create(#__struct, sizeof(struct __struct), \
+ __alignof(struct __struct), (flags), NULL)
+
+typedef void linux_kmem_ctor_t (void *);
+
+struct linux_kmem_cache;
+
+#define SLAB_HWCACHE_ALIGN (1 << 0)
+#define SLAB_TYPESAFE_BY_RCU (1 << 1)
+#define SLAB_RECLAIM_ACCOUNT (1 << 2)
+
+#define SLAB_DESTROY_BY_RCU \
+ SLAB_TYPESAFE_BY_RCU
+
+#define ARCH_KMALLOC_MINALIGN \
+ __alignof(unsigned long long)
+
+#define ZERO_SIZE_PTR ((void *)16)
+#define ZERO_OR_NULL_PTR(x) ((x) == NULL || (x) == ZERO_SIZE_PTR)
+
+struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
+ size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
+void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
+void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
+void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
+void linux_kmem_cache_destroy(struct linux_kmem_cache *);
+
+void *lkpi_kmalloc(size_t, gfp_t);
+void *lkpi___kmalloc(size_t, gfp_t);
+void *lkpi___kmalloc_node(size_t, gfp_t, int);
+void *lkpi_krealloc(void *, size_t, gfp_t);
+void lkpi_kfree(const void *);
+
+static inline gfp_t
+linux_check_m_flags(gfp_t flags)
+{
+ const gfp_t m = M_NOWAIT | M_WAITOK;
+
+ /* make sure either M_NOWAIT or M_WAITOK is set */
+ if ((flags & m) == 0)
+ flags |= M_NOWAIT;
+ else if ((flags & m) == m)
+ flags &= ~M_WAITOK;
+
+ /* mask away LinuxKPI specific flags */
+ return (flags & GFP_NATIVE_MASK);
+}
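A sketch of how the normalization above is typically exercised, assuming the usual LinuxKPI gfp.h definitions where GFP_KERNEL carries M_WAITOK and GFP_ATOMIC carries M_NOWAIT; the helper name is illustrative:

    static inline void *
    example_alloc(size_t size, bool can_sleep)
    {
        gfp_t flags;

        /*
         * linux_check_m_flags() leaves exactly one waitability bit set and
         * masks away LinuxKPI-only bits before calling malloc(9).
         */
        flags = can_sleep ? GFP_KERNEL : GFP_ATOMIC;
        return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
    }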
+
+/*
+ * Base functions with a native implementation.
+ */
+static inline void *
+kmalloc(size_t size, gfp_t flags)
+{
+ return (lkpi_kmalloc(size, flags));
+}
+
+static inline void *
+__kmalloc(size_t size, gfp_t flags)
+{
+ return (lkpi___kmalloc(size, flags));
+}
+
+static inline void *
+kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return (lkpi___kmalloc_node(size, flags, node));
+}
+
+static inline void *
+krealloc(void *ptr, size_t size, gfp_t flags)
+{
+ return (lkpi_krealloc(ptr, size, flags));
+}
+
+static inline void
+kfree(const void *ptr)
+{
+ lkpi_kfree(ptr);
+}
+
+/*
+ * Other k*alloc() functions using the above as the underlying allocator.
+ */
+/* kmalloc */
+static inline void *
+kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (WOULD_OVERFLOW(n, size))
+ panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+ return (kmalloc(size * n, flags));
+}
+
+static inline void *
+kcalloc(size_t n, size_t size, gfp_t flags)
+{
+ flags |= __GFP_ZERO;
+ return (kmalloc_array(n, size, flags));
+}
+
+/* kmalloc_node */
+static inline void *
+kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
+{
+ if (WOULD_OVERFLOW(n, size))
+ panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+ return (kmalloc_node(size * n, flags, node));
+}
+
+static inline void *
+kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+ flags |= __GFP_ZERO;
+ return (kmalloc_array_node(n, size, flags, node));
+}
+
+/* krealloc */
+static inline void *
+krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
+{
+ if (WOULD_OVERFLOW(n, size))
+ return (NULL);
+
+ return (krealloc(ptr, n * size, flags));
+}
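A sketch of growing an array with krealloc_array(), assuming (as in Linux) that the old allocation stays valid when NULL is returned; the helper name is illustrative:

    static inline uint32_t *
    example_grow_table(uint32_t *tbl, size_t oldn, size_t newn)
    {
        uint32_t *ntbl;

        ntbl = krealloc_array(tbl, newn, sizeof(*ntbl), GFP_KERNEL);
        if (ntbl == NULL)
            return (tbl);   /* Assume the old buffer is untouched on failure. */
        if (newn > oldn)
            memset(ntbl + oldn, 0, (newn - oldn) * sizeof(*ntbl));
        return (ntbl);
    }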
+
+/*
+ * vmalloc/kvalloc functions.
+ */
+static inline void *
+__vmalloc(size_t size, gfp_t flags, int other)
+{
+ return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
+}
+
+static inline void *
+__vmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return (malloc_domainset(size, M_KMALLOC,
+ linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+}
+
+static inline void *
+vmalloc_32(size_t size)
+{
+ return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
+}
+
+/* May return non-contiguous memory. */
+static inline void *
+kvmalloc(size_t size, gfp_t flags)
+{
+ return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
+}
+
+static inline void *
+kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (WOULD_OVERFLOW(n, size))
+ panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+ return (kvmalloc(size * n, flags));
+}
+
+static inline void *
+kvrealloc(const void *ptr, size_t oldsize, size_t newsize, gfp_t flags)
+{
+ void *newptr;
+
+ if (newsize <= oldsize)
+ return (__DECONST(void *, ptr));
+
+ newptr = kvmalloc(newsize, flags);
+ if (newptr != NULL) {
+ memcpy(newptr, ptr, oldsize);
+ kvfree(ptr);
+ }
+
+ return (newptr);
+}
+
+/*
+ * Misc.
+ */
+
+static __inline void
+kfree_sensitive(const void *ptr)
+{
+ if (ZERO_OR_NULL_PTR(ptr))
+ return;
+
+ zfree(__DECONST(void *, ptr), M_KMALLOC);
+}
+
+static inline size_t
+ksize(const void *ptr)
+{
+ return (malloc_usable_size(ptr));
+}
+
+static inline size_t
+kmalloc_size_roundup(size_t size)
+{
+ if (unlikely(size == 0 || size == SIZE_MAX))
+ return (size);
+ return (malloc_size(size));
+}
+
+#endif /* _LINUXKPI_LINUX_SLAB_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/smp.h b/sys/compat/linuxkpi/common/include/linux/smp.h
new file mode 100644
index 000000000000..b057953e6282
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/smp.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SMP_H_
+#define _LINUXKPI_LINUX_SMP_H_
+
+#include <asm/smp.h>
+
+/*
+ * Important note about the use of the function provided below:
+ *
+ * The callback function passed to on_each_cpu() is called from a
+ * so-called critical section; if you need a mutex there you will have
+ * to rewrite the code to use native FreeBSD mtx spinlocks instead of
+ * the spinlocks provided by the LinuxKPI! Be very careful not to call
+ * any LinuxKPI functions inside on_each_cpu()'s callback function,
+ * because they may sleep, unlike in native Linux.
+ *
+ * Enabling witness(4) when testing can catch such issues.
+ */
+#define on_each_cpu(cb, data, wait) ({ \
+ CTASSERT(wait); \
+ linux_on_each_cpu(cb, data); \
+})
+
+extern int linux_on_each_cpu(void (*)(void *), void *);
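A sketch of a callback that respects the constraints described above (no sleeping, no LinuxKPI spinlocks); it assumes <linux/atomic.h> is also available to the consumer, and the names are illustrative:

    static void
    example_per_cpu_cb(void *arg)
    {
        atomic_t *counter = arg;

        atomic_inc(counter);    /* Must not sleep inside the callback. */
    }

    static void
    example_count_cpus(atomic_t *counter)
    {
        on_each_cpu(example_per_cpu_cb, counter, 1);
    }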
+
+#endif /* _LINUXKPI_LINUX_SMP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h b/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..903053e7f6e8
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,62 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SOC_MEDIATEK_MTK_WED_H
+#define _LINUXKPI_LINUX_SOC_MEDIATEK_MTK_WED_H
+
+struct mtk_wed_device {
+};
+
+#define WED_WO_STA_REC 0x6
+
+#define mtk_wed_device_start(_dev, _mask) do { } while (0)
+#define mtk_wed_device_detach(_dev) do { } while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do { } while (0)
+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) (-ENODEV)
+#define mtk_wed_device_dma_reset(_dev) do { } while (0)
+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _entry) \
+ do { } while (0)
+#define mtk_wed_device_stop(_dev) do { } while (0)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _b) do { } while (0)
+#define mtk_wed_device_setup_tc(_dev, _ndev, _type, _tdata) (-EOPNOTSUPP)
+
+static inline bool
+mtk_wed_device_active(struct mtk_wed_device *dev __unused)
+{
+
+ return (false);
+}
+
+static inline bool
+mtk_wed_get_rx_capa(struct mtk_wed_device *dev __unused)
+{
+
+ return (false);
+}
+
+#endif /* _LINUXKPI_LINUX_SOC_MEDIATEK_MTK_WED_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/soc/qcom/qmi.h b/sys/compat/linuxkpi/common/include/linux/soc/qcom/qmi.h
new file mode 100644
index 000000000000..647eaf271d87
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/soc/qcom/qmi.h
@@ -0,0 +1,173 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SOC_QCOM_QMI_H
+#define _LINUXKPI_LINUX_SOC_QCOM_QMI_H
+
+/* QMI (Qualcomm MSM Interface) */
+
+#include <linux/qrtr.h>
+
+enum soc_qcom_qmi_data_type {
+ QMI_EOTI,
+ QMI_DATA_LEN,
+ QMI_OPT_FLAG,
+ QMI_UNSIGNED_1_BYTE,
+ QMI_UNSIGNED_2_BYTE,
+ QMI_UNSIGNED_4_BYTE,
+ QMI_UNSIGNED_8_BYTE,
+ QMI_SIGNED_4_BYTE_ENUM,
+ QMI_STRUCT,
+ QMI_STRING,
+};
+
+#define QMI_RESULT_SUCCESS_V01 __LINE__
+#define QMI_INDICATION __LINE__
+
+struct qmi_handle;
+
+enum soc_qcom_qmi_array_type {
+ NO_ARRAY,
+ STATIC_ARRAY,
+ VAR_LEN_ARRAY,
+};
+
+/* Should this become an enum? */
+#define QMI_COMMON_TLV_TYPE 0
+
+struct qmi_elem_info {
+ enum soc_qcom_qmi_data_type data_type;
+ uint32_t elem_len;
+ uint32_t elem_size;
+ enum soc_qcom_qmi_array_type array_type;
+ uint8_t tlv_type;
+ uint32_t offset;
+ const struct qmi_elem_info *ei_array;
+};
+
+struct qmi_response_type_v01 {
+ uint16_t result;
+ uint16_t error;
+};
+
+struct qmi_txn {
+};
+
+struct qmi_service {
+ uint32_t node;
+ uint32_t port;
+};
+
+struct qmi_msg_handler {
+ uint32_t type;
+ uint32_t msg_id;
+ const struct qmi_elem_info *ei;
+ size_t decoded_size;
+ void (*fn)(struct qmi_handle *, struct sockaddr_qrtr *, struct qmi_txn *, const void *);
+};
+
+struct qmi_ops {
+ int (*new_server)(struct qmi_handle *, struct qmi_service *);
+ void (*del_server)(struct qmi_handle *, struct qmi_service *);
+};
+
+struct qmi_handle {
+ int sock;
+
+ const struct qmi_msg_handler *handler;
+ struct qmi_ops ops;
+};
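A hypothetical handler-table sketch for the structures above; the message id, decoded struct and callback are illustrative and do not belong to any real QMI service:

    struct example_ind_msg {
        uint8_t status;
    };

    static void
    example_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
        struct qmi_txn *txn, const void *data)
    {
        const struct example_ind_msg *msg = data;

        (void)msg->status;      /* A real handler would act on the message. */
    }

    static const struct qmi_msg_handler example_handlers[] = {
        {
            .type = QMI_INDICATION,
            .msg_id = 0x0038,   /* Hypothetical message id. */
            .ei = NULL,         /* Would point at an qmi_elem_info table. */
            .decoded_size = sizeof(struct example_ind_msg),
            .fn = example_ind_cb,
        },
    };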
+
+
+/* XXX-TODO need implementation somewhere... it is not in ath1xk* */
+extern struct qmi_elem_info qmi_response_type_v01_ei[];
+
+static inline int
+qmi_handle_init(struct qmi_handle *handle, size_t resp_len_max,
+ const struct qmi_ops *ops, const struct qmi_msg_handler *handler)
+{
+
+ handle->handler = handler;
+ if (ops != NULL)
+ handle->ops = *ops;
+
+ /* We will find out what else to do here. */
+ /* XXX TODO */
+
+ return (0);
+}
+
+static __inline int
+qmi_add_lookup(struct qmi_handle *handle, uint32_t service, uint32_t version,
+ uint32_t service_ins_id)
+{
+
+ /* XXX TODO */
+ return (0);
+}
+
+static __inline void
+qmi_handle_release(struct qmi_handle *handle)
+{
+
+ /* XXX TODO */
+}
+
+static __inline int
+qmi_send_request(struct qmi_handle *handle, void *x, struct qmi_txn *txn,
+ uint32_t msg_id, size_t len, const struct qmi_elem_info *ei, void *req)
+{
+
+ /* XXX TODO */
+ return (-ENXIO);
+}
+
+static __inline void
+qmi_txn_cancel(struct qmi_txn *txn)
+{
+
+ /* XXX TODO */
+}
+
+static __inline int
+qmi_txn_init(struct qmi_handle *handle, struct qmi_txn *txn,
+ const struct qmi_elem_info *ei, void *resp)
+{
+
+ /* XXX TODO */
+ return (-ENXIO);
+}
+
+static __inline int
+qmi_txn_wait(struct qmi_txn *txn, uint64_t jiffies)
+{
+
+ /* XXX TODO */
+ return (-ENXIO);
+}
+
+#endif /* _LINUXKPI_LINUX_SOC_QCOM_QMI_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/socket.h b/sys/compat/linuxkpi/common/include/linux/socket.h
new file mode 100644
index 000000000000..638ee058c2f5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/socket.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SOCKET_H_
+#define _LINUXKPI_LINUX_SOCKET_H_
+
+#include <sys/socket.h>
+
+#define AF_QIPCRTR 42
+
+static inline int
+kernel_connect(int sd, struct sockaddr *sa, size_t salen, int flags)
+{
+
+ /* kern_connectat()? It is used for sockaddr_qrtr by ath1xk/qmi. */
+ pr_debug("%s: TODO\n", __func__);
+ return (-EINVAL);
+}
+
+#ifdef notyet
+static inline int
+memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len)
+{
+ struct uio uio;
+ int error;
+
+ uio.uio_iov = v;
+ uio.uio_iovcnt = -1;
+ uio.uio_offset = 0;
+ uio.uio_resid = len;
+ uio.uio_segflg = UIO_USERSPACE;
+ uio.uio_rw = UIO_READ;
+ error = -uiomove(kdata, len, &uio);
+ return (error);
+}
+
+static inline int
+memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
+{
+ struct uio uio;
+ int error;
+
+ uio.uio_iov = iov;
+ uio.uio_iovcnt = -1;
+ uio.uio_offset = 0;
+ uio.uio_resid = len;
+ uio.uio_segflg = UIO_USERSPACE;
+ uio.uio_rw = UIO_WRITE;
+ error = -uiomove(kdata, len, &uio);
+ return (error);
+#endif
+
+#endif /* _LINUXKPI_LINUX_SOCKET_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/sort.h b/sys/compat/linuxkpi/common/include/linux/sort.h
new file mode 100644
index 000000000000..e6196d1f41c7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/sort.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SORT_H_
+#define _LINUXKPI_LINUX_SORT_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <sys/libkern.h>
+
+#define sort(base, num, size, cmp, swap) do { \
+ BUILD_BUG_ON_ZERO(swap); \
+ qsort(base, num, size, cmp); \
+} while (0)
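A sketch of using the sort() shim above; drivers are assumed to pass a NULL swap callback, and the names are illustrative:

    static int
    example_cmp_int(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;

        return ((x > y) - (x < y));     /* Avoids overflow of x - y. */
    }

    static void
    example_sort(int *arr, size_t n)
    {
        sort(arr, n, sizeof(arr[0]), example_cmp_int, NULL);
    }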
+
+#endif /* _LINUXKPI_LINUX_SORT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/spinlock.h b/sys/compat/linuxkpi/common/include/linux/spinlock.h
new file mode 100644
index 000000000000..dc10b0457153
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/spinlock.h
@@ -0,0 +1,181 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SPINLOCK_H_
+#define _LINUXKPI_LINUX_SPINLOCK_H_
+
+#include <asm/atomic.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kdb.h>
+
+#include <linux/compiler.h>
+#include <linux/rwlock.h>
+#include <linux/bottom_half.h>
+#include <linux/lockdep.h>
+
+typedef struct mtx spinlock_t;
+
+/*
+ * If CONFIG_SPIN_SKIP is defined, LinuxKPI spinlocks and asserts are
+ * skipped during panic(). It is disabled by default for performance
+ * reasons.
+ */
+#ifdef CONFIG_SPIN_SKIP
+#define SPIN_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
+#else
+#define SPIN_SKIP(void) 0
+#endif
+
+#define spin_lock(_l) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ mtx_lock(_l); \
+ local_bh_disable(); \
+} while (0)
+
+#define spin_lock_bh(_l) do { \
+ spin_lock(_l); \
+ local_bh_disable(); \
+} while (0)
+
+#define spin_lock_irq(_l) do { \
+ spin_lock(_l); \
+} while (0)
+
+#define spin_unlock(_l) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ local_bh_enable(); \
+ mtx_unlock(_l); \
+} while (0)
+
+#define spin_unlock_bh(_l) do { \
+ local_bh_enable(); \
+ spin_unlock(_l); \
+} while (0)
+
+#define spin_unlock_irq(_l) do { \
+ spin_unlock(_l); \
+} while (0)
+
+#define spin_trylock(_l) ({ \
+ int __ret; \
+ if (SPIN_SKIP()) { \
+ __ret = 1; \
+ } else { \
+ __ret = mtx_trylock(_l); \
+ if (likely(__ret != 0)) \
+ local_bh_disable(); \
+ } \
+ __ret; \
+})
+
+#define spin_trylock_irq(_l) \
+ spin_trylock(_l)
+
+#define spin_trylock_irqsave(_l, flags) ({ \
+ (flags) = 0; \
+ spin_trylock(_l); \
+})
+
+#define spin_lock_nested(_l, _n) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ mtx_lock_flags(_l, MTX_DUPOK); \
+ local_bh_disable(); \
+} while (0)
+
+#define spin_lock_irqsave(_l, flags) do { \
+ (flags) = 0; \
+ spin_lock(_l); \
+} while (0)
+
+#define spin_lock_irqsave_nested(_l, flags, _n) do { \
+ (flags) = 0; \
+ spin_lock_nested(_l, _n); \
+} while (0)
+
+#define spin_unlock_irqrestore(_l, flags) do { \
+ (void)(flags); \
+ spin_unlock(_l); \
+} while (0)
+
+#ifdef WITNESS_ALL
+/* NOTE: the maximum WITNESS name is 64 chars */
+#define __spin_lock_name(name, file, line) \
+ (((const char *){file ":" #line "-" name}) + \
+ (sizeof(file) > 16 ? sizeof(file) - 16 : 0))
+#else
+#define __spin_lock_name(name, file, line) name
+#endif
+#define _spin_lock_name(...) __spin_lock_name(__VA_ARGS__)
+#define spin_lock_name(name) _spin_lock_name(name, __FILE__, __LINE__)
+
+#define spin_lock_init(lock) mtx_init(lock, spin_lock_name("lnxspin"), \
+ NULL, MTX_DEF | MTX_NOWITNESS | MTX_NEW)
+
+#define spin_lock_destroy(_l) mtx_destroy(_l)
+
+#define DEFINE_SPINLOCK(lock) \
+ spinlock_t lock; \
+ MTX_SYSINIT(lock, &lock, spin_lock_name("lnxspin"), MTX_DEF)
+
+#define assert_spin_locked(_l) do { \
+ if (SPIN_SKIP()) \
+ break; \
+ mtx_assert(_l, MA_OWNED); \
+} while (0)
+
+#define local_irq_save(flags) do { \
+ (flags) = 0; \
+} while (0)
+
+#define local_irq_restore(flags) do { \
+ (void)(flags); \
+} while (0)
+
+#define atomic_dec_and_lock_irqsave(cnt, lock, flags) \
+ _atomic_dec_and_lock_irqsave(cnt, lock, &(flags))
+static inline int
+_atomic_dec_and_lock_irqsave(atomic_t *cnt, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (atomic_add_unless(cnt, -1, 1))
+ return (0);
+
+ spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(cnt))
+ return (1);
+ spin_unlock_irqrestore(lock, *flags);
+ return (0);
+}
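A sketch of the common init/lock pattern built on the macros above; "flags" is accepted for Linux API compatibility but carries no state in this implementation, and the structure name is illustrative:

    struct example_softc {
        spinlock_t lock;
        int count;
    };

    static void
    example_init(struct example_softc *sc)
    {
        spin_lock_init(&sc->lock);
    }

    static void
    example_bump(struct example_softc *sc)
    {
        unsigned long flags;

        spin_lock_irqsave(&sc->lock, flags);
        sc->count++;
        spin_unlock_irqrestore(&sc->lock, flags);
    }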
+
+#endif /* _LINUXKPI_LINUX_SPINLOCK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/srcu.h b/sys/compat/linuxkpi/common/include/linux/srcu.h
new file mode 100644
index 000000000000..b42c28a1311b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/srcu.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2015-2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SRCU_H_
+#define _LINUXKPI_LINUX_SRCU_H_
+
+#include <linux/compiler.h>
+
+struct srcu_struct {
+};
+
+#define srcu_dereference(p, srcu) \
+ ((__typeof(*(p)) *)READ_ONCE(p))
+
+#define DEFINE_STATIC_SRCU(name) \
+ static struct srcu_struct name
+
+/* prototypes */
+
+extern int srcu_read_lock(struct srcu_struct *);
+extern void srcu_read_unlock(struct srcu_struct *, int index);
+extern void synchronize_srcu(struct srcu_struct *);
+extern void srcu_barrier(struct srcu_struct *);
+extern int init_srcu_struct(struct srcu_struct *);
+extern void cleanup_srcu_struct(struct srcu_struct *);
+
+#define synchronize_srcu_expedited(srcu) do { \
+ synchronize_srcu(srcu); \
+} while (0)
+
+#endif /* _LINUXKPI_LINUX_SRCU_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/stackdepot.h b/sys/compat/linuxkpi/common/include/linux/stackdepot.h
new file mode 100644
index 000000000000..df223d46be6e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/stackdepot.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_STACKDEPOT_H_
+#define _LINUXKPI_LINUX_STACKDEPOT_H_
+
+typedef bool depot_stack_handle_t;
+
+#endif /* _LINUXKPI_LINUX_STACKDEPOT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/stdarg.h b/sys/compat/linuxkpi/common/include/linux/stdarg.h
new file mode 100644
index 000000000000..698ac45e9198
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/stdarg.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_STDARG_H_
+#define _LINUXKPI_STDARG_H_
+
+#include <sys/stdarg.h>
+
+#endif /* _LINUXKPI_STDARG_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/stddef.h b/sys/compat/linuxkpi/common/include/linux/stddef.h
new file mode 100644
index 000000000000..d04a5a4bf516
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/stddef.h
@@ -0,0 +1,31 @@
+/* Public domain */
+
+#ifndef _LINUXKPI_LINUX_STDDEF_H_
+#define _LINUXKPI_LINUX_STDDEF_H_
+
+#include <sys/stddef.h>
+
+/*
+ * FreeBSD has multiple (vendor) drivers containing copies of this
+ * and including LinuxKPI headers. Put the #defines behind guards.
+ */
+
+#ifndef __struct_group
+#define __struct_group(_tag, _name, _attrs, _members...) \
+ union { \
+ struct { _members } _attrs; \
+ struct _tag { _members } _attrs _name; \
+ } _attrs
+#endif
+
+#ifndef struct_group
+#define struct_group(_name, _members...) \
+ __struct_group(/* no tag */, _name, /* no attrs */, _members)
+#endif
+
+#ifndef struct_group_tagged
+#define struct_group_tagged(_tag, _name, _members...) \
+ __struct_group(_tag, _name, /* no attrs */, _members)
+#endif
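A sketch of what struct_group() provides: "hdr" names the grouped members as one unit that can be copied or zeroed together while each member stays directly addressable; the structure is illustrative:

    struct example_pkt {
        struct_group(hdr,
            uint8_t type;
            uint8_t len;
        );
        uint8_t payload[32];
    };

    /* memcpy(&pkt.hdr, src, sizeof(pkt.hdr)) copies type and len together. */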
+
+#endif /* _LINUXKPI_LINUX_STDDEF_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/string.h b/sys/compat/linuxkpi/common/include/linux/string.h
new file mode 100644
index 000000000000..f7b64560d254
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/string.h
@@ -0,0 +1,329 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_STRING_H_
+#define _LINUXKPI_LINUX_STRING_H_
+
+#include <sys/ctype.h>
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <linux/bitops.h> /* for BITS_PER_LONG */
+#include <linux/overflow.h>
+#include <linux/stdarg.h>
+
+#include <sys/libkern.h>
+
+#define strnicmp(...) strncasecmp(__VA_ARGS__)
+
+static inline int
+match_string(const char *const *table, int n, const char *key)
+{
+ int i;
+
+ for (i = 0; i != n && table[i] != NULL; i++) {
+ if (strcmp(table[i], key) == 0)
+ return (i);
+ }
+ return (-EINVAL);
+}
+
+static inline void *
+memdup_user(const void *ptr, size_t len)
+{
+ void *retval;
+ int error;
+
+ retval = malloc(len, M_KMALLOC, M_WAITOK);
+ error = linux_copyin(ptr, retval, len);
+ if (error != 0) {
+ free(retval, M_KMALLOC);
+ return (ERR_PTR(error));
+ }
+ return (retval);
+}
+
+static inline void *
+memdup_user_nul(const void *ptr, size_t len)
+{
+ char *retval;
+ int error;
+
+ retval = malloc(len + 1, M_KMALLOC, M_WAITOK);
+ error = linux_copyin(ptr, retval, len);
+ if (error != 0) {
+ free(retval, M_KMALLOC);
+ return (ERR_PTR(error));
+ }
+ retval[len] = '\0';
+ return (retval);
+}
+
+static inline void *
+kmemdup(const void *src, size_t len, gfp_t gfp)
+{
+ void *dst;
+
+ dst = kmalloc(len, gfp);
+ if (dst != NULL)
+ memcpy(dst, src, len);
+ return (dst);
+}
+
+/* See slab.h for kvmalloc/kvfree(). */
+static inline void *
+kvmemdup(const void *src, size_t len, gfp_t gfp)
+{
+ void *dst;
+
+ dst = kvmalloc(len, gfp);
+ if (dst != NULL)
+ memcpy(dst, src, len);
+ return (dst);
+}
+
+static inline char *
+strndup_user(const char __user *ustr, long n)
+{
+ if (n < 1)
+ return (ERR_PTR(-EINVAL));
+
+ return (memdup_user_nul(ustr, n - 1));
+}
+
+static inline char *
+kstrdup(const char *string, gfp_t gfp)
+{
+ char *retval;
+ size_t len;
+
+ if (string == NULL)
+ return (NULL);
+ len = strlen(string) + 1;
+ retval = kmalloc(len, gfp);
+ if (retval != NULL)
+ memcpy(retval, string, len);
+ return (retval);
+}
+
+static inline char *
+kstrndup(const char *string, size_t len, gfp_t gfp)
+{
+ char *retval;
+
+ if (string == NULL)
+ return (NULL);
+ retval = kmalloc(len + 1, gfp);
+ if (retval != NULL) {
+ strncpy(retval, string, len);
+ retval[len] = '\0'; /* strncpy() does not NUL-terminate on truncation. */
+ }
+ return (retval);
+}
+
+static inline const char *
+kstrdup_const(const char *src, gfp_t gfp)
+{
+ return (kmemdup(src, strlen(src) + 1, gfp));
+}
+
+static inline char *
+skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (__DECONST(char *, str));
+}
+
+/*
+ * Trim trailing whitespace and return a pointer to the first
+ * non-whitespace character.
+ */
+static inline char *
+strim(char *str)
+{
+ char *end;
+
+ end = str + strlen(str);
+ while (end >= str && (*end == '\0' || isspace(*end))) {
+ *end = '\0';
+ end--;
+ }
+
+ return (skip_spaces(str));
+}
+
+static inline void *
+memchr_inv(const void *start, int c, size_t length)
+{
+ const u8 *ptr;
+ const u8 *end;
+ u8 ch;
+
+ ch = c;
+ ptr = start;
+ end = ptr + length;
+
+ while (ptr != end) {
+ if (*ptr != ch)
+ return (__DECONST(void *, ptr));
+ ptr++;
+ }
+ return (NULL);
+}
+
+static inline size_t
+str_has_prefix(const char *str, const char *prefix)
+{
+ size_t len;
+
+ len = strlen(prefix);
+ return (strncmp(str, prefix, len) == 0 ? len : 0);
+}
+
+static inline char *
+strreplace(char *str, char old, char new)
+{
+ char *p;
+
+ p = strchrnul(str, old);
+ while (p != NULL && *p != '\0') {
+ *p = new;
+ p = strchrnul(str, old);
+ }
+ return (p);
+}
+
+static inline ssize_t
+strscpy(char* dst, const char* src, size_t len)
+{
+ size_t i;
+
+ if (len <= INT_MAX) {
+ for (i = 0; i < len; i++)
+ if ('\0' == (dst[i] = src[i]))
+ return ((ssize_t)i);
+ if (i != 0)
+ dst[--i] = '\0';
+ }
+
+ return (-E2BIG);
+}
+
+static inline ssize_t
+strscpy_pad(char* dst, const char* src, size_t len)
+{
+
+ bzero(dst, len);
+
+ return (strscpy(dst, src, len));
+}
+
+static inline char *
+strnchr(const char *cp, size_t n, int ch)
+{
+ char *p;
+
+ for (p = __DECONST(char *, cp); n--; ++p) {
+ if (*p == ch)
+ return (p);
+ if (*p == '\0')
+ break;
+ }
+
+ return (NULL);
+}
+
+static inline void *
+memset32(uint32_t *b, uint32_t c, size_t len)
+{
+ uint32_t *dst = b;
+
+ while (len--)
+ *dst++ = c;
+ return (b);
+}
+
+static inline void *
+memset64(uint64_t *b, uint64_t c, size_t len)
+{
+ uint64_t *dst = b;
+
+ while (len--)
+ *dst++ = c;
+ return (b);
+}
+
+static inline void *
+memset_p(void **p, void *v, size_t n)
+{
+
+ if (BITS_PER_LONG == 32)
+ return (memset32((uint32_t *)p, (uintptr_t)v, n));
+ else
+ return (memset64((uint64_t *)p, (uintptr_t)v, n));
+}
+
+static inline void
+memcpy_and_pad(void *dst, size_t dstlen, const void *src, size_t len, int ch)
+{
+
+ if (len >= dstlen) {
+ memcpy(dst, src, dstlen);
+ } else {
+ memcpy(dst, src, len);
+ /* Pad with given padding character. */
+ memset((char *)dst + len, ch, dstlen - len);
+ }
+}
+
+#define memset_startat(ptr, bytepat, smember) \
+({ \
+ uint8_t *_ptr = (uint8_t *)(ptr); \
+ int _c = (int)(bytepat); \
+ size_t _o = offsetof(typeof(*(ptr)), smember); \
+ memset(_ptr + _o, _c, sizeof(*(ptr)) - _o); \
+})
+
+#define memset_after(ptr, bytepat, smember) \
+({ \
+ uint8_t *_ptr = (uint8_t *)(ptr); \
+ int _c = (int)(bytepat); \
+ size_t _o = offsetofend(typeof(*(ptr)), smember); \
+ memset(_ptr + _o, _c, sizeof(*(ptr)) - _o); \
+})
+
+static inline void
+memzero_explicit(void *p, size_t s)
+{
+ memset(p, 0, s);
+ __asm__ __volatile__("": :"r"(p) :"memory");
+}
+
+#endif /* _LINUXKPI_LINUX_STRING_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/string_helpers.h b/sys/compat/linuxkpi/common/include/linux/string_helpers.h
new file mode 100644
index 000000000000..1bdff2730361
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/string_helpers.h
@@ -0,0 +1,69 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_STRING_HELPERS_H_
+#define _LINUXKPI_LINUX_STRING_HELPERS_H_
+
+#include <sys/types.h>
+
+static inline const char *
+str_yes_no(bool value)
+{
+ if (value)
+ return "yes";
+ else
+ return "no";
+}
+
+static inline const char *
+str_on_off(bool value)
+{
+ if (value)
+ return "on";
+ else
+ return "off";
+}
+
+static inline const char *
+str_enabled_disabled(bool value)
+{
+ if (value)
+ return "enabled";
+ else
+ return "disabled";
+}
+
+static inline const char *
+str_enable_disable(bool value)
+{
+ if (value)
+ return "enable";
+ else
+ return "disable";
+}
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/stringify.h b/sys/compat/linuxkpi/common/include/linux/stringify.h
new file mode 100644
index 000000000000..9345bdc441aa
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/stringify.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_STRINGIFY_H_
+#define _LINUXKPI_LINUX_STRINGIFY_H_
+
+#include <sys/cdefs.h>
+
+#define ___stringify(...) #__VA_ARGS__
+#define __stringify(...) ___stringify(__VA_ARGS__)
+
+#endif /* _LINUXKPI_LINUX_STRINGIFY_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/suspend.h b/sys/compat/linuxkpi/common/include/linux/suspend.h
new file mode 100644
index 000000000000..dacecbebdc08
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/suspend.h
@@ -0,0 +1,23 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_LINUX_SUSPEND_H_
+#define _LINUXKPI_LINUX_SUSPEND_H_
+
+typedef int suspend_state_t;
+
+extern suspend_state_t pm_suspend_target_state;
+
+#define PM_SUSPEND_ON 0
+#define PM_SUSPEND_TO_IDLE 1
+#define PM_SUSPEND_STANDBY 2
+#define PM_SUSPEND_MEM 3
+#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
+#define PM_SUSPEND_MAX 4
+
+static inline int
+pm_suspend_via_firmware(void)
+{
+ return 0;
+}
+
+#endif /* _LINUXKPI_LINUX_SUSPEND_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/swap.h b/sys/compat/linuxkpi/common/include/linux/swap.h
new file mode 100644
index 000000000000..5828db7ae392
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/swap.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_SWAP_H_
+#define _LINUXKPI_LINUX_SWAP_H_
+
+#include <sys/param.h>
+#include <sys/domainset.h>
+#include <sys/queue.h>
+#include <sys/proc.h>
+#include <sys/pcpu.h>
+
+#include <vm/vm.h>
+#include <vm/swap_pager.h>
+#include <vm/vm_pageout.h>
+
+#include <linux/pagemap.h>
+#include <linux/page-flags.h>
+
+static inline long
+get_nr_swap_pages(void)
+{
+ int i, j;
+
+ /* NB: This could be done cheaply by obtaining swap_total directly */
+ swap_pager_status(&i, &j);
+ return i - j;
+}
+
+static inline int
+current_is_kswapd(void)
+{
+
+ return (curproc == pageproc);
+}
+
+static inline void
+folio_mark_accessed(struct folio *folio)
+{
+ mark_page_accessed(&folio->page);
+}
+
+static inline void
+check_move_unevictable_folios(struct folio_batch *fbatch)
+{
+}
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/sysfs.h b/sys/compat/linuxkpi/common/include/linux/sysfs.h
new file mode 100644
index 000000000000..65e023031bb2
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/sysfs.h
@@ -0,0 +1,487 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_SYSFS_H_
+#define _LINUXKPI_LINUX_SYSFS_H_
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+
+#include <linux/kobject.h>
+#include <linux/stringify.h>
+#include <linux/mm.h>
+
+struct sysfs_ops {
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *,
+ size_t);
+};
+
+struct attribute_group {
+ const char *name;
+ mode_t (*is_visible)(struct kobject *,
+ struct attribute *, int);
+ struct attribute **attrs;
+};
+
+struct bin_attribute {
+ struct attribute attr;
+ size_t size;
+ ssize_t (*read)(struct linux_file *, struct kobject *,
+ struct bin_attribute *, char *, loff_t, size_t);
+ ssize_t (*write)(struct linux_file *, struct kobject *,
+ struct bin_attribute *, char *, loff_t, size_t);
+};
+
+#define __ATTR(_name, _mode, _show, _store) { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show, .store = _store, \
+}
+#define __ATTR_RO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _name##_show, \
+}
+#define __ATTR_WO(_name) __ATTR(_name, 0200, NULL, _name##_store)
+#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
+#define __ATTR_NULL { .attr = { .name = NULL } }
+
+#define ATTRIBUTE_GROUPS(_name) \
+ static struct attribute_group _name##_group = { \
+ .name = __stringify(_name), \
+ .attrs = _name##_attrs, \
+ }; \
+ static const struct attribute_group *_name##_groups[] = { \
+ &_name##_group, \
+ NULL, \
+ }
+
+#define __BIN_ATTR(_name, _mode, _read, _write, _size) { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .read = _read, .write = _write, .size = _size, \
+}
+#define __BIN_ATTR_RO(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .read = _name##_read, .size = _size, \
+}
+#define __BIN_ATTR_WO(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = 0200 }, \
+ .write = _name##_write, .size = _size, \
+}
+#define __BIN_ATTR_WR(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = 0644 }, \
+ .read = _name##_read, .write = _name##_write, .size = _size, \
+}
+
+#define BIN_ATTR(_name, _mode, _read, _write, _size) \
+struct bin_attribute bin_attr_##_name = \
+ __BIN_ATTR(_name, _mode, _read, _write, _size);
+
+#define BIN_ATTR_RO(_name, _size) \
+struct bin_attribute bin_attr_##_name = \
+ __BIN_ATTR_RO(_name, _size);
+
+#define BIN_ATTR_WO(_name, _size) \
+struct bin_attribute bin_attr_##_name = \
+ __BIN_ATTR_WO(_name, _size);
+
+#define BIN_ATTR_WR(_name, _size) \
+struct bin_attribute bin_attr_##_name = \
+ __BIN_ATTR_WR(_name, _size);
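Not part of the patch: a hedged sketch of the BIN_ATTR_RO() helper defined above. The "fwblob" name, the 4096-byte size and the empty read callback are hypothetical; a real callback would copy up to "count" bytes starting at "off" into "buf" and return the number of bytes produced.

static ssize_t
fwblob_read(struct linux_file *filp, struct kobject *kobj,
    struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
	/* Nothing to report in this sketch. */
	return (0);
}

/* Declares "struct bin_attribute bin_attr_fwblob" wired to fwblob_read();
 * the macro above already supplies the terminating semicolon. */
BIN_ATTR_RO(fwblob, 4096)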
+
+/*
+ * Handle our generic '\0' terminated 'C' string.
+ * Two cases:
+ * a variable string: point arg1 at it, arg2 is max length.
+ * a constant string: point arg1 at it, arg2 is zero.
+ */
+
+static inline int
+sysctl_handle_attr(SYSCTL_HANDLER_ARGS)
+{
+ struct kobject *kobj;
+ struct attribute *attr;
+ const struct sysfs_ops *ops;
+ char *buf;
+ int error;
+ ssize_t len;
+
+ kobj = arg1;
+ attr = (struct attribute *)(intptr_t)arg2;
+ if (kobj->ktype == NULL || kobj->ktype->sysfs_ops == NULL)
+ return (ENODEV);
+ buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (buf == NULL)
+ return (ENOMEM);
+ ops = kobj->ktype->sysfs_ops;
+ if (ops->show) {
+ len = ops->show(kobj, attr, buf);
+ /*
+ * It's valid to not have a 'show' so just return an
+ * empty string.
+ */
+ if (len < 0) {
+ error = -len;
+ if (error != EIO)
+ goto out;
+ buf[0] = '\0';
+ } else if (len) {
+ len--;
+ if (len >= PAGE_SIZE)
+ len = PAGE_SIZE - 1;
+ /* Trim trailing newline. */
+ buf[len] = '\0';
+ }
+ }
+
+ /* Leave one trailing byte to append a newline. */
+ error = sysctl_handle_string(oidp, buf, PAGE_SIZE - 1, req);
+ if (error != 0 || req->newptr == NULL || ops->store == NULL)
+ goto out;
+ len = strlcat(buf, "\n", PAGE_SIZE);
+ KASSERT(len < PAGE_SIZE, ("new attribute truncated"));
+ len = ops->store(kobj, attr, buf, len);
+ if (len < 0)
+ error = -len;
+out:
+ free_page((unsigned long)buf);
+
+ return (error);
+}
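Not part of the patch: a hedged sketch of a show() callback matching the sysfs_ops signature declared above, which is what sysctl_handle_attr() ends up calling. The "temp_show" name and the value are hypothetical; the callback fills the zeroed page with a '\0'-terminated, newline-ended string, and the handler trims that newline before passing the buffer to sysctl_handle_string().

static ssize_t
temp_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	int temp_mC = 42000;	/* hypothetical sensor reading */

	/* Linux convention: newline-terminated text, at most one page. */
	return (snprintf(buf, PAGE_SIZE, "%d\n", temp_mC));
}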
+
+static inline int
+sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
+{
+ struct sysctl_oid *oid;
+
+ oid = SYSCTL_ADD_OID(NULL, SYSCTL_CHILDREN(kobj->oidp), OID_AUTO,
+ attr->name, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE, kobj,
+ (uintptr_t)attr, sysctl_handle_attr, "A", "");
+ if (!oid) {
+ return (-ENOMEM);
+ }
+
+ return (0);
+}
+
+static inline void
+sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
+{
+
+ if (kobj->oidp)
+ sysctl_remove_name(kobj->oidp, attr->name, 1, 1);
+}
+
+static inline int
+sysctl_handle_bin_attr(SYSCTL_HANDLER_ARGS)
+{
+ struct kobject *kobj;
+ struct bin_attribute *attr;
+ char *buf;
+ int error;
+ ssize_t len;
+
+ kobj = arg1;
+ attr = (struct bin_attribute *)(intptr_t)arg2;
+ if (kobj->ktype == NULL || kobj->ktype->sysfs_ops == NULL)
+ return (ENODEV);
+ buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ if (attr->read) {
+ len = attr->read(
+ NULL, /* <-- struct file, unimplemented */
+ kobj, attr, buf, req->oldidx, PAGE_SIZE);
+ if (len < 0) {
+ error = -len;
+ if (error != EIO)
+ goto out;
+ }
+ }
+
+ error = sysctl_handle_opaque(oidp, buf, PAGE_SIZE, req);
+ if (error != 0 || req->newptr == NULL || attr->write == NULL)
+ goto out;
+
+ len = attr->write(
+ NULL, /* <-- struct file, unimplemented */
+ kobj, attr, buf, req->newidx, req->newlen);
+ if (len < 0)
+ error = -len;
+out:
+ free_page((unsigned long)buf);
+
+ return (error);
+}
+
+static inline int
+sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr)
+{
+ struct sysctl_oid *oid;
+ int ctlflags;
+
+ ctlflags = CTLTYPE_OPAQUE | CTLFLAG_MPSAFE;
+ if (attr->attr.mode & (S_IRUSR | S_IWUSR))
+ ctlflags |= CTLFLAG_RW;
+ else if (attr->attr.mode & S_IRUSR)
+ ctlflags |= CTLFLAG_RD;
+ else if (attr->attr.mode & S_IWUSR)
+ ctlflags |= CTLFLAG_WR;
+
+ oid = SYSCTL_ADD_OID(NULL, SYSCTL_CHILDREN(kobj->oidp), OID_AUTO,
+ attr->attr.name, ctlflags, kobj,
+ (uintptr_t)attr, sysctl_handle_bin_attr, "", "");
+ if (oid == NULL)
+ return (-ENOMEM);
+
+ return (0);
+}
+
+static inline void
+sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr)
+{
+
+ if (kobj->oidp)
+ sysctl_remove_name(kobj->oidp, attr->attr.name, 1, 1);
+}
+
+static inline int
+sysfs_create_link(struct kobject *kobj __unused,
+ struct kobject *target __unused, const char *name __unused)
+{
+ /* TODO */
+
+ return (0);
+}
+
+static inline void
+sysfs_remove_link(struct kobject *kobj, const char *name)
+{
+ /* TODO (along with sysfs_create_link) */
+}
+
+static inline int
+sysfs_create_files(struct kobject *kobj, const struct attribute * const *attrs)
+{
+ int error = 0;
+ int i;
+
+ for (i = 0; attrs[i] && !error; i++)
+ error = sysfs_create_file(kobj, attrs[i]);
+ while (error && --i >= 0)
+ sysfs_remove_file(kobj, attrs[i]);
+
+ return (error);
+}
+
+static inline void
+sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++)
+ sysfs_remove_file(kobj, attrs[i]);
+}
+
+static inline int
+sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp)
+{
+ struct attribute **attr;
+ struct sysctl_oid *oidp;
+
+ /* Don't create the group node if grp->name is undefined. */
+ if (grp->name)
+ oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(kobj->oidp),
+ OID_AUTO, grp->name, CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, grp->name);
+ else
+ oidp = kobj->oidp;
+ for (attr = grp->attrs; *attr != NULL; attr++) {
+ SYSCTL_ADD_OID(NULL, SYSCTL_CHILDREN(oidp), OID_AUTO,
+ (*attr)->name, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ kobj, (uintptr_t)*attr, sysctl_handle_attr, "A", "");
+ }
+
+ return (0);
+}
+
+static inline void
+sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp)
+{
+
+ if (kobj->oidp)
+ sysctl_remove_name(kobj->oidp, grp->name, 1, 1);
+}
+
+static inline int
+sysfs_create_groups(struct kobject *kobj, const struct attribute_group **grps)
+{
+ int error = 0;
+ int i;
+
+ if (grps == NULL)
+ goto done;
+ for (i = 0; grps[i] && !error; i++)
+ error = sysfs_create_group(kobj, grps[i]);
+ while (error && --i >= 0)
+ sysfs_remove_group(kobj, grps[i]);
+done:
+ return (error);
+}
+
+static inline void
+sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **grps)
+{
+ int i;
+
+ if (grps == NULL)
+ return;
+ for (i = 0; grps[i]; i++)
+ sysfs_remove_group(kobj, grps[i]);
+}
+
+static inline int
+sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp)
+{
+
+ /* The expected Linux behavior is to return failure if the group already exists. */
+ return (sysfs_create_group(kobj, grp));
+}
+
+static inline void
+sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp)
+{
+ struct attribute **attr;
+ struct sysctl_oid *oidp;
+
+ SYSCTL_FOREACH(oidp, SYSCTL_CHILDREN(kobj->oidp)) {
+ if (strcmp(oidp->oid_name, grp->name) != 0)
+ continue;
+ for (attr = grp->attrs; *attr != NULL; attr++) {
+ sysctl_remove_name(oidp, (*attr)->name, 1, 1);
+ }
+ }
+}
+
+static inline int
+sysfs_create_dir(struct kobject *kobj)
+{
+ struct sysctl_oid *oid;
+
+ oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(kobj->parent->oidp),
+ OID_AUTO, kobj->name, CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, kobj->name);
+ if (!oid) {
+ return (-ENOMEM);
+ }
+ kobj->oidp = oid;
+
+ return (0);
+}
+
+static inline void
+sysfs_remove_dir(struct kobject *kobj)
+{
+
+ if (kobj->oidp == NULL)
+ return;
+ sysctl_remove_oid(kobj->oidp, 1, 1);
+}
+
+static inline bool
+sysfs_streq(const char *s1, const char *s2)
+{
+ int l1, l2;
+
+ l1 = strlen(s1);
+ l2 = strlen(s2);
+
+ if (l1 != 0 && s1[l1-1] == '\n')
+ l1--;
+ if (l2 != 0 && s2[l2-1] == '\n')
+ l2--;
+
+ return (l1 == l2 && strncmp(s1, s2, l1) == 0);
+}
+
+static inline int
+sysfs_emit(char *buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ if (!buf || offset_in_page(buf)) {
+ pr_warn("invalid sysfs_emit: buf:%p\n", buf);
+ return (0);
+ }
+
+ va_start(args, fmt);
+ i = vscnprintf(buf, PAGE_SIZE, fmt, args);
+ va_end(args);
+
+ return (i);
+}
+
+static inline int
+sysfs_emit_at(char *buf, int at, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ if (!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE) {
+ pr_warn("invalid sysfs_emit: buf:%p at:%d\n", buf, at);
+ return (0);
+ }
+
+ va_start(args, fmt);
+ i = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args);
+ va_end(args);
+
+ return (i);
+}
+
+static inline int
+_sysfs_match_string(const char * const *a, size_t l, const char *s)
+{
+ const char *p;
+ int i;
+
+ for (i = 0; i < l; i++) {
+ p = a[i];
+ if (p == NULL)
+ break;
+ if (sysfs_streq(p, s))
+ return (i);
+ }
+
+ return (-ENOENT);
+}
+#define sysfs_match_string(a, s) _sysfs_match_string(a, ARRAY_SIZE(a), s)
+
+#define sysfs_attr_init(attr) do {} while(0)
+
+#endif /* _LINUXKPI_LINUX_SYSFS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/tcp.h b/sys/compat/linuxkpi/common/include/linux/tcp.h
new file mode 100644
index 000000000000..3e461d8e7075
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/tcp.h
@@ -0,0 +1,70 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_TCP_H
+#define _LINUXKPI_LINUX_TCP_H
+
+#include <sys/types.h>
+#include <linux/skbuff.h>
+
+/* (u) unconfirmed structure field names; using FreeBSD's in the meantime. */
+struct tcphdr {
+ uint16_t source; /* (u) */
+ uint16_t dest; /* (u) */
+ uint32_t th_seq; /* (u) */
+ uint32_t th_ack; /* (u) */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint8_t th_x2:4, doff:4;
+#elif BYTE_ORDER == BIG_ENDIAN
+ uint8_t doff:4, th_x2:4;
+#endif
+ uint8_t th_flags; /* (u) */
+ uint16_t th_win; /* (u) */
+ uint16_t check;
+ uint16_t th_urg; /* (u) */
+};
+
+static __inline struct tcphdr *
+tcp_hdr(struct sk_buff *skb)
+{
+
+ return (struct tcphdr *)skb_transport_header(skb);
+}
+
+static __inline uint32_t
+tcp_hdrlen(struct sk_buff *skb)
+{
+ struct tcphdr *th;
+
+ th = tcp_hdr(skb);
+ return (4 * th->doff);
+}
+
+#endif /* _LINUXKPI_LINUX_TCP_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/time.h b/sys/compat/linuxkpi/common/include/linux/time.h
new file mode 100644
index 000000000000..ca77a20516ff
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/time.h
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (c) 2014-2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_TIME_H_
+#define _LINUXKPI_LINUX_TIME_H_
+
+#define MSEC_PER_SEC 1000L
+
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+
+#define USEC_PER_MSEC 1000L
+#define USEC_PER_SEC 1000000L
+
+#define timespec64 timespec
+
+#include <sys/time.h>
+#include <sys/stdint.h>
+
+#include <linux/math64.h>
+
+typedef int64_t time64_t;
+
+static inline struct timeval
+ns_to_timeval(const int64_t nsec)
+{
+ struct timeval tv;
+ long rem;
+
+ if (nsec == 0) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return (tv);
+ }
+
+ tv.tv_sec = nsec / NSEC_PER_SEC;
+ rem = nsec % NSEC_PER_SEC;
+ if (rem < 0) {
+ tv.tv_sec--;
+ rem += NSEC_PER_SEC;
+ }
+ tv.tv_usec = rem / 1000;
+ return (tv);
+}
+
+static inline int64_t
+timeval_to_ns(const struct timeval *tv)
+{
+ return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
+ tv->tv_usec * NSEC_PER_USEC;
+}
+
+#define getrawmonotonic(ts) nanouptime(ts)
+
+static inline struct timespec
+timespec_sub(struct timespec lhs, struct timespec rhs)
+{
+ struct timespec ts;
+
+ timespecsub(&lhs, &rhs, &ts);
+
+ return ts;
+}
+
+static inline void
+set_normalized_timespec(struct timespec *ts, time_t sec, int64_t nsec)
+{
+ /* XXX: this doesn't actually normalize anything */
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static inline int64_t
+timespec_to_ns(const struct timespec *ts)
+{
+ return ((ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec);
+}
+
+static inline struct timespec
+ns_to_timespec(const int64_t nsec)
+{
+ struct timespec ts;
+ int32_t rem;
+
+ if (nsec == 0) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ return (ts);
+ }
+
+ ts.tv_sec = nsec / NSEC_PER_SEC;
+ rem = nsec % NSEC_PER_SEC;
+ if (rem < 0) {
+ ts.tv_sec--;
+ rem += NSEC_PER_SEC;
+ }
+ ts.tv_nsec = rem;
+ return (ts);
+}
+
+#define ns_to_timespec64(_x) ns_to_timespec(_x)
+
+static inline int
+timespec_valid(const struct timespec *ts)
+{
+ if (ts->tv_sec < 0 || ts->tv_sec > 100000000 ||
+ ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
+ return (0);
+ return (1);
+}
+
+static inline unsigned long
+get_seconds(void)
+{
+ return time_uptime;
+}
+
+#endif /* _LINUXKPI_LINUX_TIME_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/timer.h b/sys/compat/linuxkpi/common/include/linux/timer.h
new file mode 100644
index 000000000000..a635f0faea59
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/timer.h
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_TIMER_H_
+#define _LINUXKPI_LINUX_TIMER_H_
+
+#include <linux/types.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/callout.h>
+
+struct timer_list {
+ struct callout callout;
+ union {
+ void (*function) (unsigned long); /* < v4.15 */
+ void (*function_415) (struct timer_list *);
+ };
+ unsigned long data;
+ unsigned long expires;
+};
+
+extern unsigned long linux_timer_hz_mask;
+
+#define TIMER_IRQSAFE 0x0001
+
+#define from_timer(var, arg, field) \
+ container_of(arg, typeof(*(var)), field)
+
+#define timer_setup(timer, func, flags) do { \
+ CTASSERT(((flags) & ~TIMER_IRQSAFE) == 0); \
+ (timer)->function_415 = (func); \
+ (timer)->data = (unsigned long)(timer); \
+ callout_init(&(timer)->callout, 1); \
+} while (0)
+
+#define setup_timer(timer, func, dat) do { \
+ (timer)->function = (func); \
+ (timer)->data = (dat); \
+ callout_init(&(timer)->callout, 1); \
+} while (0)
+
+#define __setup_timer(timer, func, dat, flags) do { \
+ CTASSERT(((flags) & ~TIMER_IRQSAFE) == 0); \
+ setup_timer(timer, func, dat); \
+} while (0)
+
+#define init_timer(timer) do { \
+ (timer)->function = NULL; \
+ (timer)->data = 0; \
+ callout_init(&(timer)->callout, 1); \
+} while (0)
+
+extern int mod_timer(struct timer_list *, unsigned long);
+extern void add_timer(struct timer_list *);
+extern void add_timer_on(struct timer_list *, int cpu);
+extern int del_timer(struct timer_list *);
+extern int del_timer_sync(struct timer_list *);
+extern int timer_delete_sync(struct timer_list *);
+extern int timer_shutdown_sync(struct timer_list *);
+
+#define timer_pending(timer) callout_pending(&(timer)->callout)
+#define round_jiffies(j) \
+ ((unsigned long)(((j) + linux_timer_hz_mask) & ~linux_timer_hz_mask))
+#define round_jiffies_relative(j) round_jiffies(j)
+#define round_jiffies_up(j) round_jiffies(j)
+#define round_jiffies_up_relative(j) round_jiffies_up(j)
+
+#endif /* _LINUXKPI_LINUX_TIMER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/tracepoint.h b/sys/compat/linuxkpi/common/include/linux/tracepoint.h
new file mode 100644
index 000000000000..8ce7992306b9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/tracepoint.h
@@ -0,0 +1,48 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_TRACEPOINT_H
+#define _LINUXKPI_LINUX_TRACEPOINT_H
+
+#define TP_PROTO(...) __VA_ARGS__
+#define TP_ARGS(...)
+#define TP_STRUCT__entry(...)
+#define TP_fast_assign(...)
+#define TP_printk(...)
+
+#define TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
+static inline void trace_ ## _name(_proto) \
+{ \
+}
+
+#define DECLARE_EVENT_CLASS(...)
+#define DEFINE_EVENT(_x, _name, _proto, _args) \
+static inline void trace_ ## _name(_proto) \
+{ \
+}
+
+#endif /* _LINUXKPI_LINUX_TRACEPOINT_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/typecheck.h b/sys/compat/linuxkpi/common/include/linux/typecheck.h
new file mode 100644
index 000000000000..0e813962a7f5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/typecheck.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_TYPECHECK_H_
+#define _LINUXKPI_LINUX_TYPECHECK_H_
+
+#define typecheck(type,x) \
+({ type __var1; \
+ typeof(x) __var2; \
+ (void)(&__var1 == &__var2); \
+ 1; \
+})
+
+
+#endif /* _LINUXKPI_LINUX_TYPECHECK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/types.h b/sys/compat/linuxkpi/common/include/linux/types.h
new file mode 100644
index 000000000000..fcc455e5f731
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/types.h
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_TYPES_H_
+#define _LINUXKPI_LINUX_TYPES_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifndef __bitwise__
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#endif
+
+typedef uint16_t __le16;
+typedef uint16_t __be16;
+typedef uint32_t __le32;
+typedef uint32_t __be32;
+typedef uint64_t __le64;
+typedef uint64_t __be64;
+
+typedef uint16_t __aligned_u16 __aligned(sizeof(uint16_t));
+typedef uint32_t __aligned_u32 __aligned(sizeof(uint32_t));
+typedef uint64_t __aligned_u64 __aligned(sizeof(uint64_t));
+
+#ifdef _KERNEL
+typedef unsigned short ushort;
+typedef unsigned int uint;
+#endif
+typedef unsigned long ulong;
+typedef unsigned gfp_t;
+typedef off_t loff_t;
+typedef vm_paddr_t resource_size_t;
+typedef uint16_t __bitwise__ __sum16;
+typedef uint32_t __bitwise__ __wsum;
+typedef unsigned long pgoff_t;
+typedef unsigned __poll_t;
+
+typedef uint64_t phys_addr_t;
+
+typedef size_t __kernel_size_t;
+typedef unsigned long kernel_ulong_t;
+
+#define DECLARE_BITMAP(n, bits) \
+ unsigned long n[howmany(bits, sizeof(long) * 8)]
+
+typedef unsigned long irq_hw_number_t;
+
+#ifndef LIST_HEAD_DEF
+#define LIST_HEAD_DEF
+struct list_head {
+ struct list_head *next;
+ struct list_head *prev;
+};
+#endif
+
+struct rcu_head {
+ void *raw[2];
+} __aligned(sizeof(void *));
+
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+typedef int linux_task_fn_t(void *data);
+
+#endif /* _LINUXKPI_LINUX_TYPES_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/uaccess.h b/sys/compat/linuxkpi/common/include/linux/uaccess.h
new file mode 100644
index 000000000000..660e84e6af3b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/uaccess.h
@@ -0,0 +1,115 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * Copyright (c) 2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_UACCESS_H_
+#define _LINUXKPI_LINUX_UACCESS_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
+#include <linux/compiler.h>
+
+#define VERIFY_READ VM_PROT_READ
+#define VERIFY_WRITE VM_PROT_WRITE
+
+#define __get_user(_x, _p) ({ \
+ int __err; \
+ __typeof(*(_p)) __x; \
+ __err = linux_copyin((_p), &(__x), sizeof(*(_p))); \
+ (_x) = __x; \
+ __err; \
+})
+
+#define __put_user(_x, _p) ({ \
+ __typeof(*(_p)) __x = (_x); \
+ linux_copyout(&(__x), (_p), sizeof(*(_p))); \
+})
+#define get_user(_x, _p) linux_copyin((_p), &(_x), sizeof(*(_p)))
+#define put_user(_x, _p) __put_user(_x, _p)
+#define clear_user(...) linux_clear_user(__VA_ARGS__)
+
+#define access_ok(a,b) linux_access_ok(a,b)
+
+extern int linux_copyin(const void *uaddr, void *kaddr, size_t len);
+extern int linux_copyout(const void *kaddr, void *uaddr, size_t len);
+extern size_t linux_clear_user(void *uaddr, size_t len);
+extern int linux_access_ok(const void *uaddr, size_t len);
+
+/*
+ * NOTE: Each pagefault_disable() call must have a corresponding
+ * pagefault_enable() call in the same scope. The former creates a new
+ * block and defines a temporary variable, and the latter uses the
+ * temporary variable and closes the block. Failure to balance the
+ * calls will result in a compile-time error.
+ */
+#define pagefault_disable(void) do { \
+ int __saved_pflags = \
+ vm_fault_disable_pagefaults()
+
+#define pagefault_enable(void) \
+ vm_fault_enable_pagefaults(__saved_pflags); \
+} while (0)
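Not part of the patch: a minimal sketch of the balanced-call requirement described in the NOTE above. The function name and buffers are hypothetical; copyout_nofault() is the native FreeBSD nofault copy routine.

static int
nofault_copy_example(void *udst, const void *ksrc, size_t len)
{
	int error;

	pagefault_disable();	/* opens the block, saves the fault state */
	error = copyout_nofault(ksrc, udst, len);
	pagefault_enable();	/* restores the state, closes the block */

	return (error);
}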
+
+static inline bool
+pagefault_disabled(void)
+{
+ return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
+}
+
+static inline int
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
+{
+
+ return (copyout_nofault(from, to, n) != 0 ? n : 0);
+}
+#define __copy_to_user_inatomic_nocache(to, from, n) \
+ __copy_to_user_inatomic((to), (from), (n))
+
+static inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from,
+ unsigned long n)
+{
+ /*
+ * XXXKIB. Equivalent Linux function is implemented using
+ * MOVNTI for aligned moves. For unaligned head and tail,
+ * normal move is performed. As such, it is not incorrect, if
+ * only somewhat slower, to use normal copyin. All uses
+ * except shmem_pwrite_fast() have the destination mapped WC.
+ */
+ return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0));
+}
+#define __copy_from_user_inatomic_nocache(to, from, n) \
+ __copy_from_user_inatomic((to), (from), (n))
+
+#endif /* _LINUXKPI_LINUX_UACCESS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/udp.h b/sys/compat/linuxkpi/common/include/linux/udp.h
new file mode 100644
index 000000000000..f3cd40cf8bb7
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/udp.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_UDP_H
+#define _LINUXKPI_LINUX_UDP_H
+
+#include <sys/types.h>
+#include <linux/skbuff.h>
+
+/* (u) unconfirmed structure field names. */
+struct udphdr {
+ uint16_t source; /* (u) */
+ uint16_t dest;
+ uint16_t len; /* (u) */
+ uint16_t check;
+};
+
+static __inline struct udphdr *
+udp_hdr(struct sk_buff *skb)
+{
+
+ return (struct udphdr *)skb_transport_header(skb);
+}
+
+#endif /* _LINUXKPI_LINUX_UDP_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/units.h b/sys/compat/linuxkpi/common/include/linux/units.h
new file mode 100644
index 000000000000..304b5c27d87f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/units.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_UNITS_H_
+#define _LINUXKPI_LINUX_UNITS_H_
+
+#define NANOHZ_PER_HZ 1000000000UL
+#define MICROHZ_PER_HZ 1000000UL
+#define MILLIHZ_PER_HZ 1000UL
+#define HZ_PER_KHZ 1000UL
+#define KHZ_PER_MHZ 1000UL
+#define HZ_PER_MHZ 1000000UL
+
+#endif /* _LINUXKPI_LINUX_UNITS_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/usb.h b/sys/compat/linuxkpi/common/include/linux/usb.h
new file mode 100644
index 000000000000..d9649dcb5471
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/usb.h
@@ -0,0 +1,318 @@
+/*-
+ * Copyright (c) 2007 Luigi Rizzo - Universita` di Pisa. All rights reserved.
+ * Copyright (c) 2007 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_COMPAT_LINUX_H
+#define _USB_COMPAT_LINUX_H
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/condvar.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usbdi_util.h>
+
+#include <linux/pm.h>
+
+struct usb_device;
+struct usb_interface;
+struct usb_driver;
+struct urb;
+
+typedef void (usb_complete_t)(struct urb *);
+
+#define USB_MAX_FULL_SPEED_ISOC_FRAMES (60 * 1)
+#define USB_MAX_HIGH_SPEED_ISOC_FRAMES (60 * 8)
+
+#define USB_DEVICE_ID_MATCH_DEVICE \
+ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
+
+#define USB_DEVICE(vend,prod) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = (vend), \
+ .idProduct = (prod)
+
+/* The "usb_driver" structure holds the Linux USB device driver
+ * callbacks, and a pointer to device IDs which this entry should
+ * match against. Usually this entry is exposed to the USB emulation
+ * layer using the "USB_DRIVER_EXPORT()" macro, which is defined
+ * below.
+ */
+struct usb_driver {
+ const char *name;
+
+ int (*probe)(struct usb_interface *intf,
+ const struct usb_device_id *id);
+
+ void (*disconnect)(struct usb_interface *intf);
+
+ int (*ioctl)(struct usb_interface *intf, unsigned int code, void *buf);
+
+ int (*suspend)(struct usb_interface *intf, pm_message_t message);
+ int (*resume)(struct usb_interface *intf);
+
+ const struct usb_device_id *id_table;
+
+ void (*shutdown)(struct usb_interface *intf);
+
+ LIST_ENTRY(usb_driver) linux_driver_list;
+};
+
+#define USB_DRIVER_EXPORT(id,p_usb_drv) \
+ SYSINIT(id,SI_SUB_KLD,SI_ORDER_FIRST,usb_linux_register,p_usb_drv); \
+ SYSUNINIT(id,SI_SUB_KLD,SI_ORDER_ANY,usb_linux_deregister,p_usb_drv)
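Not part of the patch: a hedged sketch of filling in struct usb_driver and exposing it with USB_DRIVER_EXPORT(), as described in the comment above. The "ex_" names, the vendor/product IDs and the empty callbacks are hypothetical; struct usb_device_id is assumed to be provided by the headers this KPI pulls in.

static const struct usb_device_id ex_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }				/* terminating entry */
};

static int
ex_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	return (0);		/* claim every matching interface */
}

static void
ex_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver ex_driver = {
	.name = "ex_driver",
	.probe = ex_probe,
	.disconnect = ex_disconnect,
	.id_table = ex_id_table,
};

USB_DRIVER_EXPORT(ex_driver_sysinit, &ex_driver);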
+
+#define USB_DT_ENDPOINT_SIZE 7
+#define USB_DT_ENDPOINT_AUDIO_SIZE 9
+
+/*
+ * Endpoints
+ */
+#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */
+#define USB_ENDPOINT_DIR_MASK 0x80
+
+#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */
+#define USB_ENDPOINT_XFER_CONTROL 0
+#define USB_ENDPOINT_XFER_ISOC 1
+#define USB_ENDPOINT_XFER_BULK 2
+#define USB_ENDPOINT_XFER_INT 3
+#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
+
+/* CONTROL REQUEST SUPPORT */
+
+/*
+ * Definition of direction mask for
+ * "bEndpointAddress" and "bmRequestType":
+ */
+#define USB_DIR_MASK 0x80
+#define USB_DIR_OUT 0x00 /* write to USB device */
+#define USB_DIR_IN 0x80 /* read from USB device */
+
+/*
+ * Definition of type mask for
+ * "bmRequestType":
+ */
+#define USB_TYPE_MASK (0x03 << 5)
+#define USB_TYPE_STANDARD (0x00 << 5)
+#define USB_TYPE_CLASS (0x01 << 5)
+#define USB_TYPE_VENDOR (0x02 << 5)
+#define USB_TYPE_RESERVED (0x03 << 5)
+
+/*
+ * Definition of receiver mask for
+ * "bmRequestType":
+ */
+#define USB_RECIP_MASK 0x1f
+#define USB_RECIP_DEVICE 0x00
+#define USB_RECIP_INTERFACE 0x01
+#define USB_RECIP_ENDPOINT 0x02
+#define USB_RECIP_OTHER 0x03
+
+/*
+ * Definition of standard request values for
+ * "bRequest":
+ */
+#define USB_REQ_GET_STATUS 0x00
+#define USB_REQ_CLEAR_FEATURE 0x01
+#define USB_REQ_SET_FEATURE 0x03
+#define USB_REQ_SET_ADDRESS 0x05
+#define USB_REQ_GET_DESCRIPTOR 0x06
+#define USB_REQ_SET_DESCRIPTOR 0x07
+#define USB_REQ_GET_CONFIGURATION 0x08
+#define USB_REQ_SET_CONFIGURATION 0x09
+#define USB_REQ_GET_INTERFACE 0x0A
+#define USB_REQ_SET_INTERFACE 0x0B
+#define USB_REQ_SYNCH_FRAME 0x0C
+
+#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */
+#define USB_REQ_GET_ENCRYPTION 0x0E
+#define USB_REQ_SET_HANDSHAKE 0x0F
+#define USB_REQ_GET_HANDSHAKE 0x10
+#define USB_REQ_SET_CONNECTION 0x11
+#define USB_REQ_SET_SECURITY_DATA 0x12
+#define USB_REQ_GET_SECURITY_DATA 0x13
+#define USB_REQ_SET_WUSB_DATA 0x14
+#define USB_REQ_LOOPBACK_DATA_WRITE 0x15
+#define USB_REQ_LOOPBACK_DATA_READ 0x16
+#define USB_REQ_SET_INTERFACE_DS 0x17
+
+/*
+ * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
+ * are read as a bit array returned by USB_REQ_GET_STATUS. (So there
+ * are at most sixteen features of each type.)
+ */
+#define USB_DEVICE_SELF_POWERED 0 /* (read only) */
+#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */
+#define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */
+#define USB_DEVICE_BATTERY 2 /* (wireless) */
+#define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */
+#define USB_DEVICE_WUSB_DEVICE 3 /* (wireless) */
+#define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */
+#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */
+#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
+
+#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
+
+#define PIPE_ISOCHRONOUS 0x01 /* UE_ISOCHRONOUS */
+#define PIPE_INTERRUPT 0x03 /* UE_INTERRUPT */
+#define PIPE_CONTROL 0x00 /* UE_CONTROL */
+#define PIPE_BULK 0x02 /* UE_BULK */
+
+/* Whenever Linux references a USB endpoint:
+ * a) to initialize "urb->endpoint"
+ * b) second argument passed to "usb_control_msg()"
+ *
+ * Then it uses one of the following macros. The "endpoint" argument
+ * is the physical endpoint value masked by 0xF. The "dev" argument
+ * is a pointer to "struct usb_device".
+ */
+#define usb_sndctrlpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_CONTROL, (endpoint) | USB_DIR_OUT)
+
+#define usb_rcvctrlpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_CONTROL, (endpoint) | USB_DIR_IN)
+
+#define usb_sndisocpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_ISOCHRONOUS, (endpoint) | USB_DIR_OUT)
+
+#define usb_rcvisocpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_ISOCHRONOUS, (endpoint) | USB_DIR_IN)
+
+#define usb_sndbulkpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_BULK, (endpoint) | USB_DIR_OUT)
+
+#define usb_rcvbulkpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_BULK, (endpoint) | USB_DIR_IN)
+
+#define usb_sndintpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_INTERRUPT, (endpoint) | USB_DIR_OUT)
+
+#define usb_rcvintpipe(dev,endpoint) \
+ usb_find_host_endpoint(dev, PIPE_INTERRUPT, (endpoint) | USB_DIR_IN)
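Not part of the patch: a minimal sketch of the pipe macros above. On FreeBSD they resolve to usb_find_host_endpoint(), whose result is handed to the message helpers; the function name, endpoint number, timeout and error value are hypothetical.

static int
bulk_read_example(struct usb_device *dev, void *buf, int len)
{
	struct usb_host_endpoint *ep;
	uint16_t actual = 0;

	/* Bulk IN endpoint 1; the macro ORs in USB_DIR_IN for us. */
	ep = usb_rcvbulkpipe(dev, 1);
	if (ep == NULL)
		return (-ENODEV);

	/* 1000 ms timeout; "actual" reports the transferred byte count. */
	return (usb_bulk_msg(dev, ep, buf, len, &actual, 1000));
}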
+
+/*
+ * The following structure is used to extend "struct urb" when we are
+ * dealing with an isochronous endpoint. It contains information about
+ * the data offset and data length of an isochronous packet.
+ * The "actual_length" field is updated before the "complete"
+ * callback in the "urb" structure is called.
+ */
+struct usb_iso_packet_descriptor {
+ uint32_t offset; /* deprecated buffer offset (the
+ * packets are usually back to back) */
+ uint16_t length; /* expected length */
+ uint16_t actual_length;
+ int16_t status; /* transfer status */
+};
+
+/*
+ * The following structure holds various information about a USB
+ * transfer. This structure is used for all kinds of USB transfers.
+ *
+ * URB is short for USB Request Block.
+ */
+struct urb {
+ TAILQ_ENTRY(urb) bsd_urb_list;
+ struct cv cv_wait;
+
+ struct usb_device *dev; /* (in) pointer to associated device */
+ struct usb_host_endpoint *endpoint; /* (in) pipe pointer */
+ uint8_t *setup_packet; /* (in) setup packet (control only) */
+ uint8_t *bsd_data_ptr;
+ void *transfer_buffer; /* (in) associated data buffer */
+ void *context; /* (in) context for completion */
+ usb_complete_t *complete; /* (in) completion routine */
+
+ usb_size_t transfer_buffer_length;/* (in) data buffer length */
+ usb_size_t bsd_length_rem;
+ usb_size_t actual_length; /* (return) actual transfer length */
+ usb_timeout_t timeout; /* FreeBSD specific */
+
+ uint16_t transfer_flags; /* (in) */
+#define URB_SHORT_NOT_OK 0x0001 /* report short transfers like errors */
+#define URB_ISO_ASAP 0x0002 /* ignore "start_frame" field */
+#define URB_ZERO_PACKET 0x0004 /* the USB transfer ends with a short
+ * packet */
+#define URB_NO_TRANSFER_DMA_MAP 0x0008 /* "transfer_dma" is valid on submit */
+#define URB_WAIT_WAKEUP 0x0010 /* custom flags */
+#define URB_IS_SLEEPING 0x0020 /* custom flags */
+
+ usb_frcount_t start_frame; /* (modify) start frame (ISO) */
+ usb_frcount_t number_of_packets; /* (in) number of ISO packets */
+ uint16_t interval; /* (modify) transfer interval
+ * (INT/ISO) */
+ uint16_t error_count; /* (return) number of ISO errors */
+ int16_t status; /* (return) status */
+
+ uint8_t setup_dma; /* (in) not used on FreeBSD */
+ uint8_t transfer_dma; /* (in) not used on FreeBSD */
+ uint8_t bsd_isread;
+ uint8_t kill_count; /* FreeBSD specific */
+
+ struct usb_iso_packet_descriptor iso_frame_desc[]; /* (in) ISO ONLY */
+};
+
+/* various prototypes */
+
+int usb_submit_urb(struct urb *urb, uint16_t mem_flags);
+int usb_unlink_urb(struct urb *urb);
+int usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe);
+int usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *ep,
+ uint8_t request, uint8_t requesttype, uint16_t value,
+ uint16_t index, void *data, uint16_t size, usb_timeout_t timeout);
+int usb_set_interface(struct usb_device *dev, uint8_t ifnum,
+ uint8_t alternate);
+int usb_setup_endpoint(struct usb_device *dev,
+ struct usb_host_endpoint *uhe, usb_frlength_t bufsize);
+
+struct usb_host_endpoint *usb_find_host_endpoint(struct usb_device *dev,
+ uint8_t type, uint8_t ep);
+struct urb *usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags);
+struct usb_host_interface *usb_altnum_to_altsetting(
+ const struct usb_interface *intf, uint8_t alt_index);
+struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no);
+
+void *usb_buffer_alloc(struct usb_device *dev, usb_size_t size,
+ uint16_t mem_flags, uint8_t *dma_addr);
+void *usbd_get_intfdata(struct usb_interface *intf);
+
+void usb_buffer_free(struct usb_device *dev, usb_size_t size, void *addr, uint8_t dma_addr);
+void usb_free_urb(struct urb *urb);
+void usb_init_urb(struct urb *urb);
+void usb_kill_urb(struct urb *urb);
+void usb_set_intfdata(struct usb_interface *intf, void *data);
+void usb_linux_register(void *arg);
+void usb_linux_deregister(void *arg);
+
+void usb_fill_bulk_urb(struct urb *, struct usb_device *,
+ struct usb_host_endpoint *, void *, int, usb_complete_t, void *);
+int usb_bulk_msg(struct usb_device *, struct usb_host_endpoint *,
+ void *, int, uint16_t *, usb_timeout_t);
+
+#define interface_to_usbdev(intf) (intf)->linux_udev
+#define interface_to_bsddev(intf) (intf)->linux_udev
+
+#endif /* _USB_COMPAT_LINUX_H */
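
A hypothetical usage sketch of the URB interface declared above (not part of the patch): allocate, fill and submit an asynchronous bulk IN URB with a completion callback. The endpoint number and memory flags are illustrative; error conventions are assumed to follow the Linux API (negative errno values).

/* Hypothetical sketch, not part of the patch. */
static void
example_complete(struct urb *urb)
{
	if (urb->status == 0)
		printf("URB done, %u bytes\n", (unsigned)urb->actual_length);
}

static int
example_submit(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb;
	int error;

	urb = usb_alloc_urb(0 /* no ISO packets */, 0 /* mem_flags */);
	if (urb == NULL)
		return (-ENOMEM);

	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
	    buf, len, example_complete, NULL /* context */);

	error = usb_submit_urb(urb, 0 /* mem_flags */);
	if (error != 0)
		usb_free_urb(urb);
	return (error);
}
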
diff --git a/sys/compat/linuxkpi/common/include/linux/utsname.h b/sys/compat/linuxkpi/common/include/linux/utsname.h
new file mode 100644
index 000000000000..3239801ca17b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/utsname.h
@@ -0,0 +1,51 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_UTSNAME_H
+#define _LINUXKPI_LINUX_UTSNAME_H
+
+#include <sys/types.h>
+#include <sys/jail.h>
+
+struct _utsname {
+ char release[OSRELEASELEN];
+};
+
+struct uts_namespace {
+ struct _utsname name;
+};
+
+extern struct uts_namespace init_uts_ns;
+
+static inline struct _utsname *
+init_utsname(void)
+{
+
+ return &init_uts_ns.name;
+}
+
+#endif /* _LINUXKPI_LINUX_UTSNAME_H */
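
Drivers typically use init_utsname() only to embed the OS release string in log messages or firmware banners; a trivial, hypothetical sketch (not part of the patch):

/* Hypothetical sketch, not part of the patch. */
static inline void
example_log_release(void)
{
	printf("host OS release: %s\n", init_utsname()->release);
}
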
diff --git a/sys/compat/linuxkpi/common/include/linux/uuid.h b/sys/compat/linuxkpi/common/include/linux/uuid.h
new file mode 100644
index 000000000000..4f6f4a8b34f0
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/uuid.h
@@ -0,0 +1,77 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021,2023 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_UUID_H
+#define _LINUXKPI_LINUX_UUID_H
+
+#include <linux/random.h>
+
+#define UUID_STRING_LEN 36
+
+#define GUID_INIT(x0_3, x4_5, x6_7, x8, x9, x10, x11, x12, x13, x14, x15) \
+ ((guid_t) { .x = { \
+ [0] = (x0_3) & 0xff, \
+ [1] = ((x0_3) >> 8) & 0xff, \
+ [2] = ((x0_3) >> 16) & 0xff, \
+ [3] = ((x0_3) >> 24) & 0xff, \
+ [4] = (x4_5) & 0xff, \
+ [5] = ((x4_5) >> 8) & 0xff, \
+ [6] = (x6_7) & 0xff, \
+ [7] = ((x6_7) >> 8) & 0xff, \
+ [8] = (x8), \
+ [9] = (x9), \
+ [10] = (x10), \
+ [11] = (x11), \
+ [12] = (x12), \
+ [13] = (x13), \
+ [14] = (x14), \
+ [15] = (x15) \
+}})
+
+typedef struct {
+ char x[16];
+} guid_t;
+
+static inline void
+guid_gen(guid_t *g)
+{
+
+ get_random_bytes(g, 16);
+ g->x[7] = (g->x[7] & 0x0f) | 0x40;
+ g->x[8] = (g->x[8] & 0x3f) | 0x80;
+}
+
+static inline void
+guid_copy(guid_t *dst, const guid_t *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+#endif /* _LINUXKPI_LINUX_UUID_H */
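
A hypothetical usage sketch of the GUID helpers above (not part of the patch): build a fixed GUID with GUID_INIT() and generate a random one with guid_gen(). The byte values are arbitrary placeholders.

/* Hypothetical sketch, not part of the patch. */
static inline void
example_guid_usage(void)
{
	guid_t fixed = GUID_INIT(0x12345678, 0x9abc, 0xdef0,
	    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);
	guid_t random;

	guid_gen(&random);		/* random: version 4, variant 1 */
	guid_copy(&random, &fixed);	/* overwrite with the fixed value */
}
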
diff --git a/sys/compat/linuxkpi/common/include/linux/vgaarb.h b/sys/compat/linuxkpi/common/include/linux/vgaarb.h
new file mode 100644
index 000000000000..d43a88136864
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/vgaarb.h
@@ -0,0 +1,281 @@
+/*
+ * The VGA arbiter manages VGA space routing and VGA resource decode to
+ * allow multiple VGA devices to be used in a system in a safe way.
+ *
+ * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
+ * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _LINUXKPI_LINUX_VGA_H_
+#define _LINUXKPI_LINUX_VGA_H_
+
+#include <video/vga.h>
+
+/* Legacy VGA regions */
+#define VGA_RSRC_NONE 0x00
+#define VGA_RSRC_LEGACY_IO 0x01
+#define VGA_RSRC_LEGACY_MEM 0x02
+#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
+/* Non-legacy access */
+#define VGA_RSRC_NORMAL_IO 0x04
+#define VGA_RSRC_NORMAL_MEM 0x08
+
+/* Pass this instead of a pci_dev to use the system "default"
+ * device, that is, the one used by vgacon. Archs will probably
+ * have to provide their own vga_default_device();
+ */
+#define VGA_DEFAULT_DEVICE (NULL)
+
+struct pci_dev;
+
+/* For use by clients */
+
+/**
+ * vga_set_legacy_decoding
+ *
+ * @pdev: pci device of the VGA card
+ * @decodes: bit mask of what legacy regions the card decodes
+ *
+ * Indicates to the arbiter if the card decodes legacy VGA IOs,
+ * legacy VGA Memory, both, or none. All cards default to both;
+ * the card driver (fbdev for example) should tell the arbiter
+ * if it has disabled legacy decoding, so the card can be left
+ * out of the arbitration process (and is then safe to take
+ * interrupts at any time).
+ */
+#if defined(CONFIG_VGA_ARB)
+extern void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes) { };
+#endif
+
+/**
+ * vga_get - acquire & lock VGA resources
+ *
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ * @interruptible: blocking should be interruptible by signals ?
+ *
+ * This function acquires VGA resources for the given
+ * card and marks those resources locked. If the resources requested
+ * are "normal" (and not legacy) resources, the arbiter will first check
+ * whether the card is doing legacy decoding for that type of resource. If
+ * yes, the lock is "converted" into a legacy resource lock.
+ * The arbiter will first look for all VGA cards that might conflict
+ * and disable their IOs and/or Memory access, including VGA forwarding
+ * on P2P bridges if necessary, so that the requested resources can
+ * be used. Then, the card is marked as locking these resources and
+ * the IO and/or Memory accesses are enabled on the card (including
+ * VGA forwarding on parent P2P bridges if any).
+ * This function will block if some conflicting card is already locking
+ * one of the required resources (or any resource on a different bus
+ * segment, since P2P bridges don't differentiate VGA memory and IO
+ * afaik). You can indicate whether this blocking should be interruptible
+ * by a signal (for userland interface) or not.
+ * Must not be called at interrupt time or in atomic context.
+ * If the card already owns the resources, the function succeeds.
+ * Nested calls are supported (a per-resource counter is maintained)
+ */
+
+#if defined(CONFIG_VGA_ARB)
+extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+#else
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
+#endif
+
+/**
+ * vga_get_interruptible
+ *
+ * Shortcut to vga_get
+ */
+
+static inline int vga_get_interruptible(struct pci_dev *pdev,
+ unsigned int rsrc)
+{
+ return vga_get(pdev, rsrc, 1);
+}
+
+/**
+ * vga_get_uninterruptible
+ *
+ * Shortcut to vga_get
+ */
+
+static inline int vga_get_uninterruptible(struct pci_dev *pdev,
+ unsigned int rsrc)
+{
+ return vga_get(pdev, rsrc, 0);
+}
+
+/**
+ * vga_tryget - try to acquire & lock legacy VGA resources
+ *
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to acquire and lock
+ *
+ * This function performs the same operation as vga_get(), but
+ * will return an error (-EBUSY) instead of blocking if the resources
+ * are already locked by another card. It can be called in any context
+ */
+
+#if defined(CONFIG_VGA_ARB)
+extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
+#else
+static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
+#endif
+
+/**
+ * vga_put - release lock on legacy VGA resources
+ *
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to release
+ *
+ * This function releases resources previously locked by vga_get()
+ * or vga_tryget(). The resources aren't disabled right away, so
+ * that a subsequence vga_get() on the same card will succeed
+ * immediately. Resources have a counter, so locks are only
+ * released if the counter reaches 0.
+ */
+
+#if defined(CONFIG_VGA_ARB)
+extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+#else
+#define vga_put(pdev, rsrc)
+#endif
+
+
+/**
+ * vga_default_device
+ *
+ * This can be defined by the platform. The default implementation
+ * is rather dumb and will probably only work properly on single
+ * vga card setups and/or x86 platforms.
+ *
+ * If your VGA default device is not PCI, you'll have to return
+ * NULL here. In this case, I assume it will not conflict with
+ * any PCI card. If this is not true, I'll have to define two arch
+ * hooks for enabling/disabling the VGA default device if that is
+ * possible. This may be a problem with real _ISA_ VGA cards, in
+ * addition to a PCI one. I don't know at this point how to deal
+ * with that card. Can their IOs be disabled at all? If not, then
+ * I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed a
+ * vga_get()...
+ */
+
+#ifdef CONFIG_VGA_ARB
+extern struct pci_dev *vga_default_device(void);
+extern void vga_set_default_device(struct pci_dev *pdev);
+#else
+static inline struct pci_dev *vga_default_device(void) { return NULL; };
+static inline void vga_set_default_device(struct pci_dev *pdev) { };
+#endif
+
+/**
+ * vga_conflicts
+ *
+ * Architectures should define this if they have several
+ * independent PCI domains that can afford concurrent VGA
+ * decoding
+ */
+
+#ifndef __ARCH_HAS_VGA_CONFLICT
+static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
+{
+ return 1;
+}
+#endif
+
+/**
+ * vga_client_register
+ *
+ * @pdev: pci device of the VGA client
+ * @cookie: client cookie to be used in callbacks
+ * @irq_set_state: irq state change callback
+ * @set_vga_decode: vga decode change callback
+ *
+ * return value: 0 on success, -1 on failure
+ * Register a client with the VGA arbitration logic
+ *
+ * Clients have two callback mechanisms they can use.
+ * irq enable/disable callback -
+ * If a client can't disable its GPU's VGA resources, then we
+ * need to be able to ask it to turn off its irqs when we
+ * turn off its mem and io decoding.
+ * set_vga_decode
+ * If a client can disable its GPU's VGA resources, it will
+ * get a callback from this to set the encode/decode state.
+ *
+ * Rationale: we cannot disable VGA decode resources unconditionally;
+ * some single GPU laptops seem to require ACPI or BIOS access to the
+ * VGA registers to control things like backlights etc.
+ * Hopefully newer multi-GPU laptops do something saner, and desktops
+ * won't have any special ACPI for this.
+ * The driver will get a callback when VGA arbitration is first used
+ * by userspace since some older X servers have issues.
+ */
+#if defined(CONFIG_VGA_ARB)
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51501
+int vga_client_register(struct pci_dev *pdev,
+ unsigned int (*set_vga_decode)(struct pci_dev *pdev, bool state));
+#elif defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51500
+int vga_client_register(struct pci_dev *pdev, void *cookie,
+ unsigned int (*set_vga_decode)(void *cookie, bool state));
+#else
+int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool state));
+#endif
+#else
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51501
+static inline int vga_client_register(struct pci_dev *pdev,
+ unsigned int (*set_vga_decode)(struct pci_dev *pdev, bool state))
+#elif defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51500
+static inline int vga_client_register(struct pci_dev *pdev, void *cookie,
+ unsigned int (*set_vga_decode)(void *cookie, bool state))
+#else
+static inline int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool state))
+#endif
+{
+ return 0;
+}
+
+static inline int vga_client_unregister(struct pci_dev *pdev)
+{
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51501
+ return (vga_client_register(NULL, NULL));
+#elif defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51500
+ return (vga_client_register(NULL, NULL, NULL));
+#else
+ return (vga_client_register(NULL, NULL, NULL, NULL));
+#endif
+}
+#endif
+
+#endif /* _LINUXKPI_LINUX_VGA_H_ */
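
A minimal, hypothetical caller of the arbiter API above (not part of the patch), bracketing legacy VGA register access with the acquire/release pair:

/* Hypothetical sketch, not part of the patch. */
static int
example_touch_legacy_vga(struct pci_dev *pdev)
{
	int error;

	error = vga_get_uninterruptible(pdev,
	    VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	if (error != 0)
		return (error);

	/* ... access legacy VGA I/O ports and memory here ... */

	vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	return (0);
}
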
diff --git a/sys/compat/linuxkpi/common/include/linux/vmalloc.h b/sys/compat/linuxkpi/common/include/linux/vmalloc.h
new file mode 100644
index 000000000000..00650a2df9b6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/vmalloc.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_VMALLOC_H_
+#define _LINUXKPI_LINUX_VMALLOC_H_
+
+#include <linux/overflow.h>
+#include <linux/page.h>
+
+#define VM_MAP 0x0000
+#define PAGE_KERNEL 0x0000
+
+void *vmap(struct page **pages, unsigned int count, unsigned long flags,
+ int prot);
+void vunmap(void *addr);
+
+#endif /* _LINUXKPI_LINUX_VMALLOC_H_ */
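
A hypothetical usage sketch of vmap()/vunmap() (not part of the patch): map an array of pages into a contiguous kernel virtual range and release it again. VM_MAP and PAGE_KERNEL are no-ops in this shim but are passed for API compatibility.

/* Hypothetical sketch, not part of the patch. */
static void *
example_map(struct page **pages, unsigned int npages)
{
	return (vmap(pages, npages, VM_MAP, PAGE_KERNEL));
}

static void
example_unmap(void *addr)
{
	vunmap(addr);
}
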
diff --git a/sys/compat/linuxkpi/common/include/linux/wait.h b/sys/compat/linuxkpi/common/include/linux/wait.h
new file mode 100644
index 000000000000..03ddce2c06f5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/wait.h
@@ -0,0 +1,319 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_WAIT_H_
+#define _LINUXKPI_LINUX_WAIT_H_
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+
+#include <asm/atomic.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#define SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)
+
+#define might_sleep() \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")
+
+#define might_sleep_if(cond) do { \
+ if (cond) { might_sleep(); } \
+} while (0)
+
+struct wait_queue;
+struct wait_queue_head;
+
+#define wait_queue_entry wait_queue
+
+typedef struct wait_queue wait_queue_t;
+typedef struct wait_queue_entry wait_queue_entry_t;
+typedef struct wait_queue_head wait_queue_head_t;
+
+typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);
+
+#define WQ_FLAG_WOKEN 0x02
+
+/*
+ * Many API consumers directly reference these fields and those of
+ * wait_queue_head.
+ */
+struct wait_queue {
+ unsigned int flags;
+ void *private;
+ wait_queue_func_t *func;
+ union {
+ struct list_head task_list; /* < v4.13 */
+ struct list_head entry; /* >= v4.13 */
+ };
+};
+
+struct wait_queue_head {
+ spinlock_t lock;
+ union {
+ struct list_head task_list; /* < v4.13 */
+ struct list_head head; /* >= v4.13 */
+ };
+};
+
+/*
+ * This function is referenced by at least one DRM driver, so it may not be
+ * renamed and furthermore must be the default wait queue callback.
+ */
+wait_queue_func_t autoremove_wake_function;
+wait_queue_func_t default_wake_function;
+wait_queue_func_t woken_wake_function;
+
+long linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout);
+
+#define wait_woken(wq, state, timeout) \
+ linux_wait_woken((wq), (state), (timeout))
+
+#define DEFINE_WAIT_FUNC(name, function) \
+ wait_queue_t name = { \
+ .private = current, \
+ .func = function, \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
+ }
+
+#define DEFINE_WAIT(name) \
+ DEFINE_WAIT_FUNC(name, autoremove_wake_function)
+
+#define DECLARE_WAITQUEUE(name, task) \
+ wait_queue_t name = { \
+ .private = task, \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
+ }
+
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ wait_queue_head_t name = { \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list), \
+ }; \
+ MTX_SYSINIT(name, &(name).lock, spin_lock_name("wqhead"), MTX_DEF)
+
+#define init_waitqueue_head(wqh) do { \
+ mtx_init(&(wqh)->lock, spin_lock_name("wqhead"), \
+ NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS); \
+ INIT_LIST_HEAD(&(wqh)->task_list); \
+} while (0)
+
+#define __init_waitqueue_head(wqh, name, lk) init_waitqueue_head(wqh)
+
+void linux_init_wait_entry(wait_queue_t *, int);
+void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);
+
+#define init_wait_entry(wq, flags) \
+ linux_init_wait_entry(wq, flags)
+#define wake_up(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 1, false)
+#define wake_up_all(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 0, false)
+#define wake_up_locked(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 1, true)
+#define wake_up_all_locked(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 0, true)
+#define wake_up_interruptible(wqh) \
+ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
+#define wake_up_interruptible_all(wqh) \
+ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
+
+int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, long,
+ unsigned int, spinlock_t *);
+
+/*
+ * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
+ * cond is true after timeout, remaining jiffies (> 0) if cond is true before
+ * timeout.
+ */
+#define __wait_event_common(wqh, cond, timeout, state, lock) ({ \
+ DEFINE_WAIT(__wq); \
+ const long __timeout = ((long)(timeout)) < 1 ? 1 : (timeout); \
+ long __start = jiffies; \
+ long __ret = 0; \
+ \
+ for (;;) { \
+ linux_prepare_to_wait(&(wqh), &__wq, state); \
+ if (cond) \
+ break; \
+ __ret = linux_wait_event_common(&(wqh), &__wq, \
+ __timeout, state, lock); \
+ if (__ret != 0) \
+ break; \
+ } \
+ linux_finish_wait(&(wqh), &__wq); \
+ if (__timeout != MAX_SCHEDULE_TIMEOUT) { \
+ if (__ret == -EWOULDBLOCK) \
+ __ret = !!(cond); \
+ else if (__ret != -ERESTARTSYS) { \
+ __ret = __timeout + __start - jiffies; \
+ /* range check return value */ \
+ if (__ret < 1) \
+ __ret = 1; \
+ else if (__ret > __timeout) \
+ __ret = __timeout; \
+ } \
+ } \
+ __ret; \
+})
+
+#define wait_event(wqh, cond) do { \
+ (void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_UNINTERRUPTIBLE, NULL); \
+} while (0)
+
+#define wait_event_timeout(wqh, cond, timeout) ({ \
+ __wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE, \
+ NULL); \
+})
+
+#define wait_event_killable(wqh, cond) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+})
+
+#define wait_event_interruptible(wqh, cond) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+})
+
+#define wait_event_interruptible_timeout(wqh, cond, timeout) ({ \
+ __wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE, \
+ NULL); \
+})
+
+/*
+ * Wait queue is already locked.
+ */
+#define wait_event_interruptible_locked(wqh, cond) ({ \
+ int __ret; \
+ \
+ spin_unlock(&(wqh).lock); \
+ __ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+ spin_lock(&(wqh).lock); \
+ __ret; \
+})
+
+/*
+ * The passed spinlock is held when testing the condition.
+ */
+#define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, &(lock)); \
+})
+
+/*
+ * The passed spinlock is held when testing the condition.
+ */
+#define wait_event_lock_irq(wqh, cond, lock) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_UNINTERRUPTIBLE, &(lock)); \
+})
+
+static inline void
+__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+ list_add(&wq->task_list, &wqh->task_list);
+}
+
+static inline void
+add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ __add_wait_queue(wqh, wq);
+ spin_unlock(&wqh->lock);
+}
+
+static inline void
+__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+ list_add_tail(&wq->task_list, &wqh->task_list);
+}
+
+static inline void
+__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq)
+{
+ list_add_tail(&wq->entry, &wqh->head);
+}
+
+static inline void
+__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+ list_del(&wq->task_list);
+}
+
+static inline void
+remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ __remove_wait_queue(wqh, wq);
+ spin_unlock(&wqh->lock);
+}
+
+bool linux_waitqueue_active(wait_queue_head_t *);
+
+#define waitqueue_active(wqh) linux_waitqueue_active(wqh)
+
+void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
+void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);
+
+#define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state)
+#define finish_wait(wqh, wq) linux_finish_wait(wqh, wq)
+
+void linux_wake_up_bit(void *, int);
+int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, long);
+void linux_wake_up_atomic_t(atomic_t *);
+int linux_wait_on_atomic_t(atomic_t *, unsigned int);
+
+#define wake_up_bit(word, bit) linux_wake_up_bit(word, bit)
+#define wait_on_bit(word, bit, state) \
+ linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
+#define wait_on_bit_timeout(word, bit, state, timeout) \
+ linux_wait_on_bit_timeout(word, bit, state, timeout)
+#define wake_up_atomic_t(a) linux_wake_up_atomic_t(a)
+/*
+ * All existing callers have a cb that just schedule()s. To avoid adding
+ * complexity, just emulate that internally. The prototype is different so that
+ * callers must be manually modified; a cb that does something other than call
+ * schedule() will require special treatment.
+ */
+#define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state)
+
+struct task_struct;
+bool linux_wake_up_state(struct task_struct *, unsigned int);
+
+#define wake_up_process(task) linux_wake_up_state(task, TASK_NORMAL)
+#define wake_up_state(task, state) linux_wake_up_state(task, state)
+
+#endif /* _LINUXKPI_LINUX_WAIT_H_ */
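
A hypothetical producer/consumer sketch on top of the wait-queue emulation above (not part of the patch). Note the asymmetry that the macros take the wait queue head by name while the init/wake functions take a pointer; "done" and the 100-tick timeout are illustrative.

/* Hypothetical sketch, not part of the patch. */
struct example_state {
	wait_queue_head_t wq;
	bool done;
};

static void
example_init(struct example_state *st)
{
	init_waitqueue_head(&st->wq);
	st->done = false;
}

static int
example_wait(struct example_state *st)
{
	/* Returns -ERESTARTSYS on signal, 0 on timeout, > 0 on success. */
	return (wait_event_interruptible_timeout(st->wq, st->done, 100));
}

static void
example_signal(struct example_state *st)
{
	st->done = true;
	wake_up(&st->wq);
}
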
diff --git a/sys/compat/linuxkpi/common/include/linux/wait_bit.h b/sys/compat/linuxkpi/common/include/linux/wait_bit.h
new file mode 100644
index 000000000000..573798590b73
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/wait_bit.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __LINUXKPI_LINUX_WAITBIT_H__
+#define __LINUXKPI_LINUX_WAITBIT_H__
+
+#include <linux/wait.h>
+#include <linux/bitops.h>
+
+extern wait_queue_head_t linux_bit_waitq;
+extern wait_queue_head_t linux_var_waitq;
+
+#define wait_var_event_killable(var, cond) \
+ wait_event_killable(linux_var_waitq, cond)
+
+#define wait_var_event_interruptible(var, cond) \
+ wait_event_interruptible(linux_var_waitq, cond)
+
+static inline void
+clear_and_wake_up_bit(int bit, void *word)
+{
+ clear_bit_unlock(bit, word);
+ wake_up_bit(word, bit);
+}
+
+static inline wait_queue_head_t *
+bit_waitqueue(void *word, int bit)
+{
+
+ return (&linux_bit_waitq);
+}
+
+static inline void
+wake_up_var(void *var)
+{
+
+ wake_up(&linux_var_waitq);
+}
+
+static inline wait_queue_head_t *
+__var_waitqueue(void *p)
+{
+ return (&linux_var_waitq);
+}
+
+#endif /* __LINUXKPI_LINUX_WAITBIT_H__ */
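
A hypothetical sketch of the bit-wait helpers above (not part of the patch): a one-bit busy flag that is waited on and then released with a wakeup.

/* Hypothetical sketch, not part of the patch. */
static int
example_claim(unsigned long *flags_word)
{
	/* Sleep until bit 0 is clear; interruptible by signals. */
	return (wait_on_bit(flags_word, 0, TASK_INTERRUPTIBLE));
}

static void
example_release(unsigned long *flags_word)
{
	/* Clear the bit with release semantics and wake any waiters. */
	clear_and_wake_up_bit(0, flags_word);
}
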
diff --git a/sys/compat/linuxkpi/common/include/linux/workqueue.h b/sys/compat/linuxkpi/common/include/linux/workqueue.h
new file mode 100644
index 000000000000..66d3981d4229
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/workqueue.h
@@ -0,0 +1,267 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_WORKQUEUE_H_
+#define _LINUXKPI_LINUX_WORKQUEUE_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/taskqueue.h>
+#include <sys/mutex.h>
+
+#define WORK_CPU_UNBOUND MAXCPU
+#define WQ_UNBOUND (1 << 0)
+#define WQ_HIGHPRI (1 << 1)
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *);
+
+struct work_exec {
+ TAILQ_ENTRY(work_exec) entry;
+ struct work_struct *target;
+};
+
+struct workqueue_struct {
+ struct taskqueue *taskqueue;
+ struct mtx exec_mtx;
+ TAILQ_HEAD(, work_exec) exec_head;
+ atomic_t draining;
+};
+
+#define WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx)
+#define WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx)
+
+struct work_struct {
+ struct task work_task;
+ struct workqueue_struct *work_queue;
+ work_func_t func;
+ atomic_t state;
+};
+
+struct rcu_work {
+ struct work_struct work;
+ struct rcu_head rcu;
+
+ struct workqueue_struct *wq;
+};
+
+#define DECLARE_WORK(name, fn) \
+ struct work_struct name; \
+ static void name##_init(void *arg) \
+ { \
+ INIT_WORK(&name, fn); \
+ } \
+ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL)
+
+struct delayed_work {
+ struct work_struct work;
+ struct {
+ struct callout callout;
+ struct mtx mtx;
+ unsigned long expires;
+ } timer;
+};
+
+#define DECLARE_DELAYED_WORK(name, fn) \
+ struct delayed_work name; \
+ static void __linux_delayed_ ## name ## _init(void *arg) \
+ { \
+ linux_init_delayed_work(&name, fn); \
+ } \
+ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, \
+ __linux_delayed_ ## name##_init, NULL)
+
+static inline struct delayed_work *
+to_delayed_work(struct work_struct *work)
+{
+ return (container_of(work, struct delayed_work, work));
+}
+
+#define INIT_WORK(work, fn) \
+do { \
+ (work)->func = (fn); \
+ (work)->work_queue = NULL; \
+ atomic_set(&(work)->state, 0); \
+ TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work)); \
+} while (0)
+
+#define INIT_RCU_WORK(_work, _fn) \
+ INIT_WORK(&(_work)->work, (_fn))
+
+#define INIT_WORK_ONSTACK(work, fn) \
+ INIT_WORK(work, fn)
+
+#define INIT_DELAYED_WORK(dwork, fn) \
+ linux_init_delayed_work(dwork, fn)
+
+#define INIT_DELAYED_WORK_ONSTACK(dwork, fn) \
+ linux_init_delayed_work(dwork, fn)
+
+#define INIT_DEFERRABLE_WORK(dwork, fn) \
+ INIT_DELAYED_WORK(dwork, fn)
+
+#define flush_scheduled_work() \
+ taskqueue_drain_all(system_wq->taskqueue)
+
+#define queue_work(wq, work) \
+ linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)
+
+#define schedule_work(work) \
+ linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work)
+
+#define queue_delayed_work(wq, dwork, delay) \
+ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay)
+
+#define schedule_delayed_work_on(cpu, dwork, delay) \
+ linux_queue_delayed_work_on(cpu, system_wq, dwork, delay)
+
+#define queue_work_on(cpu, wq, work) \
+ linux_queue_work_on(cpu, wq, work)
+
+#define schedule_delayed_work(dwork, delay) \
+ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay)
+
+#define queue_delayed_work_on(cpu, wq, dwork, delay) \
+ linux_queue_delayed_work_on(cpu, wq, dwork, delay)
+
+#define create_singlethread_workqueue(name) \
+ linux_create_workqueue_common(name, 1)
+
+#define create_workqueue(name) \
+ linux_create_workqueue_common(name, mp_ncpus)
+
+#define alloc_ordered_workqueue(name, flags) \
+ linux_create_workqueue_common(name, 1)
+
+#define alloc_workqueue(name, flags, max_active) \
+ linux_create_workqueue_common(name, max_active)
+
+#define flush_workqueue(wq) \
+ taskqueue_drain_all((wq)->taskqueue)
+
+#define drain_workqueue(wq) do { \
+ atomic_inc(&(wq)->draining); \
+ taskqueue_drain_all((wq)->taskqueue); \
+ atomic_dec(&(wq)->draining); \
+} while (0)
+
+#define mod_delayed_work(wq, dwork, delay) ({ \
+ bool __retval; \
+ __retval = linux_cancel_delayed_work(dwork); \
+ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, \
+ wq, dwork, delay); \
+ __retval; \
+})
+
+#define delayed_work_pending(dwork) \
+ linux_work_pending(&(dwork)->work)
+
+#define cancel_work(work) \
+ linux_cancel_work(work)
+
+#define cancel_delayed_work(dwork) \
+ linux_cancel_delayed_work(dwork)
+
+#define cancel_work_sync(work) \
+ linux_cancel_work_sync(work)
+
+#define cancel_delayed_work_sync(dwork) \
+ linux_cancel_delayed_work_sync(dwork)
+
+#define flush_work(work) \
+ linux_flush_work(work)
+
+#define queue_rcu_work(wq, rwork) \
+ linux_queue_rcu_work(wq, rwork)
+
+#define flush_rcu_work(rwork) \
+ linux_flush_rcu_work(rwork)
+
+#define flush_delayed_work(dwork) \
+ linux_flush_delayed_work(dwork)
+
+#define work_pending(work) \
+ linux_work_pending(work)
+
+#define work_busy(work) \
+ linux_work_busy(work)
+
+#define destroy_work_on_stack(work) \
+ do { } while (0)
+
+#define destroy_delayed_work_on_stack(dwork) \
+ do { } while (0)
+
+#define destroy_workqueue(wq) \
+ linux_destroy_workqueue(wq)
+
+#define current_work() \
+ linux_current_work()
+
+/* prototypes */
+
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_highpri_wq;
+extern struct workqueue_struct *system_power_efficient_wq;
+
+extern void linux_init_delayed_work(struct delayed_work *, work_func_t);
+extern void linux_work_fn(void *, int);
+extern void linux_delayed_work_fn(void *, int);
+extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
+extern void linux_destroy_workqueue(struct workqueue_struct *);
+extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *);
+extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *,
+ struct delayed_work *, unsigned long delay);
+extern bool linux_cancel_work(struct work_struct *);
+extern bool linux_cancel_delayed_work(struct delayed_work *);
+extern bool linux_cancel_work_sync(struct work_struct *);
+extern bool linux_cancel_delayed_work_sync(struct delayed_work *);
+extern bool linux_flush_work(struct work_struct *);
+extern bool linux_flush_delayed_work(struct delayed_work *);
+extern bool linux_work_pending(struct work_struct *);
+extern bool linux_work_busy(struct work_struct *);
+extern struct work_struct *linux_current_work(void);
+extern bool linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
+extern bool linux_flush_rcu_work(struct rcu_work *rwork);
+
+static inline bool
+queue_work_node(int node __unused, struct workqueue_struct *wq, struct work_struct *work)
+{
+ return (queue_work(wq, work));
+}
+
+#endif /* _LINUXKPI_LINUX_WORKQUEUE_H_ */
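
A hypothetical sketch of the workqueue mappings above (not part of the patch): immediate and delayed work items hanging off a driver softc, queued on the shared system workqueue and cancelled synchronously on teardown. The softc layout and 10-tick delay are illustrative.

/* Hypothetical sketch, not part of the patch. */
struct example_softc {
	struct work_struct work;
	struct delayed_work dwork;
};

static void
example_work_fn(struct work_struct *work)
{
	struct example_softc *sc =
	    container_of(work, struct example_softc, work);

	(void)sc;	/* do the deferred processing here */
}

static void
example_dwork_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct example_softc *sc =
	    container_of(dwork, struct example_softc, dwork);

	(void)sc;	/* do the delayed processing here */
}

static void
example_start(struct example_softc *sc)
{
	INIT_WORK(&sc->work, example_work_fn);
	INIT_DELAYED_WORK(&sc->dwork, example_dwork_fn);

	schedule_work(&sc->work);			/* run ASAP */
	schedule_delayed_work(&sc->dwork, 10 /* ticks */);
}

static void
example_stop(struct example_softc *sc)
{
	cancel_delayed_work_sync(&sc->dwork);
	cancel_work_sync(&sc->work);
}
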
diff --git a/sys/compat/linuxkpi/common/include/linux/ww_mutex.h b/sys/compat/linuxkpi/common/include/linux/ww_mutex.h
new file mode 100644
index 000000000000..9219755bb78e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/ww_mutex.h
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_WW_MUTEX_H_
+#define _LINUXKPI_LINUX_WW_MUTEX_H_
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/condvar.h>
+#include <sys/kernel.h>
+
+#include <linux/mutex.h>
+
+struct ww_class {
+ const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+};
+
+struct ww_mutex {
+ struct mutex base;
+ struct cv condvar;
+ struct ww_acquire_ctx *ctx;
+};
+
+#define DEFINE_WW_CLASS(name) \
+ struct ww_class name = { \
+ .mutex_name = mutex_name(#name "_mutex") \
+ }
+
+#define DEFINE_WW_MUTEX(name, ww_class) \
+ struct ww_mutex name; \
+ static void name##_init(void *arg) \
+ { \
+ ww_mutex_init(&name, &ww_class); \
+ } \
+ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL)
+
+#define DEFINE_WD_CLASS(name) DEFINE_WW_CLASS(name)
+
+#define ww_mutex_is_locked(_m) \
+ sx_xlocked(&(_m)->base.sx)
+
+#define ww_mutex_lock_slow(_m, _x) \
+ ww_mutex_lock(_m, _x)
+
+#define ww_mutex_lock_slow_interruptible(_m, _x) \
+ ww_mutex_lock_interruptible(_m, _x)
+
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51600
+static inline int __must_check
+ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx __unused)
+#else
+static inline int __must_check
+ww_mutex_trylock(struct ww_mutex *lock)
+#endif
+{
+ return (mutex_trylock(&lock->base));
+}
+
+extern int linux_ww_mutex_lock_sub(struct ww_mutex *,
+ struct ww_acquire_ctx *, int catch_signal);
+
+static inline int
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+ if (MUTEX_SKIP())
+ return (0);
+ else if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == curthread)
+ return (-EALREADY);
+ else
+ return (linux_ww_mutex_lock_sub(lock, ctx, 0));
+}
+
+static inline int
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+ if (MUTEX_SKIP())
+ return (0);
+ else if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == curthread)
+ return (-EALREADY);
+ else
+ return (linux_ww_mutex_lock_sub(lock, ctx, 1));
+}
+
+extern void linux_ww_mutex_unlock_sub(struct ww_mutex *);
+
+static inline void
+ww_mutex_unlock(struct ww_mutex *lock)
+{
+ if (MUTEX_SKIP())
+ return;
+ else
+ linux_ww_mutex_unlock_sub(lock);
+}
+
+static inline void
+ww_mutex_destroy(struct ww_mutex *lock)
+{
+ cv_destroy(&lock->condvar);
+ mutex_destroy(&lock->base);
+}
+
+static inline void
+ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class)
+{
+}
+
+static inline void
+ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class)
+{
+ linux_mutex_init(&lock->base, ww_class->mutex_name, SX_NOWITNESS);
+ cv_init(&lock->condvar, "lkpi-ww");
+}
+
+static inline void
+ww_acquire_fini(struct ww_acquire_ctx *ctx)
+{
+}
+
+static inline void
+ww_acquire_done(struct ww_acquire_ctx *ctx)
+{
+}
+
+#endif /* _LINUXKPI_LINUX_WW_MUTEX_H_ */
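
A hypothetical sketch of the ww_mutex API above (not part of the patch): take two mutexes under one acquire context. This shim does not implement the full wound/wait -EDEADLK backoff, so only the basic lock/unlock pairing and error handling are shown; the class is taken as a parameter.

/* Hypothetical sketch, not part of the patch. */
static int
example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
    struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	int error;

	ww_acquire_init(&ctx, class);

	error = ww_mutex_lock(a, &ctx);
	if (error != 0)
		goto out;
	error = ww_mutex_lock(b, &ctx);
	if (error != 0) {
		ww_mutex_unlock(a);
		goto out;
	}
	ww_acquire_done(&ctx);

	/* ... critical section touching both objects ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
out:
	ww_acquire_fini(&ctx);
	return (error);
}
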
diff --git a/sys/compat/linuxkpi/common/include/linux/xarray.h b/sys/compat/linuxkpi/common/include/linux/xarray.h
new file mode 100644
index 000000000000..fba36eea0ab5
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/xarray.h
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_LINUX_XARRAY_H_
+#define _LINUXKPI_LINUX_XARRAY_H_
+
+#include <linux/gfp.h>
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+#include <linux/kconfig.h>
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#define XA_LIMIT(min, max) \
+ ({ CTASSERT((min) == 0); (uint32_t)(max); })
+
+#define XA_FLAGS_ALLOC (1U << 0)
+#define XA_FLAGS_LOCK_IRQ (1U << 1)
+#define XA_FLAGS_ALLOC1 (1U << 2)
+
+#define XA_ERROR(x) \
+ ERR_PTR(x)
+
+#define xa_is_err(x) \
+ IS_ERR(x)
+
+#define xa_limit_32b XA_LIMIT(0, 0xFFFFFFFF)
+
+#define XA_ASSERT_LOCKED(xa) mtx_assert(&(xa)->xa_lock, MA_OWNED)
+#define xa_lock(xa) mtx_lock(&(xa)->xa_lock)
+#define xa_unlock(xa) mtx_unlock(&(xa)->xa_lock)
+
+struct xarray {
+ struct radix_tree_root xa_head;
+ struct mtx xa_lock; /* internal mutex */
+ uint32_t xa_flags; /* see XA_FLAGS_XXX */
+};
+
+/*
+ * Extensible arrays API implemented as a wrapper
+ * around the radix tree implementation.
+ */
+void *xa_erase(struct xarray *, uint32_t);
+void *xa_load(struct xarray *, uint32_t);
+int xa_alloc(struct xarray *, uint32_t *, void *, uint32_t, gfp_t);
+int xa_alloc_cyclic(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t);
+int xa_alloc_cyclic_irq(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t);
+int xa_insert(struct xarray *, uint32_t, void *, gfp_t);
+void *xa_store(struct xarray *, uint32_t, void *, gfp_t);
+void xa_init_flags(struct xarray *, uint32_t);
+bool xa_empty(struct xarray *);
+void xa_destroy(struct xarray *);
+void *xa_next(struct xarray *, unsigned long *, bool);
+
+#define xa_for_each(xa, index, entry) \
+ for ((entry) = NULL, (index) = 0; \
+ ((entry) = xa_next(xa, &index, (entry) != NULL)) != NULL; )
+
+/*
+ * Unlocked versions of the functions above.
+ */
+void *__xa_erase(struct xarray *, uint32_t);
+int __xa_alloc(struct xarray *, uint32_t *, void *, uint32_t, gfp_t);
+int __xa_alloc_cyclic(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t);
+int __xa_insert(struct xarray *, uint32_t, void *, gfp_t);
+void *__xa_store(struct xarray *, uint32_t, void *, gfp_t);
+bool __xa_empty(struct xarray *);
+void *__xa_next(struct xarray *, unsigned long *, bool);
+
+#define xa_store_irq(xa, index, ptr, gfp) \
+ xa_store((xa), (index), (ptr), (gfp))
+
+#define xa_erase_irq(xa, index) \
+ xa_erase((xa), (index))
+
+#define xa_lock_irq(xa) xa_lock(xa)
+#define xa_unlock_irq(xa) xa_unlock(xa)
+
+#define xa_lock_irqsave(xa, flags) \
+ do { \
+ xa_lock((xa)); \
+ flags = 0; \
+ } while (0)
+
+#define xa_unlock_irqrestore(xa, flags) \
+ do { \
+ xa_unlock((xa)); \
+ (void)(flags); \
+ } while (0)
+
+static inline int
+xa_err(void *ptr)
+{
+ return (PTR_ERR_OR_ZERO(ptr));
+}
+
+static inline void
+xa_init(struct xarray *xa)
+{
+ xa_init_flags(xa, 0);
+}
+
+static inline void *
+xa_mk_value(unsigned long v)
+{
+ unsigned long r = (v << 1) | 1;
+
+ return ((void *)r);
+}
+
+static inline bool
+xa_is_value(const void *e)
+{
+ unsigned long v = (unsigned long)e;
+
+ return (v & 1);
+}
+
+static inline unsigned long
+xa_to_value(const void *e)
+{
+ unsigned long v = (unsigned long)e;
+
+ return (v >> 1);
+}
+#endif /* _LINUXKPI_LINUX_XARRAY_H_ */
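
A hypothetical sketch of the xarray wrapper above (not part of the patch): an ID-to-pointer map using ID allocation, iteration and erase. GFP_KERNEL is assumed to come from the included <linux/gfp.h>.

/* Hypothetical sketch, not part of the patch. */
static int
example_xa_usage(struct xarray *xa, void *object)
{
	uint32_t id;
	unsigned long index;
	void *entry;
	int error;

	xa_init_flags(xa, XA_FLAGS_ALLOC);

	/* Allocate a free index in [0..0xFFFFFFFF] and store object there. */
	error = xa_alloc(xa, &id, object, xa_limit_32b, GFP_KERNEL);
	if (error != 0)
		return (error);

	/* Iterate over all present entries. */
	xa_for_each(xa, index, entry)
		printf("index %lu -> %p\n", index, entry);

	xa_erase(xa, id);
	xa_destroy(xa);
	return (0);
}
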
diff --git a/sys/compat/linuxkpi/common/include/net/addrconf.h b/sys/compat/linuxkpi/common/include/net/addrconf.h
new file mode 100644
index 000000000000..33c07792d807
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/addrconf.h
@@ -0,0 +1,49 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_ADDRCONF_H
+#define _LINUXKPI_NET_ADDRCONF_H
+
+#include <sys/types.h>
+#include <netinet/in.h>
+
+static __inline void
+addrconf_addr_solict_mult(struct in6_addr *ia6, struct in6_addr *sol)
+{
+
+ sol->s6_addr16[0] = IPV6_ADDR_INT16_MLL;
+ sol->s6_addr16[1] = 0;
+ sol->s6_addr32[1] = 0;
+ sol->s6_addr32[2] = IPV6_ADDR_INT32_ONE;
+ sol->s6_addr32[3] = ia6->s6_addr32[3];
+ sol->s6_addr8[12] = 0xff;
+}
+
+#endif /* _LINUXKPI_NET_ADDRCONF_H */
diff --git a/sys/compat/linuxkpi/common/include/net/cfg80211.h b/sys/compat/linuxkpi/common/include/net/cfg80211.h
new file mode 100644
index 000000000000..18b34f0e90ec
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/cfg80211.h
@@ -0,0 +1,2140 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2021-2022 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_CFG80211_H
+#define _LINUXKPI_NET_CFG80211_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+#include <linux/ieee80211.h>
+#include <linux/mutex.h>
+#include <linux/if_ether.h>
+#include <linux/ethtool.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <net/regulatory.h>
+
+#include <net80211/ieee80211.h>
+
+/* linux_80211.c */
+extern int linuxkpi_debug_80211;
+#ifndef D80211_TODO
+#define D80211_TODO 0x1
+#endif
+#ifndef D80211_IMPROVE
+#define D80211_IMPROVE 0x2
+#endif
+#define TODO(fmt, ...) if (linuxkpi_debug_80211 & D80211_TODO) \
+ printf("%s:%d: XXX LKPI80211 TODO " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+#define IMPROVE(...) if (linuxkpi_debug_80211 & D80211_IMPROVE) \
+ printf("%s:%d: XXX LKPI80211 IMPROVE\n", __func__, __LINE__)
+
+enum rfkill_hard_block_reasons {
+ RFKILL_HARD_BLOCK_NOT_OWNER = BIT(0),
+};
+
+#define WIPHY_PARAM_FRAG_THRESHOLD __LINE__ /* TODO FIXME brcmfmac */
+#define WIPHY_PARAM_RETRY_LONG __LINE__ /* TODO FIXME brcmfmac */
+#define WIPHY_PARAM_RETRY_SHORT __LINE__ /* TODO FIXME brcmfmac */
+#define WIPHY_PARAM_RTS_THRESHOLD __LINE__ /* TODO FIXME brcmfmac */
+
+#define CFG80211_SIGNAL_TYPE_MBM __LINE__ /* TODO FIXME brcmfmac */
+
+#define UPDATE_ASSOC_IES 1
+
+#define IEEE80211_MAX_CHAINS 4 /* net80211: IEEE80211_MAX_CHAINS copied */
+
+enum cfg80211_rate_info_flags {
+ RATE_INFO_FLAGS_MCS = BIT(0),
+ RATE_INFO_FLAGS_VHT_MCS = BIT(1),
+ RATE_INFO_FLAGS_SHORT_GI = BIT(2),
+ RATE_INFO_FLAGS_HE_MCS = BIT(4),
+ RATE_INFO_FLAGS_EHT_MCS = BIT(7),
+ /* Max 8 bits as used in struct rate_info. */
+};
+
+#define CFG80211_RATE_INFO_FLAGS_BITS \
+ "\20\1MCS\2VHT_MCS\3SGI\5HE_MCS\10EHT_MCS"
+
+extern const uint8_t rfc1042_header[6];
+extern const uint8_t bridge_tunnel_header[6];
+
+enum ieee80211_privacy {
+ IEEE80211_PRIVACY_ANY,
+};
+
+enum ieee80211_bss_type {
+ IEEE80211_BSS_TYPE_ANY,
+};
+
+enum cfg80211_bss_frame_type {
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ CFG80211_BSS_FTYPE_BEACON,
+ CFG80211_BSS_FTYPE_PRESP,
+};
+
+enum ieee80211_channel_flags {
+ IEEE80211_CHAN_DISABLED = BIT(0),
+ IEEE80211_CHAN_INDOOR_ONLY = BIT(1),
+ IEEE80211_CHAN_IR_CONCURRENT = BIT(2),
+ IEEE80211_CHAN_RADAR = BIT(3),
+ IEEE80211_CHAN_NO_IR = BIT(4),
+ IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
+ IEEE80211_CHAN_NO_HT40PLUS = BIT(6),
+ IEEE80211_CHAN_NO_80MHZ = BIT(7),
+ IEEE80211_CHAN_NO_160MHZ = BIT(8),
+ IEEE80211_CHAN_NO_OFDM = BIT(9),
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(10),
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(11),
+ IEEE80211_CHAN_PSD = BIT(12),
+ IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(13),
+ IEEE80211_CHAN_CAN_MONITOR = BIT(14),
+};
+#define IEEE80211_CHAN_NO_HT40 (IEEE80211_CHAN_NO_HT40MINUS|IEEE80211_CHAN_NO_HT40PLUS)
+
+struct ieee80211_txrx_stypes {
+ uint16_t tx;
+ uint16_t rx;
+};
+
+/* XXX net80211 has an ieee80211_channel as well. */
+struct linuxkpi_ieee80211_channel {
+ /* TODO FIXME */
+ uint32_t hw_value; /* ic_ieee */
+ uint32_t center_freq; /* ic_freq */
+ enum ieee80211_channel_flags flags; /* ic_flags */
+ enum nl80211_band band;
+ int8_t max_power; /* ic_maxpower */
+ bool beacon_found;
+ int max_antenna_gain, max_reg_power;
+ int orig_flags;
+ int dfs_cac_ms, dfs_state;
+ int orig_mpwr;
+};
+
+struct cfg80211_bitrate_mask {
+ /* TODO FIXME */
+ struct {
+ uint32_t legacy;
+ uint8_t ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ uint16_t vht_mcs[8];
+ uint16_t he_mcs[8];
+ enum nl80211_txrate_gi gi;
+ enum nl80211_he_gi he_gi;
+ uint8_t he_ltf; /* XXX enum? */
+ } control[NUM_NL80211_BANDS];
+};
+
+enum rate_info_bw {
+ RATE_INFO_BW_20 = 0,
+ RATE_INFO_BW_5,
+ RATE_INFO_BW_10,
+ RATE_INFO_BW_40,
+ RATE_INFO_BW_80,
+ RATE_INFO_BW_160,
+ RATE_INFO_BW_HE_RU,
+ RATE_INFO_BW_320,
+ RATE_INFO_BW_EHT_RU,
+};
+
+struct rate_info {
+ uint8_t flags; /* enum cfg80211_rate_info_flags */
+ uint8_t bw; /* enum rate_info_bw */
+ uint16_t legacy;
+ uint8_t mcs;
+ uint8_t nss;
+ uint8_t he_dcm;
+ uint8_t he_gi;
+ uint8_t he_ru_alloc;
+ uint8_t eht_gi;
+};
+
+struct ieee80211_rate {
+ uint32_t flags; /* enum ieee80211_rate_flags */
+ uint16_t bitrate;
+ uint16_t hw_value;
+ uint16_t hw_value_short;
+};
+
+struct ieee80211_sta_ht_cap {
+ bool ht_supported;
+ uint8_t ampdu_density;
+ uint8_t ampdu_factor;
+ uint16_t cap;
+ struct ieee80211_mcs_info mcs;
+};
+
+/* XXX net80211 calls these IEEE80211_VHTCAP_* */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 /* IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 /* IEEE80211_VHTCAP_MAX_MPDU_LENGTH_7991 */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 /* IEEE80211_VHTCAP_MAX_MPDU_LENGTH_11454 */
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003 /* IEEE80211_VHTCAP_MAX_MPDU_MASK */
+
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_160MHZ << IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_MASK_S)
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_160_80P80MHZ << IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_MASK_S)
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_MASK
+
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010 /* IEEE80211_VHTCAP_RXLDPC */
+
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 /* IEEE80211_VHTCAP_SHORT_GI_80 */
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 /* IEEE80211_VHTCAP_SHORT_GI_160 */
+
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080 /* IEEE80211_VHTCAP_TXSTBC */
+
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100 /* IEEE80211_VHTCAP_RXSTBC_1 */
+#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 /* IEEE80211_VHTCAP_RXSTBC_MASK */
+
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 /* IEEE80211_VHTCAP_SU_BEAMFORMER_CAPABLE */
+
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 /* IEEE80211_VHTCAP_SU_BEAMFORMEE_CAPABLE */
+
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 /* IEEE80211_VHTCAP_MU_BEAMFORMER_CAPABLE */
+
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 /* IEEE80211_VHTCAP_MU_BEAMFORMEE_CAPABLE */
+
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 /* IEEE80211_VHTCAP_BEAMFORMEE_STS_SHIFT */
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT) /* IEEE80211_VHTCAP_BEAMFORMEE_STS_MASK */
+
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000 /* IEEE80211_VHTCAP_HTC_VHT */
+
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 /* IEEE80211_VHTCAP_RX_ANTENNA_PATTERN */
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 /* IEEE80211_VHTCAP_TX_ANTENNA_PATTERN */
+
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 /* IEEE80211_VHTCAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB */
+
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16 /* IEEE80211_VHTCAP_SOUNDING_DIMENSIONS_SHIFT */
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
+ (7 << IEEE80211_VHTCAP_SOUNDING_DIMENSIONS_SHIFT) /* IEEE80211_VHTCAP_SOUNDING_DIMENSIONS_MASK */
+
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23 /* IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT */
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
+ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT) /* IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK */
+
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK IEEE80211_VHTCAP_EXT_NSS_BW
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT IEEE80211_VHTCAP_EXT_NSS_BW_S
+
+struct ieee80211_sta_vht_cap {
+ /* TODO FIXME */
+ bool vht_supported;
+ uint32_t cap;
+ struct ieee80211_vht_mcs_info vht_mcs;
+};
+
+enum ieee80211_vht_opmode {
+ IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
+};
+
+struct cfg80211_connect_resp_params {
+ /* XXX TODO */
+ uint8_t *bssid;
+ const uint8_t *req_ie;
+ const uint8_t *resp_ie;
+ uint32_t req_ie_len;
+ uint32_t resp_ie_len;
+ int status;
+};
+
+struct cfg80211_inform_bss {
+ /* XXX TODO */
+ int boottime_ns, scan_width, signal;
+ struct linuxkpi_ieee80211_channel *chan;
+};
+
+struct cfg80211_roam_info {
+ /* XXX TODO */
+ uint8_t *bssid;
+ const uint8_t *req_ie;
+ const uint8_t *resp_ie;
+ uint32_t req_ie_len;
+ uint32_t resp_ie_len;
+ struct linuxkpi_ieee80211_channel *channel;
+};
+
+struct cfg80211_bss_ies {
+ uint8_t *data;
+ size_t len;
+};
+
+struct cfg80211_bss {
+ /* XXX TODO */
+ struct cfg80211_bss_ies *ies;
+ struct cfg80211_bss_ies *beacon_ies;
+
+ int32_t signal;
+};
+
+struct cfg80211_chan_def {
+ /* XXX TODO */
+ struct linuxkpi_ieee80211_channel *chan;
+ enum nl80211_chan_width width;
+ uint32_t center_freq1;
+ uint32_t center_freq2;
+ uint16_t punctured;
+};
+
+struct cfg80211_ftm_responder_stats {
+ /* XXX TODO */
+ int asap_num, failed_num, filled, non_asap_num, out_of_window_triggers_num, partial_num, reschedule_requests_num, success_num, total_duration_ms, unknown_triggers_num;
+};
+
+struct cfg80211_pmsr_capabilities {
+ /* XXX TODO */
+ int max_peers, randomize_mac_addr, report_ap_tsf;
+ struct {
+ int asap, bandwidths, max_bursts_exponent, max_ftms_per_burst, non_asap, non_trigger_based, preambles, request_civicloc, request_lci, supported, trigger_based;
+ } ftm;
+};
+
+struct cfg80211_pmsr_ftm_request {
+ /* XXX TODO */
+ int asap, burst_period, ftmr_retries, ftms_per_burst, non_trigger_based, num_bursts_exp, request_civicloc, request_lci, trigger_based;
+ uint8_t bss_color;
+ bool lmr_feedback;
+};
+
+struct cfg80211_pmsr_request_peer {
+ /* XXX TODO */
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_pmsr_ftm_request ftm;
+ uint8_t addr[ETH_ALEN];
+ int report_ap_tsf;
+};
+
+struct cfg80211_pmsr_request {
+ /* XXX TODO */
+ int cookie, n_peers, timeout;
+ uint8_t mac_addr[ETH_ALEN], mac_addr_mask[ETH_ALEN];
+ struct cfg80211_pmsr_request_peer peers[];
+};
+
+struct cfg80211_pmsr_ftm_result {
+ /* XXX TODO */
+ int burst_index, busy_retry_time, failure_reason;
+ int num_ftmr_successes, rssi_avg, rssi_avg_valid, rssi_spread, rssi_spread_valid, rtt_avg, rtt_avg_valid, rtt_spread, rtt_spread_valid, rtt_variance, rtt_variance_valid;
+ uint8_t *lci;
+ uint8_t *civicloc;
+ int lci_len;
+ int civicloc_len;
+};
+
+struct cfg80211_pmsr_result {
+ /* XXX TODO */
+ int ap_tsf, ap_tsf_valid, final, host_time, status, type;
+ uint8_t addr[ETH_ALEN];
+ struct cfg80211_pmsr_ftm_result ftm;
+};
+
+struct cfg80211_sar_freq_ranges {
+ uint32_t start_freq;
+ uint32_t end_freq;
+};
+
+struct cfg80211_sar_sub_specs {
+ uint32_t freq_range_index;
+ int power;
+};
+
+struct cfg80211_sar_specs {
+ enum nl80211_sar_type type;
+ uint32_t num_sub_specs;
+ struct cfg80211_sar_sub_specs sub_specs[];
+};
+
+struct cfg80211_sar_capa {
+ enum nl80211_sar_type type;
+ uint32_t num_freq_ranges;
+ const struct cfg80211_sar_freq_ranges *freq_ranges;
+};
+
+struct cfg80211_ssid {
+ int ssid_len;
+ uint8_t ssid[IEEE80211_MAX_SSID_LEN];
+};
+
+struct cfg80211_scan_6ghz_params {
+ /* XXX TODO */
+ uint8_t *bssid;
+ int channel_idx, psc_no_listen, short_ssid, short_ssid_valid, unsolicited_probe, psd_20;
+};
+
+struct cfg80211_match_set {
+ uint8_t bssid[ETH_ALEN];
+ struct cfg80211_ssid ssid;
+ int rssi_thold;
+};
+
+struct cfg80211_scan_request {
+ /* XXX TODO */
+ bool no_cck;
+ bool scan_6ghz;
+ bool duration_mandatory;
+ int8_t tsf_report_link_id;
+ uint16_t duration;
+ uint32_t flags;
+ struct wireless_dev *wdev;
+ struct wiphy *wiphy;
+ uint64_t scan_start;
+ uint32_t rates[NUM_NL80211_BANDS];
+ int ie_len;
+ uint8_t *ie;
+ uint8_t mac_addr[ETH_ALEN], mac_addr_mask[ETH_ALEN];
+ uint8_t bssid[ETH_ALEN];
+ int n_ssids;
+ int n_6ghz_params;
+ int n_channels;
+ struct cfg80211_ssid *ssids;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ struct linuxkpi_ieee80211_channel *channels[0];
+};
+
+struct cfg80211_sched_scan_plan {
+ /* XXX TODO */
+ int interval, iterations;
+};
+
+struct cfg80211_sched_scan_request {
+ /* XXX TODO */
+ int delay, flags;
+ uint8_t mac_addr[ETH_ALEN], mac_addr_mask[ETH_ALEN];
+ uint64_t reqid;
+ int n_match_sets;
+ int n_scan_plans;
+ int n_ssids;
+ int n_channels;
+ int ie_len;
+ uint8_t *ie;
+ struct cfg80211_match_set *match_sets;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ struct cfg80211_ssid *ssids;
+ struct linuxkpi_ieee80211_channel *channels[0];
+};
+
+struct cfg80211_scan_info {
+ uint64_t scan_start_tsf;
+ uint8_t tsf_bssid[ETH_ALEN];
+ bool aborted;
+};
+
+struct cfg80211_beacon_data {
+ /* XXX TODO */
+ const uint8_t *head;
+ const uint8_t *tail;
+ uint32_t head_len;
+ uint32_t tail_len;
+ const uint8_t *proberesp_ies;
+ const uint8_t *assocresp_ies;
+ uint32_t proberesp_ies_len;
+ uint32_t assocresp_ies_len;
+};
+
+struct cfg80211_ap_settings {
+ /* XXX TODO */
+ int auth_type, beacon_interval, dtim_period, hidden_ssid, inactivity_timeout;
+ const uint8_t *ssid;
+ size_t ssid_len;
+ struct cfg80211_beacon_data beacon;
+ struct cfg80211_chan_def chandef;
+};
+
+struct cfg80211_bss_selection {
+ /* XXX TODO */
+ enum nl80211_bss_select_attr behaviour;
+ union {
+ enum nl80211_band band_pref;
+ struct {
+ enum nl80211_band band;
+ uint8_t delta;
+ } adjust;
+ } param;
+};
+
+struct cfg80211_crypto { /* XXX made up name */
+ /* XXX TODO */
+ enum nl80211_wpa_versions wpa_versions;
+ uint32_t cipher_group; /* WLAN_CIPHER_SUITE_* */
+ uint32_t *akm_suites;
+ uint32_t *ciphers_pairwise;
+ const uint8_t *sae_pwd;
+ const uint8_t *psk;
+ int n_akm_suites;
+ int n_ciphers_pairwise;
+ int sae_pwd_len;
+};
+
+struct cfg80211_connect_params {
+ /* XXX TODO */
+ struct linuxkpi_ieee80211_channel *channel;
+ uint8_t *bssid;
+ const uint8_t *ie;
+ const uint8_t *ssid;
+ uint32_t ie_len;
+ uint32_t ssid_len;
+ const void *key;
+ uint32_t key_len;
+ int auth_type, key_idx, privacy, want_1x;
+ struct cfg80211_bss_selection bss_select;
+ struct cfg80211_crypto crypto;
+};
+
+enum bss_param_flags { /* Used as bitflags. XXX FIXME values? */
+ BSS_PARAM_FLAGS_CTS_PROT = 0x01,
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE = 0x02,
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 0x04,
+};
+
+struct cfg80211_ibss_params {
+ /* XXX TODO */
+ int basic_rates, beacon_interval;
+ int channel_fixed, ie, ie_len, privacy;
+ int dtim_period;
+ uint8_t *ssid;
+ uint8_t *bssid;
+ int ssid_len;
+ struct cfg80211_chan_def chandef;
+ enum bss_param_flags flags;
+};
+
+struct cfg80211_mgmt_tx_params {
+ /* XXX TODO */
+ struct linuxkpi_ieee80211_channel *chan;
+ const uint8_t *buf;
+ size_t len;
+ int wait;
+};
+
+struct cfg80211_pmk_conf {
+ /* XXX TODO */
+ const uint8_t *pmk;
+ uint8_t pmk_len;
+};
+
+struct cfg80211_pmksa {
+ /* XXX TODO */
+ const uint8_t *bssid;
+ const uint8_t *pmkid;
+};
+
+struct station_del_parameters {
+ /* XXX TODO */
+ const uint8_t *mac;
+ uint32_t reason_code; /* elsewhere uint16_t? */
+};
+
+struct station_info {
+ uint64_t filled; /* enum nl80211_sta_info */
+ uint32_t connected_time;
+ uint32_t inactive_time;
+
+ uint64_t rx_bytes;
+ uint32_t rx_packets;
+ uint32_t rx_dropped_misc;
+
+ uint64_t rx_duration;
+ uint32_t rx_beacon;
+ uint8_t rx_beacon_signal_avg;
+
+ int8_t signal;
+ int8_t signal_avg;
+ int8_t ack_signal;
+ int8_t avg_ack_signal;
+
+ /* gap */
+ int generation;
+
+ uint64_t tx_bytes;
+ uint32_t tx_packets;
+ uint32_t tx_failed;
+ uint64_t tx_duration;
+ uint32_t tx_retries;
+
+ int chains;
+ uint8_t chain_signal[IEEE80211_MAX_CHAINS];
+ uint8_t chain_signal_avg[IEEE80211_MAX_CHAINS];
+
+ uint8_t *assoc_req_ies;
+ size_t assoc_req_ies_len;
+
+ struct rate_info rxrate;
+ struct rate_info txrate;
+ struct cfg80211_ibss_params bss_param;
+ struct nl80211_sta_flag_update sta_flags;
+};
+
+struct station_parameters {
+ /* XXX TODO */
+ int sta_flags_mask, sta_flags_set;
+};
+
+struct key_params {
+ /* XXX TODO */
+ const uint8_t *key;
+ const uint8_t *seq;
+ int key_len;
+ int seq_len;
+ uint32_t cipher; /* WLAN_CIPHER_SUITE_* */
+};
+
+struct mgmt_frame_regs {
+ /* XXX TODO */
+ int interface_stypes;
+};
+
+struct vif_params {
+ /* XXX TODO */
+ uint8_t macaddr[ETH_ALEN];
+};
+
+/* That the world needs so many different structs for this is amazing. */
+struct mac_address {
+ uint8_t addr[ETH_ALEN];
+};
+
+struct ieee80211_reg_rule {
+ /* TODO FIXME */
+ uint32_t flags;
+ int dfs_cac_ms;
+ struct freq_range {
+ int start_freq_khz;
+ int end_freq_khz;
+ int max_bandwidth_khz;
+ } freq_range;
+ struct power_rule {
+ int max_antenna_gain;
+ int max_eirp;
+ } power_rule;
+};
+
+struct linuxkpi_ieee80211_regdomain {
+ /* TODO FIXME */
+ uint8_t alpha2[2];
+ int dfs_region;
+ int n_reg_rules;
+ struct ieee80211_reg_rule reg_rules[];
+};
+
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 0x02
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0x03
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x05
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x06
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 0x07
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x08
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+
+#define IEEE80211_EHT_MCS_NSS_RX 0x01
+#define IEEE80211_EHT_MCS_NSS_TX 0x02
+
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x01
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x03
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x04
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x05
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x06
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x07
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0x02
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x02
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x01
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x03
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x05
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x06
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x07
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x08
+
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x01
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x03
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0x04
+
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0x01
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 0x02
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 0x03
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 0x04
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x05
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x06
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x07
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x08
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x09
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0x0a
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x0b
+
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x01
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x02
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x01
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x03
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x05
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x06
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 0x01
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0x02
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x03
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 0x04
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x01
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x02
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 0x04
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x08
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 0x10
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x20
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 0x40
+
+#define VENDOR_CMD_RAW_DATA (void *)(uintptr_t)(-ENOENT)
+
+/* net80211::net80211_he_cap */
+struct ieee80211_sta_he_cap {
+ bool has_he;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ uint8_t ppe_thres[IEEE80211_HE_CAP_PPE_THRES_MAX];
+};
+
+struct cfg80211_he_bss_color {
+ int color, enabled;
+};
+
+struct ieee80211_he_obss_pd {
+ bool enable;
+ uint8_t min_offset;
+ uint8_t max_offset;
+ uint8_t non_srg_max_offset;
+ uint8_t sr_ctrl;
+ uint8_t bss_color_bitmap[8];
+ uint8_t partial_bssid_bitmap[8];
+};
+
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ union {
+ struct {
+ uint8_t rx_tx_mcs7_max_nss;
+ uint8_t rx_tx_mcs9_max_nss;
+ uint8_t rx_tx_mcs11_max_nss;
+ uint8_t rx_tx_mcs13_max_nss;
+ };
+ uint8_t rx_tx_max_nss[4];
+ };
+};
+
+struct ieee80211_eht_mcs_nss_supp_bw {
+ union {
+ struct {
+ uint8_t rx_tx_mcs9_max_nss;
+ uint8_t rx_tx_mcs11_max_nss;
+ uint8_t rx_tx_mcs13_max_nss;
+ };
+ uint8_t rx_tx_max_nss[3];
+ };
+};
+
+struct ieee80211_eht_cap_elem_fixed {
+ uint8_t mac_cap_info[2];
+ uint8_t phy_cap_info[9];
+};
+
+struct ieee80211_eht_mcs_nss_supp {
+ /* TODO FIXME */
+ /* Can only have either or... */
+ union {
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
+ struct {
+ struct ieee80211_eht_mcs_nss_supp_bw _80;
+ struct ieee80211_eht_mcs_nss_supp_bw _160;
+ struct ieee80211_eht_mcs_nss_supp_bw _320;
+ } bw;
+ };
+};
+
+#define IEEE80211_STA_EHT_PPE_THRES_MAX 32
+struct ieee80211_sta_eht_cap {
+ bool has_eht;
+ struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
+ uint8_t eht_ppe_thres[IEEE80211_STA_EHT_PPE_THRES_MAX];
+};
+
+struct ieee80211_sband_iftype_data {
+ /* TODO FIXME */
+ enum nl80211_iftype types_mask;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+ struct {
+ const uint8_t *data;
+ size_t len;
+ } vendor_elems;
+};
+
+struct ieee80211_supported_band {
+ /* TODO FIXME */
+ struct linuxkpi_ieee80211_channel *channels;
+ struct ieee80211_rate *bitrates;
+ struct ieee80211_sband_iftype_data *iftype_data;
+ int n_channels;
+ int n_bitrates;
+ int n_iftype_data;
+ enum nl80211_band band;
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+};
+
+struct cfg80211_pkt_pattern {
+ /* XXX TODO */
+ uint8_t *mask;
+ uint8_t *pattern;
+ int pattern_len;
+ int pkt_offset;
+};
+
+struct cfg80211_wowlan_nd_match {
+ /* XXX TODO */
+ struct cfg80211_ssid ssid;
+ int n_channels;
+ uint32_t channels[0]; /* freq! = ieee80211_channel_to_frequency() */
+};
+
+struct cfg80211_wowlan_nd_info {
+ /* XXX TODO */
+ int n_matches;
+ struct cfg80211_wowlan_nd_match *matches[0];
+};
+
+enum wiphy_wowlan_support_flags {
+ WIPHY_WOWLAN_DISCONNECT,
+ WIPHY_WOWLAN_MAGIC_PKT,
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY,
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ,
+ WIPHY_WOWLAN_4WAY_HANDSHAKE,
+ WIPHY_WOWLAN_RFKILL_RELEASE,
+ WIPHY_WOWLAN_NET_DETECT,
+};
+
+struct wiphy_wowlan_support {
+ /* XXX TODO */
+ enum wiphy_wowlan_support_flags flags;
+ int max_nd_match_sets, max_pkt_offset, n_patterns, pattern_max_len, pattern_min_len;
+};
+
+struct cfg80211_wowlan_wakeup {
+ /* XXX TODO */
+ uint16_t pattern_idx;
+ bool disconnect;
+ bool unprot_deauth_disassoc;
+ bool eap_identity_req;
+ bool four_way_handshake;
+ bool gtk_rekey_failure;
+ bool magic_pkt;
+ bool rfkill_release;
+ bool tcp_connlost;
+ bool tcp_nomoretokens;
+ bool tcp_match;
+ bool packet_80211;
+ struct cfg80211_wowlan_nd_info *net_detect;
+ uint8_t *packet;
+ uint16_t packet_len;
+ uint16_t packet_present_len;
+};
+
+struct cfg80211_wowlan {
+ /* XXX TODO */
+ bool any;
+ bool disconnect;
+ bool magic_pkt;
+ bool gtk_rekey_failure;
+ bool eap_identity_req;
+ bool four_way_handshake;
+ bool rfkill_release;
+
+ /* Magic packet patterns. */
+ int n_patterns;
+ struct cfg80211_pkt_pattern *patterns;
+
+ /* netdetect? if not assoc? */
+ struct cfg80211_sched_scan_request *nd_config;
+
+ void *tcp; /* XXX ? */
+};
+
+struct cfg80211_gtk_rekey_data {
+ /* XXX TODO */
+ const uint8_t *kck, *kek, *replay_ctr;
+ uint32_t akm;
+ uint8_t kck_len, kek_len;
+};
+
+struct cfg80211_tid_cfg {
+ /* XXX TODO */
+ int mask, noack, retry_long, rtscts, tids, amsdu, ampdu;
+ enum nl80211_tx_rate_setting txrate_type;
+ struct cfg80211_bitrate_mask txrate_mask;
+};
+
+struct cfg80211_tid_config {
+ /* XXX TODO */
+ int n_tid_conf;
+ struct cfg80211_tid_cfg tid_conf[0];
+};
+
+struct ieee80211_iface_limit {
+ /* TODO FIXME */
+ int max, types;
+};
+
+struct ieee80211_iface_combination {
+ /* TODO FIXME */
+ const struct ieee80211_iface_limit *limits;
+ int n_limits;
+ int max_interfaces, num_different_channels;
+ int beacon_int_infra_match, beacon_int_min_gcd;
+ int radar_detect_widths;
+};
+
+struct iface_combination_params {
+ int num_different_channels;
+ int iftype_num[NUM_NL80211_IFTYPES];
+};
+
+struct regulatory_request {
+ /* XXX TODO */
+ uint8_t alpha2[2];
+ enum environment_cap country_ie_env;
+ int initiator, dfs_region;
+ int user_reg_hint_type;
+};
+
+struct cfg80211_set_hw_timestamp {
+ const uint8_t *macaddr;
+ bool enable;
+};
+
+enum wiphy_vendor_cmd_need_flags {
+ WIPHY_VENDOR_CMD_NEED_NETDEV = 0x01,
+ WIPHY_VENDOR_CMD_NEED_RUNNING = 0x02,
+ WIPHY_VENDOR_CMD_NEED_WDEV = 0x04,
+};
+
+struct wiphy_vendor_command {
+ struct {
+ uint32_t vendor_id;
+ uint32_t subcmd;
+ };
+ uint32_t flags;
+ void *policy;
+ int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int);
+};
+
+struct wiphy_iftype_ext_capab {
+ /* TODO FIXME */
+ enum nl80211_iftype iftype;
+ const uint8_t *extended_capabilities;
+ const uint8_t *extended_capabilities_mask;
+ uint8_t extended_capabilities_len;
+ uint16_t eml_capabilities;
+ uint16_t mld_capa_and_ops;
+};
+
+struct tid_config_support {
+ /* TODO FIXME */
+ uint64_t vif; /* enum nl80211_tid_cfg_attr */
+ uint64_t peer; /* enum nl80211_tid_cfg_attr */
+};
+
+enum cfg80211_regulatory {
+ REGULATORY_CUSTOM_REG = BIT(0),
+ REGULATORY_STRICT_REG = BIT(1),
+ REGULATORY_DISABLE_BEACON_HINTS = BIT(2),
+ REGULATORY_ENABLE_RELAX_NO_IR = BIT(3),
+ REGULATORY_WIPHY_SELF_MANAGED = BIT(4),
+ REGULATORY_COUNTRY_IE_IGNORE = BIT(5),
+ REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(6),
+};
+
+struct wiphy_radio_freq_range {
+ uint32_t start_freq;
+ uint32_t end_freq;
+};
+
+struct wiphy_radio {
+ int n_freq_range;
+ int n_iface_combinations;
+ const struct wiphy_radio_freq_range *freq_range;
+ const struct ieee80211_iface_combination *iface_combinations;
+};
+
+enum wiphy_flags {
+ WIPHY_FLAG_AP_UAPSD = BIT(0),
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(1),
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(2),
+ WIPHY_FLAG_HAVE_AP_SME = BIT(3),
+ WIPHY_FLAG_IBSS_RSN = BIT(4),
+ WIPHY_FLAG_NETNS_OK = BIT(5),
+ WIPHY_FLAG_OFFCHAN_TX = BIT(6),
+ WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(7),
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(8),
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(9),
+ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(10),
+ WIPHY_FLAG_SUPPORTS_TDLS = BIT(11),
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(12),
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(13),
+ WIPHY_FLAG_4ADDR_AP = BIT(14),
+ WIPHY_FLAG_4ADDR_STATION = BIT(15),
+ WIPHY_FLAG_SUPPORTS_MLO = BIT(16),
+ WIPHY_FLAG_DISABLE_WEXT = BIT(17),
+};
+
+struct wiphy_work;
+typedef void (*wiphy_work_fn)(struct wiphy *, struct wiphy_work *);
+struct wiphy_work {
+ struct list_head entry;
+ wiphy_work_fn fn;
+};
+struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+ struct timer_list timer;
+};
+
+struct wiphy {
+ struct mutex mtx;
+ struct device *dev;
+ struct mac_address *addresses;
+ int n_addresses;
+ uint32_t flags;
+ struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
+ uint8_t perm_addr[ETH_ALEN];
+ uint16_t max_scan_ie_len;
+
+ /* XXX TODO */
+ const struct cfg80211_pmsr_capabilities *pmsr_capa;
+ const struct cfg80211_sar_capa *sar_capa;
+ const struct wiphy_iftype_ext_capab *iftype_ext_capab;
+ const struct linuxkpi_ieee80211_regdomain *regd;
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ const struct ieee80211_iface_combination *iface_combinations;
+ const uint32_t *cipher_suites;
+ int n_iface_combinations;
+ int n_cipher_suites;
+	void (*reg_notifier)(struct wiphy *, struct regulatory_request *);
+ enum cfg80211_regulatory regulatory_flags;
+ int n_vendor_commands;
+ const struct wiphy_vendor_command *vendor_commands;
+ const struct ieee80211_txrx_stypes *mgmt_stypes;
+ uint32_t rts_threshold;
+ uint32_t frag_threshold;
+ struct tid_config_support tid_config_support;
+ uint8_t available_antennas_rx;
+ uint8_t available_antennas_tx;
+
+ int n_radio;
+ const struct wiphy_radio *radio;
+
+ int features, hw_version;
+ int interface_modes, max_match_sets, max_remain_on_channel_duration, max_scan_ssids, max_sched_scan_ie_len, max_sched_scan_plan_interval, max_sched_scan_plan_iterations, max_sched_scan_plans, max_sched_scan_reqs, max_sched_scan_ssids;
+ int num_iftype_ext_capab;
+ int max_ap_assoc_sta, probe_resp_offload, software_iftypes;
+ int bss_select_support, max_num_pmkids, retry_long, retry_short, signal_type;
+ int max_data_retry_count;
+ int tx_queue_len, rfkill;
+ int mbssid_max_interfaces;
+ int hw_timestamp_max_peers;
+ int ema_max_profile_periodicity;
+
+ unsigned long ext_features[BITS_TO_LONGS(NUM_NL80211_EXT_FEATURES)];
+ struct dentry *debugfsdir;
+
+ const struct wiphy_wowlan_support *wowlan;
+ struct cfg80211_wowlan *wowlan_config;
+ /* Lower layer (driver/mac80211) specific data. */
+ /* Must stay last. */
+ uint8_t priv[0] __aligned(CACHE_LINE_SIZE);
+};
+
+#define lockdep_assert_wiphy(wiphy) \
+ lockdep_assert_held(&(wiphy)->mtx)
+
+struct wireless_dev {
+ /* XXX TODO, like ic? */
+ enum nl80211_iftype iftype;
+ uint32_t radio_mask;
+ uint8_t address[ETH_ALEN];
+ struct net_device *netdev;
+ struct wiphy *wiphy;
+};
+
+struct cfg80211_ops {
+ /* XXX TODO */
+ struct wireless_dev *(*add_virtual_intf)(struct wiphy *, const char *, unsigned char, enum nl80211_iftype, struct vif_params *);
+ int (*del_virtual_intf)(struct wiphy *, struct wireless_dev *);
+ s32 (*change_virtual_intf)(struct wiphy *, struct net_device *, enum nl80211_iftype, struct vif_params *);
+ s32 (*scan)(struct wiphy *, struct cfg80211_scan_request *);
+ s32 (*set_wiphy_params)(struct wiphy *, u32);
+ s32 (*join_ibss)(struct wiphy *, struct net_device *, struct cfg80211_ibss_params *);
+ s32 (*leave_ibss)(struct wiphy *, struct net_device *);
+ s32 (*get_station)(struct wiphy *, struct net_device *, const u8 *, struct station_info *);
+ int (*dump_station)(struct wiphy *, struct net_device *, int, u8 *, struct station_info *);
+ s32 (*set_tx_power)(struct wiphy *, struct wireless_dev *, enum nl80211_tx_power_setting, s32);
+ s32 (*get_tx_power)(struct wiphy *, struct wireless_dev *, s32 *);
+ s32 (*add_key)(struct wiphy *, struct net_device *, u8, bool, const u8 *, struct key_params *);
+ s32 (*del_key)(struct wiphy *, struct net_device *, u8, bool, const u8 *);
+ s32 (*get_key)(struct wiphy *, struct net_device *, u8, bool, const u8 *, void *, void(*)(void *, struct key_params *));
+ s32 (*set_default_key)(struct wiphy *, struct net_device *, u8, bool, bool);
+ s32 (*set_default_mgmt_key)(struct wiphy *, struct net_device *, u8);
+ s32 (*set_power_mgmt)(struct wiphy *, struct net_device *, bool, s32);
+ s32 (*connect)(struct wiphy *, struct net_device *, struct cfg80211_connect_params *);
+ s32 (*disconnect)(struct wiphy *, struct net_device *, u16);
+ s32 (*suspend)(struct wiphy *, struct cfg80211_wowlan *);
+ s32 (*resume)(struct wiphy *);
+ s32 (*set_pmksa)(struct wiphy *, struct net_device *, struct cfg80211_pmksa *);
+ s32 (*del_pmksa)(struct wiphy *, struct net_device *, struct cfg80211_pmksa *);
+ s32 (*flush_pmksa)(struct wiphy *, struct net_device *);
+ s32 (*start_ap)(struct wiphy *, struct net_device *, struct cfg80211_ap_settings *);
+ int (*stop_ap)(struct wiphy *, struct net_device *);
+ s32 (*change_beacon)(struct wiphy *, struct net_device *, struct cfg80211_beacon_data *);
+ int (*del_station)(struct wiphy *, struct net_device *, struct station_del_parameters *);
+ int (*change_station)(struct wiphy *, struct net_device *, const u8 *, struct station_parameters *);
+ int (*sched_scan_start)(struct wiphy *, struct net_device *, struct cfg80211_sched_scan_request *);
+ int (*sched_scan_stop)(struct wiphy *, struct net_device *, u64);
+ void (*update_mgmt_frame_registrations)(struct wiphy *, struct wireless_dev *, struct mgmt_frame_regs *);
+ int (*mgmt_tx)(struct wiphy *, struct wireless_dev *, struct cfg80211_mgmt_tx_params *, u64 *);
+ int (*cancel_remain_on_channel)(struct wiphy *, struct wireless_dev *, u64);
+ int (*get_channel)(struct wiphy *, struct wireless_dev *, struct cfg80211_chan_def *);
+ int (*crit_proto_start)(struct wiphy *, struct wireless_dev *, enum nl80211_crit_proto_id, u16);
+ void (*crit_proto_stop)(struct wiphy *, struct wireless_dev *);
+ int (*tdls_oper)(struct wiphy *, struct net_device *, const u8 *, enum nl80211_tdls_operation);
+ int (*update_connect_params)(struct wiphy *, struct net_device *, struct cfg80211_connect_params *, u32);
+ int (*set_pmk)(struct wiphy *, struct net_device *, const struct cfg80211_pmk_conf *);
+ int (*del_pmk)(struct wiphy *, struct net_device *, const u8 *);
+ int (*remain_on_channel)(struct wiphy *, struct wireless_dev *, struct linuxkpi_ieee80211_channel *, unsigned int, u64 *);
+ int (*start_p2p_device)(struct wiphy *, struct wireless_dev *);
+ void (*stop_p2p_device)(struct wiphy *, struct wireless_dev *);
+};
+
+
+/* -------------------------------------------------------------------------- */
+
+/* linux_80211.c */
+
+struct wiphy *linuxkpi_wiphy_new(const struct cfg80211_ops *, size_t);
+void linuxkpi_wiphy_free(struct wiphy *wiphy);
+
+void linuxkpi_wiphy_work_queue(struct wiphy *, struct wiphy_work *);
+void linuxkpi_wiphy_work_cancel(struct wiphy *, struct wiphy_work *);
+void linuxkpi_wiphy_work_flush(struct wiphy *, struct wiphy_work *);
+void lkpi_wiphy_delayed_work_timer(struct timer_list *);
+void linuxkpi_wiphy_delayed_work_queue(struct wiphy *,
+ struct wiphy_delayed_work *, unsigned long);
+void linuxkpi_wiphy_delayed_work_cancel(struct wiphy *,
+ struct wiphy_delayed_work *);
+
+int linuxkpi_regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
+ struct linuxkpi_ieee80211_regdomain *regd);
+uint32_t linuxkpi_cfg80211_calculate_bitrate(struct rate_info *);
+uint32_t linuxkpi_ieee80211_channel_to_frequency(uint32_t, enum nl80211_band);
+uint32_t linuxkpi_ieee80211_frequency_to_channel(uint32_t, uint32_t);
+struct linuxkpi_ieee80211_channel *
+ linuxkpi_ieee80211_get_channel(struct wiphy *, uint32_t);
+struct cfg80211_bss *linuxkpi_cfg80211_get_bss(struct wiphy *,
+ struct linuxkpi_ieee80211_channel *, const uint8_t *,
+ const uint8_t *, size_t, enum ieee80211_bss_type, enum ieee80211_privacy);
+void linuxkpi_cfg80211_put_bss(struct wiphy *, struct cfg80211_bss *);
+void linuxkpi_cfg80211_bss_flush(struct wiphy *);
+struct linuxkpi_ieee80211_regdomain *
+ lkpi_get_linuxkpi_ieee80211_regdomain(size_t);
+
+/* -------------------------------------------------------------------------- */
+
+static __inline struct wiphy *
+wiphy_new(const struct cfg80211_ops *ops, size_t priv_len)
+{
+
+ return (linuxkpi_wiphy_new(ops, priv_len));
+}
+
+static __inline void
+wiphy_free(struct wiphy *wiphy)
+{
+
+ linuxkpi_wiphy_free(wiphy);
+}
+
+static __inline void *
+wiphy_priv(struct wiphy *wiphy)
+{
+
+ return (wiphy->priv);
+}
+
+static __inline void
+set_wiphy_dev(struct wiphy *wiphy, struct device *dev)
+{
+
+ wiphy->dev = dev;
+}
+
+static __inline struct device *
+wiphy_dev(struct wiphy *wiphy)
+{
+
+ return (wiphy->dev);
+}
+
+#define wiphy_dereference(_w, p) \
+ rcu_dereference_check(p, lockdep_is_held(&(_w)->mtx))
+
+#define wiphy_lock(_w) mutex_lock(&(_w)->mtx)
+#define wiphy_unlock(_w) mutex_unlock(&(_w)->mtx)
+
+static __inline void
+wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
+ enum rfkill_hard_block_reasons reason)
+{
+ TODO();
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline struct cfg80211_bss *
+cfg80211_get_bss(struct wiphy *wiphy, struct linuxkpi_ieee80211_channel *chan,
+ const uint8_t *bssid, const uint8_t *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy)
+{
+
+ return (linuxkpi_cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
+ bss_type, privacy));
+}
+
+static inline void
+cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss)
+{
+
+ linuxkpi_cfg80211_put_bss(wiphy, bss);
+}
+
+static inline void
+cfg80211_bss_flush(struct wiphy *wiphy)
+{
+
+ linuxkpi_cfg80211_bss_flush(wiphy);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline bool
+rfkill_blocked(int rfkill) /* argument type? */
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+rfkill_soft_blocked(int rfkill)
+{
+ TODO();
+ return (false);
+}
+
+static __inline void
+wiphy_rfkill_start_polling(struct wiphy *wiphy)
+{
+ TODO();
+}
+
+static __inline void
+wiphy_rfkill_stop_polling(struct wiphy *wiphy)
+{
+ TODO();
+}
+
+static __inline int
+reg_query_regdb_wmm(uint8_t *alpha2, uint32_t center_freq,
+ struct ieee80211_reg_rule *rule)
+{
+
+ /* ETSI has special rules. FreeBSD regdb needs to learn about them. */
+ TODO();
+
+ return (-ENXIO);
+}
+
+static __inline const u8 *
+cfg80211_find_ie_match(uint32_t f, const u8 *ies, size_t ies_len,
+ const u8 *match, int x, int y)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline const u8 *
+cfg80211_find_ie(uint8_t eid, const uint8_t *ie, uint32_t ielen)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_pmsr_report(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ struct cfg80211_pmsr_result *result, gfp_t gfp)
+{
+ TODO();
+}
+
+static inline void
+cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
+ struct linuxkpi_ieee80211_channel *chan, enum nl80211_channel_type chan_type)
+{
+
+ KASSERT(chandef != NULL, ("%s: chandef is NULL\n", __func__));
+ KASSERT(chan != NULL, ("%s: chan is NULL\n", __func__));
+
+ /* memset(chandef, 0, sizeof(*chandef)); */
+ chandef->chan = chan;
+ chandef->center_freq1 = chan->center_freq;
+ /* chandef->width, center_freq2, punctured */
+
+ switch (chan_type) {
+ case NL80211_CHAN_NO_HT:
+ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ case NL80211_CHAN_HT20:
+ chandef->width = NL80211_CHAN_WIDTH_20;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ chandef->center_freq1 -= 10;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ chandef->center_freq1 += 10;
+ break;
+	}
+}
+
+static __inline bool
+cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+cfg80211_chandef_dfs_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef)
+{
+ TODO();
+ return (false);
+}
+
+static __inline unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef)
+{
+ TODO();
+ return (0);
+}
+
+static __inline bool
+cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef_1,
+ const struct cfg80211_chan_def *chandef_2)
+{
+ TODO();
+ return (false);
+}
+
+static __inline bool
+cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef, uint32_t flags)
+{
+ TODO();
+ return (false);
+}
+
+static __inline void
+cfg80211_bss_iter(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+ void (*iterfunc)(struct wiphy *, struct cfg80211_bss *, void *), void *data)
+{
+ TODO();
+}
+
+struct element {
+ uint8_t id;
+ uint8_t datalen;
+ uint8_t data[0];
+} __packed;
+
+static inline const struct element *
+lkpi_cfg80211_find_elem_pattern(enum ieee80211_eid eid,
+ const uint8_t *data, size_t len, uint8_t *pattern, size_t plen)
+{
+ const struct element *elem;
+ const uint8_t *p;
+ size_t ielen;
+
+ p = data;
+ elem = (const struct element *)p;
+ ielen = len;
+ while (elem != NULL && ielen > 1) {
+ if ((2 + elem->datalen) > ielen)
+ /* Element overruns our memory. */
+ return (NULL);
+ if (elem->id == eid) {
+ if (pattern == NULL)
+ return (elem);
+ if (elem->datalen >= plen &&
+ memcmp(elem->data, pattern, plen) == 0)
+ return (elem);
+ }
+ ielen -= 2 + elem->datalen;
+ p += 2 + elem->datalen;
+ elem = (const struct element *)p;
+ }
+
+ return (NULL);
+}
+
+static inline const struct element *
+cfg80211_find_elem(enum ieee80211_eid eid, const uint8_t *data, size_t len)
+{
+
+ return (lkpi_cfg80211_find_elem_pattern(eid, data, len, NULL, 0));
+}
+
+static inline const struct element *
+ieee80211_bss_get_elem(struct cfg80211_bss *bss, uint32_t eid)
+{
+
+ if (bss->ies == NULL)
+ return (NULL);
+ return (cfg80211_find_elem(eid, bss->ies->data, bss->ies->len));
+}
+
+static inline const uint8_t *
+ieee80211_bss_get_ie(struct cfg80211_bss *bss, uint32_t eid)
+{
+
+ return ((const uint8_t *)ieee80211_bss_get_elem(bss, eid));
+}
+
+static inline uint8_t *
+cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
+ uint8_t *data, size_t len)
+{
+ const struct element *elem;
+	uint8_t pattern[4] = { oui >> 16, oui >> 8, oui, oui_type };
+ uint8_t plen = 4; /* >= 3? oui_type always part of this? */
+ IMPROVE("plen currently always incl. oui_type");
+
+ elem = lkpi_cfg80211_find_elem_pattern(IEEE80211_ELEMID_VENDOR,
+ data, len, pattern, plen);
+ if (elem == NULL)
+ return (NULL);
+ return (__DECONST(uint8_t *, elem));
+}
+
+static inline uint32_t
+cfg80211_calculate_bitrate(struct rate_info *rate)
+{
+ return (linuxkpi_cfg80211_calculate_bitrate(rate));
+}
+
+static __inline uint32_t
+ieee80211_channel_to_frequency(uint32_t channel, enum nl80211_band band)
+{
+
+ return (linuxkpi_ieee80211_channel_to_frequency(channel, band));
+}
+
+static __inline uint32_t
+ieee80211_frequency_to_channel(uint32_t freq)
+{
+
+ return (linuxkpi_ieee80211_frequency_to_channel(freq, 0));
+}
+
+static __inline int
+regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
+ struct linuxkpi_ieee80211_regdomain *regd)
+{
+ IMPROVE();
+ return (linuxkpi_regulatory_set_wiphy_regd_sync(wiphy, regd));
+}
+
+static __inline int
+regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
+ struct linuxkpi_ieee80211_regdomain *regd)
+{
+
+ IMPROVE();
+ return (linuxkpi_regulatory_set_wiphy_regd_sync(wiphy, regd));
+}
+
+static __inline int
+regulatory_set_wiphy_regd(struct wiphy *wiphy,
+ struct linuxkpi_ieee80211_regdomain *regd)
+{
+
+ IMPROVE();
+ if (regd == NULL)
+		return (-EINVAL);
+
+ /* XXX-BZ wild guessing here based on brcmfmac. */
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ wiphy->regd = regd;
+ else
+		return (-EPERM);
+
+ /* XXX FIXME, do we have to do anything with reg_notifier? */
+ return (0);
+}
+
+static __inline int
+regulatory_hint(struct wiphy *wiphy, const uint8_t *alpha2)
+{
+ struct linuxkpi_ieee80211_regdomain *regd;
+
+ if (wiphy->regd != NULL)
+ return (-EBUSY);
+
+ regd = lkpi_get_linuxkpi_ieee80211_regdomain(0);
+ if (regd == NULL)
+ return (-ENOMEM);
+
+ regd->alpha2[0] = alpha2[0];
+ regd->alpha2[1] = alpha2[1];
+ wiphy->regd = regd;
+
+	IMPROVE("are there flags for who is managing? update net80211?");
+
+ return (0);
+}
+
+static __inline const char *
+reg_initiator_name(enum nl80211_reg_initiator initiator)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline struct linuxkpi_ieee80211_regdomain *
+rtnl_dereference(const struct linuxkpi_ieee80211_regdomain *regd)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline struct ieee80211_reg_rule *
+freq_reg_info(struct wiphy *wiphy, uint32_t center_freq)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+wiphy_apply_custom_regulatory(struct wiphy *wiphy,
+ const struct linuxkpi_ieee80211_regdomain *regd)
+{
+ TODO();
+}
+
+static __inline char *
+wiphy_name(struct wiphy *wiphy)
+{
+ if (wiphy != NULL && wiphy->dev != NULL)
+		return (dev_name(wiphy->dev));
+ else {
+ IMPROVE("wlanNA");
+ return ("wlanNA");
+ }
+}
+
+static __inline void
+wiphy_read_of_freq_limits(struct wiphy *wiphy)
+{
+#ifdef FDT
+ TODO();
+#endif
+}
+
+static __inline void
+wiphy_ext_feature_set(struct wiphy *wiphy, enum nl80211_ext_feature ef)
+{
+
+ set_bit(ef, wiphy->ext_features);
+}
+
+static inline bool
+wiphy_ext_feature_isset(struct wiphy *wiphy, enum nl80211_ext_feature ef)
+{
+ return (test_bit(ef, wiphy->ext_features));
+}
+
+static __inline void *
+wiphy_net(struct wiphy *wiphy)
+{
+ TODO();
+ return (NULL); /* XXX passed to dev_net_set() */
+}
+
+static __inline int
+wiphy_register(struct wiphy *wiphy)
+{
+ TODO();
+ return (0);
+}
+
+static __inline void
+wiphy_unregister(struct wiphy *wiphy)
+{
+ TODO();
+}
+
+static __inline void
+wiphy_warn(struct wiphy *wiphy, const char *fmt, ...)
+{
+ TODO();
+}
+
+static __inline int
+cfg80211_check_combinations(struct wiphy *wiphy,
+ struct iface_combination_params *params)
+{
+ TODO();
+ return (-ENOENT);
+}
+
+static __inline uint8_t
+cfg80211_classify8021d(struct sk_buff *skb, void *p)
+{
+ TODO();
+ return (0);
+}
+
+static __inline void
+cfg80211_connect_done(struct net_device *ndev,
+ struct cfg80211_connect_resp_params *conn_params, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_disconnected(struct net_device *ndev, uint16_t reason,
+ void *p, int x, bool locally_generated, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline int
+cfg80211_get_p2p_attr(const u8 *ie, u32 ie_len,
+ enum ieee80211_p2p_attr_ids attr, u8 *p, size_t p_len)
+{
+ TODO();
+ return (-1);
+}
+
+static __inline void
+cfg80211_ibss_joined(struct net_device *ndev, const uint8_t *addr,
+ struct linuxkpi_ieee80211_channel *chan, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline struct cfg80211_bss *
+cfg80211_inform_bss(struct wiphy *wiphy,
+ struct linuxkpi_ieee80211_channel *channel,
+ enum cfg80211_bss_frame_type bss_ftype, const uint8_t *bss, int _x,
+ uint16_t cap, uint16_t intvl, const uint8_t *ie, size_t ie_len,
+ int signal, gfp_t gfp)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *bss_data,
+ enum cfg80211_bss_frame_type bss_ftype, const uint8_t *bss, int _x,
+ uint16_t cap, uint16_t intvl, const uint8_t *ie, size_t ie_len, gfp_t gfp)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+cfg80211_mgmt_tx_status(struct wireless_dev *wdev, uint64_t cookie,
+ const u8 *buf, size_t len, bool ack, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_michael_mic_failure(struct net_device *ndev, const uint8_t *addr,
+ enum nl80211_key_type key_type, int _x, void *p, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_new_sta(struct net_device *ndev, const uint8_t *addr,
+ struct station_info *sinfo, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_del_sta(struct net_device *ndev, const uint8_t *addr, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_port_authorized(struct net_device *ndev, const uint8_t *bssid,
+ gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_ready_on_channel(struct wireless_dev *wdev, uint64_t cookie,
+ struct linuxkpi_ieee80211_channel *channel, unsigned int duration,
+ gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_remain_on_channel_expired(struct wireless_dev *wdev,
+ uint64_t cookie, struct linuxkpi_ieee80211_channel *channel, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_report_wowlan_wakeup(void)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_roamed(struct net_device *ndev, struct cfg80211_roam_info *roam_info,
+ gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int _x,
+ uint8_t *p, size_t p_len, int _x2)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_scan_done(struct cfg80211_scan_request *scan_request,
+ struct cfg80211_scan_info *info)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_sched_scan_results(struct wiphy *wiphy, uint64_t reqid)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_sched_scan_stopped(struct wiphy *wiphy, int _x)
+{
+ TODO();
+}
+
+static __inline void
+cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+ TODO();
+}
+
+static __inline struct sk_buff *
+cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, unsigned int len)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline int
+cfg80211_vendor_cmd_reply(struct sk_buff *skb)
+{
+ TODO();
+ return (-ENXIO);
+}
+
+static __inline struct linuxkpi_ieee80211_channel *
+ieee80211_get_channel(struct wiphy *wiphy, uint32_t freq)
+{
+
+ return (linuxkpi_ieee80211_get_channel(wiphy, freq));
+}
+
+static inline size_t
+ieee80211_get_hdrlen_from_skb(struct sk_buff *skb)
+{
+ const struct ieee80211_hdr *hdr;
+ size_t len;
+
+ if (skb->len < 10) /* sizeof(ieee80211_frame_[ack,cts]) */
+ return (0);
+
+ hdr = (const struct ieee80211_hdr *)skb->data;
+ len = ieee80211_hdrlen(hdr->frame_control);
+
+	/* If larger than what is in the skb, return 0. */
+ if (len > skb->len)
+ return (0);
+
+ return (len);
+}
+
+static __inline bool
+cfg80211_channel_is_psc(struct linuxkpi_ieee80211_channel *channel)
+{
+
+	/* PSC channels only exist in the 6GHz band. */
+ if (channel->band != NL80211_BAND_6GHZ)
+ return (false);
+
+ TODO();
+ return (false);
+}
+
+static inline int
+cfg80211_get_ies_channel_number(const uint8_t *ie, size_t len,
+ enum nl80211_band band)
+{
+ const struct element *elem;
+
+ switch (band) {
+ case NL80211_BAND_6GHZ:
+ TODO();
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_2GHZ:
+ /* DSPARAMS has the channel number. */
+ elem = cfg80211_find_elem(IEEE80211_ELEMID_DSPARMS, ie, len);
+ if (elem != NULL && elem->datalen == 1)
+ return (elem->data[0]);
+		/* HTINFO has the primary (control) channel. */
+ elem = cfg80211_find_elem(IEEE80211_ELEMID_HTINFO, ie, len);
+ if (elem != NULL &&
+ elem->datalen >= (sizeof(struct ieee80211_ie_htinfo) - 2)) {
+ const struct ieee80211_ie_htinfo *htinfo;
+ htinfo = (const struct ieee80211_ie_htinfo *)elem;
+ return (htinfo->hi_ctrlchannel);
+ }
+ /* What else? */
+ break;
+ default:
+ IMPROVE("Unsupported");
+ break;
+ }
+ return (-1);
+}
+
+/* Used for scanning at least. */
+static __inline void
+get_random_mask_addr(uint8_t *dst, const uint8_t *addr, const uint8_t *mask)
+{
+ int i;
+
+ /* Get a completely random address and then overlay what we want. */
+ get_random_bytes(dst, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[i] = (dst[i] & ~(mask[i])) | (addr[i] & mask[i]);
+}
+
+static __inline void
+cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
+{
+ TODO();
+}
+
+static __inline bool
+cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ TODO();
+ return (false);
+}
+
+static __inline void
+cfg80211_background_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline const u8 *
+cfg80211_find_ext_ie(uint8_t eid, const uint8_t *p, size_t len)
+{
+ TODO();
+ return (NULL);
+}
+
+static inline void
+_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *band,
+ struct ieee80211_sband_iftype_data *iftype_data, size_t nitems)
+{
+ band->iftype_data = iftype_data;
+ band->n_iftype_data = nitems;
+}
+
+static inline const struct ieee80211_sband_iftype_data *
+ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *band,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *iftype_data;
+ int i;
+
+ for (i = 0; i < band->n_iftype_data; i++) {
+ iftype_data = (const void *)&band->iftype_data[i];
+ if (iftype_data->types_mask & BIT(iftype))
+ return (iftype_data);
+ }
+
+ return (NULL);
+}
+
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *band,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *iftype_data;
+ const struct ieee80211_sta_he_cap *he_cap;
+
+ iftype_data = ieee80211_get_sband_iftype_data(band, iftype);
+ if (iftype_data == NULL)
+ return (NULL);
+
+ he_cap = NULL;
+ if (iftype_data->he_cap.has_he)
+ he_cap = &iftype_data->he_cap;
+
+ return (he_cap);
+}
+
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *band,
+ enum nl80211_iftype iftype)
+{
+ const struct ieee80211_sband_iftype_data *iftype_data;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+
+ iftype_data = ieee80211_get_sband_iftype_data(band, iftype);
+ if (iftype_data == NULL)
+ return (NULL);
+
+ eht_cap = NULL;
+ if (iftype_data->eht_cap.has_eht)
+ eht_cap = &iftype_data->eht_cap;
+
+ return (eht_cap);
+}
+
+static inline bool
+cfg80211_ssid_eq(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2)
+{
+ int error;
+
+ if (ssid1 == NULL || ssid2 == NULL) /* Can we KASSERT this? */
+ return (false);
+
+ if (ssid1->ssid_len != ssid2->ssid_len)
+ return (false);
+ error = memcmp(ssid1->ssid, ssid2->ssid, ssid2->ssid_len);
+ if (error != 0)
+ return (false);
+ return (true);
+}
+
+static inline void
+cfg80211_rx_unprot_mlme_mgmt(struct net_device *ndev, const uint8_t *hdr,
+ uint32_t len)
+{
+ TODO();
+}
+
+static inline const struct wiphy_iftype_ext_capab *
+cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype iftype)
+{
+
+ TODO();
+ return (NULL);
+}
+
+static inline uint16_t
+ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
+ enum nl80211_iftype iftype)
+{
+ TODO();
+ return (0);
+}
+
+static inline int
+nl80211_chan_width_to_mhz(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_5:
+ return (5);
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ return (10);
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return (20);
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ return (40);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ return (80);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ return (160);
+ break;
+ case NL80211_CHAN_WIDTH_320:
+ return (320);
+	}
+	return (-1);	/* Unknown/unsupported width. */
+}
+
+static __inline ssize_t
+wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize, const char __user *userbuf, size_t count,
+ ssize_t (*handler)(struct wiphy *, struct file *, char *, size_t, void *),
+ void *data)
+{
+ TODO();
+ return (-ENXIO);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline void
+wiphy_work_init(struct wiphy_work *wwk, wiphy_work_fn fn)
+{
+ INIT_LIST_HEAD(&wwk->entry);
+ wwk->fn = fn;
+}
+
+static inline void
+wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *wwk)
+{
+ linuxkpi_wiphy_work_queue(wiphy, wwk);
+}
+
+static inline void
+wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *wwk)
+{
+ linuxkpi_wiphy_work_cancel(wiphy, wwk);
+}
+
+static inline void
+wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *wwk)
+{
+ linuxkpi_wiphy_work_flush(wiphy, wwk);
+}
+
+static inline void
+wiphy_delayed_work_init(struct wiphy_delayed_work *wdwk, wiphy_work_fn fn)
+{
+ wiphy_work_init(&wdwk->work, fn);
+ timer_setup(&wdwk->timer, lkpi_wiphy_delayed_work_timer, 0);
+}
+
+static inline void
+wiphy_delayed_work_queue(struct wiphy *wiphy, struct wiphy_delayed_work *wdwk,
+ unsigned long delay)
+{
+ linuxkpi_wiphy_delayed_work_queue(wiphy, wdwk, delay);
+}
+
+static inline void
+wiphy_delayed_work_cancel(struct wiphy *wiphy, struct wiphy_delayed_work *wdwk)
+{
+ linuxkpi_wiphy_delayed_work_cancel(wiphy, wdwk);
+}
+
+/* -------------------------------------------------------------------------- */
+
+#define wiphy_err(_wiphy, _fmt, ...) \
+ dev_err((_wiphy)->dev, _fmt, __VA_ARGS__)
+#define wiphy_info(wiphy, fmt, ...) \
+ dev_info((wiphy)->dev, fmt, ##__VA_ARGS__)
+#define wiphy_info_once(wiphy, fmt, ...) \
+ dev_info_once((wiphy)->dev, fmt, ##__VA_ARGS__)
+
+#ifndef LINUXKPI_NET80211
+#define ieee80211_channel linuxkpi_ieee80211_channel
+#define ieee80211_regdomain linuxkpi_ieee80211_regdomain
+#endif
+
+#include <net/mac80211.h>
+
+#endif /* _LINUXKPI_NET_CFG80211_H */
diff --git a/sys/compat/linuxkpi/common/include/net/ieee80211_radiotap.h b/sys/compat/linuxkpi/common/include/net/ieee80211_radiotap.h
new file mode 100644
index 000000000000..82e554f6b96e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/ieee80211_radiotap.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_IEEE80211_RADIOTAP_H
+#define _LINUXKPI_NET_IEEE80211_RADIOTAP_H
+
+/* Any potentially duplicated content is now maintained in a single place. */
+#include <net80211/ieee80211_radiotap.h>
+
+/*
+ * This structure deviates from
+ * 'https://www.radiotap.org/fields/Vendor%20Namespace.html'
+ * and the net80211::ieee80211_radiotap_vendor_header version.
+ * We consider it LinuxKPI specific so it stays here.
+ */
+struct ieee80211_vendor_radiotap {
+ u32 present;
+ u8 align;
+ u8 oui[3];
+ u8 subns;
+ u8 pad;
+ __le16 len;
+ u8 data[0];
+};
+
+#endif /* _LINUXKPI_NET_IEEE80211_RADIOTAP_H */
diff --git a/sys/compat/linuxkpi/common/include/net/if_inet6.h b/sys/compat/linuxkpi/common/include/net/if_inet6.h
new file mode 100644
index 000000000000..c340909d7098
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/if_inet6.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_NET_IF_INET6_H_
+#define _LINUXKPI_NET_IF_INET6_H_
+
+#include <sys/types.h>
+#include <netinet/in.h>
+
+#include <asm/types.h>
+
+struct inet6_dev {
+ /* XXX Currently unused but referenced in declarations. */
+};
+
+static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf)
+{
+/*
+ * +-------+-------+-------+-------+-------+-------+
+ * | 33 | 33 | DST13 | DST14 | DST15 | DST16 |
+ * +-------+-------+-------+-------+-------+-------+
+ */
+
+ buf[0] = 0x33;
+ buf[1] = 0x33;
+
+ memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32));
+}
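+
+/*
+ * Worked example (illustrative, not part of this header): the all-nodes
+ * group ff02::1 maps to the Ethernet multicast address 33:33:00:00:00:01,
+ * i.e. the fixed 33:33 prefix followed by the last four bytes of the IPv6
+ * destination address.
+ */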
+
+#endif /* _LINUXKPI_NET_IF_INET6_H_ */
diff --git a/sys/compat/linuxkpi/common/include/net/ip.h b/sys/compat/linuxkpi/common/include/net/ip.h
new file mode 100644
index 000000000000..3e7baab6cc0b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/ip.h
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_NET_IP_H_
+#define _LINUXKPI_NET_IP_H_
+
+#include "opt_inet.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <net/if_types.h>
+#include <net/if.h>
+#include <net/if_var.h>
+
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+
+static inline void
+inet_get_local_port_range(struct vnet *vnet, int *low, int *high)
+{
+#ifdef INET
+ CURVNET_SET_QUIET(vnet);
+ *low = V_ipport_firstauto;
+ *high = V_ipport_lastauto;
+ CURVNET_RESTORE();
+#else
+ *low = IPPORT_EPHEMERALFIRST; /* 10000 */
+ *high = IPPORT_EPHEMERALLAST; /* 65535 */
+#endif
+}
+
+static inline void
+ip_eth_mc_map(uint32_t addr, char *buf)
+{
+
+ addr = ntohl(addr);
+
+ buf[0] = 0x01;
+ buf[1] = 0x00;
+ buf[2] = 0x5e;
+ buf[3] = (addr >> 16) & 0x7f;
+ buf[4] = (addr >> 8) & 0xff;
+ buf[5] = (addr & 0xff);
+}
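+
+/*
+ * Worked example (illustrative): 224.0.0.1 (0xe0000001) maps to
+ * 01:00:5e:00:00:01; only the low 23 bits of the group address are copied
+ * (note the 0x7f mask above), as per RFC 1112.
+ */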
+
+static inline void
+ip_ib_mc_map(uint32_t addr, const unsigned char *bcast, char *buf)
+{
+ unsigned char scope;
+
+ addr = ntohl(addr);
+ scope = bcast[5] & 0xF;
+ buf[0] = 0;
+ buf[1] = 0xff;
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ buf[4] = 0xff;
+ buf[5] = 0x10 | scope;
+ buf[6] = 0x40;
+ buf[7] = 0x1b;
+ buf[8] = bcast[8];
+ buf[9] = bcast[9];
+ buf[10] = 0;
+ buf[11] = 0;
+ buf[12] = 0;
+ buf[13] = 0;
+ buf[14] = 0;
+ buf[15] = 0;
+ buf[16] = (addr >> 24) & 0xff;
+ buf[17] = (addr >> 16) & 0xff;
+ buf[18] = (addr >> 8) & 0xff;
+ buf[19] = addr & 0xff;
+}
+
+#endif /* _LINUXKPI_NET_IP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/net/ipv6.h b/sys/compat/linuxkpi/common/include/net/ipv6.h
new file mode 100644
index 000000000000..3a85781e3a49
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/ipv6.h
@@ -0,0 +1,117 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_NET_IPV6_H_
+#define _LINUXKPI_NET_IPV6_H_
+
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define IPV6_DEFAULT_HOPLIMIT 64
+
+#define NEXTHDR_HOP IPPROTO_HOPOPTS
+#define NEXTHDR_ROUTING IPPROTO_ROUTING
+#define NEXTHDR_NONE IPPROTO_NONE
+#define NEXTHDR_DEST IPPROTO_DSTOPTS
+
+#define ipv6_addr_loopback(addr) IN6_IS_ADDR_LOOPBACK(addr)
+#define ipv6_addr_any(addr) IN6_IS_ADDR_UNSPECIFIED(addr)
+
+#define ipv6_addr_copy(dst, src) \
+ memcpy((dst), (src), sizeof(struct in6_addr))
+
+static inline void
+ipv6_ib_mc_map(const struct in6_addr *addr, const unsigned char *broadcast,
+ char *buf)
+{
+ unsigned char scope;
+
+ scope = broadcast[5] & 0xF;
+ buf[0] = 0;
+ buf[1] = 0xff;
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ buf[4] = 0xff;
+ buf[5] = 0x10 | scope;
+ buf[6] = 0x60;
+ buf[7] = 0x1b;
+ buf[8] = broadcast[8];
+ buf[9] = broadcast[9];
+ memcpy(&buf[10], &addr->s6_addr[6], 10);
+}
+
+static inline void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl)
+{
+#if BITS_PER_LONG == 64
+#if defined(__BIG_ENDIAN)
+ if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
+ *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
+ return;
+ }
+#elif defined(__LITTLE_ENDIAN)
+ if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
+ *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
+ return;
+ }
+#endif
+#endif
+ addr[0] = wh;
+ addr[1] = wl;
+}
+
+static inline void ipv6_addr_set(struct in6_addr *addr,
+ __be32 w1, __be32 w2,
+ __be32 w3, __be32 w4)
+{
+ __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
+ __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
+}
+
+static inline void ipv6_addr_set_v4mapped(const __be32 addr,
+ struct in6_addr *v4mapped)
+{
+ ipv6_addr_set(v4mapped,
+ 0, 0,
+ htonl(0x0000FFFF),
+ addr);
+}
+
+static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+ return ((a->s6_addr32[0] | a->s6_addr32[1] |
+ (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0);
+}
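+
+/*
+ * Worked example (illustrative): ipv6_addr_set_v4mapped(htonl(0x7f000001),
+ * &a) yields ::ffff:127.0.0.1, and ipv6_addr_v4mapped(&a) then returns
+ * non-zero.
+ */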
+
+static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
+{
+ return memcmp(a1, a2, sizeof(struct in6_addr));
+}
+
+#endif /* _LINUXKPI_NET_IPV6_H_ */
diff --git a/sys/compat/linuxkpi/common/include/net/mac80211.h b/sys/compat/linuxkpi/common/include/net/mac80211.h
new file mode 100644
index 000000000000..af3199c38939
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/mac80211.h
@@ -0,0 +1,2686 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2020-2025 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_MAC80211_H
+#define _LINUXKPI_NET_MAC80211_H
+
+#include <sys/types.h>
+
+#include <asm/atomic64.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/dcache.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include <net/if_inet6.h>
+
+#define ARPHRD_IEEE80211_RADIOTAP __LINE__ /* XXX TODO brcmfmac */
+
+#define WLAN_OUI_MICROSOFT (0x0050F2)
+#define WLAN_OUI_TYPE_MICROSOFT_WPA (1)
+#define WLAN_OUI_TYPE_MICROSOFT_TPC (8)
+#define WLAN_OUI_TYPE_WFA_P2P (9)
+#define WLAN_OUI_WFA (0x506F9A)
+
+#define IEEE80211_LINK_UNSPECIFIED 0x0f
+
+/* hw->conf.flags */
+enum ieee80211_hw_conf_flags {
+ IEEE80211_CONF_IDLE = BIT(0),
+ IEEE80211_CONF_PS = BIT(1),
+ IEEE80211_CONF_MONITOR = BIT(2),
+ IEEE80211_CONF_OFFCHANNEL = BIT(3),
+};
+
+/* (*ops->config()) */
+enum ieee80211_hw_conf_changed_flags {
+ IEEE80211_CONF_CHANGE_CHANNEL = BIT(0),
+ IEEE80211_CONF_CHANGE_IDLE = BIT(1),
+ IEEE80211_CONF_CHANGE_PS = BIT(2),
+ IEEE80211_CONF_CHANGE_MONITOR = BIT(3),
+ IEEE80211_CONF_CHANGE_POWER = BIT(4),
+};
+
+#define CFG80211_TESTMODE_CMD(_x) /* XXX TODO */
+#define CFG80211_TESTMODE_DUMP(_x) /* XXX TODO */
+
+#define FCS_LEN 4
+
+/* ops.configure_filter() */
+enum mcast_filter_flags {
+ FIF_ALLMULTI = BIT(0),
+ FIF_PROBE_REQ = BIT(1),
+ FIF_BCN_PRBRESP_PROMISC = BIT(2),
+ FIF_FCSFAIL = BIT(3),
+ FIF_OTHER_BSS = BIT(4),
+ FIF_PSPOLL = BIT(5),
+ FIF_CONTROL = BIT(6),
+ FIF_MCAST_ACTION = BIT(7),
+};
+
+enum ieee80211_bss_changed {
+ BSS_CHANGED_ARP_FILTER = BIT(0),
+ BSS_CHANGED_ASSOC = BIT(1),
+ BSS_CHANGED_BANDWIDTH = BIT(2),
+ BSS_CHANGED_BEACON = BIT(3),
+ BSS_CHANGED_BEACON_ENABLED = BIT(4),
+ BSS_CHANGED_BEACON_INFO = BIT(5),
+ BSS_CHANGED_BEACON_INT = BIT(6),
+ BSS_CHANGED_BSSID = BIT(7),
+ BSS_CHANGED_CQM = BIT(8),
+ BSS_CHANGED_ERP_CTS_PROT = BIT(9),
+ BSS_CHANGED_ERP_SLOT = BIT(10),
+ BSS_CHANGED_FTM_RESPONDER = BIT(11),
+ BSS_CHANGED_HT = BIT(12),
+ BSS_CHANGED_IDLE = BIT(13),
+ BSS_CHANGED_MU_GROUPS = BIT(14),
+ BSS_CHANGED_P2P_PS = BIT(15),
+ BSS_CHANGED_PS = BIT(16),
+ BSS_CHANGED_QOS = BIT(17),
+ BSS_CHANGED_TXPOWER = BIT(18),
+ BSS_CHANGED_HE_BSS_COLOR = BIT(19),
+ BSS_CHANGED_AP_PROBE_RESP = BIT(20),
+ BSS_CHANGED_BASIC_RATES = BIT(21),
+ BSS_CHANGED_ERP_PREAMBLE = BIT(22),
+ BSS_CHANGED_IBSS = BIT(23),
+ BSS_CHANGED_MCAST_RATE = BIT(24),
+ BSS_CHANGED_SSID = BIT(25),
+ BSS_CHANGED_FILS_DISCOVERY = BIT(26),
+ BSS_CHANGED_HE_OBSS_PD = BIT(27),
+ BSS_CHANGED_TWT = BIT(28),
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT(30),
+ BSS_CHANGED_EHT_PUNCTURING = BIT(31),
+ BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(32),
+ BSS_CHANGED_MLD_TTLM = BIT_ULL(33),
+ BSS_CHANGED_TPE = BIT_ULL(34),
+};
+
+/* 802.11 Figure 9-256 Suite selector format. [OUI(3), SUITE TYPE(1)] */
+#define WLAN_CIPHER_SUITE_OUI(_oui, _x) (((_oui) << 8) | ((_x) & 0xff))
+
+/* 802.11 Table 9-131 Cipher suite selectors. */
+/* 802.1x suite B 11 */
+#define WLAN_CIPHER_SUITE(_x) WLAN_CIPHER_SUITE_OUI(0x000fac, _x)
+/* Use group 0 */
+#define WLAN_CIPHER_SUITE_WEP40 WLAN_CIPHER_SUITE(1)
+#define WLAN_CIPHER_SUITE_TKIP WLAN_CIPHER_SUITE(2)
+/* Reserved 3 */
+#define WLAN_CIPHER_SUITE_CCMP WLAN_CIPHER_SUITE(4) /* CCMP-128 */
+#define WLAN_CIPHER_SUITE_WEP104 WLAN_CIPHER_SUITE(5)
+#define WLAN_CIPHER_SUITE_AES_CMAC WLAN_CIPHER_SUITE(6) /* BIP-CMAC-128 */
+/* Group addressed traffic not allowed 7 */
+#define WLAN_CIPHER_SUITE_GCMP WLAN_CIPHER_SUITE(8)
+#define WLAN_CIPHER_SUITE_GCMP_256 WLAN_CIPHER_SUITE(9)
+#define WLAN_CIPHER_SUITE_CCMP_256 WLAN_CIPHER_SUITE(10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 WLAN_CIPHER_SUITE(11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 WLAN_CIPHER_SUITE(12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 WLAN_CIPHER_SUITE(13)
+/* Reserved 14-255 */
+
+/* See ISO/IEC JTC 1 N 9880 Table 11 */
+#define WLAN_CIPHER_SUITE_SMS4 WLAN_CIPHER_SUITE_OUI(0x001472, 1)
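+
+/*
+ * For illustration (follows from the macros above): WLAN_CIPHER_SUITE(4)
+ * is WLAN_CIPHER_SUITE_OUI(0x000fac, 4) == (0x000fac << 8) | 0x04 ==
+ * 0x000fac04, i.e. the 00-0F-AC:4 (CCMP-128) suite selector.
+ */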
+
+
+/* 802.11 Table 9-133 AKM suite selectors. */
+#define WLAN_AKM_SUITE(_x) WLAN_CIPHER_SUITE_OUI(0x000fac, _x)
+/* Reserved 0 */
+#define WLAN_AKM_SUITE_8021X WLAN_AKM_SUITE(1)
+#define WLAN_AKM_SUITE_PSK WLAN_AKM_SUITE(2)
+#define WLAN_AKM_SUITE_FT_8021X WLAN_AKM_SUITE(3)
+#define WLAN_AKM_SUITE_FT_PSK WLAN_AKM_SUITE(4)
+#define WLAN_AKM_SUITE_8021X_SHA256 WLAN_AKM_SUITE(5)
+#define WLAN_AKM_SUITE_PSK_SHA256 WLAN_AKM_SUITE(6)
+/* TDLS 7 */
+#define WLAN_AKM_SUITE_SAE WLAN_AKM_SUITE(8)
+/* FToSAE 9 */
+/* AP peer key 10 */
+/* 802.1x suite B 11 */
+/* 802.1x suite B 384 12 */
+/* FTo802.1x 384 13 */
+/* Reserved 14-255 */
+/* Apparently 11ax defines more. Seen (19,20) mentioned. */
+
+#define TKIP_PN_TO_IV16(_x) ((uint16_t)((_x) & 0xffff))
+#define TKIP_PN_TO_IV32(_x) ((uint32_t)(((_x) >> 16) & 0xffffffff))
+
+enum ieee80211_neg_ttlm_res {
+ NEG_TTLM_RES_ACCEPT,
+ NEG_TTLM_RES_REJECT,
+};
+
+#define IEEE80211_TTLM_NUM_TIDS 8
+struct ieee80211_neg_ttlm {
+ uint16_t downlink[IEEE80211_TTLM_NUM_TIDS];
+ uint16_t uplink[IEEE80211_TTLM_NUM_TIDS];
+};
+
+/* 802.11-2020 9.4.2.55.3 A-MPDU Parameters field */
+#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x3
+#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
+#define IEEE80211_HT_AMPDU_PARM_DENSITY (0x7 << IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT)
+
+struct ieee80211_sta;
+
+struct ieee80211_ampdu_params {
+ struct ieee80211_sta *sta;
+ enum ieee80211_ampdu_mlme_action action;
+ uint16_t buf_size;
+ uint16_t timeout;
+ uint16_t ssn;
+ uint8_t tid;
+ bool amsdu;
+};
+
+struct ieee80211_bar {
+ /* TODO FIXME */
+ int control, start_seq_num;
+ uint8_t *ra;
+ uint16_t frame_control;
+};
+
+struct ieee80211_mutable_offsets {
+ /* TODO FIXME */
+ uint16_t tim_offset;
+ uint16_t cntdwn_counter_offs[2];
+
+ int mbssid_off;
+};
+
+struct mac80211_fils_discovery {
+ uint32_t max_interval;
+};
+
+struct ieee80211_chanctx_conf {
+ struct cfg80211_chan_def def;
+ struct cfg80211_chan_def min_def;
+ struct cfg80211_chan_def ap;
+
+ uint8_t rx_chains_dynamic;
+ uint8_t rx_chains_static;
+ bool radar_enabled;
+
+ /* Must stay last. */
+ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE);
+};
+
+struct ieee80211_rate_status {
+ struct rate_info rate_idx;
+ uint8_t try_count;
+};
+
+struct ieee80211_ema_beacons {
+ uint8_t cnt;
+ struct {
+ struct sk_buff *skb;
+ struct ieee80211_mutable_offsets offs;
+ } bcn[0];
+};
+
+struct ieee80211_chanreq {
+ struct cfg80211_chan_def oper;
+};
+
+#define WLAN_MEMBERSHIP_LEN (8)
+#define WLAN_USER_POSITION_LEN (16)
+
+/*
+ * 802.11ac-2013, 8.4.2.164 VHT Transmit Power Envelope element
+ * 802.11-???? ?
+ */
+struct ieee80211_parsed_tpe_eirp {
+ int8_t power[5];
+ uint8_t count;
+ bool valid;
+};
+struct ieee80211_parsed_tpe_psd {
+ int8_t power[16];
+ uint8_t count;
+ bool valid;
+};
+struct ieee80211_parsed_tpe {
+ /* We see access to [0] so assume at least 2. */
+ struct ieee80211_parsed_tpe_eirp max_local[2];
+ struct ieee80211_parsed_tpe_eirp max_reg_client[2];
+ struct ieee80211_parsed_tpe_psd psd_local[2];
+ struct ieee80211_parsed_tpe_psd psd_reg_client[2];
+};
+
+struct ieee80211_bss_conf {
+ /* TODO FIXME */
+ struct ieee80211_vif *vif;
+ struct cfg80211_bss *bss;
+ const uint8_t *bssid;
+ uint8_t addr[ETH_ALEN];
+ uint8_t link_id;
+ uint8_t _pad0;
+ uint8_t transmitter_bssid[ETH_ALEN];
+ struct ieee80211_ftm_responder_params *ftmr_params;
+ struct ieee80211_p2p_noa_attr p2p_noa_attr;
+ struct ieee80211_chanreq chanreq;
+ __be32 arp_addr_list[1]; /* XXX TODO */
+ struct ieee80211_rate *beacon_rate;
+ struct {
+ uint8_t membership[WLAN_MEMBERSHIP_LEN];
+ uint8_t position[WLAN_USER_POSITION_LEN];
+ } mu_group;
+ struct {
+ uint32_t params;
+ /* single field struct? */
+ } he_oper;
+ struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_he_obss_pd he_obss_pd;
+
+ bool ht_ldpc;
+ bool vht_ldpc;
+ bool he_ldpc;
+ bool vht_mu_beamformee;
+ bool vht_mu_beamformer;
+ bool vht_su_beamformee;
+ bool vht_su_beamformer;
+ bool he_mu_beamformer;
+ bool he_su_beamformee;
+ bool he_su_beamformer;
+ bool he_full_ul_mumimo;
+ bool eht_su_beamformee;
+ bool eht_su_beamformer;
+ bool eht_mu_beamformer;
+
+ uint16_t ht_operation_mode;
+ int arp_addr_cnt;
+ uint16_t eht_puncturing;
+
+ uint8_t dtim_period;
+ uint8_t sync_dtim_count;
+ uint8_t bss_param_ch_cnt_link_id;
+ bool qos;
+ bool twt_broadcast;
+ bool use_cts_prot;
+ bool use_short_preamble;
+ bool use_short_slot;
+ bool he_support;
+ bool eht_support;
+ bool csa_active;
+ bool mu_mimo_owner;
+ bool color_change_active;
+ uint32_t sync_device_ts;
+ uint64_t sync_tsf;
+ uint16_t beacon_int;
+ int16_t txpower;
+ uint32_t basic_rates;
+ int mcast_rate[NUM_NL80211_BANDS];
+ enum ieee80211_ap_reg_power power_type;
+ struct cfg80211_bitrate_mask beacon_tx_rate;
+ struct mac80211_fils_discovery fils_discovery;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_vif *mbssid_tx_vif;
+ struct ieee80211_parsed_tpe tpe;
+
+ int ack_enabled, bssid_index, bssid_indicator, cqm_rssi_hyst, cqm_rssi_thold, ema_ap, frame_time_rts_th, ftm_responder;
+ int htc_trig_based_pkt_ext;
+ int multi_sta_back_32bit, nontransmitted;
+ int profile_periodicity;
+ int twt_requester, uora_exists, uora_ocw_range;
+ int assoc_capability, enable_beacon, hidden_ssid, ibss_joined, twt_protected;
+ int twt_responder, unsol_bcast_probe_resp_interval;
+};
+
+struct ieee80211_channel_switch {
+ /* TODO FIXME */
+ int block_tx, count, delay, device_timestamp, timestamp;
+ uint8_t link_id;
+ struct cfg80211_chan_def chandef;
+};
+
+enum ieee80211_event_type {
+ BA_FRAME_TIMEOUT,
+ BAR_RX_EVENT,
+ MLME_EVENT,
+ RSSI_EVENT,
+};
+
+enum ieee80211_rssi_event_data {
+ RSSI_EVENT_LOW,
+ RSSI_EVENT_HIGH,
+};
+
+enum ieee80211_mlme_event_data {
+ ASSOC_EVENT,
+ AUTH_EVENT,
+ DEAUTH_RX_EVENT,
+ DEAUTH_TX_EVENT,
+};
+
+enum ieee80211_mlme_event_status {
+ MLME_DENIED,
+ MLME_TIMEOUT,
+};
+
+struct ieee80211_mlme_event {
+ enum ieee80211_mlme_event_data data;
+ enum ieee80211_mlme_event_status status;
+ int reason;
+};
+
+struct ieee80211_event {
+ /* TODO FIXME */
+ enum ieee80211_event_type type;
+ union {
+ struct {
+ int ssn;
+ struct ieee80211_sta *sta;
+ uint8_t tid;
+ } ba;
+ struct ieee80211_mlme_event mlme;
+ } u;
+};
+
+struct ieee80211_ftm_responder_params {
+ /* TODO FIXME */
+ uint8_t *lci;
+ uint8_t *civicloc;
+ int lci_len;
+ int civicloc_len;
+};
+
+struct ieee80211_conf {
+ int dynamic_ps_timeout;
+ int power_level;
+ uint32_t listen_interval;
+ bool radar_enabled;
+ enum ieee80211_hw_conf_flags flags;
+ struct cfg80211_chan_def chandef;
+};
+
+enum ieee80211_hw_flags {
+ IEEE80211_HW_AMPDU_AGGREGATION,
+ IEEE80211_HW_AP_LINK_PS,
+ IEEE80211_HW_BUFF_MMPDU_TXQ,
+ IEEE80211_HW_CHANCTX_STA_CSA,
+ IEEE80211_HW_CONNECTION_MONITOR,
+ IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
+ IEEE80211_HW_HAS_RATE_CONTROL,
+ IEEE80211_HW_MFP_CAPABLE,
+ IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS,
+ IEEE80211_HW_RX_INCLUDES_FCS,
+ IEEE80211_HW_SIGNAL_DBM,
+ IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+ IEEE80211_HW_SPECTRUM_MGMT,
+ IEEE80211_HW_STA_MMPDU_TXQ,
+ IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS,
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
+ IEEE80211_HW_SUPPORTS_MULTI_BSSID,
+ IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
+ IEEE80211_HW_SUPPORTS_PS,
+ IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
+ IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
+ IEEE80211_HW_SUPPORT_FAST_XMIT,
+ IEEE80211_HW_TDLS_WIDER_BW,
+ IEEE80211_HW_TIMING_BEACON_ONLY,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
+ IEEE80211_HW_TX_AMSDU,
+ IEEE80211_HW_TX_FRAG_LIST,
+ IEEE80211_HW_USES_RSS,
+ IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_SW_CRYPTO_CONTROL,
+ IEEE80211_HW_SUPPORTS_TX_FRAG,
+ IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK,
+ IEEE80211_HW_REPORTS_LOW_ACK,
+ IEEE80211_HW_QUEUE_CONTROL,
+ IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
+ IEEE80211_HW_SUPPORTS_RC_TABLE,
+ IEEE80211_HW_DETECTS_COLOR_COLLISION,
+ IEEE80211_HW_DISALLOW_PUNCTURING,
+ IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
+ IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN,
+ IEEE80211_HW_HANDLES_QUIET_CSA,
+ IEEE80211_HW_NO_VIRTUAL_MONITOR,
+
+ /* Keep last. */
+ NUM_IEEE80211_HW_FLAGS
+};
+
+struct ieee80211_hw {
+
+ struct wiphy *wiphy;
+
+ /* TODO FIXME */
+ int extra_tx_headroom, weight_multiplier;
+ int max_rate_tries, max_rates, max_report_rates;
+ const char *rate_control_algorithm;
+ struct {
+ uint16_t units_pos; /* radiotap "spec" is .. inconsistent. */
+ uint16_t accuracy;
+ } radiotap_timestamp;
+ size_t sta_data_size;
+ size_t vif_data_size;
+ size_t chanctx_data_size;
+ size_t txq_data_size;
+ uint16_t radiotap_mcs_details;
+ uint16_t radiotap_vht_details;
+ uint16_t queues;
+ uint16_t offchannel_tx_hw_queue;
+ uint16_t uapsd_max_sp_len;
+ uint16_t uapsd_queues;
+ uint16_t max_rx_aggregation_subframes;
+ uint16_t max_tx_aggregation_subframes;
+ uint16_t max_tx_fragments;
+ uint16_t max_listen_interval;
+ uint32_t extra_beacon_tailroom;
+ netdev_features_t netdev_features;
+ unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
+ struct ieee80211_conf conf;
+
+#if 0 /* leave here for documentation purposes. This does NOT work. */
+ /* Must stay last. */
+ uint8_t priv[0] __aligned(CACHE_LINE_SIZE);
+#else
+ void *priv;
+#endif
+};
+
+enum ieee802111_key_flag {
+ IEEE80211_KEY_FLAG_GENERATE_IV = BIT(0),
+ IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(1),
+ IEEE80211_KEY_FLAG_PAIRWISE = BIT(2),
+ IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(3),
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(4),
+ IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(5),
+ IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(6),
+ IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(7),
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(8),
+ IEEE80211_KEY_FLAG_SPP_AMSDU = BIT(9),
+};
+
+#define IEEE80211_KEY_FLAG_BITS \
+ "\20\1GENERATE_IV\2GENERATE_MMIC\3PAIRWISE\4PUT_IV_SPACE" \
+ "\5PUT_MIC_SPACE\6SW_MGMT_TX\7GENERATE_IV_MGMT\10GENERATE_MMIE" \
+ "\11RESERVE_TAILROOM\12SPP_AMSDU"
+
+struct ieee80211_key_conf {
+#if defined(__FreeBSD__)
+ const struct ieee80211_key *_k; /* backpointer to net80211 */
+#endif
+ atomic64_t tx_pn;
+ uint32_t cipher;
+ uint8_t icv_len; /* __unused nowadays? */
+ uint8_t iv_len;
+ uint8_t hw_key_idx; /* Set by drv. */
+ uint8_t keyidx;
+ uint16_t flags;
+ int8_t link_id; /* signed! */
+ uint8_t keylen;
+ uint8_t key[0]; /* Must stay last! */
+};
+
+struct ieee80211_key_seq {
+ /* TODO FIXME */
+ union {
+ struct {
+ uint8_t seq[IEEE80211_MAX_PN_LEN];
+ uint8_t seq_len;
+ } hw;
+ struct {
+ uint8_t pn[IEEE80211_CCMP_PN_LEN];
+ } ccmp;
+ struct {
+ uint8_t pn[IEEE80211_GCMP_PN_LEN];
+ } gcmp;
+ struct {
+ uint8_t pn[IEEE80211_CMAC_PN_LEN];
+ } aes_cmac;
+ struct {
+ uint8_t pn[IEEE80211_GMAC_PN_LEN];
+ } aes_gmac;
+ struct {
+ uint32_t iv32;
+ uint16_t iv16;
+ } tkip;
+ };
+};
+
+
+enum ieee80211_rx_status_flags {
+ RX_FLAG_ALLOW_SAME_PN = BIT(0),
+ RX_FLAG_AMPDU_DETAILS = BIT(1),
+ RX_FLAG_AMPDU_EOF_BIT = BIT(2),
+ RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(3),
+ RX_FLAG_DECRYPTED = BIT(4),
+ RX_FLAG_DUP_VALIDATED = BIT(5),
+ RX_FLAG_FAILED_FCS_CRC = BIT(6),
+ RX_FLAG_ICV_STRIPPED = BIT(7),
+ RX_FLAG_MACTIME = BIT(8) | BIT(9),
+ RX_FLAG_MACTIME_PLCP_START = 1 << 8,
+ RX_FLAG_MACTIME_START = 2 << 8,
+ RX_FLAG_MACTIME_END = 3 << 8,
+ RX_FLAG_MIC_STRIPPED = BIT(10),
+ RX_FLAG_MMIC_ERROR = BIT(11),
+ RX_FLAG_MMIC_STRIPPED = BIT(12),
+ RX_FLAG_NO_PSDU = BIT(13),
+ RX_FLAG_PN_VALIDATED = BIT(14),
+ RX_FLAG_RADIOTAP_HE = BIT(15),
+ RX_FLAG_RADIOTAP_HE_MU = BIT(16),
+ RX_FLAG_RADIOTAP_LSIG = BIT(17),
+ RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(18),
+ RX_FLAG_NO_SIGNAL_VAL = BIT(19),
+ RX_FLAG_IV_STRIPPED = BIT(20),
+ RX_FLAG_AMPDU_IS_LAST = BIT(21),
+ RX_FLAG_AMPDU_LAST_KNOWN = BIT(22),
+ RX_FLAG_AMSDU_MORE = BIT(23),
+ /* = BIT(24), */
+ RX_FLAG_ONLY_MONITOR = BIT(25),
+ RX_FLAG_SKIP_MONITOR = BIT(26),
+ RX_FLAG_8023 = BIT(27),
+ RX_FLAG_RADIOTAP_TLV_AT_END = BIT(28),
+ /* = BIT(29), */
+ RX_FLAG_MACTIME_IS_RTAP_TS64 = BIT(30),
+ RX_FLAG_FAILED_PLCP_CRC = BIT(31),
+};
+
+#define IEEE80211_RX_STATUS_FLAGS_BITS \
+ "\20\1ALLOW_SAME_PN\2AMPDU_DETAILS\3AMPDU_EOF_BIT\4AMPDU_EOF_BIT_KNOWN" \
+ "\5DECRYPTED\6DUP_VALIDATED\7FAILED_FCS_CRC\10ICV_STRIPPED" \
+ "\11MACTIME_PLCP_START\12MACTIME_START\13MIC_STRIPPED" \
+ "\14MMIC_ERROR\15MMIC_STRIPPED\16NO_PSDU\17PN_VALIDATED" \
+ "\20RADIOTAP_HE\21RADIOTAP_HE_MU\22RADIOTAP_LSIG\23RADIOTAP_VENDOR_DATA" \
+ "\24NO_SIGNAL_VAL\25IV_STRIPPED\26AMPDU_IS_LAST\27AMPDU_LAST_KNOWN" \
+ "\30AMSDU_MORE\31MACTIME_END\32ONLY_MONITOR\33SKIP_MONITOR" \
+ "\348023\35RADIOTAP_TLV_AT_END\36MACTIME\37MACTIME_IS_RTAP_TS64" \
+ "\40FAILED_PLCP_CRC"
+
+enum mac80211_rx_encoding {
+ RX_ENC_LEGACY = 0,
+ RX_ENC_HT,
+ RX_ENC_VHT,
+ RX_ENC_HE,
+ RX_ENC_EHT,
+};
+
+struct ieee80211_rx_status {
+ /* TODO FIXME, this is too large. Over-reduce types to u8 where possible. */
+ union {
+ uint64_t boottime_ns;
+ int64_t ack_tx_hwtstamp;
+ };
+ uint64_t mactime;
+ uint32_t device_timestamp;
+ enum ieee80211_rx_status_flags flag;
+ uint16_t freq;
+ uint8_t encoding:3, bw:4; /* enum mac80211_rx_encoding, rate_info_bw */ /* See mt76.h */
+ uint8_t ampdu_reference;
+ uint8_t band;
+ uint8_t chains;
+ int8_t chain_signal[IEEE80211_MAX_CHAINS];
+ int8_t signal;
+ uint8_t enc_flags;
+ union {
+ struct {
+ uint8_t he_ru:3; /* nl80211::enum nl80211_he_ru_alloc */
+ uint8_t he_gi:2; /* nl80211::enum nl80211_he_gi */
+ uint8_t he_dcm:1;
+ };
+ struct {
+ uint8_t ru:4; /* nl80211::enum nl80211_eht_ru_alloc */
+ uint8_t gi:2; /* nl80211::enum nl80211_eht_gi */
+ } eht;
+ };
+ bool link_valid;
+ uint8_t link_id; /* very inconsistent sizes? */
+ uint8_t zero_length_psdu_type;
+ uint8_t nss;
+ uint8_t rate_idx;
+};
+
+struct ieee80211_tx_status {
+ struct ieee80211_sta *sta;
+ struct ieee80211_tx_info *info;
+ int64_t ack_hwtstamp;
+
+ u8 n_rates;
+ struct ieee80211_rate_status *rates;
+
+ struct sk_buff *skb;
+ struct list_head *free_list;
+};
+
+struct ieee80211_scan_ies {
+ /* TODO FIXME */
+ int common_ie_len;
+ int len[NUM_NL80211_BANDS];
+ uint8_t *common_ies;
+ uint8_t *ies[NUM_NL80211_BANDS];
+};
+
+struct ieee80211_scan_request {
+ struct ieee80211_scan_ies ies;
+ struct cfg80211_scan_request req;
+};
+
+struct ieee80211_txq {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ int ac;
+ uint8_t tid;
+
+ /* Must stay last. */
+ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE);
+};
+
+struct ieee80211_sta_rates {
+ /* XXX TODO */
+ /* XXX some _rcu thing */
+ struct {
+ uint8_t idx;
+ uint8_t count;
+ uint16_t flags;
+ } rate[4]; /* XXX what is the real number? */
+};
+
+struct ieee80211_sta_txpwr {
+ /* XXX TODO */
+ enum nl80211_tx_power_setting type;
+ short power;
+};
+
+#define IEEE80211_NUM_TIDS 16 /* net80211::WME_NUM_TID */
+struct ieee80211_sta_agg {
+ uint16_t max_amsdu_len;
+ uint16_t max_rc_amsdu_len;
+ uint16_t max_tid_amsdu_len[IEEE80211_NUM_TIDS];
+};
+
+struct ieee80211_link_sta {
+ struct ieee80211_sta *sta;
+ uint8_t addr[ETH_ALEN];
+ uint8_t link_id;
+ uint32_t supp_rates[NUM_NL80211_BANDS];
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_he_cap he_cap;
+ struct ieee80211_he_6ghz_capa he_6ghz_capa;
+ struct ieee80211_sta_eht_cap eht_cap;
+ uint8_t rx_nss;
+ enum ieee80211_sta_rx_bw bandwidth;
+ enum ieee80211_smps_mode smps_mode;
+ struct ieee80211_sta_agg agg;
+ struct ieee80211_sta_txpwr txpwr;
+};
+
+struct ieee80211_sta {
+ /* TODO FIXME */
+ int max_amsdu_subframes;
+ int mfp, smps_mode, tdls, tdls_initiator;
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; /* iwlwifi: 8 and adds +1 to tid_data, net80211::IEEE80211_TID_SIZE */
+ struct ieee80211_sta_rates *rates; /* some rcu thing? */
+ uint8_t addr[ETH_ALEN];
+ uint16_t aid;
+ bool wme;
+ bool mlo;
+ uint8_t max_sp;
+ uint8_t uapsd_queues;
+ uint16_t valid_links;
+
+ struct ieee80211_link_sta deflink;
+ struct ieee80211_link_sta *link[IEEE80211_MLD_MAX_NUM_LINKS]; /* rcu? */
+
+ /* Must stay last. */
+ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE);
+};
+
+struct ieee80211_tdls_ch_sw_params {
+ /* TODO FIXME */
+ int action_code, ch_sw_tm_ie, status, switch_time, switch_timeout, timestamp;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def *chandef;
+ struct sk_buff *tmpl_skb;
+};
+
+struct ieee80211_tx_control {
+ /* TODO FIXME */
+ struct ieee80211_sta *sta;
+};
+
+struct ieee80211_tx_queue_params {
+ /* These types are based on iwlwifi FW structs. */
+ uint16_t cw_min;
+ uint16_t cw_max;
+ uint16_t txop;
+ uint8_t aifs;
+
+ /* TODO FIXME */
+ int acm, mu_edca, uapsd;
+ struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
+};
+
+struct ieee80211_tx_rate {
+ uint8_t idx;
+ uint16_t count:5,
+ flags:11;
+};
+
+enum ieee80211_vif_driver_flags {
+ IEEE80211_VIF_BEACON_FILTER = BIT(0),
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
+ IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
+#if defined(LINUXKPI_VERSION) && (LINUXKPI_VERSION < 60600) /* v6.6 */
+ IEEE80211_VIF_DISABLE_SMPS_OVERRIDE = BIT(3), /* Renamed to IEEE80211_VIF_EML_ACTIVE. */
+#endif
+ IEEE80211_VIF_EML_ACTIVE = BIT(4),
+ IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
+ IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC = BIT(6),
+};
+
+#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
+
+struct ieee80211_vif_cfg {
+ uint16_t aid;
+ uint16_t eml_cap;
+ uint16_t eml_med_sync_delay;
+ bool assoc;
+ bool ps;
+ bool idle;
+ bool ibss_joined;
+ int arp_addr_cnt;
+ size_t ssid_len;
+ uint32_t arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; /* big endian */
+ uint8_t ssid[IEEE80211_NWID_LEN];
+ uint8_t ap_addr[ETH_ALEN];
+};
+
+struct ieee80211_vif {
+ /* TODO FIXME */
+ enum nl80211_iftype type;
+ int cab_queue;
+ int offload_flags;
+ enum ieee80211_vif_driver_flags driver_flags;
+ bool p2p;
+ bool probe_req_reg;
+ uint8_t addr[ETH_ALEN];
+ struct ieee80211_vif_cfg cfg;
+ struct ieee80211_txq *txq;
+ struct ieee80211_bss_conf bss_conf;
+ struct ieee80211_bss_conf *link_conf[IEEE80211_MLD_MAX_NUM_LINKS]; /* rcu? */
+ uint8_t hw_queue[IEEE80211_NUM_ACS];
+ uint16_t active_links;
+ uint16_t valid_links;
+ struct ieee80211_vif *mbssid_tx_vif;
+
+/* #ifdef CONFIG_MAC80211_DEBUGFS */ /* Do not change structure depending on compile-time option. */
+ struct dentry *debugfs_dir;
+/* #endif */
+
+ /* Must stay last. */
+ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE);
+};
+
+struct ieee80211_vif_chanctx_switch {
+ struct ieee80211_chanctx_conf *old_ctx, *new_ctx;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link_conf;
+};
+
+struct ieee80211_prep_tx_info {
+ u16 duration;
+ bool success;
+ bool was_assoc;
+ int link_id;
+};
+
+/* XXX-BZ too big, over-reduce size to u8, and array sizes to minimum to fit in skb->cb. */
+/* Also warning: some sizes change by pointer size! This is 64bit only. */
+struct ieee80211_tx_info {
+ enum ieee80211_tx_info_flags flags; /* 32 bits */
+ /* TODO FIXME */
+ enum nl80211_band band; /* 3 bits */
+ uint16_t hw_queue:4, /* 4 bits */
+ tx_time_est:10; /* 10 bits */
+ union {
+ struct {
+ struct ieee80211_tx_rate rates[4];
+ bool use_rts;
+ uint8_t antennas:2;
+ struct ieee80211_vif *vif;
+ struct ieee80211_key_conf *hw_key;
+ enum ieee80211_tx_control_flags flags;
+ } control;
+ struct {
+ struct ieee80211_tx_rate rates[4];
+ uint32_t ack_signal;
+ uint8_t ampdu_ack_len;
+ uint8_t ampdu_len;
+ uint8_t antenna;
+ uint16_t tx_time;
+ uint8_t flags;
+ void *status_driver_data[16 / sizeof(void *)]; /* XXX TODO */
+ } status;
+#define IEEE80211_TX_INFO_DRIVER_DATA_SIZE 40
+ void *driver_data[IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)];
+ };
+};
+
+/* net80211 conflict */
+struct linuxkpi_ieee80211_tim_ie {
+ uint8_t dtim_count;
+ uint8_t dtim_period;
+ uint8_t bitmap_ctrl;
+ uint8_t *virtual_map;
+};
+#define ieee80211_tim_ie linuxkpi_ieee80211_tim_ie
+
+struct survey_info { /* net80211::struct ieee80211_channel_survey */
+ /* TODO FIXME */
+ uint32_t filled;
+#define SURVEY_INFO_TIME 0x0001
+#define SURVEY_INFO_TIME_RX 0x0002
+#define SURVEY_INFO_TIME_SCAN 0x0004
+#define SURVEY_INFO_TIME_TX 0x0008
+#define SURVEY_INFO_TIME_BSS_RX 0x0010
+#define SURVEY_INFO_TIME_BUSY 0x0020
+#define SURVEY_INFO_IN_USE 0x0040
+#define SURVEY_INFO_NOISE_DBM 0x0080
+ uint32_t noise;
+ uint64_t time;
+ uint64_t time_bss_rx;
+ uint64_t time_busy;
+ uint64_t time_rx;
+ uint64_t time_scan;
+ uint64_t time_tx;
+ struct ieee80211_channel *channel;
+};
+
+enum ieee80211_iface_iter {
+ IEEE80211_IFACE_ITER_NORMAL = BIT(0),
+ IEEE80211_IFACE_ITER_RESUME_ALL = BIT(1),
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2), /* seems to be an iter flag */
+ IEEE80211_IFACE_ITER_ACTIVE = BIT(3),
+
+ /* Internal flags only. */
+ IEEE80211_IFACE_ITER__ATOMIC = BIT(6),
+ IEEE80211_IFACE_ITER__MTX = BIT(8),
+};
+
+enum set_key_cmd {
+ SET_KEY,
+ DISABLE_KEY,
+};
+
+/* 802.11-2020, 9.4.2.55.2 HT Capability Information field. */
+enum rx_enc_flags {
+ RX_ENC_FLAG_SHORTPRE = BIT(0),
+ RX_ENC_FLAG_SHORT_GI = BIT(2),
+ RX_ENC_FLAG_HT_GF = BIT(3),
+ RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5),
+#define RX_ENC_FLAG_STBC_SHIFT 4
+ RX_ENC_FLAG_LDPC = BIT(6),
+ RX_ENC_FLAG_BF = BIT(7),
+};
+
+enum sta_notify_cmd {
+ STA_NOTIFY_AWAKE,
+ STA_NOTIFY_SLEEP,
+};
+
+struct ieee80211_low_level_stats {
+ /* Can we make them uint64_t? */
+ uint32_t dot11ACKFailureCount;
+ uint32_t dot11FCSErrorCount;
+ uint32_t dot11RTSFailureCount;
+ uint32_t dot11RTSSuccessCount;
+};
+
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_4ADDR,
+ IEEE80211_OFFLOAD_ENCAP_ENABLED,
+ IEEE80211_OFFLOAD_DECAP_ENABLED,
+};
+
+struct ieee80211_ops {
+ /* TODO FIXME */
+ int (*start)(struct ieee80211_hw *);
+ void (*stop)(struct ieee80211_hw *, bool);
+
+ int (*config)(struct ieee80211_hw *, u32);
+ void (*reconfig_complete)(struct ieee80211_hw *, enum ieee80211_reconfig_type);
+
+ void (*prep_add_interface)(struct ieee80211_hw *, enum nl80211_iftype);
+ int (*add_interface)(struct ieee80211_hw *, struct ieee80211_vif *);
+ void (*remove_interface)(struct ieee80211_hw *, struct ieee80211_vif *);
+ int (*change_interface)(struct ieee80211_hw *, struct ieee80211_vif *, enum nl80211_iftype, bool);
+
+ void (*sw_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, const u8 *);
+ void (*sw_scan_complete)(struct ieee80211_hw *, struct ieee80211_vif *);
+ int (*sched_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_sched_scan_request *, struct ieee80211_scan_ies *);
+ int (*sched_scan_stop)(struct ieee80211_hw *, struct ieee80211_vif *);
+ int (*hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_scan_request *);
+ void (*cancel_hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *);
+
+ int (*conf_tx)(struct ieee80211_hw *, struct ieee80211_vif *, u32, u16, const struct ieee80211_tx_queue_params *);
+ void (*tx)(struct ieee80211_hw *, struct ieee80211_tx_control *, struct sk_buff *);
+ int (*tx_last_beacon)(struct ieee80211_hw *);
+ void (*wake_tx_queue)(struct ieee80211_hw *, struct ieee80211_txq *);
+
+ void (*mgd_prepare_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *);
+ void (*mgd_complete_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *);
+ void (*mgd_protect_tdls_discover)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int);
+
+ void (*flush)(struct ieee80211_hw *, struct ieee80211_vif *, u32, bool);
+ void (*flush_sta)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+
+ int (*set_frag_threshold)(struct ieee80211_hw *, u32);
+
+ void (*sync_rx_queues)(struct ieee80211_hw *);
+
+ void (*allow_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool);
+ void (*release_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool);
+
+ int (*sta_add)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+ int (*sta_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+ int (*sta_set_txpwr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+ void (*sta_statistics)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct station_info *);
+ void (*sta_pre_rcu_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+ int (*sta_state)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, enum ieee80211_sta_state, enum ieee80211_sta_state);
+ void (*sta_notify)(struct ieee80211_hw *, struct ieee80211_vif *, enum sta_notify_cmd, struct ieee80211_sta *);
+ void (*sta_rc_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u32);
+ void (*link_sta_rc_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_link_sta *, u32);
+ void (*sta_rate_tbl_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+ void (*sta_set_4addr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, bool);
+ void (*sta_set_decap_offload)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, bool);
+
+ u64 (*prepare_multicast)(struct ieee80211_hw *, struct netdev_hw_addr_list *);
+
+ int (*ampdu_action)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_ampdu_params *);
+
+ bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *, struct sk_buff *, struct sk_buff *);
+
+ int (*pre_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *);
+ int (*post_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *);
+ void (*channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *);
+ void (*channel_switch_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_chan_def *);
+ void (*abort_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *);
+ void (*channel_switch_rx_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *);
+ int (*tdls_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8, struct cfg80211_chan_def *, struct sk_buff *, u32);
+ void (*tdls_cancel_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *);
+ void (*tdls_recv_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_tdls_ch_sw_params *);
+
+ int (*add_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *);
+ void (*remove_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *);
+ void (*change_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, u32);
+ int (*assign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *);
+ void (*unassign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *);
+ int (*switch_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif_chanctx_switch *, int, enum ieee80211_chanctx_switch_mode);
+
+ int (*get_antenna)(struct ieee80211_hw *, u32 *, u32 *);
+ int (*set_antenna)(struct ieee80211_hw *, u32, u32);
+
+ int (*remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel *, int, enum ieee80211_roc_type);
+ int (*cancel_remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *);
+
+ void (*configure_filter)(struct ieee80211_hw *, unsigned int, unsigned int *, u64);
+ void (*config_iface_filter)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, unsigned int);
+
+ void (*bss_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u64);
+ void (*link_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u64);
+
+ int (*set_rts_threshold)(struct ieee80211_hw *, u32);
+ void (*event_callback)(struct ieee80211_hw *, struct ieee80211_vif *, const struct ieee80211_event *);
+ int (*get_survey)(struct ieee80211_hw *, int, struct survey_info *);
+ int (*get_ftm_responder_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_ftm_responder_stats *);
+
+ uint64_t (*get_tsf)(struct ieee80211_hw *, struct ieee80211_vif *);
+ void (*set_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, uint64_t);
+ void (*offset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, s64);
+
+ int (*set_bitrate_mask)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_bitrate_mask *);
+ void (*set_coverage_class)(struct ieee80211_hw *, s16);
+ int (*set_tim)(struct ieee80211_hw *, struct ieee80211_sta *, bool);
+
+ int (*set_key)(struct ieee80211_hw *, enum set_key_cmd, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *);
+ void (*update_tkip_key)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_key_conf *, struct ieee80211_sta *, u32, u16 *);
+
+ int (*start_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *);
+ void (*abort_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *);
+
+ int (*start_ap)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *link_conf);
+ void (*stop_ap)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *link_conf);
+ int (*join_ibss)(struct ieee80211_hw *, struct ieee80211_vif *);
+ void (*leave_ibss)(struct ieee80211_hw *, struct ieee80211_vif *);
+
+ int (*set_sar_specs)(struct ieee80211_hw *, const struct cfg80211_sar_specs *);
+
+ int (*set_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct cfg80211_tid_config *);
+ int (*reset_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8);
+
+ int (*get_et_sset_count)(struct ieee80211_hw *, struct ieee80211_vif *, int);
+ void (*get_et_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct ethtool_stats *, u64 *);
+ void (*get_et_strings)(struct ieee80211_hw *, struct ieee80211_vif *, u32, u8 *);
+
+ void (*update_vif_offload)(struct ieee80211_hw *, struct ieee80211_vif *);
+
+ int (*get_txpower)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, int *);
+ int (*get_stats)(struct ieee80211_hw *, struct ieee80211_low_level_stats *);
+
+ int (*set_radar_background)(struct ieee80211_hw *, struct cfg80211_chan_def *);
+
+ void (*add_twt_setup)(struct ieee80211_hw *, struct ieee80211_sta *, struct ieee80211_twt_setup *);
+ void (*twt_teardown_request)(struct ieee80211_hw *, struct ieee80211_sta *, u8);
+
+ int (*set_hw_timestamp)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_set_hw_timestamp *);
+
+ void (*vif_cfg_changed)(struct ieee80211_hw *, struct ieee80211_vif *, u64);
+
+ int (*change_vif_links)(struct ieee80211_hw *, struct ieee80211_vif *, u16, u16, struct ieee80211_bss_conf *[IEEE80211_MLD_MAX_NUM_LINKS]);
+ int (*change_sta_links)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u16, u16);
+ bool (*can_activate_links)(struct ieee80211_hw *, struct ieee80211_vif *, u16);
+ enum ieee80211_neg_ttlm_res (*can_neg_ttlm)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_neg_ttlm *);
+
+ void (*rfkill_poll)(struct ieee80211_hw *);
+
+/* #ifdef CONFIG_MAC80211_DEBUGFS */ /* Do not change depending on compile-time option. */
+ void (*sta_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct dentry *);
+ void (*vif_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *);
+ void (*link_sta_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_link_sta *, struct dentry *);
+ void (*link_add_debugfs)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, struct dentry *);
+/* #endif */
+/* #ifdef CONFIG_PM_SLEEP */ /* Do not change depending on compile-time option. */
+ int (*suspend)(struct ieee80211_hw *, struct cfg80211_wowlan *);
+ int (*resume)(struct ieee80211_hw *);
+ void (*set_wakeup)(struct ieee80211_hw *, bool);
+ void (*set_rekey_data)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_gtk_rekey_data *);
+ void (*set_default_unicast_key)(struct ieee80211_hw *, struct ieee80211_vif *, int);
+/* #if IS_ENABLED(CONFIG_IPV6) */
+ void (*ipv6_addr_change)(struct ieee80211_hw *, struct ieee80211_vif *, struct inet6_dev *);
+/* #endif */
+/* #endif CONFIG_PM_SLEEP */
+};
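+
+/*
+ * Minimal ops-table sketch (hypothetical "mydrv_*" callbacks, illustration
+ * only; most members above are optional and depend on the driver):
+ *
+ *	static const struct ieee80211_ops mydrv_ops = {
+ *		.start = mydrv_start,
+ *		.stop = mydrv_stop,
+ *		.config = mydrv_config,
+ *		.add_interface = mydrv_add_interface,
+ *		.remove_interface = mydrv_remove_interface,
+ *		.configure_filter = mydrv_configure_filter,
+ *		.tx = mydrv_tx,
+ *		.wake_tx_queue = mydrv_wake_tx_queue,
+ *	};
+ */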
+
+/* -------------------------------------------------------------------------- */
+
+/* linux_80211.c */
+extern const struct cfg80211_ops linuxkpi_mac80211cfgops;
+
+struct ieee80211_hw *linuxkpi_ieee80211_alloc_hw(size_t,
+ const struct ieee80211_ops *);
+void linuxkpi_ieee80211_iffree(struct ieee80211_hw *);
+void linuxkpi_set_ieee80211_dev(struct ieee80211_hw *, char *);
+int linuxkpi_ieee80211_ifattach(struct ieee80211_hw *);
+void linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *);
+void linuxkpi_ieee80211_unregister_hw(struct ieee80211_hw *);
+struct ieee80211_hw * linuxkpi_wiphy_to_ieee80211_hw(struct wiphy *);
+void linuxkpi_ieee80211_restart_hw(struct ieee80211_hw *);
+void linuxkpi_ieee80211_iterate_interfaces(
+ struct ieee80211_hw *hw, enum ieee80211_iface_iter flags,
+ void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *),
+ void *);
+void linuxkpi_ieee80211_iterate_keys(struct ieee80211_hw *,
+ struct ieee80211_vif *,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_key_conf *, void *),
+ void *, bool);
+void linuxkpi_ieee80211_iterate_chan_contexts(struct ieee80211_hw *,
+ void(*iterfunc)(struct ieee80211_hw *,
+ struct ieee80211_chanctx_conf *, void *),
+ void *);
+void linuxkpi_ieee80211_iterate_stations_atomic(struct ieee80211_hw *,
+ void (*iterfunc)(void *, struct ieee80211_sta *), void *);
+void linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *,
+ struct cfg80211_scan_info *);
+void linuxkpi_ieee80211_rx(struct ieee80211_hw *, struct sk_buff *,
+ struct ieee80211_sta *, struct napi_struct *, struct list_head *);
+uint8_t linuxkpi_ieee80211_get_tid(struct ieee80211_hdr *, bool);
+struct ieee80211_sta *linuxkpi_ieee80211_find_sta(struct ieee80211_vif *,
+ const u8 *);
+struct ieee80211_sta *linuxkpi_ieee80211_find_sta_by_ifaddr(
+ struct ieee80211_hw *, const uint8_t *, const uint8_t *);
+struct sk_buff *linuxkpi_ieee80211_tx_dequeue(struct ieee80211_hw *,
+ struct ieee80211_txq *);
+bool linuxkpi_ieee80211_is_ie_id_in_ie_buf(const u8, const u8 *, size_t);
+bool linuxkpi_ieee80211_ie_advance(size_t *, const u8 *, size_t);
+void linuxkpi_ieee80211_free_txskb(struct ieee80211_hw *, struct sk_buff *,
+ int);
+void linuxkpi_ieee80211_queue_delayed_work(struct ieee80211_hw *,
+ struct delayed_work *, int);
+void linuxkpi_ieee80211_queue_work(struct ieee80211_hw *, struct work_struct *);
+struct sk_buff *linuxkpi_ieee80211_pspoll_get(struct ieee80211_hw *,
+ struct ieee80211_vif *);
+struct sk_buff *linuxkpi_ieee80211_nullfunc_get(struct ieee80211_hw *,
+ struct ieee80211_vif *, int, bool);
+void linuxkpi_ieee80211_txq_get_depth(struct ieee80211_txq *, unsigned long *,
+ unsigned long *);
+struct wireless_dev *linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *);
+void linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *);
+void linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *);
+struct sk_buff *linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *,
+ uint8_t *, uint8_t *, size_t, size_t);
+void linuxkpi_ieee80211_tx_status(struct ieee80211_hw *, struct sk_buff *);
+void linuxkpi_ieee80211_tx_status_ext(struct ieee80211_hw *,
+ struct ieee80211_tx_status *);
+void linuxkpi_ieee80211_stop_queues(struct ieee80211_hw *);
+void linuxkpi_ieee80211_wake_queues(struct ieee80211_hw *);
+void linuxkpi_ieee80211_stop_queue(struct ieee80211_hw *, int);
+void linuxkpi_ieee80211_wake_queue(struct ieee80211_hw *, int);
+void linuxkpi_ieee80211_txq_schedule_start(struct ieee80211_hw *, uint8_t);
+struct ieee80211_txq *linuxkpi_ieee80211_next_txq(struct ieee80211_hw *, uint8_t);
+void linuxkpi_ieee80211_schedule_txq(struct ieee80211_hw *,
+ struct ieee80211_txq *, bool);
+void linuxkpi_ieee80211_handle_wake_tx_queue(struct ieee80211_hw *,
+ struct ieee80211_txq *);
+
+/* -------------------------------------------------------------------------- */
+
+static __inline void
+_ieee80211_hw_set(struct ieee80211_hw *hw, enum ieee80211_hw_flags flag)
+{
+
+ set_bit(flag, hw->flags);
+}
+
+static __inline bool
+__ieee80211_hw_check(struct ieee80211_hw *hw, enum ieee80211_hw_flags flag)
+{
+
+ return (test_bit(flag, hw->flags));
+}
+
+/* They pass in shortened flag names; how confusingly inconsistent. */
+#define ieee80211_hw_set(_hw, _flag) \
+ _ieee80211_hw_set(_hw, IEEE80211_HW_ ## _flag)
+#define ieee80211_hw_check(_hw, _flag) \
+ __ieee80211_hw_check(_hw, IEEE80211_HW_ ## _flag)
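+
+/*
+ * Example (illustrative): ieee80211_hw_set(hw, SIGNAL_DBM) expands to
+ * _ieee80211_hw_set(hw, IEEE80211_HW_SIGNAL_DBM) and sets that bit in
+ * hw->flags; ieee80211_hw_check(hw, SIGNAL_DBM) tests it.
+ */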
+
+/* XXX-BZ add CTASSERTS that size of struct is <= sizeof skb->cb. */
+CTASSERT(sizeof(struct ieee80211_tx_info) <= sizeof(((struct sk_buff *)0)->cb));
+#define IEEE80211_SKB_CB(_skb) \
+ ((struct ieee80211_tx_info *)((_skb)->cb))
+
+CTASSERT(sizeof(struct ieee80211_rx_status) <= sizeof(((struct sk_buff *)0)->cb));
+#define IEEE80211_SKB_RXCB(_skb) \
+ ((struct ieee80211_rx_status *)((_skb)->cb))
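+
+/*
+ * Typical RX-path sketch (illustrative only): the RX status lives in the
+ * skb control buffer and is filled in by the driver before the frame is
+ * handed up:
+ *
+ *	struct ieee80211_rx_status *rxs;
+ *
+ *	rxs = IEEE80211_SKB_RXCB(skb);
+ *	memset(rxs, 0, sizeof(*rxs));
+ *	rxs->band = NL80211_BAND_2GHZ;
+ *	rxs->freq = 2412;
+ *	rxs->signal = -42;
+ *	ieee80211_rx_napi(hw, sta, skb, napi);
+ */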
+
+static __inline void
+ieee80211_free_hw(struct ieee80211_hw *hw)
+{
+
+ linuxkpi_ieee80211_iffree(hw);
+
+ if (hw->wiphy != NULL)
+ wiphy_free(hw->wiphy);
+ /* Note that *hw is not valid any longer after this. */
+
+ IMPROVE();
+}
+
+static __inline struct ieee80211_hw *
+ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops)
+{
+
+ return (linuxkpi_ieee80211_alloc_hw(priv_len, ops));
+}
+
+static __inline void
+SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev)
+{
+
+ set_wiphy_dev(hw->wiphy, dev);
+ linuxkpi_set_ieee80211_dev(hw, dev_name(dev));
+
+ IMPROVE();
+}
+
+static __inline int
+ieee80211_register_hw(struct ieee80211_hw *hw)
+{
+ int error;
+
+ error = wiphy_register(hw->wiphy);
+ if (error != 0)
+ return (error);
+
+ /*
+ * At this point the driver has set all the options, flags, bands,
+ * ciphers, hw address(es), ... basically mac80211/cfg80211 hw/wiphy
+ * setup is done.
+ * We need to replicate a lot of information from here into net80211.
+ */
+ error = linuxkpi_ieee80211_ifattach(hw);
+
+ IMPROVE();
+
+ return (error);
+}
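+
+/*
+ * Typical driver bring-up sketch (illustrative; the "mydrv_*" names are
+ * hypothetical): allocate the hw with room for driver private state, attach
+ * it to the bus device, set capability flags, then register:
+ *
+ *	hw = ieee80211_alloc_hw(sizeof(struct mydrv_softc), &mydrv_ops);
+ *	if (hw == NULL)
+ *		return (ENOMEM);
+ *	SET_IEEE80211_DEV(hw, dev);
+ *	ieee80211_hw_set(hw, SIGNAL_DBM);
+ *	error = ieee80211_register_hw(hw);
+ *	if (error != 0)
+ *		ieee80211_free_hw(hw);
+ */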
+
+static inline void
+ieee80211_unregister_hw(struct ieee80211_hw *hw)
+{
+
+ linuxkpi_ieee80211_unregister_hw(hw);
+}
+
+static __inline struct ieee80211_hw *
+wiphy_to_ieee80211_hw(struct wiphy *wiphy)
+{
+
+ return (linuxkpi_wiphy_to_ieee80211_hw(wiphy));
+}
+
+static inline void
+ieee80211_restart_hw(struct ieee80211_hw *hw)
+{
+ linuxkpi_ieee80211_restart_hw(hw);
+}
+
+static inline void
+ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif)
+{
+ TODO();
+}
+
+/* -------------------------------------------------------------------------- */
+
+#define link_conf_dereference_check(_vif, _linkid) \
+ rcu_dereference_check((_vif)->link_conf[_linkid], true)
+
+#define link_conf_dereference_protected(_vif, _linkid) \
+ rcu_dereference_protected((_vif)->link_conf[_linkid], true)
+
+#define link_sta_dereference_check(_sta, _linkid) \
+ rcu_dereference_check((_sta)->link[_linkid], true)
+
+#define link_sta_dereference_protected(_sta, _linkid) \
+ rcu_dereference_protected((_sta)->link[_linkid], true)
+
+#define for_each_vif_active_link(_vif, _link, _linkid) \
+ for (_linkid = 0; _linkid < nitems((_vif)->link_conf); _linkid++) \
+ if ( ((_vif)->active_links == 0 /* no MLO */ || \
+ ((_vif)->active_links & BIT(_linkid)) != 0) && \
+ (_link = rcu_dereference((_vif)->link_conf[_linkid])) )
+
+#define for_each_sta_active_link(_vif, _sta, _linksta, _linkid) \
+ for (_linkid = 0; _linkid < nitems((_sta)->link); _linkid++) \
+ if ( ((_vif)->active_links == 0 /* no MLO */ || \
+ ((_vif)->active_links & BIT(_linkid)) != 0) && \
+ (_linksta = link_sta_dereference_protected((_sta), (_linkid))) )
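+
+/*
+ * Usage sketch (illustrative; mydrv_update_link() is hypothetical): iterate
+ * the active links of a (possibly MLO) vif; without MLO typically only the
+ * default link at index 0 is set and visited.
+ *
+ *	struct ieee80211_bss_conf *link_conf;
+ *	unsigned int link_id;
+ *
+ *	for_each_vif_active_link(vif, link_conf, link_id)
+ *		mydrv_update_link(hw, vif, link_conf, link_id);
+ */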
+
+/* -------------------------------------------------------------------------- */
+
+static __inline bool
+ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
+{
+ TODO();
+ return (false);
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* Receive functions (air/driver to mac80211/net80211). */
+
+
+static __inline void
+ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct napi_struct *napi)
+{
+
+ linuxkpi_ieee80211_rx(hw, skb, sta, napi, NULL);
+}
+
+static __inline void
+ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct list_head *list)
+{
+
+ linuxkpi_ieee80211_rx(hw, skb, sta, NULL, list);
+}
+
+static __inline void
+ieee80211_rx_ni(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+
+ linuxkpi_ieee80211_rx(hw, skb, NULL, NULL, NULL);
+}
+
+static __inline void
+ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+
+ linuxkpi_ieee80211_rx(hw, skb, NULL, NULL, NULL);
+}
+
+static __inline void
+ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+
+ linuxkpi_ieee80211_rx(hw, skb, NULL, NULL, NULL);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline void
+ieee80211_stop_queues(struct ieee80211_hw *hw)
+{
+ linuxkpi_ieee80211_stop_queues(hw);
+}
+
+static inline void
+ieee80211_wake_queues(struct ieee80211_hw *hw)
+{
+ linuxkpi_ieee80211_wake_queues(hw);
+}
+
+static inline void
+ieee80211_stop_queue(struct ieee80211_hw *hw, int qnum)
+{
+ linuxkpi_ieee80211_stop_queue(hw, qnum);
+}
+
+static inline void
+ieee80211_wake_queue(struct ieee80211_hw *hw, int qnum)
+{
+ linuxkpi_ieee80211_wake_queue(hw, qnum);
+}
+
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ linuxkpi_ieee80211_schedule_txq(hw, txq, true);
+}
+
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+ bool withoutpkts)
+{
+ linuxkpi_ieee80211_schedule_txq(hw, txq, withoutpkts);
+}
+
+static inline void
+ieee80211_txq_schedule_start(struct ieee80211_hw *hw, uint8_t ac)
+{
+ linuxkpi_ieee80211_txq_schedule_start(hw, ac);
+}
+
+static inline void
+ieee80211_txq_schedule_end(struct ieee80211_hw *hw, uint8_t ac)
+{
+ /* DO_NADA; */
+}
+
+static inline struct ieee80211_txq *
+ieee80211_next_txq(struct ieee80211_hw *hw, uint8_t ac)
+{
+ return (linuxkpi_ieee80211_next_txq(hw, ac));
+}
+
+static inline void
+ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ linuxkpi_ieee80211_handle_wake_tx_queue(hw, txq);
+}
+
+static inline void
+ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs)
+{
+ TODO();
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline uint8_t
+ieee80211_get_tid(struct ieee80211_hdr *hdr)
+{
+
+ return (linuxkpi_ieee80211_get_tid(hdr, false));
+}
+
+static __inline struct sk_buff *
+ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ uint16_t *tim_offset, uint16_t *tim_len, uint32_t link_id)
+{
+
+ if (tim_offset != NULL)
+ *tim_offset = 0;
+ if (tim_len != NULL)
+ *tim_len = 0;
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
+ enum ieee80211_iface_iter flags,
+ void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *),
+ void *arg)
+{
+
+ flags |= IEEE80211_IFACE_ITER__ATOMIC;
+ flags |= IEEE80211_IFACE_ITER_ACTIVE;
+ linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg);
+}
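+
+/*
+ * Illustrative callback shape for the interface iterators in this block
+ * ("mydrv_vif_iter" and "mydrv" are made-up placeholders):
+ *
+ *	static void
+ *	mydrv_vif_iter(void *arg, uint8_t *mac, struct ieee80211_vif *vif)
+ *	{
+ *		... per-vif work; called once for each matching interface ...
+ *	}
+ *
+ *	ieee80211_iterate_active_interfaces_atomic(hw, flags,
+ *	    mydrv_vif_iter, mydrv);
+ */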
+
+static __inline void
+ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
+ enum ieee80211_iface_iter flags,
+ void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *),
+ void *arg)
+{
+
+ flags |= IEEE80211_IFACE_ITER_ACTIVE;
+ linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg);
+}
+
+static __inline void
+ieee80211_iterate_active_interfaces_mtx(struct ieee80211_hw *hw,
+ enum ieee80211_iface_iter flags,
+ void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *),
+ void *arg)
+{
+ flags |= IEEE80211_IFACE_ITER_ACTIVE;
+ flags |= IEEE80211_IFACE_ITER__MTX;
+ linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg);
+}
+
+static __inline void
+ieee80211_iterate_interfaces(struct ieee80211_hw *hw,
+ enum ieee80211_iface_iter flags,
+ void (*iterfunc)(void *, uint8_t *, struct ieee80211_vif *),
+ void *arg)
+{
+
+ linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg);
+}
+
+static inline void
+ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_key_conf *, void *),
+ void *arg)
+{
+ linuxkpi_ieee80211_iterate_keys(hw, vif, iterfunc, arg, false);
+}
+
+static inline void
+ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_key_conf *, void *),
+ void *arg)
+{
+ linuxkpi_ieee80211_iterate_keys(hw, vif, iterfunc, arg, true);
+}
+
+static __inline void
+ieee80211_iter_chan_contexts_atomic(struct ieee80211_hw *hw,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, void *),
+ void *arg)
+{
+
+ linuxkpi_ieee80211_iterate_chan_contexts(hw, iterfunc, arg);
+}
+
+static __inline void
+ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterfunc)(void *, struct ieee80211_sta *), void *arg)
+{
+
+ linuxkpi_ieee80211_iterate_stations_atomic(hw, iterfunc, arg);
+}
+
+static __inline struct wireless_dev *
+ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
+{
+
+ return (linuxkpi_ieee80211_vif_to_wdev(vif));
+}
+
+static __inline struct sk_buff *
+ieee80211_beacon_get_template(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs,
+ uint32_t link_id)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+ieee80211_beacon_loss(struct ieee80211_vif *vif)
+{
+ linuxkpi_ieee80211_beacon_loss(vif);
+}
+
+static __inline void
+ieee80211_chswitch_done(struct ieee80211_vif *vif, bool t, uint32_t link_id)
+{
+ TODO();
+}
+
+static __inline bool
+ieee80211_csa_is_complete(struct ieee80211_vif *vif)
+{
+ TODO();
+ return (false);
+}
+
+static __inline void
+ieee80211_csa_set_counter(struct ieee80211_vif *vif, uint8_t counter)
+{
+ TODO();
+}
+
+static __inline int
+ieee80211_csa_update_counter(struct ieee80211_vif *vif)
+{
+ TODO();
+ return (-1);
+}
+
+static __inline void
+ieee80211_csa_finish(struct ieee80211_vif *vif, uint32_t link_id)
+{
+ TODO();
+}
+
+static inline enum nl80211_iftype
+ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
+{
+
+ /* If we are not p2p enabled, just return the type. */
+ if (!vif->p2p)
+ return (vif->type);
+
+ /* If we are p2p, depending on side, return type. */
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ return (NL80211_IFTYPE_P2P_GO);
+ case NL80211_IFTYPE_STATION:
+ return (NL80211_IFTYPE_P2P_CLIENT);
+ default:
+ break;
+ }
+ return (vif->type);
+}
+
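+/*
+ * A TU (time unit) is 1024 microseconds; net80211's IEEE80211_DUR_TU is
+ * expected to provide that factor.
+ */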
+static __inline unsigned long
+ieee80211_tu_to_usec(unsigned long tu)
+{
+
+ return (tu * IEEE80211_DUR_TU);
+}
+
+/*
+ * Below we assume that the two values from different enums are the same.
+ * Make sure this does not accidentally change.
+ */
+CTASSERT((int)IEEE80211_ACTION_SM_TPCREP == (int)IEEE80211_ACTION_RADIO_MEASUREMENT_LMREP);
+
+static __inline bool
+ieee80211_action_contains_tpc(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+
+ /* Check that this is a mgmt/action frame? */
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return (false);
+
+ /*
+ * This is a bit convoluted but according to docs both actions
+ * are checked for this.  This kind-of makes sense for the only consumer
+ * (iwlwifi) I am aware of, given the txpower fields are at the
+ * same location so the firmware can update the value.
+ */
+ /* 80211-2020 9.6.2 Spectrum Management Action frames */
+ /* 80211-2020 9.6.2.5 TPC Report frame format */
+ /* 80211-2020 9.6.6 Radio Measurement action details */
+ /* 80211-2020 9.6.6.4 Link Measurement Report frame format */
+ /* Check that it is Spectrum Management or Radio Measurement? */
+ if (mgmt->u.action.category != IEEE80211_ACTION_CAT_SM &&
+ mgmt->u.action.category != IEEE80211_ACTION_CAT_RADIO_MEASUREMENT)
+ return (false);
+
+ /*
+ * Check that it is TPC Report or Link Measurement Report?
+ * The values of each are the same (see CTASSERT above function).
+ */
+ if (mgmt->u.action.u.tpc_report.spec_mgmt != IEEE80211_ACTION_SM_TPCREP)
+ return (false);
+
+ /* 80211-2020 9.4.2.16 TPC Report element */
+ /* Check that the ELEMID and length are correct? */
+ if (mgmt->u.action.u.tpc_report.tpc_elem_id != IEEE80211_ELEMID_TPCREP ||
+ mgmt->u.action.u.tpc_report.tpc_elem_length != 4)
+ return (false);
+
+ /* All the right fields in the right place. */
+ return (true);
+}
+
+static __inline void
+ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+
+ linuxkpi_ieee80211_connection_loss(vif);
+}
+
+static __inline struct ieee80211_sta *
+ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *peer)
+{
+
+ return (linuxkpi_ieee80211_find_sta(vif, peer));
+}
+
+static __inline struct ieee80211_sta *
+ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, const uint8_t *addr,
+ const uint8_t *ourvifaddr)
+{
+
+ return (linuxkpi_ieee80211_find_sta_by_ifaddr(hw, addr, ourvifaddr));
+}
+
+static __inline size_t
+ieee80211_ie_split(const u8 *ies, size_t ies_len,
+ const u8 *ie_ids, size_t ie_ids_len, size_t start)
+{
+ size_t x;
+
+ x = start;
+
+ /* XXX FIXME, we need to deal with "Element ID Extension" */
+ while (x < ies_len) {
+
+ /* Is this IE one of the requested ie_ids? */
+ if (!linuxkpi_ieee80211_is_ie_id_in_ie_buf(ies[x],
+ ie_ids, ie_ids_len))
+ break;
+
+ if (!linuxkpi_ieee80211_ie_advance(&x, ies, ies_len))
+ break;
+ }
+
+ return (x);
+}
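+
+/*
+ * Usage note (a sketch of the intended use, not a requirement of this
+ * header): the returned offset is the split point, so a caller typically
+ * copies the leading IEs up to that offset before its own generated
+ * elements and the remainder of the buffer after them.
+ */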
+
+static __inline void
+ieee80211_request_smps(struct ieee80211_vif *vif, u_int link_id,
+ enum ieee80211_smps_mode smps)
+{
+ static const char *smps_mode_name[] = {
+ "SMPS_OFF",
+ "SMPS_STATIC",
+ "SMPS_DYNAMIC",
+ "SMPS_AUTOMATIC",
+ "SMPS_NUM_MODES"
+ };
+
+ if (linuxkpi_debug_80211 & D80211_TODO)
+ printf("%s:%d: XXX LKPI80211 TODO smps %d %s\n",
+ __func__, __LINE__, smps, smps_mode_name[smps]);
+}
+
+static __inline void
+ieee80211_tdls_oper_request(struct ieee80211_vif *vif, uint8_t *addr,
+ enum nl80211_tdls_operation oper, enum ieee80211_reason_code code,
+ gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool state)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ IMPROVE();
+
+ /*
+ * This is called on transmit failure.
+ * Use a not-so-random high status error so we can distinguish it
+ * from the normal low error values flying around in net80211;
+ * 0x455458 spells out "ETX" in ASCII.
+ */
+ linuxkpi_ieee80211_free_txskb(hw, skb, 0x455458);
+}
+
+static __inline void
+ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+{
+ TODO();
+/* XXX-BZ We need to see that. */
+}
+
+static __inline void
+ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+ enum nl80211_cqm_rssi_threshold_event crte, int sig, gfp_t gfp)
+{
+ TODO();
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline bool
+ieee80211_sn_less(uint16_t sn1, uint16_t sn2)
+{
+ return (IEEE80211_SEQ_BA_BEFORE(sn1, sn2));
+}
+
+static inline uint16_t
+ieee80211_sn_inc(uint16_t sn)
+{
+ return (IEEE80211_SEQ_INC(sn));
+}
+
+static inline uint16_t
+ieee80211_sn_add(uint16_t sn, uint16_t a)
+{
+ return (IEEE80211_SEQ_ADD(sn, a));
+}
+
+static inline uint16_t
+ieee80211_sn_sub(uint16_t sa, uint16_t sb)
+{
+ return (IEEE80211_SEQ_SUB(sa, sb));
+}
+
+static __inline void
+ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *sta, uint8_t tid,
+ uint32_t ssn, uint64_t bitmap, uint16_t received_mpdu)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, uint32_t x, uint8_t *addr)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, uint8_t *addr,
+ uint8_t tid)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif, uint8_t *addr,
+ uint8_t tid)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif, uint8_t *addr,
+ uint8_t tid)
+{
+ TODO();
+}
+
+/* -------------------------------------------------------------------------- */
+
+static inline void
+ieee80211_rate_set_vht(struct ieee80211_tx_rate *r, uint8_t mcs, uint8_t nss)
+{
+
+ /* XXX-BZ make it KASSERTS? */
+ if (((mcs & 0xF0) != 0) || (((nss - 1) & 0xf8) != 0)) {
+ printf("%s:%d: mcs %#04x nss %#04x invalid\n",
+ __func__, __LINE__, mcs, nss);
+ return;
+ }
+
+ r->idx = mcs;
+ r->idx |= ((nss - 1) << 4);
+}
+
+static inline uint8_t
+ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *r)
+{
+ return (((r->idx >> 4) & 0x07) + 1);
+}
+
+static inline uint8_t
+ieee80211_rate_get_vht_mcs(const struct ieee80211_tx_rate *r)
+{
+ return (r->idx & 0x0f);
+}
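+
+/*
+ * Encoding assumed by the three helpers above: bits 0-3 of idx carry the
+ * VHT MCS and bits 4-6 carry (NSS - 1).  As a worked example (derived from
+ * the code, not from a specification reference): mcs 7, nss 2 encodes to
+ * idx 0x17 and decodes back to nss 2, mcs 7.
+ */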
+
+static inline int
+ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *vht_cap,
+ enum ieee80211_vht_chanwidth chanwidth, /* defined in net80211. */
+ int mcs /* always 0 */, bool ext_nss_bw_cap /* always true */, int max_nss)
+{
+ enum ieee80211_vht_mcs_support mcs_s;
+ uint32_t supp_cw, ext_nss_bw;
+
+ switch (mcs) {
+ case 0 ... 7:
+ mcs_s = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs_s = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs_s = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ printf("%s: unsupported mcs value %d\n", __func__, mcs);
+ return (0);
+ }
+
+ if (max_nss == 0) {
+ uint16_t map;
+
+ map = le16toh(vht_cap->supp_mcs.rx_mcs_map);
+ for (int i = 7; i >= 0; i--) {
+ uint8_t val;
+
+ val = (map >> (2 * i)) & 0x03;
+ if (val == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ continue;
+ if (val >= mcs_s) {
+ max_nss = i + 1;
+ break;
+ }
+ }
+ }
+
+ if (max_nss == 0)
+ return (0);
+
+ if ((le16toh(vht_cap->supp_mcs.tx_mcs_map) &
+ IEEE80211_VHT_EXT_NSS_BW_CAPABLE) == 0)
+ return (max_nss);
+
+ supp_cw = le32_get_bits(vht_cap->vht_cap_info,
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK);
+ ext_nss_bw = le32_get_bits(vht_cap->vht_cap_info,
+ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+
+ /* If the caller indicates ext NSS BW is not supported, assume ext_nss_bw 0. */
+ if (!ext_nss_bw_cap)
+ ext_nss_bw = 0;
+
+ /*
+ * Cover 802.11-2016, Table 9-250.
+ */
+
+ /* Unsupported settings. */
+ if (supp_cw == 3)
+ return (0);
+ if (supp_cw == 2 && (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return (0);
+
+ /* Settings with factor != 1 or unsupported. */
+ switch (chanwidth) {
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ if (supp_cw == 0 && (ext_nss_bw == 0 || ext_nss_bw == 1))
+ return (0);
+ if (supp_cw == 1 && ext_nss_bw == 0)
+ return (0);
+ if ((supp_cw == 0 || supp_cw == 1) && ext_nss_bw == 2)
+ return (max_nss / 2);
+ if ((supp_cw == 0 || supp_cw == 1) && ext_nss_bw == 3)
+ return (3 * max_nss / 4);
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ if (supp_cw == 0 && ext_nss_bw == 0)
+ return (0);
+ if (supp_cw == 0 && (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return (max_nss / 2);
+ if (supp_cw == 0 && ext_nss_bw == 3)
+ return (3 * max_nss / 4);
+ if (supp_cw == 1 && ext_nss_bw == 3)
+ return (2 * max_nss);
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ if ((supp_cw == 1 || supp_cw == 2) && ext_nss_bw == 3)
+ return (2 * max_nss);
+ break;
+ }
+
+ /* Everything else has a factor of 1. */
+ return (max_nss);
+}
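+
+/*
+ * Worked example for ieee80211_get_vht_max_nss() above (derived purely from
+ * the code paths): with supp_cw 0, ext_nss_bw 3 and a 160 MHz request, a
+ * peer reporting 4 spatial streams is reduced to 3 * 4 / 4 = 3 streams.
+ */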
+
+
+static __inline void
+ieee80211_reserve_tid(struct ieee80211_sta *sta, uint8_t tid)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_unreserve_tid(struct ieee80211_sta *sta, uint8_t tid)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_send_eosp_nullfunc(struct ieee80211_sta *sta, uint8_t tid)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sta_block_awake(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool disable)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool sleeping)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sta_pspoll(struct ieee80211_sta *sta)
+{
+ TODO();
+}
+
+static inline void
+ieee80211_sta_recalc_aggregates(struct ieee80211_sta *sta)
+{
+ if (sta->valid_links) {
+ TODO();
+ }
+}
+
+static __inline void
+ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, int ntids)
+{
+ TODO();
+}
+
+static inline struct sk_buff *
+ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+
+ return (linuxkpi_ieee80211_tx_dequeue(hw, txq));
+}
+
+static inline struct sk_buff *
+ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct sk_buff *skb;
+
+ local_bh_disable();
+ skb = linuxkpi_ieee80211_tx_dequeue(hw, txq);
+ local_bh_enable();
+
+ return (skb);
+}
+
+static __inline void
+ieee80211_update_mu_groups(struct ieee80211_vif *vif,
+ u_int _i, uint8_t *ms, uint8_t *up)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sta_set_buffered(struct ieee80211_sta *sta, uint8_t tid, bool t)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sched_scan_results(struct ieee80211_hw *hw)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sta_eosp(struct ieee80211_sta *sta)
+{
+ TODO();
+}
+
+static __inline int
+ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, uint8_t tid, int x)
+{
+ TODO("rtw8x");
+ return (-EINVAL);
+}
+
+static __inline int
+ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, uint8_t tid)
+{
+ TODO("rtw89");
+ return (-EINVAL);
+}
+
+static __inline void
+ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, uint8_t *addr,
+ uint8_t tid)
+{
+ TODO("iwlwifi");
+}
+
+static __inline void
+ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, uint8_t *addr,
+ uint8_t tid)
+{
+ TODO("iwlwifi/rtw8x/...");
+}
+
+static __inline void
+ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_scan_completed(struct ieee80211_hw *hw,
+ struct cfg80211_scan_info *info)
+{
+
+ linuxkpi_ieee80211_scan_completed(hw, info);
+}
+
+static __inline struct sk_buff *
+ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ uint32_t link_id)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline struct sk_buff *
+ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+
+ /* Only STA needs this. Otherwise return NULL and panic bad drivers. */
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return (NULL);
+
+ return (linuxkpi_ieee80211_pspoll_get(hw, vif));
+}
+
+static __inline struct sk_buff *
+ieee80211_proberesp_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline struct sk_buff *
+ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int linkid, bool qos)
+{
+
+ /* Only STA needs this. Otherwise return NULL and panic bad drivers. */
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return (NULL);
+
+ return (linuxkpi_ieee80211_nullfunc_get(hw, vif, linkid, qos));
+}
+
+static __inline struct sk_buff *
+ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr,
+ uint8_t *ssid, size_t ssid_len, size_t tailroom)
+{
+
+ return (linuxkpi_ieee80211_probereq_get(hw, addr, ssid, ssid_len,
+ tailroom));
+}
+
+static __inline void
+ieee80211_queue_delayed_work(struct ieee80211_hw *hw, struct delayed_work *w,
+ int delay)
+{
+
+ linuxkpi_ieee80211_queue_delayed_work(hw, w, delay);
+}
+
+static __inline void
+ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *w)
+{
+
+ linuxkpi_ieee80211_queue_work(hw, w);
+}
+
+static __inline bool
+ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct sk_buff *skb, enum nl80211_band band, struct ieee80211_sta **sta)
+{
+ TODO();
+ return (false);
+}
+
+static __inline void
+ieee80211_tx_status_skb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ linuxkpi_ieee80211_tx_status(hw, skb);
+}
+
+static inline void
+ieee80211_tx_status_noskb(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ IMPROVE();
+ linuxkpi_ieee80211_tx_status(hw, skb);
+}
+
+static __inline void
+ieee80211_tx_status_ni(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ IMPROVE();
+ linuxkpi_ieee80211_tx_status(hw, skb);
+}
+
+static __inline void
+ieee80211_tx_status_ext(struct ieee80211_hw *hw,
+ struct ieee80211_tx_status *txstat)
+{
+
+ linuxkpi_ieee80211_tx_status_ext(hw, txstat);
+}
+
+static __inline void
+ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
+{
+ int i;
+
+ /*
+ * Apparently clearing flags and some other fields is not right.
+ * Given the function is called "status" we work on that part of
+ * the union.
+ */
+ for (i = 0; i < nitems(info->status.rates); i++)
+ info->status.rates[i].count = 0;
+ /*
+ * Unclear if ack_signal should be included or not but we clear the
+ * "valid" bool so this field is no longer valid.
+ */
+ memset(&info->status.ack_signal, 0, sizeof(*info) -
+ offsetof(struct ieee80211_tx_info, status.ack_signal));
+}
+
+static __inline void
+ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt,
+ unsigned long *byte_cnt)
+{
+
+ if (frame_cnt == NULL && byte_cnt == NULL)
+ return;
+
+ linuxkpi_ieee80211_txq_get_depth(txq, frame_cnt, byte_cnt);
+}
+
+static __inline void
+SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, uint8_t *addr)
+{
+
+ ether_addr_copy(hw->wiphy->perm_addr, addr);
+}
+
+static __inline void
+ieee80211_report_low_ack(struct ieee80211_sta *sta, int x)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_tx_rate_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
+{
+ TODO();
+}
+
+static __inline bool
+ieee80211_txq_may_transmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ TODO();
+ return (false);
+}
+
+static __inline void
+ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_sta_register_airtime(struct ieee80211_sta *sta,
+ uint8_t tid, uint32_t duration, int x)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
+{
+ TODO();
+}
+
+static __inline int
+ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, uint32_t link_id)
+{
+ TODO();
+ return (-1);
+}
+
+static __inline bool
+ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif, uint32_t link_id)
+{
+ TODO();
+ return (true);
+}
+
+static __inline void
+ieee80211_disconnect(struct ieee80211_vif *vif, bool _x)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif)
+{
+ TODO();
+}
+
+static __inline uint32_t
+ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *rxstat, int len)
+{
+ TODO();
+ return (0);
+}
+
+static __inline void
+ieee80211_get_tx_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct ieee80211_tx_rate *txrate, int nrates)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_color_change_finish(struct ieee80211_vif *vif, uint8_t link_id)
+{
+ TODO();
+}
+
+static __inline struct sk_buff *
+ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+linuxkpi_ieee80211_send_bar(struct ieee80211_vif *vif, uint8_t *ra, uint16_t tid,
+ uint16_t ssn)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_resume_disconnect(struct ieee80211_vif *vif)
+{
+ TODO();
+}
+
+static __inline int
+ieee80211_data_to_8023(struct sk_buff *skb, const uint8_t *addr,
+ enum nl80211_iftype iftype)
+{
+ TODO();
+ return (-1);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline void
+ieee80211_key_mic_failure(struct ieee80211_key_conf *key)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_key_replay(struct ieee80211_key_conf *key)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_remove_key(struct ieee80211_key_conf *key)
+{
+ TODO();
+}
+
+static __inline struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, int link_id)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const uint8_t *bssid,
+ const uint8_t *replay_ctr, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_tkip_add_iv(u8 *crypto_hdr, struct ieee80211_key_conf *keyconf,
+ uint64_t pn)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
+ const u8 *addr, uint32_t iv32, u16 *p1k)
+{
+
+ KASSERT(keyconf != NULL && addr != NULL && p1k != NULL,
+ ("%s: keyconf %p addr %p p1k %p\n", __func__, keyconf, addr, p1k));
+
+ TODO();
+ memset(p1k, 0xfa, 5 * sizeof(*p1k)); /* Just initializing. */
+}
+
+static __inline void
+ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *key,
+ uint32_t iv32, uint16_t *p1k)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
+ struct sk_buff *skb_frag, u8 *key)
+{
+ TODO();
+}
+
+static inline void
+ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int8_t tid,
+ struct ieee80211_key_seq *seq)
+{
+ const struct ieee80211_key *k;
+ const uint8_t *p;
+
+ KASSERT(keyconf != NULL && seq != NULL, ("%s: keyconf %p seq %p\n",
+ __func__, keyconf, seq));
+ k = keyconf->_k;
+ KASSERT(k != NULL, ("%s: keyconf %p ieee80211_key is NULL\n", __func__, keyconf));
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (tid < 0 || tid >= IEEE80211_NUM_TIDS)
+ return;
+ /* See net80211::tkip_decrypt() */
+ seq->tkip.iv32 = TKIP_PN_TO_IV32(k->wk_keyrsc[tid]);
+ seq->tkip.iv16 = TKIP_PN_TO_IV16(k->wk_keyrsc[tid]);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ if (tid < -1 || tid >= IEEE80211_NUM_TIDS)
+ return;
+ if (tid == -1)
+ p = (const uint8_t *)&k->wk_keyrsc[IEEE80211_NUM_TIDS]; /* IEEE80211_NONQOS_TID */
+ else
+ p = (const uint8_t *)&k->wk_keyrsc[tid];
+ memcpy(seq->ccmp.pn, p, sizeof(seq->ccmp.pn));
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (tid < -1 || tid >= IEEE80211_NUM_TIDS)
+ return;
+ if (tid == -1)
+ p = (const uint8_t *)&k->wk_keyrsc[IEEE80211_NUM_TIDS]; /* IEEE80211_NONQOS_TID */
+ else
+ p = (const uint8_t *)&k->wk_keyrsc[tid];
+ memcpy(seq->gcmp.pn, p, sizeof(seq->gcmp.pn));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ TODO();
+ memset(seq->aes_cmac.pn, 0xfa, sizeof(seq->aes_cmac.pn)); /* XXX TODO */
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ TODO();
+ memset(seq->aes_gmac.pn, 0xfa, sizeof(seq->aes_gmac.pn)); /* XXX TODO */
+ break;
+ default:
+ pr_debug("%s: unsupported cipher suite %d\n", __func__, keyconf->cipher);
+ break;
+ }
+}
+
+static __inline void
+ieee80211_set_key_rx_seq(struct ieee80211_key_conf *key, int tid,
+ struct ieee80211_key_seq *seq)
+{
+ TODO();
+}
+
+/* -------------------------------------------------------------------------- */
+
+static __inline void
+ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
+ struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ uint64_t obss_color_bitmap, gfp_t gfp)
+{
+ TODO();
+}
+
+static __inline void
+ieee80211_refresh_tx_agg_session_timer(struct ieee80211_sta *sta,
+ uint8_t tid)
+{
+ TODO();
+}
+
+static __inline struct ieee80211_ema_beacons *
+ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, uint32_t link_id)
+{
+ TODO();
+ return (NULL);
+}
+
+static __inline void
+ieee80211_beacon_free_ema_list(struct ieee80211_ema_beacons *bcns)
+{
+ TODO();
+}
+
+static inline bool
+ieee80211_vif_is_mld(const struct ieee80211_vif *vif)
+{
+
+ /* If valid_links is non-zero, the vif is an MLD. */
+ return (vif->valid_links != 0);
+}
+
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_iftype_cap_vif(const struct ieee80211_supported_band *band,
+ struct ieee80211_vif *vif)
+{
+ enum nl80211_iftype iftype;
+
+ iftype = ieee80211_vif_type_p2p(vif);
+ return (ieee80211_get_he_iftype_cap(band, iftype));
+}
+
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap_vif(const struct ieee80211_supported_band *band,
+ struct ieee80211_vif *vif)
+{
+ enum nl80211_iftype iftype;
+
+ iftype = ieee80211_vif_type_p2p(vif);
+ return (ieee80211_get_eht_iftype_cap(band, iftype));
+}
+
+static inline uint32_t
+ieee80211_vif_usable_links(const struct ieee80211_vif *vif)
+{
+ IMPROVE("MLO usable links likely are not just valid");
+ return (vif->valid_links);
+}
+
+static inline bool
+ieee80211_vif_link_active(const struct ieee80211_vif *vif, uint8_t link_id)
+{
+ if (ieee80211_vif_is_mld(vif))
+ return (vif->active_links & BIT(link_id));
+ return (link_id == 0);
+}
+
+static inline void
+ieee80211_set_active_links_async(struct ieee80211_vif *vif,
+ uint32_t new_active_links)
+{
+ TODO();
+}
+
+static inline int
+ieee80211_set_active_links(struct ieee80211_vif *vif,
+ uint32_t active_links)
+{
+ TODO();
+ return (-ENXIO);
+}
+
+static inline void
+ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp __unused)
+{
+ IMPROVE("we notify user space by a vap state change eventually");
+ linuxkpi_ieee80211_beacon_loss(vif);
+}
+
+#define ieee80211_send_bar(_v, _r, _t, _s) \
+ linuxkpi_ieee80211_send_bar(_v, _r, _t, _s)
+
+/* -------------------------------------------------------------------------- */
+
+int lkpi_80211_update_chandef(struct ieee80211_hw *,
+ struct ieee80211_chanctx_conf *);
+
+static inline int
+ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
+{
+ int error;
+
+ hw->conf.radar_enabled = chanctx_conf->radar_enabled;
+ error = lkpi_80211_update_chandef(hw, chanctx_conf);
+ return (error);
+}
+
+static inline void
+ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf __unused)
+{
+ hw->conf.radar_enabled = false;
+ lkpi_80211_update_chandef(hw, NULL);
+}
+
+static inline void
+ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf, uint32_t changed __unused)
+{
+ hw->conf.radar_enabled = chanctx_conf->radar_enabled;
+ lkpi_80211_update_chandef(hw, chanctx_conf);
+}
+
+static inline int
+ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs, int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode __unused)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ int error;
+
+ /* Sanity check. */
+ if (n_vifs <= 0)
+ return (-EINVAL);
+ if (vifs == NULL || vifs[0].new_ctx == NULL)
+ return (-EINVAL);
+
+ /*
+ * What to do if n_vifs > 1?
+ * Does that make sense for drivers not supporting chanctx?
+ */
+ hw->conf.radar_enabled = vifs[0].new_ctx->radar_enabled;
+ chanctx_conf = vifs[0].new_ctx;
+ error = lkpi_80211_update_chandef(hw, chanctx_conf);
+ return (error);
+}
+
+/* -------------------------------------------------------------------------- */
+
+#endif /* _LINUXKPI_NET_MAC80211_H */
diff --git a/sys/compat/linuxkpi/common/include/net/netevent.h b/sys/compat/linuxkpi/common/include/net/netevent.h
new file mode 100644
index 000000000000..c1c39af3a772
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/netevent.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_NET_NETEVENT_H_
+#define _LINUXKPI_NET_NETEVENT_H_
+
+#include <sys/types.h>
+#include <sys/eventhandler.h>
+
+#include <linux/notifier.h>
+
+enum netevent_notif_type {
+ NETEVENT_NEIGH_UPDATE = 0,
+#if 0 /* Unsupported events. */
+ NETEVENT_PMTU_UPDATE,
+ NETEVENT_REDIRECT,
+#endif
+};
+
+struct llentry;
+
+static inline void
+_handle_arp_update_event(void *arg, struct llentry *lle, int evt __unused)
+{
+ struct notifier_block *nb;
+
+ nb = arg;
+ nb->notifier_call(nb, NETEVENT_NEIGH_UPDATE, lle);
+}
+
+static inline int
+register_netevent_notifier(struct notifier_block *nb)
+{
+ nb->tags[NETEVENT_NEIGH_UPDATE] = EVENTHANDLER_REGISTER(
+ lle_event, _handle_arp_update_event, nb, 0);
+ return (0);
+}
+
+static inline int
+unregister_netevent_notifier(struct notifier_block *nb)
+{
+
+ EVENTHANDLER_DEREGISTER(lle_event, nb->tags[NETEVENT_NEIGH_UPDATE]);
+
+ return (0);
+}
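+
+/*
+ * Minimal usage sketch (names are illustrative): a consumer fills in a
+ * struct notifier_block with its notifier_call callback and passes it to
+ * register_netevent_notifier(); the callback is then invoked with
+ * NETEVENT_NEIGH_UPDATE and the struct llentry pointer whenever the
+ * lle_event eventhandler fires.
+ */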
+
+#endif /* _LINUXKPI_NET_NETEVENT_H_ */
diff --git a/sys/compat/linuxkpi/common/include/net/netlink.h b/sys/compat/linuxkpi/common/include/net/netlink.h
new file mode 100644
index 000000000000..ae250177d18b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/netlink.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2020,2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_NETLINK_H
+#define _LINUXKPI_NET_NETLINK_H
+
+#include <netlink/netlink.h>
+
+struct nla_policy {
+};
+
+struct netlink_callback {
+ int args[8];
+};
+
+static __inline int
+nla_put(struct sk_buff *skb, int attr, size_t len, void *data)
+{
+
+ pr_debug("%s: TODO -- now native?\n", __func__);
+ return (-ENXIO);
+}
+
+static __inline int
+nla_put_u16(struct sk_buff *skb, int attr, uint16_t val)
+{
+
+ return (nla_put(skb, attr, sizeof(uint16_t), &val));
+}
+
+#endif /* _LINUXKPI_NET_NETLINK_H */
diff --git a/sys/compat/linuxkpi/common/include/net/page_pool.h b/sys/compat/linuxkpi/common/include/net/page_pool.h
new file mode 100644
index 000000000000..2dc8f74b31f3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/page_pool.h
@@ -0,0 +1,119 @@
+/*-
+ * Copyright (c) 2023 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_PAGE_POOL_H
+#define _LINUXKPI_NET_PAGE_POOL_H
+
+#include <linux/kernel.h> /* pr_debug */
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+
+struct device;
+
+struct page_pool_params {
+ struct device *dev;
+ uint32_t flags;
+ uint32_t order;
+ uint32_t pool_size;
+ uint32_t max_len;
+ uint32_t offset;
+ int nid; /* NUMA */
+ enum dma_data_direction dma_dir;
+ struct napi_struct *napi;
+};
+
+struct page_pool {
+};
+
+#define PP_FLAG_DMA_MAP BIT(0)
+#define PP_FLAG_DMA_SYNC_DEV BIT(1)
+#define PP_FLAG_PAGE_FRAG BIT(2)
+
+static inline struct page_pool *
+page_pool_create(const struct page_pool_params *ppparams)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static inline void
+page_pool_destroy(struct page_pool *ppool)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static inline struct page *
+page_pool_dev_alloc_frag(struct page_pool *ppool, uint32_t *offset,
+ size_t size)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (NULL);
+}
+
+static inline dma_addr_t
+page_pool_get_dma_addr(struct page *page)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (0);
+}
+
+static inline enum dma_data_direction
+page_pool_get_dma_dir(const struct page_pool *ppool)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (DMA_BIDIRECTIONAL);
+}
+
+static inline void
+page_pool_put_full_page(struct page_pool *ppool, struct page *page,
+ bool allow_direct)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
+
+static inline int
+page_pool_ethtool_stats_get_count(void)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (0);
+}
+
+static inline uint8_t *
+page_pool_ethtool_stats_get_strings(uint8_t *x)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+ return (x);
+}
+
+#endif /* _LINUXKPI_NET_PAGE_POOL_H */
diff --git a/sys/compat/linuxkpi/common/include/net/regulatory.h b/sys/compat/linuxkpi/common/include/net/regulatory.h
new file mode 100644
index 000000000000..0a538f1531f9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/regulatory.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ * Copyright (c) 2021-2022 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_NET_REGULATORY_H
+#define _LINUXKPI_NET_REGULATORY_H
+
+enum environment_cap {
+ ENVIRON_INDOOR = 1, /* keep != 0 */
+};
+
+#define REG_RULE(_begin, _end, _bw, _mag, _meirp, _flags) \
+{ \
+ .flags = (_flags), \
+ .freq_range.start_freq_khz = MHZ_TO_KHZ(_begin), \
+ .freq_range.end_freq_khz = MHZ_TO_KHZ(_end), \
+ .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(_bw), \
+ .power_rule.max_antenna_gain = DBI_TO_MBI(_mag), \
+ .power_rule.max_eirp = DBI_TO_MBI(_meirp), \
+}
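+
+/*
+ * Illustrative use (the values are placeholders, not a real regulatory
+ * entry):
+ *
+ *	REG_RULE(2412 - 10, 2472 + 10, 40, 0, 20, 0)
+ *
+ * i.e. start/end frequency and maximum bandwidth in MHz, maximum antenna
+ * gain, maximum EIRP and flags, scaled to kHz and 1/100 dB units by the
+ * macro above.
+ */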
+
+#endif /* _LINUXKPI_NET_REGULATORY_H */
diff --git a/sys/compat/linuxkpi/common/include/net/tcp.h b/sys/compat/linuxkpi/common/include/net/tcp.h
new file mode 100644
index 000000000000..4804a2b09b9d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/net/tcp.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LINUXKPI_NET_TCP_H_
+#define _LINUXKPI_NET_TCP_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+
+#include <net/ip.h>
+
+#endif /* _LINUXKPI_NET_TCP_H_ */
diff --git a/sys/compat/linuxkpi/common/include/stdarg.h b/sys/compat/linuxkpi/common/include/stdarg.h
new file mode 100644
index 000000000000..698ac45e9198
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/stdarg.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_STDARG_H_
+#define _LINUXKPI_STDARG_H_
+
+#include <sys/stdarg.h>
+
+#endif /* _LINUXKPI_STDARG_H_ */
diff --git a/sys/compat/linuxkpi/common/include/video/cmdline.h b/sys/compat/linuxkpi/common/include/video/cmdline.h
new file mode 100644
index 000000000000..eaa9a998fda2
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/video/cmdline.h
@@ -0,0 +1,44 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VIDEO_CMDLINE_H_
+#define _VIDEO_CMDLINE_H_
+
+#include <linux/types.h>
+
+#define CONFIG_VIDEO_CMDLINE
+
+#if defined(CONFIG_VIDEO_CMDLINE)
+const char *video_get_options(const char *name);
+#else
+static inline const char *
+video_get_options(const char *name)
+{
+ return (NULL);
+}
+#endif
+#endif /* _VIDEO_CMDLINE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/video/mipi_display.h b/sys/compat/linuxkpi/common/include/video/mipi_display.h
new file mode 100644
index 000000000000..ea079a57d39f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/video/mipi_display.h
@@ -0,0 +1,64 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_VIDEO_MIPI_DISPLAY_H_
+#define _LINUXKPI_VIDEO_MIPI_DISPLAY_H_
+
+#define MIPI_DSI_V_SYNC_START 0x01
+#define MIPI_DSI_V_SYNC_END 0x11
+#define MIPI_DSI_H_SYNC_START 0x21
+#define MIPI_DSI_H_SYNC_END 0x31
+#define MIPI_DSI_COMPRESSION_MODE 0x07
+#define MIPI_DSI_END_OF_TRANSMISSION 0x08
+#define MIPI_DSI_COLOR_MODE_OFF 0x02
+#define MIPI_DSI_COLOR_MODE_ON 0x12
+#define MIPI_DSI_SHUTDOWN_PERIPHERAL 0x22
+#define MIPI_DSI_TURN_ON_PERIPHERAL 0x32
+#define MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM 0x03
+#define MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM 0x13
+#define MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM 0x23
+#define MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM 0x04
+#define MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM 0x14
+#define MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM 0x24
+#define MIPI_DSI_DCS_SHORT_WRITE 0x05
+#define MIPI_DSI_DCS_SHORT_WRITE_PARAM 0x15
+#define MIPI_DSI_DCS_READ 0x06
+#define MIPI_DSI_EXECUTE_QUEUE 0x16
+#define MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE 0x37
+#define MIPI_DSI_NULL_PACKET 0x09
+#define MIPI_DSI_BLANKING_PACKET 0x19
+#define MIPI_DSI_GENERIC_LONG_WRITE 0x29
+#define MIPI_DSI_DCS_LONG_WRITE 0x39
+#define MIPI_DSI_PICTURE_PARAMETER_SET 0x0a
+#define MIPI_DSI_COMPRESSED_PIXEL_STREAM 0x0b
+#define MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 0x0c
+#define MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 0x1c
+#define MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 0x2c
+#define MIPI_DSI_PACKED_PIXEL_STREAM_30 0x0d
+#define MIPI_DSI_PACKED_PIXEL_STREAM_36 0x1d
+#define MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 0x3d
+#define MIPI_DSI_PACKED_PIXEL_STREAM_16 0x0e
+#define MIPI_DSI_PACKED_PIXEL_STREAM_18 0x1e
+#define MIPI_DSI_PIXEL_STREAM_3BYTE_18 0x2e
+#define MIPI_DSI_PACKED_PIXEL_STREAM_24 0x3e
+
+#define MIPI_DCS_NOP 0x00
+#define MIPI_DCS_SOFT_RESET 0x01
+#define MIPI_DCS_GET_POWER_MODE 0x0a
+#define MIPI_DCS_GET_PIXEL_FORMAT 0x0c
+#define MIPI_DCS_ENTER_SLEEP_MODE 0x10
+#define MIPI_DCS_EXIT_SLEEP_MODE 0x11
+#define MIPI_DCS_SET_DISPLAY_OFF 0x28
+#define MIPI_DCS_SET_DISPLAY_ON 0x29
+#define MIPI_DCS_SET_COLUMN_ADDRESS 0x2a
+#define MIPI_DCS_SET_PAGE_ADDRESS 0x2b
+#define MIPI_DCS_SET_TEAR_OFF 0x34
+#define MIPI_DCS_SET_TEAR_ON 0x35
+#define MIPI_DCS_SET_PIXEL_FORMAT 0x3a
+#define MIPI_DCS_SET_TEAR_SCANLINE 0x44
+#define MIPI_DCS_SET_DISPLAY_BRIGHTNESS 0x51
+#define MIPI_DCS_GET_DISPLAY_BRIGHTNESS 0x52
+#define MIPI_DCS_WRITE_CONTROL_DISPLAY 0x53
+#define MIPI_DCS_GET_CONTROL_DISPLAY 0x54
+#define MIPI_DCS_WRITE_POWER_SAVE 0x55
+
+#endif /* _LINUXKPI_VIDEO_MIPI_DISPLAY_H_ */
diff --git a/sys/compat/linuxkpi/common/include/video/vga.h b/sys/compat/linuxkpi/common/include/video/vga.h
new file mode 100644
index 000000000000..a5012d9e2f3f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/video/vga.h
@@ -0,0 +1,19 @@
+/* Public domain. */
+
+#ifndef _LINUXKPI_VIDEO_VGA_H
+#define _LINUXKPI_VIDEO_VGA_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define VGA_MIS_W 0x3c2
+#define VGA_SEQ_I 0x3c4
+#define VGA_SEQ_D 0x3c5
+#define VGA_MIS_R 0x3cc
+
+#define VGA_SR01_SCREEN_OFF (1 << 5)
+
+#define VGA_FB_PHYS_BASE 0xA0000
+#define VGA_FB_PHYS_SIZE 65536
+
+#endif
diff --git a/sys/compat/linuxkpi/common/include/xen/xen.h b/sys/compat/linuxkpi/common/include/xen/xen.h
new file mode 100644
index 000000000000..16e77724111d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/xen/xen.h
@@ -0,0 +1,37 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Serenity Cyber Security, LLC.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_XEN_XEN_H_
+#define _LINUXKPI_XEN_XEN_H_
+
+#define xen_initial_domain() lkpi_xen_initial_domain()
+#define xen_pv_domain() lkpi_xen_pv_domain()
+
+bool lkpi_xen_initial_domain(void);
+bool lkpi_xen_pv_domain(void);
+
+#endif /* _LINUXKPI_XEN_XEN_H_ */
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c
new file mode 100644
index 000000000000..1d00e8da8f9a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_80211.c
@@ -0,0 +1,8292 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2020-2025 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Public functions are called linuxkpi_*().
+ * Internal (static) functions are called lkpi_*().
+ *
+ * The internal structures holding metadata over public structures are also
+ * called lkpi_xxx (usually with a member at the end called xxx).
+ * Note: we do not replicate the full structure names but the commonly used
+ * variable names (e.g., struct ieee80211_hw -> struct lkpi_hw,
+ * struct ieee80211_sta -> struct lkpi_sta).
+ * There are macros to access one from the other.
+ * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta).
+ */
+
+/*
+ * TODO:
+ * - lots :)
+ * - HW_CRYPTO: we need a "keystore" and an ordered list for suspend/resume.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/sbuf.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/libkern.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_proto.h>
+#include <net80211/ieee80211_ratectl.h>
+#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_vht.h>
+
+#define LINUXKPI_NET80211
+#include <net/mac80211.h>
+
+#include <linux/workqueue.h>
+#include <linux/rculist.h>
+#include "linux_80211.h"
+
+#define LKPI_80211_WME
+#define LKPI_80211_HW_CRYPTO
+#define LKPI_80211_HT
+#define LKPI_80211_VHT
+
+#if defined(LKPI_80211_VHT) && !defined(LKPI_80211_HT)
+#define LKPI_80211_HT
+#endif
+#if defined(LKPI_80211_HT) && !defined(LKPI_80211_HW_CRYPTO)
+#define LKPI_80211_HW_CRYPTO
+#endif
+
+static MALLOC_DEFINE(M_LKPI80211, "lkpi80211", "LinuxKPI 80211 compat");
+
+/* XXX-BZ really want this and others in queue.h */
+#define TAILQ_ELEM_INIT(elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = NULL; \
+} while (0)
+
+/* -------------------------------------------------------------------------- */
+
+SYSCTL_DECL(_compat_linuxkpi);
+SYSCTL_NODE(_compat_linuxkpi, OID_AUTO, 80211, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "LinuxKPI 802.11 compatibility layer");
+
+#if defined(LKPI_80211_HW_CRYPTO)
+static bool lkpi_hwcrypto = false;
+SYSCTL_BOOL(_compat_linuxkpi_80211, OID_AUTO, hw_crypto, CTLFLAG_RDTUN,
+ &lkpi_hwcrypto, 0, "Enable LinuxKPI 802.11 hardware crypto offload");
+
+static bool lkpi_hwcrypto_tkip = false;
+SYSCTL_BOOL(_compat_linuxkpi_80211, OID_AUTO, tkip, CTLFLAG_RDTUN,
+ &lkpi_hwcrypto_tkip, 0, "Enable LinuxKPI 802.11 TKIP crypto offload");
+#endif
+
+/* Keep public for as long as header files are using it too. */
+int linuxkpi_debug_80211;
+
+#ifdef LINUXKPI_DEBUG_80211
+SYSCTL_INT(_compat_linuxkpi_80211, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &linuxkpi_debug_80211, 0, "LinuxKPI 802.11 debug level");
+
+#define UNIMPLEMENTED if (linuxkpi_debug_80211 & D80211_TODO) \
+ printf("XXX-TODO %s:%d: UNIMPLEMENTED\n", __func__, __LINE__)
+#define TRACEOK() if (linuxkpi_debug_80211 & D80211_TRACEOK) \
+ printf("XXX-TODO %s:%d: TRACEPOINT\n", __func__, __LINE__)
+#else
+#define UNIMPLEMENTED do { } while (0)
+#define TRACEOK() do { } while (0)
+#endif
+
+/* #define PREP_TX_INFO_DURATION (IEEE80211_TRANS_WAIT * 1000) */
+#ifndef PREP_TX_INFO_DURATION
+#define PREP_TX_INFO_DURATION 0 /* Let the driver do its thing. */
+#endif
+
+/* This is DSAP | SSAP | CTRL | ProtoID/OrgCode{3}. */
+const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+/* IEEE 802.11-05/0257r1 */
+const uint8_t bridge_tunnel_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+
+/* IEEE 802.11e Table 20i-UP-to-AC mappings. */
+static const uint8_t ieee80211e_up_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+#if 0
+ IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
+#endif
+};
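+
+/*
+ * Example: the table is indexed by user priority (TID); UP/TID 5 maps to
+ * IEEE80211_AC_VI.  lkpi_lsta_alloc() below indexes it as
+ * ieee80211e_up_to_ac[tid & 7], so TIDs 8..15 wrap onto the eight UPs.
+ */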
+
+const struct cfg80211_ops linuxkpi_mac80211cfgops = {
+ /*
+ * XXX TODO need a "glue layer" to link cfg80211 ops to
+ * mac80211 and to the driver or net80211.
+ * Can we pass some on 1:1? Need to compare the (*f)().
+ */
+};
+
+#if 0
+static struct lkpi_sta *lkpi_find_lsta_by_ni(struct lkpi_vif *,
+ struct ieee80211_node *);
+#endif
+static void lkpi_80211_txq_tx_one(struct lkpi_sta *, struct mbuf *);
+static void lkpi_80211_txq_task(void *, int);
+static void lkpi_80211_lhw_rxq_task(void *, int);
+static void lkpi_ieee80211_free_skb_mbuf(void *);
+#ifdef LKPI_80211_WME
+static int lkpi_wme_update(struct lkpi_hw *, struct ieee80211vap *, bool);
+#endif
+static void lkpi_ieee80211_wake_queues_locked(struct ieee80211_hw *);
+
+static const char *
+lkpi_rate_info_bw_to_str(enum rate_info_bw bw)
+{
+
+ switch (bw) {
+
+ case RATE_INFO_BW_20:
+ return ("20");
+ break;
+ case RATE_INFO_BW_5:
+ return ("5");
+ break;
+ case RATE_INFO_BW_10:
+ return ("10");
+ break;
+ case RATE_INFO_BW_40:
+ return ("40");
+ break;
+ case RATE_INFO_BW_80:
+ return ("80");
+ break;
+ case RATE_INFO_BW_160:
+ return ("160");
+ break;
+ case RATE_INFO_BW_HE_RU:
+ IMPROVE("nl80211_he_ru_alloc");
+ return ("HE_RU");
+ break;
+ case RATE_INFO_BW_320:
+ return ("320");
+ break;
+ case RATE_INFO_BW_EHT_RU:
+ IMPROVE("nl80211_eht_ru_alloc");
+ return ("EHT_RU");
+ break;
+ default:
+ return ("?");
+ break;
+ }
+}
+
+static void
+lkpi_nl80211_sta_info_to_str(struct sbuf *s, const char *prefix,
+ const uint64_t flags)
+{
+ int bit, i;
+
+ sbuf_printf(s, "%s %#010jx", prefix, flags);
+
+ i = 0;
+ for (bit = 0; bit < BITS_PER_TYPE(flags); bit++) {
+
+ if ((flags & BIT_ULL(bit)) == 0)
+ continue;
+
+#define EXPAND_CASE(_flag) \
+ case NL80211_STA_INFO_ ## _flag: \
+ sbuf_printf(s, "%c%s", (i == 0) ? '<' : ',', #_flag); \
+ i++; \
+ break;
+
+ switch (bit) {
+ EXPAND_CASE(BEACON_RX)
+ EXPAND_CASE(BEACON_SIGNAL_AVG)
+ EXPAND_CASE(BSS_PARAM)
+ EXPAND_CASE(CHAIN_SIGNAL)
+ EXPAND_CASE(CHAIN_SIGNAL_AVG)
+ EXPAND_CASE(CONNECTED_TIME)
+ EXPAND_CASE(INACTIVE_TIME)
+ EXPAND_CASE(SIGNAL)
+ EXPAND_CASE(SIGNAL_AVG)
+ EXPAND_CASE(STA_FLAGS)
+ EXPAND_CASE(RX_BITRATE)
+ EXPAND_CASE(RX_PACKETS)
+ EXPAND_CASE(RX_BYTES)
+ EXPAND_CASE(RX_DROP_MISC)
+ EXPAND_CASE(TX_BITRATE)
+ EXPAND_CASE(TX_PACKETS)
+ EXPAND_CASE(TX_BYTES)
+ EXPAND_CASE(TX_BYTES64)
+ EXPAND_CASE(RX_BYTES64)
+ EXPAND_CASE(TX_FAILED)
+ EXPAND_CASE(TX_RETRIES)
+ EXPAND_CASE(RX_DURATION)
+ EXPAND_CASE(TX_DURATION)
+ EXPAND_CASE(ACK_SIGNAL)
+ EXPAND_CASE(ACK_SIGNAL_AVG)
+ default:
+ sbuf_printf(s, "%c?%d", (i == 0) ? '<' : ',', bit);
+ break;
+ }
+ }
+#undef EXPAND_CASE
+ if (i > 0)
+ sbuf_printf(s, ">");
+ sbuf_printf(s, "\n");
+}
+
+static int
+lkpi_80211_dump_stas(SYSCTL_HANDLER_ARGS)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct station_info sinfo;
+ struct sbuf s;
+ int error;
+
+ if (req->newptr)
+ return (EPERM);
+
+ lvif = (struct lkpi_vif *)arg1;
+ vif = LVIF_TO_VIF(lvif);
+ vap = LVIF_TO_VAP(lvif);
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ sbuf_new_for_sysctl(&s, NULL, 1024, req);
+
+ wiphy_lock(hw->wiphy);
+ list_for_each_entry(lsta, &lvif->lsta_list, lsta_list) {
+ sta = LSTA_TO_STA(lsta);
+
+ sbuf_putc(&s, '\n');
+ sbuf_printf(&s, "lsta %p sta %p added_to_drv %d\n", lsta, sta, lsta->added_to_drv);
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ error = lkpi_80211_mo_sta_statistics(hw, vif, sta, &sinfo);
+ if (error == EEXIST) /* Not added to driver. */
+ continue;
+ if (error == ENOTSUPP) {
+ sbuf_printf(&s, " sta_statistics not supported\n");
+ continue;
+ }
+ if (error != 0) {
+ sbuf_printf(&s, " sta_statistics failed: %d\n", error);
+ continue;
+ }
+
+ /* If no RX_BITRATE is reported, try to fill it in from the lsta sinfo. */
+ if ((sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) == 0 &&
+ (lsta->sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) != 0) {
+ memcpy(&sinfo.rxrate, &lsta->sinfo.rxrate, sizeof(sinfo.rxrate));
+ sinfo.filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ lkpi_nl80211_sta_info_to_str(&s, " nl80211_sta_info (valid fields)", sinfo.filled);
+ sbuf_printf(&s, " connected_time %u inactive_time %u\n",
+ sinfo.connected_time, sinfo.inactive_time);
+ sbuf_printf(&s, " rx_bytes %ju rx_packets %u rx_dropped_misc %u\n",
+ (uintmax_t)sinfo.rx_bytes, sinfo.rx_packets, sinfo.rx_dropped_misc);
+ sbuf_printf(&s, " rx_duration %ju rx_beacon %u rx_beacon_signal_avg %d\n",
+ (uintmax_t)sinfo.rx_duration, sinfo.rx_beacon, (int8_t)sinfo.rx_beacon_signal_avg);
+
+ sbuf_printf(&s, " tx_bytes %ju tx_packets %u tx_failed %u\n",
+ (uintmax_t)sinfo.tx_bytes, sinfo.tx_packets, sinfo.tx_failed);
+ sbuf_printf(&s, " tx_duration %ju tx_retries %u\n",
+ (uintmax_t)sinfo.tx_duration, sinfo.tx_retries);
+
+ sbuf_printf(&s, " signal %d signal_avg %d ack_signal %d avg_ack_signal %d\n",
+ sinfo.signal, sinfo.signal_avg, sinfo.ack_signal, sinfo.avg_ack_signal);
+
+ sbuf_printf(&s, " generation %d assoc_req_ies_len %zu chains %d\n",
+ sinfo.generation, sinfo.assoc_req_ies_len, sinfo.chains);
+
+ for (int i = 0; i < sinfo.chains && i < IEEE80211_MAX_CHAINS; i++) {
+ sbuf_printf(&s, " chain[%d] signal %d signal_avg %d\n",
+ i, (int8_t)sinfo.chain_signal[i], (int8_t)sinfo.chain_signal_avg[i]);
+ }
+
+ /* assoc_req_ies, bss_param, sta_flags */
+
+ sbuf_printf(&s, " rxrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n",
+ sinfo.rxrate.flags, CFG80211_RATE_INFO_FLAGS_BITS,
+ sinfo.rxrate.bw, lkpi_rate_info_bw_to_str(sinfo.rxrate.bw),
+ sinfo.rxrate.legacy * 100,
+ sinfo.rxrate.mcs, sinfo.rxrate.nss);
+ sbuf_printf(&s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n",
+ sinfo.rxrate.he_dcm, sinfo.rxrate.he_gi, sinfo.rxrate.he_ru_alloc,
+ sinfo.rxrate.eht_gi);
+ sbuf_printf(&s, " txrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n",
+ sinfo.txrate.flags, CFG80211_RATE_INFO_FLAGS_BITS,
+ sinfo.txrate.bw, lkpi_rate_info_bw_to_str(sinfo.txrate.bw),
+ sinfo.txrate.legacy * 100,
+ sinfo.txrate.mcs, sinfo.txrate.nss);
+ sbuf_printf(&s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n",
+ sinfo.txrate.he_dcm, sinfo.txrate.he_gi, sinfo.txrate.he_ru_alloc,
+ sinfo.txrate.eht_gi);
+ }
+ wiphy_unlock(hw->wiphy);
+
+ sbuf_finish(&s);
+ sbuf_delete(&s);
+
+ return (0);
+}
+
+static enum ieee80211_sta_rx_bw
+lkpi_cw_to_rx_bw(enum nl80211_chan_width cw)
+{
+ switch (cw) {
+ case NL80211_CHAN_WIDTH_320:
+ return (IEEE80211_STA_RX_BW_320);
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ return (IEEE80211_STA_RX_BW_160);
+ case NL80211_CHAN_WIDTH_80:
+ return (IEEE80211_STA_RX_BW_80);
+ case NL80211_CHAN_WIDTH_40:
+ return (IEEE80211_STA_RX_BW_40);
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return (IEEE80211_STA_RX_BW_20);
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ /* Unsupported input. */
+ return (IEEE80211_STA_RX_BW_20);
+ }
+}
+
+static enum nl80211_chan_width
+lkpi_rx_bw_to_cw(enum ieee80211_sta_rx_bw rx_bw)
+{
+ switch (rx_bw) {
+ case IEEE80211_STA_RX_BW_20:
+ return (NL80211_CHAN_WIDTH_20); /* _NOHT */
+ case IEEE80211_STA_RX_BW_40:
+ return (NL80211_CHAN_WIDTH_40);
+ case IEEE80211_STA_RX_BW_80:
+ return (NL80211_CHAN_WIDTH_80);
+ case IEEE80211_STA_RX_BW_160:
+ return (NL80211_CHAN_WIDTH_160); /* 80P80 */
+ case IEEE80211_STA_RX_BW_320:
+ return (NL80211_CHAN_WIDTH_320);
+ }
+}
+
+static void
+lkpi_sync_chanctx_cw_from_rx_bw(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum ieee80211_sta_rx_bw old_bw;
+ uint32_t changed;
+
+ chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf,
+ lockdep_is_held(&hw->wiphy->mtx));
+ if (chanctx_conf == NULL)
+ return;
+
+ old_bw = lkpi_cw_to_rx_bw(chanctx_conf->def.width);
+ if (old_bw == sta->deflink.bandwidth)
+ return;
+
+ chanctx_conf->def.width = lkpi_rx_bw_to_cw(sta->deflink.bandwidth);
+ if (chanctx_conf->def.width == NL80211_CHAN_WIDTH_20 &&
+ !sta->deflink.ht_cap.ht_supported)
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_20_NOHT;
+
+ chanctx_conf->min_def = chanctx_conf->def;
+
+ vif->bss_conf.chanreq.oper.width = chanctx_conf->def.width;
+
+ changed = IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
+ changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
+ lkpi_80211_mo_change_chanctx(hw, chanctx_conf, changed);
+}
+
+#if defined(LKPI_80211_HT)
+static void
+lkpi_sta_sync_ht_from_ni(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap;
+ uint8_t *ie;
+ struct ieee80211_ht_cap *htcap;
+ int i, rx_nss;
+
+ if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
+ sta->deflink.ht_cap.ht_supported = false;
+ return;
+ }
+
+ sta->deflink.ht_cap.ht_supported = true;
+
+ /* htcap->ampdu_params_info */
+ vap = ni->ni_vap;
+ sta->deflink.ht_cap.ampdu_density = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
+ if (sta->deflink.ht_cap.ampdu_density > vap->iv_ampdu_density)
+ sta->deflink.ht_cap.ampdu_density = vap->iv_ampdu_density;
+ sta->deflink.ht_cap.ampdu_factor = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU);
+ if (sta->deflink.ht_cap.ampdu_factor > vap->iv_ampdu_rxmax)
+ sta->deflink.ht_cap.ampdu_factor = vap->iv_ampdu_rxmax;
+
+ ie = ni->ni_ies.htcap_ie;
+ KASSERT(ie != NULL, ("%s: HT but no htcap_ie on ni %p\n", __func__, ni));
+ if (ie[0] == IEEE80211_ELEMID_VENDOR)
+ ie += 4;
+ ie += 2;
+ htcap = (struct ieee80211_ht_cap *)ie;
+ sta->deflink.ht_cap.cap = htcap->cap_info;
+ sta->deflink.ht_cap.mcs = htcap->mcs;
+
+ /*
+ * 802.11n-2009 20.6 Parameters for HT MCSs gives the mandatory/
+ * optional MCS for Nss=1..4. We need to check the first four
+ * MCS sets from the Rx MCS Bitmask; then there is MCS 32 and
+ * MCS33.. is UEQM.
+ */
+ rx_nss = 0;
+ for (i = 0; i < 4; i++) {
+ if (htcap->mcs.rx_mask[i] != 0)
+ rx_nss++;
+ }
+ if (rx_nss > 0) {
+ sta->deflink.rx_nss = rx_nss;
+ } else {
+ sta->deflink.ht_cap.ht_supported = false;
+ return;
+ }
+
+ if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) != 0 &&
+ IEEE80211_IS_CHAN_HT40(ni->ni_chan))
+ sta->deflink.bandwidth = IEEE80211_STA_RX_BW_40;
+ else
+ sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20;
+
+ IMPROVE("sta->wme");
+
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU)
+ sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935;
+ else
+ sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
+ sta->deflink.agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+#ifdef __handled_by_driver__ /* iwlwifi only? actually unused? */
+ for (i = 0; i < nitems(sta->deflink.agg.max_tid_amsdu_len); i++) {
+ sta->deflink.agg.max_tid_amsdu_len[i] = ;
+ }
+#endif
+}
+#endif
+
+#if defined(LKPI_80211_VHT)
+static void
+lkpi_sta_sync_vht_from_ni(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_node *ni)
+{
+ enum ieee80211_sta_rx_bw bw;
+ uint32_t width;
+ int rx_nss;
+ uint16_t rx_mcs_map;
+ uint8_t mcs;
+
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 ||
+ !IEEE80211_IS_CHAN_VHT_5GHZ(ni->ni_chan)) {
+ sta->deflink.vht_cap.vht_supported = false;
+ return;
+ }
+
+ sta->deflink.vht_cap.vht_supported = true;
+
+ sta->deflink.vht_cap.cap = ni->ni_vhtcap;
+ sta->deflink.vht_cap.vht_mcs = ni->ni_vht_mcsinfo;
+
+ /*
+ * If VHT20/40 are selected do not update the bandwidth
+ * from HT but stay on VHT.
+ */
+ if (ni->ni_vht_chanwidth == IEEE80211_VHT_CHANWIDTH_USE_HT)
+ goto skip_bw;
+
+ bw = sta->deflink.bandwidth;
+ width = (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK);
+ switch (width) {
+ /* Deprecated. */
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ default:
+ /* Check if we do support 160 MHz somehow after all. */
+ if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) != 0)
+ bw = IEEE80211_STA_RX_BW_160;
+ else
+ bw = IEEE80211_STA_RX_BW_80;
+ }
+ /*
+ * While we can set what is possibly supported we also need to be
+ * on a channel which supports that bandwidth; e.g., we can support
+ * VHT160 but the AP only does VHT80.
+ * Further ni_chan will also have filtered out what we disabled
+ * by configuration.
+ * Once net80211 channel selection is fixed for 802.11-2020 and
+ * VHT160 we can possibly spare ourselves the above.
+ */
+ if (bw == IEEE80211_STA_RX_BW_160 &&
+ !IEEE80211_IS_CHAN_VHT160(ni->ni_chan) &&
+ !IEEE80211_IS_CHAN_VHT80P80(ni->ni_chan))
+ bw = IEEE80211_STA_RX_BW_80;
+ if (bw == IEEE80211_STA_RX_BW_80 &&
+ !IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
+ bw = sta->deflink.bandwidth;
+ sta->deflink.bandwidth = bw;
+skip_bw:
+
+ rx_nss = 0;
+ rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ for (int i = 7; i >= 0; i--) {
+ mcs = rx_mcs_map >> (2 * i);
+ mcs &= 0x3;
+ if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_nss = i + 1;
+ break;
+ }
+ }
+ if (rx_nss > 0)
+ sta->deflink.rx_nss = rx_nss;
+
+ switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+ default:
+ sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ break;
+ }
+}
+#endif
+
+static void
+lkpi_sta_sync_from_ni(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct ieee80211_node *ni, bool updchnctx)
+{
+
+#if defined(LKPI_80211_HT)
+ lkpi_sta_sync_ht_from_ni(vif, sta, ni);
+#endif
+#if defined(LKPI_80211_VHT)
+ lkpi_sta_sync_vht_from_ni(vif, sta, ni);
+#endif
+
+ /*
+ * Ensure rx_nss is at least 1 as otherwise drivers run into
+ * unexpected problems.
+ */
+ sta->deflink.rx_nss = MAX(1, sta->deflink.rx_nss);
+
+ /*
+ * We are also called from node allocation which net80211
+ * can do even on `ifconfig down`; in that case the chanctx
+ * may still be valid and we get a discrepancy between
+ * sta and chanctx. Thus do not try to update the chanctx
+ * when called from lkpi_lsta_alloc().
+ */
+ if (updchnctx)
+ lkpi_sync_chanctx_cw_from_rx_bw(hw, vif, sta);
+}
+
+static uint8_t
+lkpi_get_max_rx_chains(struct ieee80211_node *ni)
+{
+ uint8_t chains;
+#if defined(LKPI_80211_HT) || defined(LKPI_80211_VHT)
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+#endif
+
+ chains = 1;
+#if defined(LKPI_80211_HT)
+ IMPROVE("We should factor counting MCS/NSS out for sync and here");
+ if (sta->deflink.ht_cap.ht_supported)
+ chains = MAX(chains, sta->deflink.rx_nss);
+#endif
+
+#if defined(LKPI_80211_VHT)
+ if (sta->deflink.vht_cap.vht_supported)
+ chains = MAX(chains, sta->deflink.rx_nss);
+#endif
+
+ return (chains);
+}
+
+static void
+lkpi_lsta_dump(struct lkpi_sta *lsta, struct ieee80211_node *ni,
+ const char *_f, int _l)
+{
+
+#ifdef LINUXKPI_DEBUG_80211
+ if ((linuxkpi_debug_80211 & D80211_TRACE_STA) == 0)
+ return;
+ if (lsta == NULL)
+ return;
+
+ printf("%s:%d lsta %p ni %p sta %p\n",
+ _f, _l, lsta, ni, &lsta->sta);
+ if (ni != NULL)
+ ieee80211_dump_node(NULL, ni);
+ printf("\ttxq_task txq len %d mtx\n", mbufq_len(&lsta->txq));
+ printf("\tkc %p state %d added_to_drv %d in_mgd %d\n",
+ &lsta->kc[0], lsta->state, lsta->added_to_drv, lsta->in_mgd);
+#endif
+}
+
+static void
+lkpi_lsta_remove(struct lkpi_sta *lsta, struct lkpi_vif *lvif)
+{
+
+ lockdep_assert_wiphy(lsta->hw->wiphy);
+
+ KASSERT(!list_empty(&lsta->lsta_list),
+ ("%s: lsta %p ni %p\n", __func__, lsta, lsta->ni));
+ list_del_init(&lsta->lsta_list);
+}
+
+static struct lkpi_sta *
+lkpi_lsta_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN],
+ struct ieee80211_hw *hw, struct ieee80211_node *ni)
+{
+ struct lkpi_sta *lsta;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ int band, i, tid;
+
+ lsta = malloc(sizeof(*lsta) + hw->sta_data_size, M_LKPI80211,
+ M_NOWAIT | M_ZERO);
+ if (lsta == NULL)
+ return (NULL);
+
+ lsta->hw = hw;
+ lsta->added_to_drv = false;
+ lsta->state = IEEE80211_STA_NOTEXIST;
+ /*
+ * Link the ni to the lsta here without taking a reference.
+ * For one we would have to take the reference in node_init()
+ * as ieee80211_alloc_node() will initialise the refcount after us.
+ * For the other a ni and an lsta are 1:1 mapped and always together
+ * from [ic_]node_alloc() to [ic_]node_free() so we are essentially
+ * using the ni references for the lsta as well despite it being
+ * two separate allocations.
+ */
+ lsta->ni = ni;
+ /* The "ni_drv_data" back-pointer in the net80211 node lets us get the lsta. */
+ ni->ni_drv_data = lsta;
+
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ sta = LSTA_TO_STA(lsta);
+
+ IEEE80211_ADDR_COPY(sta->addr, mac);
+
+ /* TXQ */
+ for (tid = 0; tid < nitems(sta->txq); tid++) {
+ struct lkpi_txq *ltxq;
+
+ /* We are not limiting ourselves to hw.queues here. */
+ ltxq = malloc(sizeof(*ltxq) + hw->txq_data_size,
+ M_LKPI80211, M_NOWAIT | M_ZERO);
+ if (ltxq == NULL)
+ goto cleanup;
+ /* iwlwifi/mvm/sta.c::tid_to_mac80211_ac[] */
+ if (tid == IEEE80211_NUM_TIDS) {
+ if (!ieee80211_hw_check(hw, STA_MMPDU_TXQ)) {
+ free(ltxq, M_LKPI80211);
+ continue;
+ }
+ IMPROVE("AP/if we support non-STA here too");
+ ltxq->txq.ac = IEEE80211_AC_VO;
+ } else {
+ ltxq->txq.ac = ieee80211e_up_to_ac[tid & 7];
+ }
+ ltxq->seen_dequeue = false;
+ ltxq->stopped = false;
+ ltxq->txq.vif = vif;
+ ltxq->txq.tid = tid;
+ ltxq->txq.sta = sta;
+ TAILQ_ELEM_INIT(ltxq, txq_entry);
+ skb_queue_head_init(&ltxq->skbq);
+ LKPI_80211_LTXQ_LOCK_INIT(ltxq);
+ sta->txq[tid] = &ltxq->txq;
+ }
+
+ /* Deflink information. */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *supband;
+
+ supband = hw->wiphy->bands[band];
+ if (supband == NULL)
+ continue;
+
+ for (i = 0; i < supband->n_bitrates; i++) {
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ switch (supband->bitrates[i].bitrate) {
+ case 240: /* 11g only */
+ case 120: /* 11g only */
+ case 110:
+ case 60: /* 11g only */
+ case 55:
+ case 20:
+ case 10:
+ sta->deflink.supp_rates[band] |= BIT(i);
+ break;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ switch (supband->bitrates[i].bitrate) {
+ case 240:
+ case 120:
+ case 60:
+ sta->deflink.supp_rates[band] |= BIT(i);
+ break;
+ }
+ break;
+ }
+ }
+ }
+
+ sta->deflink.smps_mode = IEEE80211_SMPS_OFF;
+ sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20;
+ sta->deflink.rx_nss = 1;
+
+ lkpi_sta_sync_from_ni(hw, vif, sta, ni, false);
+
+ IMPROVE("he, eht, bw_320, ... smps_mode, ..");
+
+ /* Link configuration. */
+ IEEE80211_ADDR_COPY(sta->deflink.addr, sta->addr);
+ sta->link[0] = &sta->deflink;
+ for (i = 1; i < nitems(sta->link); i++) {
+ IMPROVE("more links; only link[0] = deflink currently.");
+ }
+ IMPROVE("11be");
+ sta->mlo = false;
+
+ /* Deferred TX path. */
+ LKPI_80211_LSTA_TXQ_LOCK_INIT(lsta);
+ TASK_INIT(&lsta->txq_task, 0, lkpi_80211_txq_task, lsta);
+ mbufq_init(&lsta->txq, 32 * NAPI_POLL_WEIGHT);
+ lsta->txq_ready = true;
+
+ return (lsta);
+
+cleanup:
+ for (; tid >= 0; tid--) {
+ struct lkpi_txq *ltxq;
+
+ ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
+ LKPI_80211_LTXQ_LOCK_DESTROY(ltxq);
+ free(sta->txq[tid], M_LKPI80211);
+ }
+ free(lsta, M_LKPI80211);
+ return (NULL);
+}
+
+static void
+lkpi_lsta_free(struct lkpi_sta *lsta, struct ieee80211_node *ni)
+{
+ struct mbuf *m;
+
+ if (lsta->added_to_drv)
+ panic("%s: Trying to free an lsta still known to firmware: "
+ "lsta %p ni %p added_to_drv %d\n",
+ __func__, lsta, ni, lsta->added_to_drv);
+
+ /* XXX-BZ free resources, ... */
+ IMPROVE();
+
+ /* Drain sta->txq[] */
+
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ lsta->txq_ready = false;
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+
+ /* Drain taskq, won't be restarted until added_to_drv is set again. */
+ while (taskqueue_cancel(taskqueue_thread, &lsta->txq_task, NULL) != 0)
+ taskqueue_drain(taskqueue_thread, &lsta->txq_task);
+
+ /* Flush mbufq (make sure to release ni refs!). */
+ m = mbufq_dequeue(&lsta->txq);
+ while (m != NULL) {
+ struct ieee80211_node *nim;
+
+ nim = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (nim != NULL)
+ ieee80211_free_node(nim);
+ m_freem(m);
+ m = mbufq_dequeue(&lsta->txq);
+ }
+ KASSERT(mbufq_empty(&lsta->txq), ("%s: lsta %p has txq len %d != 0\n",
+ __func__, lsta, mbufq_len(&lsta->txq)));
+ LKPI_80211_LSTA_TXQ_LOCK_DESTROY(lsta);
+
+ /* Remove lsta from vif; that is done by the state machine. Should assert it? */
+
+ IMPROVE("Make sure everything is cleaned up.");
+
+ /* Free lsta. */
+ lsta->ni = NULL;
+ ni->ni_drv_data = NULL;
+ free(lsta, M_LKPI80211);
+}
+
+
+static enum nl80211_band
+lkpi_net80211_chan_to_nl80211_band(struct ieee80211_channel *c)
+{
+
+ if (IEEE80211_IS_CHAN_2GHZ(c))
+ return (NL80211_BAND_2GHZ);
+ else if (IEEE80211_IS_CHAN_5GHZ(c))
+ return (NL80211_BAND_5GHZ);
+#ifdef __notyet__
+ else if ()
+ return (NL80211_BAND_6GHZ);
+ else if ()
+ return (NL80211_BAND_60GHZ);
+ else if (IEEE80211_IS_CHAN_GSM(c))
+ return (NL80211_BAND_XXX);
+#endif
+ else
+ panic("%s: unsupported band. c %p flags %#x\n",
+ __func__, c, c->ic_flags);
+}
+
+static uint32_t
+lkpi_nl80211_band_to_net80211_band(enum nl80211_band band)
+{
+
+ /* XXX-BZ this is just silly; net80211 is too convoluted. */
+ /* IEEE80211_CHAN_A / _G / .. doesn't really work either. */
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return (IEEE80211_CHAN_2GHZ);
+ break;
+ case NL80211_BAND_5GHZ:
+ return (IEEE80211_CHAN_5GHZ);
+ break;
+ case NL80211_BAND_60GHZ:
+ break;
+ case NL80211_BAND_6GHZ:
+ break;
+ default:
+ panic("%s: unsupported band %u\n", __func__, band);
+ break;
+ }
+
+ IMPROVE();
+ return (0x00);
+}
+
+#if 0
+static enum ieee80211_ac_numbers
+lkpi_ac_net_to_l80211(int ac)
+{
+
+ switch (ac) {
+ case WME_AC_VO:
+ return (IEEE80211_AC_VO);
+ case WME_AC_VI:
+ return (IEEE80211_AC_VI);
+ case WME_AC_BE:
+ return (IEEE80211_AC_BE);
+ case WME_AC_BK:
+ return (IEEE80211_AC_BK);
+ default:
+ printf("%s: invalid WME_AC_* input: ac = %d\n", __func__, ac);
+ return (IEEE80211_AC_BE);
+ }
+}
+#endif
+
+static enum nl80211_iftype
+lkpi_opmode_to_vif_type(enum ieee80211_opmode opmode)
+{
+
+ switch (opmode) {
+ case IEEE80211_M_IBSS:
+ return (NL80211_IFTYPE_ADHOC);
+ break;
+ case IEEE80211_M_STA:
+ return (NL80211_IFTYPE_STATION);
+ break;
+ case IEEE80211_M_WDS:
+ return (NL80211_IFTYPE_WDS);
+ break;
+ case IEEE80211_M_HOSTAP:
+ return (NL80211_IFTYPE_AP);
+ break;
+ case IEEE80211_M_MONITOR:
+ return (NL80211_IFTYPE_MONITOR);
+ break;
+ case IEEE80211_M_MBSS:
+ return (NL80211_IFTYPE_MESH_POINT);
+ break;
+ case IEEE80211_M_AHDEMO:
+ /* FALLTHROUGH */
+ default:
+ printf("ERROR: %s: unsupported opmode %d\n", __func__, opmode);
+ /* FALLTHROUGH */
+ }
+ return (NL80211_IFTYPE_UNSPECIFIED);
+}
+
+#ifdef LKPI_80211_HW_CRYPTO
+static const char *
+lkpi_cipher_suite_to_name(uint32_t wlan_cipher_suite)
+{
+ switch (wlan_cipher_suite) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return ("WEP40");
+ case WLAN_CIPHER_SUITE_WEP104:
+ return ("WEP104");
+ case WLAN_CIPHER_SUITE_TKIP:
+ return ("TKIP");
+ case WLAN_CIPHER_SUITE_CCMP:
+ return ("CCMP");
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return ("CCMP_256");
+ case WLAN_CIPHER_SUITE_GCMP:
+ return ("GCMP");
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return ("GCMP_256");
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return ("AES_CMAC");
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ return ("BIP_CMAC_256");
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ return ("BIP_GMAC_128");
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ return ("BIP_GMAC_256");
+ default:
+ return ("??");
+ }
+}
+
+static uint32_t
+lkpi_l80211_to_net80211_cyphers(struct ieee80211com *ic,
+ uint32_t wlan_cipher_suite)
+{
+ switch (wlan_cipher_suite) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return (IEEE80211_CRYPTO_WEP);
+ case WLAN_CIPHER_SUITE_WEP104:
+ return (IEEE80211_CRYPTO_WEP);
+ case WLAN_CIPHER_SUITE_TKIP:
+ return (IEEE80211_CRYPTO_TKIP);
+ case WLAN_CIPHER_SUITE_CCMP:
+ return (IEEE80211_CRYPTO_AES_CCM);
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return (IEEE80211_CRYPTO_AES_CCM_256);
+ case WLAN_CIPHER_SUITE_GCMP:
+ return (IEEE80211_CRYPTO_AES_GCM_128);
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return (IEEE80211_CRYPTO_AES_GCM_256);
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return (IEEE80211_CRYPTO_BIP_CMAC_128);
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ return (IEEE80211_CRYPTO_BIP_CMAC_256);
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ return (IEEE80211_CRYPTO_BIP_GMAC_128);
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ return (IEEE80211_CRYPTO_BIP_GMAC_256);
+ default:
+ ic_printf(ic, "%s: unknown WLAN Cipher Suite %#08x | %u (%s)\n",
+ __func__,
+ wlan_cipher_suite >> 8, wlan_cipher_suite & 0xff,
+ lkpi_cipher_suite_to_name(wlan_cipher_suite));
+ return (0);
+ }
+}
+
+static uint32_t
+lkpi_net80211_to_l80211_cipher_suite(uint32_t cipher, uint8_t keylen)
+{
+
+ switch (cipher) {
+ case IEEE80211_CIPHER_WEP:
+ if (keylen == (40/NBBY))
+ return (WLAN_CIPHER_SUITE_WEP40);
+ else if (keylen == (104/NBBY))
+ return (WLAN_CIPHER_SUITE_WEP104);
+ else {
+ printf("%s: WEP with unsupported keylen %d\n",
+ __func__, keylen * NBBY);
+ return (0);
+ }
+ break;
+ case IEEE80211_CIPHER_TKIP:
+ return (WLAN_CIPHER_SUITE_TKIP);
+ case IEEE80211_CIPHER_AES_CCM:
+ return (WLAN_CIPHER_SUITE_CCMP);
+ case IEEE80211_CIPHER_AES_CCM_256:
+ return (WLAN_CIPHER_SUITE_CCMP_256);
+ case IEEE80211_CIPHER_AES_GCM_128:
+ return (WLAN_CIPHER_SUITE_GCMP);
+ case IEEE80211_CIPHER_AES_GCM_256:
+ return (WLAN_CIPHER_SUITE_GCMP_256);
+ case IEEE80211_CIPHER_BIP_CMAC_128:
+ return (WLAN_CIPHER_SUITE_AES_CMAC);
+ case IEEE80211_CIPHER_BIP_CMAC_256:
+ return (WLAN_CIPHER_SUITE_BIP_CMAC_256);
+ case IEEE80211_CIPHER_BIP_GMAC_128:
+ return (WLAN_CIPHER_SUITE_BIP_GMAC_128);
+ case IEEE80211_CIPHER_BIP_GMAC_256:
+ return (WLAN_CIPHER_SUITE_BIP_GMAC_256);
+
+ case IEEE80211_CIPHER_AES_OCB:
+ case IEEE80211_CIPHER_TKIPMIC:
+ /*
+ * TKIP w/ hw MIC support
+ * (gone wrong; should really be a crypto flag in net80211).
+ */
+ case IEEE80211_CIPHER_CKIP:
+ case IEEE80211_CIPHER_NONE:
+ printf("%s: unsupported cipher %#010x\n", __func__, cipher);
+ break;
+ default:
+ printf("%s: unknown cipher %#010x\n", __func__, cipher);
+ };
+ return (0);
+}
+#endif
+
+#ifdef __notyet__
+static enum ieee80211_sta_state
+lkpi_net80211_state_to_sta_state(enum ieee80211_state state)
+{
+
+ /*
+ * XXX-BZ The net80211 states are "try to ..", the lkpi80211 states are
+ * "done". Also ASSOC/AUTHORIZED are both "RUN" then?
+ */
+ switch (state) {
+ case IEEE80211_S_INIT:
+ return (IEEE80211_STA_NOTEXIST);
+ case IEEE80211_S_SCAN:
+ return (IEEE80211_STA_NONE);
+ case IEEE80211_S_AUTH:
+ return (IEEE80211_STA_AUTH);
+ case IEEE80211_S_ASSOC:
+ return (IEEE80211_STA_ASSOC);
+ case IEEE80211_S_RUN:
+ return (IEEE80211_STA_AUTHORIZED);
+ case IEEE80211_S_CAC:
+ case IEEE80211_S_CSA:
+ case IEEE80211_S_SLEEP:
+ default:
+ UNIMPLEMENTED;
+ };
+
+ return (IEEE80211_STA_NOTEXIST);
+}
+#endif
+
+static struct linuxkpi_ieee80211_channel *
+lkpi_find_lkpi80211_chan(struct lkpi_hw *lhw,
+ struct ieee80211_channel *c)
+{
+ struct ieee80211_hw *hw;
+ struct linuxkpi_ieee80211_channel *channels;
+ enum nl80211_band band;
+ int i, nchans;
+
+ hw = LHW_TO_HW(lhw);
+ band = lkpi_net80211_chan_to_nl80211_band(c);
+ if (hw->wiphy->bands[band] == NULL)
+ return (NULL);
+
+ nchans = hw->wiphy->bands[band]->n_channels;
+ if (nchans <= 0)
+ return (NULL);
+
+ channels = hw->wiphy->bands[band]->channels;
+ for (i = 0; i < nchans; i++) {
+ if (channels[i].hw_value == c->ic_ieee)
+ return (&channels[i]);
+ }
+
+ return (NULL);
+}
+
+#if 0
+static struct linuxkpi_ieee80211_channel *
+lkpi_get_lkpi80211_chan(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+ struct linuxkpi_ieee80211_channel *chan;
+ struct ieee80211_channel *c;
+ struct lkpi_hw *lhw;
+
+ chan = NULL;
+ if (ni != NULL && ni->ni_chan != IEEE80211_CHAN_ANYC)
+ c = ni->ni_chan;
+ else if (ic->ic_bsschan != IEEE80211_CHAN_ANYC)
+ c = ic->ic_bsschan;
+ else if (ic->ic_curchan != IEEE80211_CHAN_ANYC)
+ c = ic->ic_curchan;
+ else
+ c = NULL;
+
+ if (c != NULL && c != IEEE80211_CHAN_ANYC) {
+ lhw = ic->ic_softc;
+ chan = lkpi_find_lkpi80211_chan(lhw, c);
+ }
+
+ return (chan);
+}
+#endif
+
+struct linuxkpi_ieee80211_channel *
+linuxkpi_ieee80211_get_channel(struct wiphy *wiphy, uint32_t freq)
+{
+ enum nl80211_band band;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *supband;
+ struct linuxkpi_ieee80211_channel *channels;
+ int i;
+
+ supband = wiphy->bands[band];
+ if (supband == NULL || supband->n_channels == 0)
+ continue;
+
+ channels = supband->channels;
+ for (i = 0; i < supband->n_channels; i++) {
+ if (channels[i].center_freq == freq)
+ return (&channels[i]);
+ }
+ }
+
+ return (NULL);
+}
+
+#ifdef LKPI_80211_HW_CRYPTO
+static int
+lkpi_sta_del_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct lkpi_sta *lsta)
+{
+ int error;
+
+ if (!lkpi_hwcrypto)
+ return (0);
+
+ lockdep_assert_wiphy(hw->wiphy);
+ ieee80211_ref_node(lsta->ni);
+
+ error = 0;
+ for (ieee80211_keyix keyix = 0; keyix < nitems(lsta->kc); keyix++) {
+ struct ieee80211_key_conf *kc;
+ int err;
+
+ if (lsta->kc[keyix] == NULL)
+ continue;
+ kc = lsta->kc[keyix];
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(lsta->ni->ni_ic, "%d %lu %s: running set_key cmd %d(%s) for "
+ "sta %6D: keyidx %u hw_key_idx %u flags %b\n",
+ curthread->td_tid, jiffies, __func__,
+ DISABLE_KEY, "DISABLE", lsta->sta.addr, ":",
+ kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS);
+#endif
+
+ err = lkpi_80211_mo_set_key(hw, DISABLE_KEY, vif,
+ LSTA_TO_STA(lsta), kc);
+ if (err != 0) {
+ ic_printf(lsta->ni->ni_ic, "%d %lu %s: set_key cmd %d(%s) for "
+ "sta %6D failed: %d\n", curthread->td_tid, jiffies, __func__,
+ DISABLE_KEY, "DISABLE", lsta->sta.addr, ":", err);
+ error++;
+
+ /*
+ * If we free the key here we will never be able to get it
+ * removed from the driver/fw which will likely make us
+ * crash (firmware).
+ */
+ continue;
+ }
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(lsta->ni->ni_ic, "%d %lu %s: set_key cmd %d(%s) for "
+ "sta %6D succeeded: keyidx %u hw_key_idx %u flags %b\n",
+ curthread->td_tid, jiffies, __func__,
+ DISABLE_KEY, "DISABLE", lsta->sta.addr, ":",
+ kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS);
+#endif
+
+ lsta->kc[keyix] = NULL;
+ free(kc, M_LKPI80211);
+ }
+ ieee80211_free_node(lsta->ni);
+ return (error);
+}
+
+/* XXX-BZ one day we should replace this by iterating over VIFs, or the node list? */
+/* See also lkpi_sta_del_keys() these days. */
+static int
+lkpi_iv_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_node *ni;
+ struct ieee80211_key_conf *kc;
+ int error;
+
+ ic = vap->iv_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+
+ /*
+ * Make sure we do not make it here without going through
+ * lkpi_iv_key_update_begin() first.
+ */
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (IEEE80211_KEY_UNDEFINED(k)) {
+ ic_printf(ic, "%s: vap %p key %p is undefined: %p %u\n",
+ __func__, vap, k, k->wk_cipher, k->wk_keyix);
+ return (0);
+ }
+
+ if (vap->iv_bss == NULL) {
+ ic_printf(ic, "%s: iv_bss %p for vap %p is NULL\n",
+ __func__, vap->iv_bss, vap);
+ return (0);
+ }
+
+ ni = ieee80211_ref_node(vap->iv_bss);
+ lsta = ni->ni_drv_data;
+ if (lsta == NULL) {
+ ic_printf(ic, "%s: ni %p (%6D) with lsta NULL\n",
+ __func__, ni, ni->ni_bssid, ":");
+ ieee80211_free_node(ni);
+ return (0);
+ }
+ sta = LSTA_TO_STA(lsta);
+
+ if (lsta->kc[k->wk_keyix] == NULL) {
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: sta %6D and no key information, "
+ "keyidx %u wk_macaddr %6D; returning success\n",
+ curthread->td_tid, jiffies, __func__, sta->addr, ":",
+ k->wk_keyix, k->wk_macaddr, ":");
+#endif
+ ieee80211_free_node(ni);
+ return (1);
+ }
+ kc = lsta->kc[k->wk_keyix];
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: running set_key cmd %d(%s) for sta %6D: "
+ "keyidx %u hw_key_idx %u flags %b\n",
+ curthread->td_tid, jiffies, __func__,
+ DISABLE_KEY, "DISABLE", sta->addr, ":",
+ kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS);
+#endif
+
+ vif = LVIF_TO_VIF(lvif);
+ error = lkpi_80211_mo_set_key(hw, DISABLE_KEY, vif, sta, kc);
+ if (error != 0) {
+ ic_printf(ic, "%d %lu %s: set_key cmd %d(%s) for sta %6D failed: %d\n",
+ curthread->td_tid, jiffies, __func__,
+ DISABLE_KEY, "DISABLE", sta->addr, ":", error);
+ error = 0;
+ goto out;
+ }
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: set_key cmd %d(%s) for sta %6D succeeded: "
+ "keyidx %u hw_key_idx %u flags %b\n",
+ curthread->td_tid, jiffies, __func__,
+ DISABLE_KEY, "DISABLE", sta->addr, ":",
+ kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS);
+#endif
+ lsta->kc[k->wk_keyix] = NULL;
+ free(kc, M_LKPI80211);
+ error = 1;
+out:
+ ieee80211_free_node(ni);
+ return (error);
+}
+
+static int
+lkpi_iv_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_node *ni;
+ struct ieee80211_key_conf *kc;
+ uint32_t lcipher;
+ uint16_t exp_flags;
+ uint8_t keylen;
+ int error;
+
+ ic = vap->iv_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ /*
+ * Make sure we do not make it here without going through
+ * lkpi_iv_key_update_begin() first.
+ */
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (IEEE80211_KEY_UNDEFINED(k)) {
+ ic_printf(ic, "%s: vap %p key %p is undefined: %p %u\n",
+ __func__, vap, k, k->wk_cipher, k->wk_keyix);
+ return (0);
+ }
+
+ if (vap->iv_bss == NULL) {
+ ic_printf(ic, "%s: iv_bss %p for vap %p is NULL\n",
+ __func__, vap->iv_bss, vap);
+ return (0);
+ }
+ ni = ieee80211_ref_node(vap->iv_bss);
+ lsta = ni->ni_drv_data;
+ if (lsta == NULL) {
+ ic_printf(ic, "%s: ni %p (%6D) with lsta NULL\n",
+ __func__, ni, ni->ni_bssid, ":");
+ ieee80211_free_node(ni);
+ return (0);
+ }
+ sta = LSTA_TO_STA(lsta);
+
+ keylen = k->wk_keylen;
+ lcipher = lkpi_net80211_to_l80211_cipher_suite(
+ k->wk_cipher->ic_cipher, k->wk_keylen);
+ switch (lcipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ keylen += 2 * k->wk_cipher->ic_miclen;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ break;
+ default:
+ ic_printf(ic, "%s: CIPHER SUITE %#x (%s) not supported\n",
+ __func__, lcipher, lkpi_cipher_suite_to_name(lcipher));
+ IMPROVE();
+ ieee80211_free_node(ni);
+ return (0);
+ }
+
+ if (lsta->kc[k->wk_keyix] != NULL) {
+ IMPROVE("Still in firmware? Del first. Can we assert this cannot happen?");
+ ic_printf(ic, "%s: sta %6D found with key information\n",
+ __func__, sta->addr, ":");
+ kc = lsta->kc[k->wk_keyix];
+ lsta->kc[k->wk_keyix] = NULL;
+ free(kc, M_LKPI80211);
+ kc = NULL; /* safeguard */
+ }
+
+ kc = malloc(sizeof(*kc) + keylen, M_LKPI80211, M_WAITOK | M_ZERO);
+ kc->_k = k; /* Save the pointer to net80211. */
+ kc->cipher = lcipher;
+ kc->keyidx = k->wk_keyix;
+#if 0
+ kc->hw_key_idx = /* set by hw and needs to be passed for TX */;
+#endif
+ atomic64_set(&kc->tx_pn, k->wk_keytsc);
+ kc->keylen = k->wk_keylen;
+ memcpy(kc->key, k->wk_key, k->wk_keylen);
+
+ if (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))
+ kc->flags |= IEEE80211_KEY_FLAG_PAIRWISE;
+ if (k->wk_flags & IEEE80211_KEY_GROUP)
+ kc->flags &= ~IEEE80211_KEY_FLAG_PAIRWISE;
+
+ kc->iv_len = k->wk_cipher->ic_header;
+ kc->icv_len = k->wk_cipher->ic_trailer;
+
+ switch (kc->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ memcpy(kc->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, k->wk_txmic, k->wk_cipher->ic_miclen);
+ memcpy(kc->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, k->wk_rxmic, k->wk_cipher->ic_miclen);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ break;
+ default:
+ /* currently UNREACH */
+ IMPROVE();
+ break;
+ };
+ lsta->kc[k->wk_keyix] = kc;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: running set_key cmd %d(%s) for sta %6D: "
+ "kc %p keyidx %u hw_key_idx %u keylen %u flags %b\n",
+ curthread->td_tid, jiffies, __func__,
+ SET_KEY, "SET", sta->addr, ":", kc, kc->keyidx, kc->hw_key_idx,
+ kc->keylen, kc->flags, IEEE80211_KEY_FLAG_BITS);
+#endif
+
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ error = lkpi_80211_mo_set_key(hw, SET_KEY, vif, sta, kc);
+ if (error != 0) {
+ ic_printf(ic, "%d %lu %s: set_key cmd %d(%s) for sta %6D failed: %d\n",
+ curthread->td_tid, jiffies, __func__,
+ SET_KEY, "SET", sta->addr, ":", error);
+ lsta->kc[k->wk_keyix] = NULL;
+ free(kc, M_LKPI80211);
+ ieee80211_free_node(ni);
+ return (0);
+ }
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: set_key cmd %d(%s) for sta %6D succeeded: "
+ "kc %p keyidx %u hw_key_idx %u flags %b\n",
+ curthread->td_tid, jiffies, __func__,
+ SET_KEY, "SET", sta->addr, ":",
+ kc, kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS);
+#endif
+
+ exp_flags = 0;
+ switch (kc->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ exp_flags = (IEEE80211_KEY_FLAG_PAIRWISE |
+ IEEE80211_KEY_FLAG_PUT_IV_SPACE |
+ IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
+#define TKIP_INVAL_COMBINATION \
+ (IEEE80211_KEY_FLAG_PUT_MIC_SPACE|IEEE80211_KEY_FLAG_GENERATE_MMIC)
+ if ((kc->flags & TKIP_INVAL_COMBINATION) == TKIP_INVAL_COMBINATION) {
+ ic_printf(ic, "%s: SET_KEY for %s returned invalid "
+ "combination %b\n", __func__,
+ lkpi_cipher_suite_to_name(kc->cipher),
+ kc->flags, IEEE80211_KEY_FLAG_BITS);
+ }
+#undef TKIP_INVAL_COMBINATION
+#ifdef __notyet__
+ /* Do flags surgery; see the special handling in linuxkpi_ieee80211_ifattach(). */
+ if ((kc->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) != 0) {
+ k->wk_flags &= ~(IEEE80211_KEY_NOMICMGT|IEEE80211_KEY_NOMIC);
+ k->wk_flags |= IEEE80211_KEY_SWMIC;
+ ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
+ }
+#endif
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ exp_flags = (IEEE80211_KEY_FLAG_PAIRWISE |
+ IEEE80211_KEY_FLAG_PUT_IV_SPACE |
+ IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_GENERATE_IV_MGMT | /* Only needs IV generation for MGMT frames. */
+ IEEE80211_KEY_FLAG_SW_MGMT_TX); /* MFP in software */
+ break;
+ }
+ if ((kc->flags & ~exp_flags) != 0)
+ ic_printf(ic, "%s: SET_KEY for %s returned unexpected key flags: "
+ " %#06x & ~%#06x = %b\n", __func__,
+ lkpi_cipher_suite_to_name(kc->cipher), kc->flags, exp_flags,
+ (kc->flags & ~exp_flags), IEEE80211_KEY_FLAG_BITS);
+
+#ifdef __notyet__
+ /* Do flags surgery. */
+ if ((kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) == 0)
+ k->wk_flags |= IEEE80211_KEY_NOIVMGT;
+ if ((kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV) == 0)
+ k->wk_flags |= IEEE80211_KEY_NOIV;
+#endif
+
+ ieee80211_free_node(ni);
+ return (1);
+}
+
+static void
+lkpi_iv_key_update_begin(struct ieee80211vap *vap)
+{
+ struct ieee80211_node_table *nt;
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_node *ni;
+ bool icislocked, ntislocked;
+
+ ic = vap->iv_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ nt = &ic->ic_sta;
+
+ icislocked = IEEE80211_IS_LOCKED(ic);
+ ntislocked = IEEE80211_NODE_IS_LOCKED(nt);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: vap %p ic %p %slocked nt %p %slocked "
+ "lvif ic_unlocked %d nt_unlocked %d\n",
+ curthread->td_tid, jiffies, __func__, vap,
+ ic, icislocked ? "" : "un", nt, ntislocked ? "" : "un",
+ lvif->ic_unlocked, lvif->nt_unlocked);
+#endif
+
+ /*
+ * This is inconsistent net80211 locking to be fixed one day.
+ */
+ /* Try to make sure the node does not go away while possibly unlocked. */
+ ni = NULL;
+ if (icislocked || ntislocked) {
+ if (vap->iv_bss != NULL)
+ ni = ieee80211_ref_node(vap->iv_bss);
+ }
+
+ if (icislocked)
+ IEEE80211_UNLOCK(ic);
+ if (ntislocked)
+ IEEE80211_NODE_UNLOCK(nt);
+
+ wiphy_lock(hw->wiphy);
+
+ KASSERT(lvif->key_update_iv_bss == NULL, ("%s: key_update_iv_bss not NULL %p",
+ __func__, lvif->key_update_iv_bss));
+ lvif->key_update_iv_bss = ni;
+
+ /*
+ * ic/nt_unlocked could be a bool given we are under the lock and there
+ * must only be a single thread.
+ * In case anything in the future disturbs the order, the refcnt will
+ * help us catch problems a lot more easily.
+ */
+ if (icislocked)
+ refcount_acquire(&lvif->ic_unlocked);
+ if (ntislocked)
+ refcount_acquire(&lvif->nt_unlocked);
+
+ /*
+ * Stop the queues while doing key updates.
+ */
+ ieee80211_stop_queues(hw);
+}
+
+static void
+lkpi_iv_key_update_end(struct ieee80211vap *vap)
+{
+ struct ieee80211_node_table *nt;
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ bool icislocked, ntislocked;
+
+ ic = vap->iv_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ nt = &ic->ic_sta;
+
+ /*
+ * Re-enable the queues after the key update.
+ */
+ lkpi_ieee80211_wake_queues_locked(hw);
+
+ icislocked = IEEE80211_IS_LOCKED(ic);
+ MPASS(!icislocked);
+ ntislocked = IEEE80211_NODE_IS_LOCKED(nt);
+ MPASS(!ntislocked);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: vap %p ic %p %slocked nt %p %slocked "
+ "lvif ic_unlocked %d nt_unlocked %d\n",
+ curthread->td_tid, jiffies, __func__, vap,
+ ic, icislocked ? "" : "un", nt, ntislocked ? "" : "un",
+ lvif->ic_unlocked, lvif->nt_unlocked);
+#endif
+
+ /*
+ * Check under lock; see comment in lkpi_iv_key_update_begin().
+ * In case the refcnt gets out of sync, locking in net80211 will
+ * quickly barf as well (trying to unlock a lock not held).
+ */
+ icislocked = refcount_release_if_last(&lvif->ic_unlocked);
+ ntislocked = refcount_release_if_last(&lvif->nt_unlocked);
+
+ if (lvif->key_update_iv_bss != NULL) {
+ ieee80211_free_node(lvif->key_update_iv_bss);
+ lvif->key_update_iv_bss = NULL;
+ }
+
+ wiphy_unlock(hw->wiphy);
+
+ /*
+ * This is inconsistent net80211 locking to be fixed one day.
+ * ic before nt to avoid a LOR.
+ */
+ if (icislocked)
+ IEEE80211_LOCK(ic);
+ if (ntislocked)
+ IEEE80211_NODE_LOCK(nt);
+}
+#endif
+
+static u_int
+lkpi_ic_update_mcast_copy(void *arg, struct sockaddr_dl *sdl, u_int cnt)
+{
+ struct netdev_hw_addr_list *mc_list;
+ struct netdev_hw_addr *addr;
+
+ KASSERT(arg != NULL && sdl != NULL, ("%s: arg %p sdl %p cnt %u\n",
+ __func__, arg, sdl, cnt));
+
+ mc_list = arg;
+ /* If it is on the list already skip it. */
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ if (!memcmp(addr->addr, LLADDR(sdl), sdl->sdl_alen))
+ return (0);
+ }
+
+ addr = malloc(sizeof(*addr), M_LKPI80211, M_NOWAIT | M_ZERO);
+ if (addr == NULL)
+ return (0);
+
+ INIT_LIST_HEAD(&addr->addr_list);
+ memcpy(addr->addr, LLADDR(sdl), sdl->sdl_alen);
+ /* XXX this should be a netdev function? */
+ list_add(&addr->addr_list, &mc_list->addr_list);
+ mc_list->count++;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ printf("%s:%d: mc_list count %d: added %6D\n",
+ __func__, __LINE__, mc_list->count, addr->addr, ":");
+#endif
+
+ return (1);
+}
+
+static void
+lkpi_update_mcast_filter(struct ieee80211com *ic, bool force)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct netdev_hw_addr_list mc_list;
+ struct list_head *le, *next;
+ struct netdev_hw_addr *addr;
+ struct ieee80211vap *vap;
+ u64 mc;
+ unsigned int changed_flags, total_flags;
+
+ lhw = ic->ic_softc;
+
+ if (lhw->ops->prepare_multicast == NULL ||
+ lhw->ops->configure_filter == NULL)
+ return;
+
+ if (!lhw->update_mc && !force)
+ return;
+
+ changed_flags = total_flags = 0;
+ mc_list.count = 0;
+ INIT_LIST_HEAD(&mc_list.addr_list);
+ if (ic->ic_allmulti == 0) {
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if_foreach_llmaddr(vap->iv_ifp,
+ lkpi_ic_update_mcast_copy, &mc_list);
+ } else {
+ changed_flags |= FIF_ALLMULTI;
+ }
+
+ hw = LHW_TO_HW(lhw);
+ mc = lkpi_80211_mo_prepare_multicast(hw, &mc_list);
+ /*
+ * XXX-BZ make sure to get this sorted: what is a change,
+ * what gets all set; what was already set?
+ */
+ total_flags = changed_flags;
+ lkpi_80211_mo_configure_filter(hw, changed_flags, &total_flags, mc);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ printf("%s: changed_flags %#06x count %d total_flags %#010x\n",
+ __func__, changed_flags, mc_list.count, total_flags);
+#endif
+
+ if (mc_list.count != 0) {
+ list_for_each_safe(le, next, &mc_list.addr_list) {
+ addr = list_entry(le, struct netdev_hw_addr, addr_list);
+ free(addr, M_LKPI80211);
+ mc_list.count--;
+ }
+ }
+ KASSERT(mc_list.count == 0, ("%s: mc_list %p count %d != 0\n",
+ __func__, &mc_list, mc_list.count));
+}
+
+static enum ieee80211_bss_changed
+lkpi_update_dtim_tsf(struct ieee80211_vif *vif, struct ieee80211_node *ni,
+ struct ieee80211vap *vap, const char *_f, int _l)
+{
+ enum ieee80211_bss_changed bss_changed;
+
+ bss_changed = 0;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ printf("%s:%d [%s:%d] assoc %d aid %d beacon_int %u "
+ "dtim_period %u sync_dtim_count %u sync_tsf %ju "
+ "sync_device_ts %u bss_changed %#010jx\n",
+ __func__, __LINE__, _f, _l,
+ vif->cfg.assoc, vif->cfg.aid,
+ vif->bss_conf.beacon_int, vif->bss_conf.dtim_period,
+ vif->bss_conf.sync_dtim_count,
+ (uintmax_t)vif->bss_conf.sync_tsf,
+ vif->bss_conf.sync_device_ts,
+ (uintmax_t)bss_changed);
+#endif
+
+ if (vif->bss_conf.beacon_int != ni->ni_intval) {
+ vif->bss_conf.beacon_int = ni->ni_intval;
+ /* iwlwifi FW bug workaround; iwl_mvm_mac_sta_state. */
+ if (vif->bss_conf.beacon_int < 16)
+ vif->bss_conf.beacon_int = 16;
+ bss_changed |= BSS_CHANGED_BEACON_INT;
+ }
+ if (vif->bss_conf.dtim_period != vap->iv_dtim_period &&
+ vap->iv_dtim_period > 0) {
+ vif->bss_conf.dtim_period = vap->iv_dtim_period;
+ bss_changed |= BSS_CHANGED_BEACON_INFO;
+ }
+
+ vif->bss_conf.sync_dtim_count = vap->iv_dtim_count;
+ vif->bss_conf.sync_tsf = le64toh(ni->ni_tstamp.tsf);
+ /* vif->bss_conf.sync_device_ts = set in linuxkpi_ieee80211_rx. */
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ printf("%s:%d [%s:%d] assoc %d aid %d beacon_int %u "
+ "dtim_period %u sync_dtim_count %u sync_tsf %ju "
+ "sync_device_ts %u bss_changed %#010jx\n",
+ __func__, __LINE__, _f, _l,
+ vif->cfg.assoc, vif->cfg.aid,
+ vif->bss_conf.beacon_int, vif->bss_conf.dtim_period,
+ vif->bss_conf.sync_dtim_count,
+ (uintmax_t)vif->bss_conf.sync_tsf,
+ vif->bss_conf.sync_device_ts,
+ (uintmax_t)bss_changed);
+#endif
+
+ return (bss_changed);
+}
+
+static void
+lkpi_stop_hw_scan(struct lkpi_hw *lhw, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw;
+ int error;
+ bool cancel;
+
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ if (!cancel)
+ return;
+
+ hw = LHW_TO_HW(lhw);
+
+ IEEE80211_UNLOCK(lhw->ic);
+ wiphy_lock(hw->wiphy);
+ /* Need to cancel the scan. */
+ lkpi_80211_mo_cancel_hw_scan(hw, vif);
+ wiphy_unlock(hw->wiphy);
+
+ /* Need to make sure we see ieee80211_scan_completed. */
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0)
+ error = msleep(lhw, &lhw->scan_mtx, 0, "lhwscanstop", hz/2);
+ cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ IEEE80211_LOCK(lhw->ic);
+
+ if (cancel)
+ ic_printf(lhw->ic, "%s: failed to cancel scan: %d (%p, %p)\n",
+ __func__, error, lhw, vif);
+}
+
+static void
+lkpi_hw_conf_idle(struct ieee80211_hw *hw, bool new)
+{
+ struct lkpi_hw *lhw;
+ int error;
+ bool old;
+
+ old = hw->conf.flags & IEEE80211_CONF_IDLE;
+ if (old == new)
+ return;
+
+ hw->conf.flags ^= IEEE80211_CONF_IDLE;
+ error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_IDLE);
+ if (error != 0 && error != EOPNOTSUPP) {
+ lhw = HW_TO_LHW(hw);
+ ic_printf(lhw->ic, "ERROR: %s: config %#0x returned %d\n",
+ __func__, IEEE80211_CONF_CHANGE_IDLE, error);
+ }
+}
+
+static enum ieee80211_bss_changed
+lkpi_disassoc(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct lkpi_hw *lhw)
+{
+ enum ieee80211_bss_changed changed;
+
+ changed = 0;
+ sta->aid = 0;
+ if (vif->cfg.assoc) {
+
+ lhw->update_mc = true;
+ lkpi_update_mcast_filter(lhw->ic, true);
+
+ vif->cfg.assoc = false;
+ vif->cfg.aid = 0;
+ changed |= BSS_CHANGED_ASSOC;
+ IMPROVE();
+
+ /*
+ * Executing the bss_info_changed(BSS_CHANGED_ASSOC) with
+ * assoc = false right away here will remove the sta from
+ * firmware for iwlwifi.
+ * We no longer do this but only return the BSS_CHANGED value.
+ * The caller is responsible for removing the sta going to
+ * IEEE80211_STA_NOTEXIST and then executing the
+ * bss_info_changed() update.
+ * See lkpi_sta_run_to_init() for more detailed comment.
+ */
+ }
+
+ return (changed);
+}
+
+static void
+lkpi_wake_tx_queues(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool dequeue_seen, bool no_emptyq)
+{
+ struct lkpi_txq *ltxq;
+ int tid;
+ bool ltxq_empty;
+
+ /* Wake up all queues to know they are allocated in the driver. */
+ for (tid = 0; tid < nitems(sta->txq); tid++) {
+
+ if (tid == IEEE80211_NUM_TIDS) {
+ IMPROVE("station specific?");
+ if (!ieee80211_hw_check(hw, STA_MMPDU_TXQ))
+ continue;
+ } else if (tid >= hw->queues)
+ continue;
+
+ if (sta->txq[tid] == NULL)
+ continue;
+
+ ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
+ if (dequeue_seen && !ltxq->seen_dequeue)
+ continue;
+
+ LKPI_80211_LTXQ_LOCK(ltxq);
+ ltxq_empty = skb_queue_empty(&ltxq->skbq);
+ LKPI_80211_LTXQ_UNLOCK(ltxq);
+ if (no_emptyq && ltxq_empty)
+ continue;
+
+ lkpi_80211_mo_wake_tx_queue(hw, sta->txq[tid]);
+ }
+}
+
+/*
+ * On the way down from RUN -> ASSOC -> AUTH we may send a DISASSOC or DEAUTH
+ * packet. The problem is that the state machine functions tend to hold the
+ * LHW lock which will prevent lkpi_80211_txq_tx_one() from sending the packet.
+ * We call this after dropping the ic lock and before acquiring the LHW lock.
+ * We make sure no further packets get queued and, for any already queued,
+ * that the txq task either finishes or is cancelled. At the end, if a
+ * packet is left over, we manually send it. scan_to_auth() would re-enable
+ * sending if the lsta were re-used.
+ */
+static void
+lkpi_80211_flush_tx(struct lkpi_hw *lhw, struct lkpi_sta *lsta)
+{
+ struct ieee80211_hw *hw;
+ struct mbufq mq;
+ struct mbuf *m;
+ int len;
+
+ /* There is no lockdep_assert_not_held_wiphy(). */
+ hw = LHW_TO_HW(lhw);
+ lockdep_assert_not_held(&hw->wiphy->mtx);
+
+ /* Do not accept any new packets until scan_to_auth or lsta_free(). */
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ lsta->txq_ready = false;
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+
+ while (taskqueue_cancel(taskqueue_thread, &lsta->txq_task, NULL) != 0)
+ taskqueue_drain(taskqueue_thread, &lsta->txq_task);
+
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ len = mbufq_len(&lsta->txq);
+ if (len <= 0) {
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+ return;
+ }
+
+ mbufq_init(&mq, IFQ_MAXLEN);
+ mbufq_concat(&mq, &lsta->txq);
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+
+ m = mbufq_dequeue(&mq);
+ while (m != NULL) {
+ lkpi_80211_txq_tx_one(lsta, m);
+ m = mbufq_dequeue(&mq);
+ }
+}
+
+
+static void
+lkpi_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct lkpi_chanctx *lchanctx;
+
+ chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf,
+ lockdep_is_held(&hw->wiphy->mtx));
+
+ if (chanctx_conf == NULL)
+ return;
+
+ /* Remove vif context. */
+ lkpi_80211_mo_unassign_vif_chanctx(hw, vif, &vif->bss_conf, chanctx_conf);
+
+ lkpi_hw_conf_idle(hw, true);
+
+ /* Remove chan ctx. */
+ lkpi_80211_mo_remove_chanctx(hw, chanctx_conf);
+
+ /* Cleanup. */
+ rcu_assign_pointer(vif->bss_conf.chanctx_conf, NULL);
+ lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
+ list_del(&lchanctx->entry);
+ free(lchanctx, M_LKPI80211);
+}
+
+
+/* -------------------------------------------------------------------------- */
+
+static int
+lkpi_sta_state_do_nada(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+
+ return (0);
+}
+
+/* lkpi_iv_newstate() handles the stop scan case generally. */
+#define lkpi_sta_scan_to_init(_v, _n, _a) lkpi_sta_state_do_nada(_v, _n, _a)
+
+static int
+lkpi_sta_scan_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct linuxkpi_ieee80211_channel *chan;
+ struct lkpi_chanctx *lchanctx;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_node *ni;
+ struct lkpi_sta *lsta;
+ enum ieee80211_bss_changed bss_changed;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ uint32_t changed;
+ int error;
+ bool synched;
+
+ /*
+ * In here we use vap->iv_bss until lvif->lvif_bss is set.
+ * For all later (STATE >= AUTH) functions we need to use the lvif
+ * cache which will be tracked even through (*iv_update_bss)().
+ */
+
+ if (vap->iv_bss == NULL) {
+ ic_printf(vap->iv_ic, "%s: no iv_bss for vap %p\n", __func__, vap);
+ return (EINVAL);
+ }
+ /*
+ * Keep the ni alive locally. In theory (and practice) iv_bss can change
+ * once we unlock here. This is due to net80211 allowing state changes
+ * and new join1() despite having an active node as well as due to
+ * the fact that the iv_bss can be swapped under the hood in (*iv_update_bss).
+ */
+ ni = ieee80211_ref_node(vap->iv_bss);
+ if (ni->ni_chan == NULL || ni->ni_chan == IEEE80211_CHAN_ANYC) {
+ ic_printf(vap->iv_ic, "%s: no channel set for iv_bss ni %p "
+ "on vap %p\n", __func__, ni, vap);
+ ieee80211_free_node(ni); /* Error handling for the local ni. */
+ return (EINVAL);
+ }
+
+ lhw = vap->iv_ic->ic_softc;
+ chan = lkpi_find_lkpi80211_chan(lhw, ni->ni_chan);
+ if (chan == NULL) {
+ ic_printf(vap->iv_ic, "%s: failed to get LKPI channel from "
+ "iv_bss ni %p on vap %p\n", __func__, ni, vap);
+ ieee80211_free_node(ni); /* Error handling for the local ni. */
+ return (ESRCH);
+ }
+
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* XXX-BZ KASSERT later? */
+ if (lvif->lvif_bss_synched || lvif->lvif_bss != NULL) {
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ ieee80211_free_node(ni); /* Error handling for the local ni. */
+ return (EBUSY);
+ }
+ LKPI_80211_LVIF_UNLOCK(lvif);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ /* Add chanctx (or if exists, change it). */
+ chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf,
+ lockdep_is_held(&hw->wiphy->mtx));
+ if (chanctx_conf != NULL) {
+ lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
+ IMPROVE("diff changes for changed, working on live copy, rcu");
+ } else {
+ /* Keep separate alloc as in Linux this is rcu managed? */
+ lchanctx = malloc(sizeof(*lchanctx) + hw->chanctx_data_size,
+ M_LKPI80211, M_WAITOK | M_ZERO);
+ chanctx_conf = &lchanctx->chanctx_conf;
+ }
+
+ chanctx_conf->rx_chains_static = 1;
+ chanctx_conf->rx_chains_dynamic = 1;
+ chanctx_conf->radar_enabled =
+ (chan->flags & IEEE80211_CHAN_RADAR) ? true : false;
+ chanctx_conf->def.chan = chan;
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_20_NOHT;
+ chanctx_conf->def.center_freq1 = ieee80211_get_channel_center_freq1(ni->ni_chan);
+ chanctx_conf->def.center_freq2 = ieee80211_get_channel_center_freq2(ni->ni_chan);
+ IMPROVE("Check vht_cap from band not just chan?");
+ KASSERT(ni->ni_chan != NULL && ni->ni_chan != IEEE80211_CHAN_ANYC,
+ ("%s:%d: ni %p ni_chan %p\n", __func__, __LINE__, ni, ni->ni_chan));
+
+#ifdef LKPI_80211_HT
+ if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
+ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_40;
+ else
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_20;
+ }
+#endif
+#ifdef LKPI_80211_VHT
+ if (IEEE80211_IS_CHAN_VHT_5GHZ(ni->ni_chan)) {
+ if (IEEE80211_IS_CHAN_VHT80P80(ni->ni_chan))
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_80P80;
+ else if (IEEE80211_IS_CHAN_VHT160(ni->ni_chan))
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_160;
+ else if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
+ chanctx_conf->def.width = NL80211_CHAN_WIDTH_80;
+ }
+#endif
+ chanctx_conf->rx_chains_dynamic = lkpi_get_max_rx_chains(ni);
+ /* Responder ... */
+#if 0
+ chanctx_conf->min_def.chan = chanctx_conf->def.chan;
+ chanctx_conf->min_def.width = NL80211_CHAN_WIDTH_20_NOHT;
+#ifdef LKPI_80211_HT
+ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) || IEEE80211_IS_CHAN_VHT(ni->ni_chan))
+ chanctx_conf->min_def.width = NL80211_CHAN_WIDTH_20;
+#endif
+ chanctx_conf->min_def.center_freq1 = chanctx_conf->def.center_freq1;
+ chanctx_conf->min_def.center_freq2 = chanctx_conf->def.center_freq2;
+#else
+ chanctx_conf->min_def = chanctx_conf->def;
+#endif
+
+ /* Set bss info (bss_info_changed). */
+ bss_changed = 0;
+ vif->bss_conf.bssid = ni->ni_bssid;
+ bss_changed |= BSS_CHANGED_BSSID;
+ vif->bss_conf.txpower = ni->ni_txpower;
+ bss_changed |= BSS_CHANGED_TXPOWER;
+ vif->cfg.idle = false;
+ bss_changed |= BSS_CHANGED_IDLE;
+
+ /* vif->bss_conf.basic_rates ? Where exactly? */
+
+ /* Should almost assert it is this. */
+ vif->cfg.assoc = false;
+ vif->cfg.aid = 0;
+
+ bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);
+
+ error = 0;
+ if (vif->bss_conf.chanctx_conf == chanctx_conf) {
+ changed = IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
+ changed |= IEEE80211_CHANCTX_CHANGE_RADAR;
+ changed |= IEEE80211_CHANCTX_CHANGE_RX_CHAINS;
+ changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
+ lkpi_80211_mo_change_chanctx(hw, chanctx_conf, changed);
+ } else {
+ error = lkpi_80211_mo_add_chanctx(hw, chanctx_conf);
+ if (error == 0 || error == EOPNOTSUPP) {
+ vif->bss_conf.chanreq.oper.chan = chanctx_conf->def.chan;
+ vif->bss_conf.chanreq.oper.width = chanctx_conf->def.width;
+ vif->bss_conf.chanreq.oper.center_freq1 =
+ chanctx_conf->def.center_freq1;
+ vif->bss_conf.chanreq.oper.center_freq2 =
+ chanctx_conf->def.center_freq2;
+ } else {
+ ic_printf(vap->iv_ic, "%s:%d: mo_add_chanctx "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ list_add_rcu(&lchanctx->entry, &lhw->lchanctx_list);
+ rcu_assign_pointer(vif->bss_conf.chanctx_conf, chanctx_conf);
+
+ /* Assign vif chanctx. */
+ if (error == 0)
+ error = lkpi_80211_mo_assign_vif_chanctx(hw, vif,
+ &vif->bss_conf, chanctx_conf);
+ if (error == EOPNOTSUPP)
+ error = 0;
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_assign_vif_chanctx "
+ "failed: %d\n", __func__, __LINE__, error);
+ lkpi_80211_mo_remove_chanctx(hw, chanctx_conf);
+ rcu_assign_pointer(vif->bss_conf.chanctx_conf, NULL);
+ lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
+ list_del(&lchanctx->entry);
+ free(lchanctx, M_LKPI80211);
+ goto out;
+ }
+ }
+ IMPROVE("update radiotap chan fields too");
+
+ /* RATES */
+ IMPROVE("bss info: not all needs to come now and rates are missing");
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+
+ /*
+	 * Given ni and lsta are 1:1 from alloc to free we can assert that
+	 * ni always has lsta data attached despite net80211 swapping nodes
+	 * under the hood.
+ */
+ KASSERT(ni->ni_drv_data != NULL, ("%s: ni %p ni_drv_data %p\n",
+ __func__, ni, ni->ni_drv_data));
+ lsta = ni->ni_drv_data;
+
+ /* Insert the [l]sta into the list of known stations. */
+ list_add_tail(&lsta->lsta_list, &lvif->lsta_list);
+
+ /* Add (or adjust) sta and change state (from NOTEXIST) to NONE. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_NOTEXIST, ("%s: lsta %p state not "
+ "NOTEXIST: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE);
+ if (error != 0) {
+ IMPROVE("do we need to undo the chan ctx?");
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+#if 0
+ lsta->added_to_drv = true; /* mo manages. */
+#endif
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+#if 0
+ /*
+ * Wakeup all queues now that sta is there so we have as much time to
+ * possibly prepare the queue in the driver to be ready for the 1st
+ * packet; lkpi_80211_txq_tx_one() still has a workaround as there
+ * is no guarantee or way to check.
+ * XXX-BZ and by now we know that this does not work on all drivers
+ * for all queues.
+ */
+ lkpi_wake_tx_queues(hw, LSTA_TO_STA(lsta), false, false);
+#endif
+
+ /* Start mgd_prepare_tx. */
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.duration = PREP_TX_INFO_DURATION;
+ lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = true;
+
+ /*
+ * What is going to happen next:
+ * - <twiddle> .. we should end up in "auth_to_assoc"
+ * - event_callback
+ * - update sta_state (NONE to AUTH)
+ * - mgd_complete_tx
+ * (ideally we'd do that on a callback for something else ...)
+ */
+
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* Re-check given (*iv_update_bss) could have happened while we were unlocked. */
+ if (lvif->lvif_bss_synched || lvif->lvif_bss != NULL ||
+ lsta->ni != vap->iv_bss)
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d, ni %p lsta %p\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched, ni, lsta);
+
+ /*
+ * Reference the "ni" for caching the lsta/ni in lvif->lvif_bss.
+ * Given we cache lsta we use lsta->ni instead of ni here (even though
+ * lsta->ni == ni) to be distinct from the rest of the code where we do
+ * assume that ni == vap->iv_bss which it may or may not be.
+ * So do NOT use iv_bss here anymore as that may have diverged from our
+ * function local ni already while ic was unlocked and would lead to
+	 * inconsistencies. Check whether we lost a race and do not mark
+	 * lvif_bss_synched in that case.
+ */
+ ieee80211_ref_node(lsta->ni);
+ lvif->lvif_bss = lsta;
+ if (lsta->ni == vap->iv_bss) {
+ lvif->lvif_bss_synched = synched = true;
+ } else {
+ /* Set to un-synched no matter what. */
+ lvif->lvif_bss_synched = synched = false;
+ /*
+ * We do not error as someone has to take us down.
+		 * If we are followed by a 2nd, new net80211::join1() going to
+		 * AUTH, lkpi_sta_a_to_a() will error and
+		 * lkpi_sta_auth_to_{scan,init}() will eventually take the
+		 * lvif->lvif_bss node down.
+		 * What happens with the vap->iv_bss node is entirely up
+		 * to net80211 as we never used the node beyond alloc()/free()
+		 * and we no longer hold an extra reference for it given
+		 * ni : lsta == 1:1.
+		 * The problem is that if we do not error, a MGMT/AUTH frame
+		 * will be sent from net80211::sta_newstate(); disable the lsta
+		 * queue below.
+ */
+ }
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ /*
+	 * Make sure that, in case the sta did not change and we re-added it,
+	 * we can tx again, but only if the vif/iv_bss are in sync.
+	 * Otherwise this should prevent the MGMT/AUTH frame from being
+	 * sent, which would trigger a warning in iwlwifi.
+ */
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ lsta->txq_ready = synched;
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+ goto out_relocked;
+
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+out_relocked:
+ /*
+ * Release the reference that kept the ni stable locally
+ * during the work of this function.
+ */
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ return (error);
+}
+
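+/*
+ * AUTH -> SCAN: flush pending frames, take the station back down to
+ * NOTEXIST in the driver and remove the channel context again.
+ */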
+static int
+lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_node *ni;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+#ifdef LINUXKPI_DEBUG_80211
+ /* XXX-BZ KASSERT later; state going down so no action. */
+ if (lvif->lvif_bss == NULL)
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
+ "lvif %p vap %p\n", __func__,
+ lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));
+ ni = lsta->ni; /* Reference held for lvif_bss. */
+ sta = LSTA_TO_STA(lsta);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ /* flush, drop. */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);
+
+ /* Wake tx queues to get packet(s) out. */
+ lkpi_wake_tx_queues(hw, sta, false, true);
+
+ /* flush, no drop */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = false;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+ /* sync_rx_queues */
+ lkpi_80211_mo_sync_rx_queues(hw);
+
+ /* sta_pre_rcu_remove */
+ lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);
+
+ /* Take the station down. */
+
+ /* Adjust sta and change state (from NONE) to NOTEXIST. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
+ "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
+ if (error != 0) {
+ IMPROVE("do we need to undo the chan ctx?");
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+#if 0
+ lsta->added_to_drv = false; /* mo manages. */
+#endif
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* Remove ni reference for this cache of lsta. */
+ lvif->lvif_bss = NULL;
+ lvif->lvif_bss_synched = false;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ lkpi_lsta_remove(lsta, lvif);
+ /*
+	 * Finally release the reference on the ni for the ni/lsta cached in
+ * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
+ * and potentially freed.
+ */
+ ieee80211_free_node(ni);
+
+ /* conf_tx */
+
+ lkpi_remove_chanctx(hw, vif);
+
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+ return (error);
+}
+
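+/* AUTH -> INIT: tear down via auth_to_scan; scan_to_init is a no-op. */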
+static int
+lkpi_sta_auth_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ int error;
+
+ error = lkpi_sta_auth_to_scan(vap, nstate, arg);
+ if (error == 0)
+ error = lkpi_sta_scan_to_init(vap, nstate, arg);
+ return (error);
+}
+
+static int
+lkpi_sta_auth_to_assoc(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* XXX-BZ KASSERT later? */
+ if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
+#ifdef LINUXKPI_DEBUG_80211
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+ error = ENOTRECOVERABLE;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ goto out;
+ }
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+
+ KASSERT(lsta != NULL, ("%s: lsta %p\n", __func__, lsta));
+
+ /* Finish auth. */
+ IMPROVE("event callback");
+
+ /* Update sta_state (NONE to AUTH). */
+ KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
+ "NONE: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = true;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+ /* Now start assoc. */
+
+ /* Start mgd_prepare_tx. */
+ if (!lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.duration = PREP_TX_INFO_DURATION;
+ lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = true;
+ }
+
+ /* Wake tx queue to get packet out. */
+ lkpi_wake_tx_queues(hw, LSTA_TO_STA(lsta), false, true);
+
+ /*
+ * <twiddle> .. we end up in "assoc_to_run"
+ * - update sta_state (AUTH to ASSOC)
+ * - conf_tx [all]
+ * - bss_info_changed (assoc, aid, ssid, ..)
+ * - change_chanctx (if needed)
+ * - event_callback
+ * - mgd_complete_tx
+ */
+
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+ return (error);
+}
+
+/* auth_to_auth, assoc_to_assoc. */
+static int
+lkpi_sta_a_to_a(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* XXX-BZ KASSERT later? */
+ if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
+#ifdef LINUXKPI_DEBUG_80211
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ error = ENOTRECOVERABLE;
+ goto out;
+ }
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+
+ KASSERT(lsta != NULL, ("%s: lsta %p! lvif %p vap %p\n", __func__,
+ lsta, lvif, vap));
+
+ IMPROVE("event callback?");
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = false;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+ /* Now start assoc. */
+
+ /* Start mgd_prepare_tx. */
+ if (!lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.duration = PREP_TX_INFO_DURATION;
+ lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = true;
+ }
+
+ error = 0;
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+
+ return (error);
+}
+
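+/*
+ * Common teardown from ASSOC shared by assoc_to_{auth,scan,init}:
+ * let (*iv_newstate) send the DEAUTH, take the station back down to
+ * NOTEXIST and remove the channel context.
+ */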
+static int
+_lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_node *ni;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ enum ieee80211_bss_changed bss_changed;
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+#ifdef LINUXKPI_DEBUG_80211
+ /* XXX-BZ KASSERT later; state going down so no action. */
+ if (lvif->lvif_bss == NULL)
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
+ "lvif %p vap %p\n", __func__,
+ lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));
+
+ ni = lsta->ni; /* Reference held for lvif_bss. */
+ sta = LSTA_TO_STA(lsta);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* flush, drop. */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);
+
+ IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?");
+ if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) &&
+ !lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.duration = PREP_TX_INFO_DURATION;
+ prep_tx_info.was_assoc = true;
+ lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = true;
+ }
+
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+
+ /* Call iv_newstate first so we get potential DEAUTH packet out. */
+ error = lvif->iv_newstate(vap, nstate, arg);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) "
+ "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error);
+ goto outni;
+ }
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+
+ /* Ensure the packets get out. */
+ lkpi_80211_flush_tx(lhw, lsta);
+
+ wiphy_lock(hw->wiphy);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* Wake tx queues to get packet(s) out. */
+ lkpi_wake_tx_queues(hw, sta, false, true);
+
+ /* flush, no drop */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = false;
+ prep_tx_info.was_assoc = true;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+ /* sync_rx_queues */
+ lkpi_80211_mo_sync_rx_queues(hw);
+
+ /* sta_pre_rcu_remove */
+ lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);
+
+ /* Take the station down. */
+
+ /* Update sta and change state (from AUTH) to NONE. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
+ "AUTH: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ /* See comment in lkpi_sta_run_to_init(). */
+ bss_changed = 0;
+ bss_changed |= lkpi_disassoc(sta, vif, lhw);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* Adjust sta and change state (from NONE) to NOTEXIST. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
+ "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
+ if (error != 0) {
+ IMPROVE("do we need to undo the chan ctx?");
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+	lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* sta no longer safe to use. */
+
+ IMPROVE("Any bss_info changes to announce?");
+ vif->bss_conf.qos = 0;
+ bss_changed |= BSS_CHANGED_QOS;
+ vif->cfg.ssid_len = 0;
+ memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid));
+ bss_changed |= BSS_CHANGED_BSSID;
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* Remove ni reference for this cache of lsta. */
+ lvif->lvif_bss = NULL;
+ lvif->lvif_bss_synched = false;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ lkpi_lsta_remove(lsta, lvif);
+ /*
+	 * Finally release the reference on the ni for the ni/lsta cached in
+ * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
+ * and potentially freed.
+ */
+ ieee80211_free_node(ni);
+
+ /* conf_tx */
+
+ lkpi_remove_chanctx(hw, vif);
+
+ error = EALREADY;
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+outni:
+ return (error);
+}
+
+static int
+lkpi_sta_assoc_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ int error;
+
+ error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
+ if (error != 0 && error != EALREADY)
+ return (error);
+
+	/* At this point iv_bss has long been a new node! */
+
+ error |= lkpi_sta_scan_to_auth(vap, nstate, 0);
+ return (error);
+}
+
+static int
+lkpi_sta_assoc_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ int error;
+
+ error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
+ return (error);
+}
+
+static int
+lkpi_sta_assoc_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ int error;
+
+ error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
+ return (error);
+}
+
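+/*
+ * ASSOC -> RUN: move the station to ASSOC and then AUTHORIZED in the
+ * driver, announce the assoc/aid/SSID bss_info changes and update WME,
+ * thresholds and the multicast filter.
+ */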
+static int
+lkpi_sta_assoc_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_node *ni;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ enum ieee80211_bss_changed bss_changed;
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* XXX-BZ KASSERT later? */
+ if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
+#ifdef LINUXKPI_DEBUG_80211
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ error = ENOTRECOVERABLE;
+ goto out;
+ }
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
+ "lvif %p vap %p\n", __func__,
+ lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));
+
+ ni = lsta->ni; /* Reference held for lvif_bss. */
+
+	IMPROVE("ponder moving some of this to ic_newassoc, scan_assoc_success, "
+	    "and to a lesser extent ieee80211_notify_node_join");
+
+ /* Finish assoc. */
+ /* Update sta_state (AUTH to ASSOC) and set aid. */
+ KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
+ "AUTH: %#x\n", __func__, lsta, lsta->state));
+ sta = LSTA_TO_STA(lsta);
+ sta->aid = IEEE80211_NODE_AID(ni);
+#ifdef LKPI_80211_WME
+ if (vap->iv_flags & IEEE80211_F_WME)
+ sta->wme = true;
+#endif
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ IMPROVE("wme / conf_tx [all]");
+
+ /* Update bss info (bss_info_changed) (assoc, aid, ..). */
+ bss_changed = 0;
+#ifdef LKPI_80211_WME
+ bss_changed |= lkpi_wme_update(lhw, vap, true);
+#endif
+ if (!vif->cfg.assoc || vif->cfg.aid != IEEE80211_NODE_AID(ni)) {
+ vif->cfg.assoc = true;
+ vif->cfg.aid = IEEE80211_NODE_AID(ni);
+ bss_changed |= BSS_CHANGED_ASSOC;
+ }
+ /* We set SSID but this is not BSSID! */
+ vif->cfg.ssid_len = ni->ni_esslen;
+ memcpy(vif->cfg.ssid, ni->ni_essid, ni->ni_esslen);
+ if ((vap->iv_flags & IEEE80211_F_SHPREAMBLE) !=
+ vif->bss_conf.use_short_preamble) {
+ vif->bss_conf.use_short_preamble ^= 1;
+ /* bss_changed |= BSS_CHANGED_??? */
+ }
+ if ((vap->iv_flags & IEEE80211_F_SHSLOT) !=
+ vif->bss_conf.use_short_slot) {
+ vif->bss_conf.use_short_slot ^= 1;
+ /* bss_changed |= BSS_CHANGED_??? */
+ }
+ if ((ni->ni_flags & IEEE80211_NODE_QOS) !=
+ vif->bss_conf.qos) {
+ vif->bss_conf.qos ^= 1;
+ bss_changed |= BSS_CHANGED_QOS;
+ }
+
+ bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+
+ /* - change_chanctx (if needed)
+ * - event_callback
+ */
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = true;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+ lkpi_hw_conf_idle(hw, false);
+
+ /*
+ * And then:
+ * - (more packets)?
+ * - set_key
+ * - set_default_unicast_key
+ * - set_key (?)
+ * - ipv6_addr_change (?)
+ */
+ /* Prepare_multicast && configure_filter. */
+ lhw->update_mc = true;
+ lkpi_update_mcast_filter(vap->iv_ic, true);
+
+ if (!ieee80211_node_is_authorized(ni)) {
+ IMPROVE("net80211 does not consider node authorized");
+ }
+
+ IMPROVE("Is this the right spot, has net80211 done all updates already?");
+ lkpi_sta_sync_from_ni(hw, vif, sta, ni, true);
+
+ /* Update thresholds. */
+ hw->wiphy->frag_threshold = vap->iv_fragthreshold;
+ lkpi_80211_mo_set_frag_threshold(hw, vap->iv_fragthreshold);
+ hw->wiphy->rts_threshold = vap->iv_rtsthreshold;
+ lkpi_80211_mo_set_rts_threshold(hw, vap->iv_rtsthreshold);
+
+ /* Update sta_state (ASSOC to AUTHORIZED). */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not "
+ "ASSOC: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTHORIZED);
+ if (error != 0) {
+ IMPROVE("undo some changes?");
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTHORIZED) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ /* - drv_config (?)
+ * - bss_info_changed
+ * - set_rekey_data (?)
+ *
+ * And now we should be passing packets.
+ */
+ IMPROVE("Need that bssid setting, and the keys");
+
+ bss_changed = 0;
+ bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+ return (error);
+}
+
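+/* AUTH -> RUN: chain auth_to_assoc and assoc_to_run. */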
+static int
+lkpi_sta_auth_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ int error;
+
+ error = lkpi_sta_auth_to_assoc(vap, nstate, arg);
+ if (error == 0)
+ error = lkpi_sta_assoc_to_run(vap, nstate, arg);
+ return (error);
+}
+
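+/*
+ * RUN -> ASSOC: let (*iv_newstate) send the DISASSOC, drop hw crypto
+ * keys (if enabled) and walk the station back down to AUTH.
+ */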
+static int
+lkpi_sta_run_to_assoc(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_node *ni;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+#if 0
+ enum ieee80211_bss_changed bss_changed;
+#endif
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+#ifdef LINUXKPI_DEBUG_80211
+ /* XXX-BZ KASSERT later; state going down so no action. */
+ if (lvif->lvif_bss == NULL)
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
+ "lvif %p vap %p\n", __func__,
+ lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));
+
+ ni = lsta->ni; /* Reference held for lvif_bss. */
+ sta = LSTA_TO_STA(lsta);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ /* flush, drop. */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);
+
+ IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?");
+ if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) &&
+ !lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.duration = PREP_TX_INFO_DURATION;
+ prep_tx_info.was_assoc = true;
+ lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = true;
+ }
+
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+
+ /* Call iv_newstate first so we get potential DISASSOC packet out. */
+ error = lvif->iv_newstate(vap, nstate, arg);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) "
+ "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error);
+ goto outni;
+ }
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+
+ /* Ensure the packets get out. */
+ lkpi_80211_flush_tx(lhw, lsta);
+
+ wiphy_lock(hw->wiphy);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* Wake tx queues to get packet(s) out. */
+ lkpi_wake_tx_queues(hw, sta, false, true);
+
+ /* flush, no drop */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = false;
+ prep_tx_info.was_assoc = true;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+#if 0
+ /* sync_rx_queues */
+ lkpi_80211_mo_sync_rx_queues(hw);
+
+ /* sta_pre_rcu_remove */
+ lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);
+#endif
+
+ /* Take the station down. */
+
+ /* Adjust sta and change state (from AUTHORIZED) to ASSOC. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_AUTHORIZED, ("%s: lsta %p state not "
+ "AUTHORIZED: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+#ifdef LKPI_80211_HW_CRYPTO
+ if (lkpi_hwcrypto) {
+ error = lkpi_sta_del_keys(hw, vif, lsta);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: lkpi_sta_del_keys "
+ "failed: %d\n", __func__, __LINE__, error);
+ /*
+ * Either drv/fw will crash or cleanup itself,
+ * otherwise net80211 will delete the keys (at a
+ * less appropriate time).
+ */
+ /* goto out; */
+ }
+ }
+#endif
+
+ /* Update sta_state (ASSOC to AUTH). */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not "
+ "ASSOC: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+#if 0
+ /* Update bss info (bss_info_changed) (assoc, aid, ..). */
+ lkpi_disassoc(sta, vif, lhw);
+#endif
+
+ error = EALREADY;
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+outni:
+ return (error);
+}
+
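+/*
+ * RUN -> INIT: full teardown; let (*iv_newstate) send the DISASSOC,
+ * walk the station back down to NOTEXIST, announce the remaining
+ * bss_info changes and remove the channel context.
+ */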
+static int
+lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_node *ni;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_prep_tx_info prep_tx_info;
+ enum ieee80211_bss_changed bss_changed;
+ int error;
+
+ lhw = vap->iv_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+ wiphy_lock(hw->wiphy);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+#ifdef LINUXKPI_DEBUG_80211
+ /* XXX-BZ KASSERT later; state going down so no action. */
+ if (lvif->lvif_bss == NULL)
+ ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
+ "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
+ lvif, vap, vap->iv_bss, lvif->lvif_bss,
+ (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
+ lvif->lvif_bss_synched);
+#endif
+ lsta = lvif->lvif_bss;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
+ "lvif %p vap %p\n", __func__,
+ lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));
+
+ ni = lsta->ni; /* Reference held for lvif_bss. */
+ sta = LSTA_TO_STA(lsta);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* flush, drop. */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);
+
+ IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?");
+ if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) &&
+ !lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.duration = PREP_TX_INFO_DURATION;
+ prep_tx_info.was_assoc = true;
+ lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = true;
+ }
+
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+
+ /* Call iv_newstate first so we get potential DISASSOC packet out. */
+ error = lvif->iv_newstate(vap, nstate, arg);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) "
+ "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error);
+ goto outni;
+ }
+
+ IEEE80211_UNLOCK(vap->iv_ic);
+
+ /* Ensure the packets get out. */
+ lkpi_80211_flush_tx(lhw, lsta);
+
+ wiphy_lock(hw->wiphy);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* Wake tx queues to get packet(s) out. */
+ lkpi_wake_tx_queues(hw, sta, false, true);
+
+ /* flush, no drop */
+ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);
+
+ /* End mgd_complete_tx. */
+ if (lsta->in_mgd) {
+ memset(&prep_tx_info, 0, sizeof(prep_tx_info));
+ prep_tx_info.success = false;
+ prep_tx_info.was_assoc = true;
+ lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
+ lsta->in_mgd = false;
+ }
+
+ /* sync_rx_queues */
+ lkpi_80211_mo_sync_rx_queues(hw);
+
+ /* sta_pre_rcu_remove */
+ lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);
+
+ /* Take the station down. */
+
+ /* Adjust sta and change state (from AUTHORIZED) to ASSOC. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_AUTHORIZED, ("%s: lsta %p state not "
+ "AUTHORIZED: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+#ifdef LKPI_80211_HW_CRYPTO
+ if (lkpi_hwcrypto) {
+ error = lkpi_sta_del_keys(hw, vif, lsta);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: lkpi_sta_del_keys "
+ "failed: %d\n", __func__, __LINE__, error);
+ /*
+ * Either drv/fw will crash or cleanup itself,
+ * otherwise net80211 will delete the keys (at a
+ * less appropriate time).
+ */
+ /* goto out; */
+ }
+ }
+#endif
+
+ /* Update sta_state (ASSOC to AUTH). */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not "
+ "ASSOC: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* Update sta and change state (from AUTH) to NONE. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
+ "AUTH: %#x\n", __func__, lsta, lsta->state));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE);
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ bss_changed = 0;
+ /*
+ * Start updating bss info (bss_info_changed) (assoc, aid, ..).
+ *
+ * One would expect this to happen when going off AUTHORIZED.
+ * See comment there; removes the sta from fw if not careful
+ * (bss_info_changed() change is executed right away).
+ *
+ * We need to do this now, before sta changes to IEEE80211_STA_NOTEXIST
+ * as otherwise drivers (iwlwifi at least) will silently not remove
+	 * the sta from the firmware and, when we add a new one, trigger
+ * a fw assert.
+ *
+ * The order which works best so far avoiding early removal or silent
+ * non-removal seems to be (for iwlwifi::mld-mac80211.c cases;
+	 * the iwlwifi::mac80211.c case still to be tested):
+ * 1) lkpi_disassoc(): set vif->cfg.assoc = false (aid=0 side effect here)
+ * 2) call the last sta_state update -> IEEE80211_STA_NOTEXIST
+ * (removes the sta given assoc is false)
+ * 3) add the remaining BSS_CHANGED changes and call bss_info_changed()
+ * 4) call unassign_vif_chanctx
+ * 5) call lkpi_hw_conf_idle
+ * 6) call remove_chanctx
+ */
+ bss_changed |= lkpi_disassoc(sta, vif, lhw);
+
+ lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
+
+ /* Adjust sta and change state (from NONE) to NOTEXIST. */
+ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
+ KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
+ "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
+ error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
+ if (error != 0) {
+ IMPROVE("do we need to undo the chan ctx?");
+ ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
+ "failed: %d\n", __func__, __LINE__, error);
+ goto out;
+ }
+
+ lkpi_lsta_remove(lsta, lvif);
+
+	lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* sta no longer safe to use. */
+
+ IMPROVE("Any bss_info changes to announce?");
+ vif->bss_conf.qos = 0;
+ bss_changed |= BSS_CHANGED_QOS;
+ vif->cfg.ssid_len = 0;
+ memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid));
+ bss_changed |= BSS_CHANGED_BSSID;
+ vif->bss_conf.use_short_preamble = false;
+ vif->bss_conf.qos = false;
+ /* XXX BSS_CHANGED_???? */
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+
+ LKPI_80211_LVIF_LOCK(lvif);
+ /* Remove ni reference for this cache of lsta. */
+ lvif->lvif_bss = NULL;
+ lvif->lvif_bss_synched = false;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ /*
+	 * Finally release the reference on the ni for the ni/lsta cached in
+ * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
+ * and potentially freed.
+ */
+ ieee80211_free_node(ni);
+
+ /* conf_tx */
+
+ lkpi_remove_chanctx(hw, vif);
+
+ error = EALREADY;
+out:
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(vap->iv_ic);
+outni:
+ return (error);
+}
+
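+/* RUN -> SCAN: same full teardown as RUN -> INIT. */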
+static int
+lkpi_sta_run_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+
+ return (lkpi_sta_run_to_init(vap, nstate, arg));
+}
+
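+/*
+ * RUN -> AUTH: tear everything down to INIT and then bring the (new)
+ * iv_bss back up to AUTH via scan_to_auth.
+ */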
+static int
+lkpi_sta_run_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ int error;
+
+ error = lkpi_sta_run_to_init(vap, nstate, arg);
+ if (error != 0 && error != EALREADY)
+ return (error);
+
+	/* At this point iv_bss has long been a new node! */
+
+ error |= lkpi_sta_scan_to_auth(vap, nstate, 0);
+ return (error);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * This matches the documented state changes in net80211::sta_newstate().
+ * XXX (1) without CSA and SLEEP yet; XXX (2) not all cases left unhandled
+ * here are "invalid" (so there is room for failure here).
+ */
+struct fsm_state {
+ /* INIT, SCAN, AUTH, ASSOC, CAC, RUN, CSA, SLEEP */
+ enum ieee80211_state ostate;
+ enum ieee80211_state nstate;
+ int (*handler)(struct ieee80211vap *, enum ieee80211_state, int);
+} sta_state_fsm[] = {
+ { IEEE80211_S_INIT, IEEE80211_S_INIT, lkpi_sta_state_do_nada },
+ { IEEE80211_S_SCAN, IEEE80211_S_INIT, lkpi_sta_state_do_nada }, /* scan_to_init */
+ { IEEE80211_S_AUTH, IEEE80211_S_INIT, lkpi_sta_auth_to_init }, /* not explicitly in sta_newstate() */
+ { IEEE80211_S_ASSOC, IEEE80211_S_INIT, lkpi_sta_assoc_to_init }, /* Send DEAUTH. */
+ { IEEE80211_S_RUN, IEEE80211_S_INIT, lkpi_sta_run_to_init }, /* Send DISASSOC. */
+
+ { IEEE80211_S_INIT, IEEE80211_S_SCAN, lkpi_sta_state_do_nada },
+ { IEEE80211_S_SCAN, IEEE80211_S_SCAN, lkpi_sta_state_do_nada },
+ { IEEE80211_S_AUTH, IEEE80211_S_SCAN, lkpi_sta_auth_to_scan },
+ { IEEE80211_S_ASSOC, IEEE80211_S_SCAN, lkpi_sta_assoc_to_scan },
+ { IEEE80211_S_RUN, IEEE80211_S_SCAN, lkpi_sta_run_to_scan }, /* Beacon miss. */
+
+ { IEEE80211_S_INIT, IEEE80211_S_AUTH, lkpi_sta_scan_to_auth }, /* Send AUTH. */
+ { IEEE80211_S_SCAN, IEEE80211_S_AUTH, lkpi_sta_scan_to_auth }, /* Send AUTH. */
+ { IEEE80211_S_AUTH, IEEE80211_S_AUTH, lkpi_sta_a_to_a }, /* Send ?AUTH. */
+ { IEEE80211_S_ASSOC, IEEE80211_S_AUTH, lkpi_sta_assoc_to_auth }, /* Send ?AUTH. */
+ { IEEE80211_S_RUN, IEEE80211_S_AUTH, lkpi_sta_run_to_auth }, /* Send ?AUTH. */
+
+ { IEEE80211_S_AUTH, IEEE80211_S_ASSOC, lkpi_sta_auth_to_assoc }, /* Send ASSOCREQ. */
+ { IEEE80211_S_ASSOC, IEEE80211_S_ASSOC, lkpi_sta_a_to_a }, /* Send ASSOCREQ. */
+ { IEEE80211_S_RUN, IEEE80211_S_ASSOC, lkpi_sta_run_to_assoc }, /* Send ASSOCREQ/REASSOCREQ. */
+
+ { IEEE80211_S_AUTH, IEEE80211_S_RUN, lkpi_sta_auth_to_run },
+ { IEEE80211_S_ASSOC, IEEE80211_S_RUN, lkpi_sta_assoc_to_run },
+ { IEEE80211_S_RUN, IEEE80211_S_RUN, lkpi_sta_state_do_nada },
+
+ /* Dummy at the end without handler. */
+ { IEEE80211_S_INIT, IEEE80211_S_INIT, NULL },
+};
+
+static int
+lkpi_iv_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct fsm_state *s;
+ enum ieee80211_state ostate;
+ int error;
+
+ ic = vap->iv_ic;
+ IEEE80211_LOCK_ASSERT(ic);
+ ostate = vap->iv_state;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ ic_printf(vap->iv_ic, "%s:%d: vap %p nstate %#x arg %#x\n",
+ __func__, __LINE__, vap, nstate, arg);
+#endif
+
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+
+ lhw = ic->ic_softc;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ /* No need to replicate this in most state handlers. */
+ if (ostate == IEEE80211_S_SCAN && nstate != IEEE80211_S_SCAN)
+ lkpi_stop_hw_scan(lhw, vif);
+
+ s = sta_state_fsm;
+
+ } else {
+ ic_printf(vap->iv_ic, "%s: only station mode currently supported: "
+		    "vap %p iv_opmode %d\n", __func__, vap, vap->iv_opmode);
+ return (ENOSYS);
+ }
+
+ error = 0;
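+	/* Walk the transition table and run the first matching handler. */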
+ for (; s->handler != NULL; s++) {
+ if (ostate == s->ostate && nstate == s->nstate) {
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ ic_printf(vap->iv_ic, "%s: new state %d (%s) ->"
+ " %d (%s): arg %d.\n", __func__,
+ ostate, ieee80211_state_name[ostate],
+ nstate, ieee80211_state_name[nstate], arg);
+#endif
+ error = s->handler(vap, nstate, arg);
+ break;
+ }
+ }
+ IEEE80211_LOCK_ASSERT(vap->iv_ic);
+
+ if (s->handler == NULL) {
+ IMPROVE("turn this into a KASSERT\n");
+ ic_printf(vap->iv_ic, "%s: unsupported state transition "
+ "%d (%s) -> %d (%s)\n", __func__,
+ ostate, ieee80211_state_name[ostate],
+ nstate, ieee80211_state_name[nstate]);
+ return (ENOSYS);
+ }
+
+ if (error == EALREADY) {
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ ic_printf(vap->iv_ic, "%s: state transition %d (%s) -> "
+ "%d (%s): iv_newstate already handled: %d.\n",
+ __func__, ostate, ieee80211_state_name[ostate],
+ nstate, ieee80211_state_name[nstate], error);
+#endif
+ return (0);
+ }
+
+ if (error != 0) {
+ ic_printf(vap->iv_ic, "%s: error %d during state transition "
+ "%d (%s) -> %d (%s)\n", __func__, error,
+ ostate, ieee80211_state_name[ostate],
+ nstate, ieee80211_state_name[nstate]);
+ return (error);
+ }
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE)
+ ic_printf(vap->iv_ic, "%s:%d: vap %p nstate %#x arg %#x "
+ "calling net80211 parent\n",
+ __func__, __LINE__, vap, nstate, arg);
+#endif
+
+ return (lvif->iv_newstate(vap, nstate, arg));
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * We overload (*iv_update_bss) as otherwise we have cases in, e.g.,
+ * net80211::ieee80211_sta_join1() where vap->iv_bss gets replaced by a
+ * new node without us knowing and thus our ni/lsta are out of sync.
+ */
+static struct ieee80211_node *
+lkpi_iv_update_bss(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+ struct lkpi_vif *lvif;
+ struct ieee80211_node *rni;
+
+ IEEE80211_LOCK_ASSERT(vap->iv_ic);
+
+ lvif = VAP_TO_LVIF(vap);
+
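+	/*
+	 * Mark the cached bss un-synched; lkpi_sta_scan_to_auth() will
+	 * re-establish the lvif_bss/iv_bss sync.
+	 */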
+ LKPI_80211_LVIF_LOCK(lvif);
+ lvif->lvif_bss_synched = false;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+
+ rni = lvif->iv_update_bss(vap, ni);
+ return (rni);
+}
+
+#ifdef LKPI_80211_WME
+static int
+lkpi_wme_update(struct lkpi_hw *lhw, struct ieee80211vap *vap, bool planned)
+{
+ struct ieee80211com *ic;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct chanAccParams chp;
+ struct wmeParams wmeparr[WME_NUM_AC];
+ struct ieee80211_tx_queue_params txqp;
+ enum ieee80211_bss_changed changed;
+ int error;
+ uint16_t ac;
+
+ hw = LHW_TO_HW(lhw);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ IMPROVE();
+ KASSERT(WME_NUM_AC == IEEE80211_NUM_ACS, ("%s: WME_NUM_AC %d != "
+ "IEEE80211_NUM_ACS %d\n", __func__, WME_NUM_AC, IEEE80211_NUM_ACS));
+
+ if (vap == NULL)
+ return (0);
+
+ if ((vap->iv_flags & IEEE80211_F_WME) == 0)
+ return (0);
+
+ if (lhw->ops->conf_tx == NULL)
+ return (0);
+
+ if (!planned && (vap->iv_state != IEEE80211_S_RUN)) {
+ lhw->update_wme = true;
+ return (0);
+ }
+ lhw->update_wme = false;
+
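+	/* Snapshot the current WME parameters under the net80211 lock. */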
+ ic = lhw->ic;
+ ieee80211_wme_ic_getparams(ic, &chp);
+ IEEE80211_LOCK(ic);
+ for (ac = 0; ac < WME_NUM_AC; ac++)
+ wmeparr[ac] = chp.cap_wmeParams[ac];
+ IEEE80211_UNLOCK(ic);
+
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ /* Configure tx queues (conf_tx) & send BSS_CHANGED_QOS. */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct wmeParams *wmep;
+
+ wmep = &wmeparr[ac];
+ bzero(&txqp, sizeof(txqp));
+ txqp.cw_min = wmep->wmep_logcwmin;
+ txqp.cw_max = wmep->wmep_logcwmax;
+ txqp.txop = wmep->wmep_txopLimit;
+ txqp.aifs = wmep->wmep_aifsn;
+ error = lkpi_80211_mo_conf_tx(hw, vif, /* link_id */0, ac, &txqp);
+ if (error != 0)
+ ic_printf(ic, "%s: conf_tx ac %u failed %d\n",
+ __func__, ac, error);
+ }
+ changed = BSS_CHANGED_QOS;
+ if (!planned)
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed);
+
+ return (changed);
+}
+#endif
+
+static int
+lkpi_ic_wme_update(struct ieee80211com *ic)
+{
+#ifdef LKPI_80211_WME
+ struct ieee80211vap *vap;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+
+ IMPROVE("Use the per-VAP callback in net80211.");
+ vap = TAILQ_FIRST(&ic->ic_vaps);
+ if (vap == NULL)
+ return (0);
+
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ wiphy_lock(hw->wiphy);
+ lkpi_wme_update(lhw, vap, false);
+ wiphy_unlock(hw->wiphy);
+#endif
+ return (0); /* unused */
+}
+
+/*
+ * Change link-layer address on the vif (if the vap is not started/"UP").
+ * This can happen if a user changes 'ether' using ifconfig.
+ * The code is based on net80211/ieee80211_freebsd.c::wlan_iflladdr() but
+ * we use a per-[l]vif event handler to be sure we exist, as we cannot
+ * assume that every vap derives a vif and we have a hard time checking
+ * based on net80211 information alone.
+ * Should this ever become a real problem we could add a callback function
+ * to wlan_iflladdr() to be set optionally but that would be for a
+ * single-consumer (or needs a list) -- was just too complicated for an
+ * otherwise perfect mechanism FreeBSD already provides.
+ */
+static void
+lkpi_vif_iflladdr(void *arg, struct ifnet *ifp)
+{
+ struct epoch_tracker et;
+ struct ieee80211_vif *vif;
+
+ NET_EPOCH_ENTER(et);
+ /* NB: identify vap's by if_transmit; left as an extra check. */
+ if (if_gettransmitfn(ifp) != ieee80211_vap_transmit ||
+ (if_getflags(ifp) & IFF_UP) != 0) {
+ NET_EPOCH_EXIT(et);
+ return;
+ }
+
+ vif = arg;
+ IEEE80211_ADDR_COPY(vif->bss_conf.addr, if_getlladdr(ifp));
+ NET_EPOCH_EXIT(et);
+}
+
+static struct ieee80211vap *
+lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
+ int unit, enum ieee80211_opmode opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211vap *vap;
+ struct ieee80211_vif *vif;
+ struct ieee80211_tx_queue_params txqp;
+ enum ieee80211_bss_changed changed;
+ struct sysctl_oid *node;
+ size_t len;
+ int error, i;
+ uint16_t ac;
+
+ if (!TAILQ_EMPTY(&ic->ic_vaps)) /* 1 so far. Add <n> once this works. */
+ return (NULL);
+
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ len = sizeof(*lvif);
+ len += hw->vif_data_size; /* vif->drv_priv */
+
+ lvif = malloc(len, M_80211_VAP, M_WAITOK | M_ZERO);
+ mtx_init(&lvif->mtx, "lvif", NULL, MTX_DEF);
+ INIT_LIST_HEAD(&lvif->lsta_list);
+ lvif->lvif_bss = NULL;
+ refcount_init(&lvif->nt_unlocked, 0);
+ lvif->lvif_bss_synched = false;
+ vap = LVIF_TO_VAP(lvif);
+
+ vif = LVIF_TO_VIF(lvif);
+ memcpy(vif->addr, mac, IEEE80211_ADDR_LEN);
+ vif->p2p = false;
+ vif->probe_req_reg = false;
+ vif->type = lkpi_opmode_to_vif_type(opmode);
+ lvif->wdev.iftype = vif->type;
+ /* Need to fill in other fields as well. */
+ IMPROVE();
+
+ /* XXX-BZ hardcoded for now! */
+#if 1
+ RCU_INIT_POINTER(vif->bss_conf.chanctx_conf, NULL);
+ vif->bss_conf.vif = vif;
+ /* vap->iv_myaddr is not set until net80211::vap_setup or vap_attach. */
+ IEEE80211_ADDR_COPY(vif->bss_conf.addr, mac);
+ lvif->lvif_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
+ lkpi_vif_iflladdr, vif, EVENTHANDLER_PRI_ANY);
+ vif->bss_conf.link_id = 0; /* Non-MLO operation. */
+ vif->bss_conf.chanreq.oper.width = NL80211_CHAN_WIDTH_20_NOHT;
+ vif->bss_conf.use_short_preamble = false; /* vap->iv_flags IEEE80211_F_SHPREAMBLE */
+ vif->bss_conf.use_short_slot = false; /* vap->iv_flags IEEE80211_F_SHSLOT */
+ vif->bss_conf.qos = false;
+ vif->bss_conf.use_cts_prot = false; /* vap->iv_protmode */
+ vif->bss_conf.ht_operation_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+ vif->cfg.aid = 0;
+ vif->cfg.assoc = false;
+ vif->cfg.idle = true;
+ vif->cfg.ps = false;
+	IMPROVE("Check other fields and then figure out what is left to set elsewhere");
+ /*
+ * We need to initialize it to something as the bss_info_changed call
+ * will try to copy from it in iwlwifi and NULL is a panic.
+ * We will set the proper one in scan_to_auth() before being assoc.
+ */
+ vif->bss_conf.bssid = ieee80211broadcastaddr;
+#endif
+#if 0
+ vif->bss_conf.dtim_period = 0; /* IEEE80211_DTIM_DEFAULT ; must stay 0. */
+ IEEE80211_ADDR_COPY(vif->bss_conf.bssid, bssid);
+ vif->bss_conf.beacon_int = ic->ic_bintval;
+ /* iwlwifi bug. */
+ if (vif->bss_conf.beacon_int < 16)
+ vif->bss_conf.beacon_int = 16;
+#endif
+
+ /* Link Config */
+ vif->link_conf[0] = &vif->bss_conf;
+ for (i = 0; i < nitems(vif->link_conf); i++) {
+ IMPROVE("more than 1 link one day");
+ }
+
+ /* Setup queue defaults; driver may override in (*add_interface). */
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL))
+ vif->hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
+ else if (hw->queues >= IEEE80211_NUM_ACS)
+ vif->hw_queue[i] = i;
+ else
+ vif->hw_queue[i] = 0;
+
+ /* Initialize the queue to running. Stopped? */
+ lvif->hw_queue_stopped[i] = false;
+ }
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
+ IMPROVE();
+
+ error = lkpi_80211_mo_start(hw);
+ if (error != 0) {
+ ic_printf(ic, "%s: failed to start hw: %d\n", __func__, error);
+ mtx_destroy(&lvif->mtx);
+ free(lvif, M_80211_VAP);
+ return (NULL);
+ }
+
+ error = lkpi_80211_mo_add_interface(hw, vif);
+ if (error != 0) {
+ IMPROVE(); /* XXX-BZ mo_stop()? */
+ ic_printf(ic, "%s: failed to add interface: %d\n", __func__, error);
+ mtx_destroy(&lvif->mtx);
+ free(lvif, M_80211_VAP);
+ return (NULL);
+ }
+
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_INSERT_TAIL(&lhw->lvif_head, lvif, lvif_entry);
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+
+ /* Set bss_info. */
+ changed = 0;
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed);
+
+ /* Configure tx queues (conf_tx), default WME & send BSS_CHANGED_QOS. */
+ IMPROVE("Hardcoded values; to fix see 802.11-2016, 9.4.2.29 EDCA Parameter Set element");
+ wiphy_lock(hw->wiphy);
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+
+ bzero(&txqp, sizeof(txqp));
+ txqp.cw_min = 15;
+ txqp.cw_max = 1023;
+ txqp.txop = 0;
+ txqp.aifs = 2;
+ error = lkpi_80211_mo_conf_tx(hw, vif, /* link_id */0, ac, &txqp);
+ if (error != 0)
+ ic_printf(ic, "%s: conf_tx ac %u failed %d\n",
+ __func__, ac, error);
+ }
+ wiphy_unlock(hw->wiphy);
+ changed = BSS_CHANGED_QOS;
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed);
+
+ /* Force MC init. */
+ lkpi_update_mcast_filter(ic, true);
+
+ IMPROVE();
+
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
+
+ /* Override with LinuxKPI method so we can drive mac80211/cfg80211. */
+ lvif->iv_newstate = vap->iv_newstate;
+ vap->iv_newstate = lkpi_iv_newstate;
+ lvif->iv_update_bss = vap->iv_update_bss;
+ vap->iv_update_bss = lkpi_iv_update_bss;
+
+#ifdef LKPI_80211_HW_CRYPTO
+ /* Key management. */
+ if (lkpi_hwcrypto && lhw->ops->set_key != NULL) {
+ vap->iv_key_set = lkpi_iv_key_set;
+ vap->iv_key_delete = lkpi_iv_key_delete;
+ vap->iv_key_update_begin = lkpi_iv_key_update_begin;
+ vap->iv_key_update_end = lkpi_iv_key_update_end;
+ }
+#endif
+
+#ifdef LKPI_80211_HT
+ /* Stay with the iv_ampdu_rxmax,limit / iv_ampdu_density defaults until later. */
+	/* Stay with the iv_ampdu_{rxmax,limit} / iv_ampdu_density defaults until later. */
+
+ ieee80211_ratectl_init(vap);
+
+ /* Complete setup. */
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
+
+#ifdef LKPI_80211_HT
+ /*
+ * Modern chipset/fw/drv will do A-MPDU in drv/fw and fail
+	 * Modern chipset/fw/drv combinations will do A-MPDU in drv/fw and
+	 * will fail to do so if they cannot also do the crypto.
+ if (!lkpi_hwcrypto && ieee80211_hw_check(hw, AMPDU_AGGREGATION))
+ vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX;
+#endif
+#if defined(LKPI_80211_HT)
+ /* 20250125-BZ Keep A-MPDU TX cleared until we sorted out AddBA for all drivers. */
+ vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX;
+#endif
+
+ if (hw->max_listen_interval == 0)
+ hw->max_listen_interval = 7 * (ic->ic_lintval / ic->ic_bintval);
+ hw->conf.listen_interval = hw->max_listen_interval;
+ ic->ic_set_channel(ic);
+
+ /* XXX-BZ do we need to be able to update these? */
+ hw->wiphy->frag_threshold = vap->iv_fragthreshold;
+ lkpi_80211_mo_set_frag_threshold(hw, vap->iv_fragthreshold);
+ hw->wiphy->rts_threshold = vap->iv_rtsthreshold;
+ lkpi_80211_mo_set_rts_threshold(hw, vap->iv_rtsthreshold);
+ /* any others? */
+
+ /* Add per-VIF/VAP sysctls. */
+ sysctl_ctx_init(&lvif->sysctl_ctx);
+
+ node = SYSCTL_ADD_NODE(&lvif->sysctl_ctx,
+ SYSCTL_CHILDREN(&sysctl___compat_linuxkpi_80211),
+ OID_AUTO, if_name(vap->iv_ifp),
+ CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "VIF Information");
+
+ SYSCTL_ADD_PROC(&lvif->sysctl_ctx,
+ SYSCTL_CHILDREN(node), OID_AUTO, "dump_stas",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, lvif, 0,
+ lkpi_80211_dump_stas, "A", "Dump sta statistics of this vif");
+
+ IMPROVE();
+
+ return (vap);
+}
+
+void
+linuxkpi_ieee80211_unregister_hw(struct ieee80211_hw *hw)
+{
+
+ wiphy_unregister(hw->wiphy);
+ linuxkpi_ieee80211_ifdetach(hw);
+
+ IMPROVE();
+}
+
+void
+linuxkpi_ieee80211_restart_hw(struct ieee80211_hw *hw)
+{
+
+ TODO();
+}
+
+static void
+lkpi_ic_vap_delete(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ ic = vap->iv_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ EVENTHANDLER_DEREGISTER(iflladdr_event, lvif->lvif_ifllevent);
+
+ /* Clear up per-VIF/VAP sysctls. */
+ sysctl_ctx_free(&lvif->sysctl_ctx);
+
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_REMOVE(&lhw->lvif_head, lvif, lvif_entry);
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+
+ ieee80211_ratectl_deinit(vap);
+ ieee80211_vap_detach(vap);
+
+ IMPROVE("clear up other bits in this state");
+
+ lkpi_80211_mo_remove_interface(hw, vif);
+
+ /* Single VAP, so we can do this here. */
+ lkpi_80211_mo_stop(hw, false); /* XXX SUSPEND */
+
+ mtx_destroy(&lvif->mtx);
+ free(lvif, M_80211_VAP);
+}
+
+static void
+lkpi_ic_update_mcast(struct ieee80211com *ic)
+{
+
+ lkpi_update_mcast_filter(ic, false);
+ TRACEOK();
+}
+
+static void
+lkpi_ic_update_promisc(struct ieee80211com *ic)
+{
+
+ UNIMPLEMENTED;
+}
+
+static void
+lkpi_ic_update_chw(struct ieee80211com *ic)
+{
+
+ UNIMPLEMENTED;
+}
+
+/* Start / stop device. */
+static void
+lkpi_ic_parent(struct ieee80211com *ic)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+#ifdef HW_START_STOP
+ int error;
+#endif
+ bool start_all;
+
+ IMPROVE();
+
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ start_all = false;
+
+ /* IEEE80211_UNLOCK(ic); */
+ wiphy_lock(hw->wiphy);
+ if (ic->ic_nrunning > 0) {
+#ifdef HW_START_STOP
+ error = lkpi_80211_mo_start(hw);
+ if (error == 0)
+#endif
+ start_all = true;
+ } else {
+#ifdef HW_START_STOP
+ lkpi_80211_mo_stop(hw, false); /* XXX SUSPEND */
+#endif
+ }
+ wiphy_unlock(hw->wiphy);
+ /* IEEE80211_LOCK(ic); */
+
+ if (start_all)
+ ieee80211_start_all(ic);
+}
+
+bool
+linuxkpi_ieee80211_is_ie_id_in_ie_buf(const u8 ie, const u8 *ie_ids,
+ size_t ie_ids_len)
+{
+ int i;
+
+ for (i = 0; i < ie_ids_len; i++) {
+ if (ie == ie_ids[i])
+ return (true);
+ }
+
+ return (false);
+}
+
+/* Return true if skipped; false if error. */
+bool
+linuxkpi_ieee80211_ie_advance(size_t *xp, const u8 *ies, size_t ies_len)
+{
+ size_t x;
+ uint8_t l;
+
+ x = *xp;
+
+ KASSERT(x < ies_len, ("%s: x %zu ies_len %zu ies %p\n",
+ __func__, x, ies_len, ies));
+ l = ies[x + 1];
+ x += 2 + l;
+
+ if (x > ies_len)
+ return (false);
+
+ *xp = x;
+ return (true);
+}
+
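+/*
+ * Build the per-band (extended) supported rates and HT/VHT IEs into the
+ * buffer at p, recording per-band start and length in scan_ies, followed
+ * by the common IEs (WPA IE, application probe request IEs).  Returns a
+ * pointer to the end of the written data.
+ */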
+static uint8_t *
+lkpi_scan_ies_add(uint8_t *p, struct ieee80211_scan_ies *scan_ies,
+ uint32_t band_mask, struct ieee80211vap *vap, struct ieee80211_hw *hw)
+{
+ struct ieee80211_supported_band *supband;
+ struct linuxkpi_ieee80211_channel *channels;
+ struct ieee80211com *ic;
+ const struct ieee80211_channel *chan;
+ const struct ieee80211_rateset *rs;
+ uint8_t *pb;
+ int band, i;
+
+ ic = vap->iv_ic;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if ((band_mask & (1 << band)) == 0)
+ continue;
+
+ supband = hw->wiphy->bands[band];
+ /*
+ * This should not happen;
+ * band_mask is a bitmask of valid bands to scan on.
+ */
+ if (supband == NULL || supband->n_channels == 0)
+ continue;
+
+ /* Find a first channel to get the mode and rates from. */
+ channels = supband->channels;
+ chan = NULL;
+ for (i = 0; i < supband->n_channels; i++) {
+ uint32_t flags;
+
+ if (channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ flags = 0;
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ flags |= IEEE80211_CHAN_G;
+ break;
+ case NL80211_BAND_5GHZ:
+ flags |= IEEE80211_CHAN_A;
+ break;
+ default:
+ panic("%s:%d: unsupported band %d\n",
+ __func__, __LINE__, band);
+ }
+
+ chan = ieee80211_find_channel(ic,
+ channels[i].center_freq, flags);
+ if (chan != NULL)
+ break;
+ }
+
+ /* This really should not happen. */
+ if (chan == NULL)
+ continue;
+
+ pb = p;
+ rs = ieee80211_get_suprates(ic, chan); /* calls chan2mode */
+ p = ieee80211_add_rates(p, rs);
+ p = ieee80211_add_xrates(p, rs);
+
+#if defined(LKPI_80211_HT)
+ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) != 0) {
+ struct ieee80211_channel *c;
+
+ c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan,
+ vap->iv_flags_ht);
+ p = ieee80211_add_htcap_ch(p, vap, c);
+ }
+#endif
+#if defined(LKPI_80211_VHT)
+ if (band == NL80211_BAND_5GHZ &&
+ (vap->iv_vht_flags & IEEE80211_FVHT_VHT) != 0) {
+ struct ieee80211_channel *c;
+
+ c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan,
+ vap->iv_flags_ht);
+ c = ieee80211_vht_adjust_channel(ic, c,
+ vap->iv_vht_flags);
+ p = ieee80211_add_vhtcap_ch(p, vap, c);
+ }
+#endif
+
+ scan_ies->ies[band] = pb;
+ scan_ies->len[band] = p - pb;
+ }
+
+ /* Add common_ies */
+ pb = p;
+ if ((vap->iv_flags & IEEE80211_F_WPA1) != 0 &&
+ vap->iv_wpa_ie != NULL) {
+ memcpy(p, vap->iv_wpa_ie, 2 + vap->iv_wpa_ie[1]);
+ p += 2 + vap->iv_wpa_ie[1];
+ }
+ if (vap->iv_appie_probereq != NULL) {
+ memcpy(p, vap->iv_appie_probereq->ie_data,
+ vap->iv_appie_probereq->ie_len);
+ p += vap->iv_appie_probereq->ie_len;
+ }
+ scan_ies->common_ies = pb;
+ scan_ies->common_ie_len = p - pb;
+
+ return (p);
+}
+
+static void
+lkpi_ic_scan_start(struct ieee80211com *ic)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_scan_state *ss;
+ struct ieee80211vap *vap;
+ int error;
+ bool is_hw_scan;
+
+ lhw = ic->ic_softc;
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) {
+ /* A scan is still running. */
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ return;
+ }
+ is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ ss = ic->ic_scan;
+ vap = ss->ss_vap;
+ if (vap->iv_state != IEEE80211_S_SCAN) {
+ IMPROVE("We need to be able to scan if not in S_SCAN");
+ return;
+ }
+
+ hw = LHW_TO_HW(lhw);
+ if (!is_hw_scan) {
+ /* If hw_scan is cleared clear FEXT_SCAN_OFFLOAD too. */
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD;
+sw_scan:
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ if (vap->iv_state == IEEE80211_S_SCAN)
+ lkpi_hw_conf_idle(hw, false);
+
+ lkpi_80211_mo_sw_scan_start(hw, vif, vif->addr);
+ /* net80211::scan_start() handled PS for us. */
+ IMPROVE();
+ /*
+ * XXX Also means it is too late to flush queues?
+ * Need to check iv_sta_ps or overload?
+ * XXX Want to adjust ss end time / maxdwell?
+ */
+
+ } else {
+ struct ieee80211_scan_request *hw_req;
+ struct linuxkpi_ieee80211_channel *lc, **cpp;
+ struct cfg80211_ssid *ssids;
+ struct cfg80211_scan_6ghz_params *s6gp;
+ size_t chan_len, nchan, ssids_len, s6ghzlen;
+ int band, i, ssid_count, common_ie_len;
+ uint32_t band_mask;
+ uint8_t *ie, *ieend;
+ bool running;
+
+ ssid_count = min(ss->ss_nssid, hw->wiphy->max_scan_ssids);
+ ssids_len = ssid_count * sizeof(*ssids);
+ s6ghzlen = 0 * (sizeof(*s6gp)); /* XXX-BZ */
+
+ band_mask = 0;
+ nchan = 0;
+ if (ieee80211_hw_check(hw, SINGLE_SCAN_ON_ALL_BANDS)) {
+#if 0 /* Avoid net80211 scan lists until it has proper scan offload support. */
+ for (i = ss->ss_next; i < ss->ss_last; i++) {
+ nchan++;
+ band = lkpi_net80211_chan_to_nl80211_band(
+ ss->ss_chans[ss->ss_next + i]);
+ band_mask |= (1 << band);
+ }
+#else
+ /* Instead we scan for all channels all the time. */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ case NL80211_BAND_5GHZ:
+ break;
+ default:
+ continue;
+ }
+ if (hw->wiphy->bands[band] != NULL) {
+ nchan += hw->wiphy->bands[band]->n_channels;
+ band_mask |= (1 << band);
+ }
+ }
+#endif
+ } else {
+ IMPROVE("individual band scans not yet supported, only scanning first band");
+ /* In theory net80211 should drive this. */
+ /*
+ * Probably we need to add local logic for now: deal with
+ * scan_complete and cancel_scan and keep local state.
+ * Also cut the nchan down above.
+ */
+ /* XXX-BZ ath10k does not set this but still does it? &$%^ */
+ }
+
+ chan_len = nchan * (sizeof(lc) + sizeof(*lc));
+
+ common_ie_len = 0;
+ if ((vap->iv_flags & IEEE80211_F_WPA1) != 0 &&
+ vap->iv_wpa_ie != NULL)
+ common_ie_len += vap->iv_wpa_ie[1];
+ if (vap->iv_appie_probereq != NULL)
+ common_ie_len += vap->iv_appie_probereq->ie_len;
+
+ /* We would love to check this at an earlier stage... */
+ if (common_ie_len > hw->wiphy->max_scan_ie_len) {
+ ic_printf(ic, "WARNING: %s: common_ie_len %d > "
+ "wiphy->max_scan_ie_len %d\n", __func__,
+ common_ie_len, hw->wiphy->max_scan_ie_len);
+ }
+
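+ /*
+ * Single allocation: hw_req, followed by the channel pointer
+ * array and the channels themselves, the SSIDs, the (currently
+ * unused) 6GHz params, and finally the per-band and common IEs.
+ */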
+ hw_req = malloc(sizeof(*hw_req) + ssids_len +
+ s6ghzlen + chan_len + lhw->supbands * lhw->scan_ie_len +
+ common_ie_len, M_LKPI80211, M_WAITOK | M_ZERO);
+
+ hw_req->req.flags = 0; /* XXX ??? */
+ /* hw_req->req.wdev */
+ hw_req->req.wiphy = hw->wiphy;
+ hw_req->req.no_cck = false; /* XXX */
+#if 0
+ /* This seems to pessimise default scanning behaviour. */
+ hw_req->req.duration_mandatory = TICKS_2_USEC(ss->ss_mindwell);
+ hw_req->req.duration = TICKS_2_USEC(ss->ss_maxdwell);
+#endif
+#ifdef __notyet__
+ hw_req->req.flags |= NL80211_SCAN_FLAG_RANDOM_ADDR;
+ memcpy(hw_req->req.mac_addr, xxx, IEEE80211_ADDR_LEN);
+ memset(hw_req->req.mac_addr_mask, 0xxx, IEEE80211_ADDR_LEN);
+#endif
+ eth_broadcast_addr(hw_req->req.bssid);
+
+ hw_req->req.n_channels = nchan;
+ cpp = (struct linuxkpi_ieee80211_channel **)(hw_req + 1);
+ lc = (struct linuxkpi_ieee80211_channel *)(cpp + nchan);
+ for (i = 0; i < nchan; i++) {
+ *(cpp + i) =
+ (struct linuxkpi_ieee80211_channel *)(lc + i);
+ }
+#if 0 /* Avoid net80211 scan lists until it has proper scan offload support. */
+ for (i = 0; i < nchan; i++) {
+ struct ieee80211_channel *c;
+
+ c = ss->ss_chans[ss->ss_next + i];
+ lc->hw_value = c->ic_ieee;
+ lc->center_freq = c->ic_freq; /* XXX */
+ /* lc->flags */
+ lc->band = lkpi_net80211_chan_to_nl80211_band(c);
+ lc->max_power = c->ic_maxpower;
+ /* lc-> ... */
+ lc++;
+ }
+#else
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *supband;
+ struct linuxkpi_ieee80211_channel *channels;
+
+ /* Band disabled for scanning? */
+ if ((band_mask & (1 << band)) == 0)
+ continue;
+
+ /* Nothing to scan in band? */
+ supband = hw->wiphy->bands[band];
+ if (supband == NULL || supband->n_channels == 0)
+ continue;
+
+ channels = supband->channels;
+ for (i = 0; i < supband->n_channels; i++) {
+ *lc = channels[i];
+ lc++;
+ }
+ }
+#endif
+
+ hw_req->req.n_ssids = ssid_count;
+ if (hw_req->req.n_ssids > 0) {
+ ssids = (struct cfg80211_ssid *)lc;
+ hw_req->req.ssids = ssids;
+ for (i = 0; i < ssid_count; i++) {
+ ssids->ssid_len = ss->ss_ssid[i].len;
+ memcpy(ssids->ssid, ss->ss_ssid[i].ssid,
+ ss->ss_ssid[i].len);
+ ssids++;
+ }
+ s6gp = (struct cfg80211_scan_6ghz_params *)ssids;
+ } else {
+ s6gp = (struct cfg80211_scan_6ghz_params *)lc;
+ }
+
+ /* 6GHz one day. */
+ hw_req->req.n_6ghz_params = 0;
+ hw_req->req.scan_6ghz_params = NULL;
+ hw_req->req.scan_6ghz = false; /* Weird boolean; not what you think. */
+ /* s6gp->... */
+
+ ie = ieend = (uint8_t *)s6gp;
+ /* Copy per-band IEs, copy common IEs */
+ ieend = lkpi_scan_ies_add(ie, &hw_req->ies, band_mask, vap, hw);
+ hw_req->req.ie = ie;
+ hw_req->req.ie_len = ieend - ie;
+
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ /* Re-check under lock. */
+ running = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
+ if (!running) {
+ KASSERT(lhw->hw_req == NULL, ("%s: ic %p lhw %p hw_req %p "
+ "!= NULL\n", __func__, ic, lhw, lhw->hw_req));
+
+ lhw->scan_flags |= LKPI_LHW_SCAN_RUNNING;
+ lhw->hw_req = hw_req;
+ }
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ if (running) {
+ free(hw_req, M_LKPI80211);
+ return;
+ }
+
+ error = lkpi_80211_mo_hw_scan(hw, vif, hw_req);
+ if (error != 0) {
+ ieee80211_cancel_scan(vap);
+
+ /*
+ * ieee80211_scan_completed must be called whether the scan succeeds
+ * or fails, so in theory the free should happen there and only there.
+ * In practice drivers behave differently:
+ * - ath10k does not return from hw_scan until after scan_complete
+ *   and can then still return an error.
+ * - rtw88 can return 1 or -EBUSY without calling scan_complete.
+ * - iwlwifi can return various errors before the scan starts.
+ * - ...
+ * So we cannot rely on that behaviour and have to check and balance
+ * between both code paths.
+ */
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) {
+ free(lhw->hw_req, M_LKPI80211);
+ lhw->hw_req = NULL;
+ lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING;
+ }
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ /*
+ * XXX-SIGH magic number.
+ * rtw88 has a magic "return 1" if offloading scan is
+ * not possible. Fall back to sw scan in that case.
+ */
+ if (error == 1) {
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ lhw->scan_flags &= ~LKPI_LHW_SCAN_HW;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ /*
+ * XXX If we clear this now and later a driver
+ * thinks it * can do a hw_scan again, we will
+ * currently not re-enable it?
+ */
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD;
+ ieee80211_start_scan(vap,
+ IEEE80211_SCAN_ACTIVE |
+ IEEE80211_SCAN_NOPICK |
+ IEEE80211_SCAN_ONCE,
+ IEEE80211_SCAN_FOREVER,
+ ss->ss_mindwell ? ss->ss_mindwell : msecs_to_ticks(20),
+ ss->ss_maxdwell ? ss->ss_maxdwell : msecs_to_ticks(200),
+ vap->iv_des_nssid, vap->iv_des_ssid);
+ goto sw_scan;
+ }
+
+ ic_printf(ic, "ERROR: %s: hw_scan returned %d\n",
+ __func__, error);
+ }
+ }
+}
+
+static void
+lkpi_ic_scan_end(struct ieee80211com *ic)
+{
+ struct lkpi_hw *lhw;
+ bool is_hw_scan;
+
+ lhw = ic->ic_softc;
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) == 0) {
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ return;
+ }
+ is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ if (!is_hw_scan) {
+ struct ieee80211_scan_state *ss;
+ struct ieee80211vap *vap;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+
+ ss = ic->ic_scan;
+ vap = ss->ss_vap;
+ hw = LHW_TO_HW(lhw);
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ lkpi_80211_mo_sw_scan_complete(hw, vif);
+
+ /* Send PS to stop buffering if net80211 does not do it for us? */
+
+ if (vap->iv_state == IEEE80211_S_SCAN)
+ lkpi_hw_conf_idle(hw, true);
+ }
+}
+
+static void
+lkpi_ic_scan_curchan(struct ieee80211_scan_state *ss,
+ unsigned long maxdwell)
+{
+ struct lkpi_hw *lhw;
+ bool is_hw_scan;
+
+ lhw = ss->ss_ic->ic_softc;
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ if (!is_hw_scan)
+ lhw->ic_scan_curchan(ss, maxdwell);
+}
+
+static void
+lkpi_ic_scan_mindwell(struct ieee80211_scan_state *ss)
+{
+ struct lkpi_hw *lhw;
+ bool is_hw_scan;
+
+ lhw = ss->ss_ic->ic_softc;
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ if (!is_hw_scan)
+ lhw->ic_scan_mindwell(ss);
+}
+
+static void
+lkpi_ic_set_channel(struct ieee80211com *ic)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *c;
+ struct linuxkpi_ieee80211_channel *chan;
+ int error;
+ bool hw_scan_running;
+
+ lhw = ic->ic_softc;
+
+ /* If we do not support (*config)() save us the work. */
+ if (lhw->ops->config == NULL)
+ return;
+
+ /* If we have a hw_scan running do not switch channels. */
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ hw_scan_running =
+ (lhw->scan_flags & (LKPI_LHW_SCAN_RUNNING|LKPI_LHW_SCAN_HW)) ==
+ (LKPI_LHW_SCAN_RUNNING|LKPI_LHW_SCAN_HW);
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ if (hw_scan_running)
+ return;
+
+ c = ic->ic_curchan;
+ if (c == NULL || c == IEEE80211_CHAN_ANYC) {
+ ic_printf(ic, "%s: c %p ops->config %p\n", __func__,
+ c, lhw->ops->config);
+ return;
+ }
+
+ chan = lkpi_find_lkpi80211_chan(lhw, c);
+ if (chan == NULL) {
+ ic_printf(ic, "%s: c %p chan %p\n", __func__,
+ c, chan);
+ return;
+ }
+
+ /* XXX max power for scanning? */
+ IMPROVE();
+
+ hw = LHW_TO_HW(lhw);
+ cfg80211_chandef_create(&hw->conf.chandef, chan,
+#ifdef LKPI_80211_HT
+ (ic->ic_flags_ht & IEEE80211_FHT_HT) ? NL80211_CHAN_HT20 :
+#endif
+ NL80211_CHAN_NO_HT);
+
+ error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (error != 0 && error != EOPNOTSUPP) {
+ ic_printf(ic, "ERROR: %s: config %#0x returned %d\n",
+ __func__, IEEE80211_CONF_CHANGE_CHANNEL, error);
+ /* XXX should we unroll to the previous chandef? */
+ IMPROVE();
+ } else {
+ /* Update radiotap channels as well. */
+ lhw->rtap_tx.wt_chan_freq = htole16(c->ic_freq);
+ lhw->rtap_tx.wt_chan_flags = htole16(c->ic_flags);
+ lhw->rtap_rx.wr_chan_freq = htole16(c->ic_freq);
+ lhw->rtap_rx.wr_chan_flags = htole16(c->ic_flags);
+ }
+
+ /* Currently PS is hard coded off! Not sure it belongs here. */
+ IMPROVE();
+ if (ieee80211_hw_check(hw, SUPPORTS_PS) &&
+ (hw->conf.flags & IEEE80211_CONF_PS) != 0) {
+ hw->conf.flags &= ~IEEE80211_CONF_PS;
+ error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_PS);
+ if (error != 0 && error != EOPNOTSUPP)
+ ic_printf(ic, "ERROR: %s: config %#0x returned "
+ "%d\n", __func__, IEEE80211_CONF_CHANGE_PS,
+ error);
+ }
+}
+
+static struct ieee80211_node *
+lkpi_ic_node_alloc(struct ieee80211vap *vap,
+ const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_node *ni;
+ struct ieee80211_hw *hw;
+ struct lkpi_sta *lsta;
+
+ ic = vap->iv_ic;
+ lhw = ic->ic_softc;
+
+ /* We keep allocations de-coupled so we can deal with the two worlds. */
+ if (lhw->ic_node_alloc == NULL)
+ return (NULL);
+
+ ni = lhw->ic_node_alloc(vap, mac);
+ if (ni == NULL)
+ return (NULL);
+
+ hw = LHW_TO_HW(lhw);
+ lsta = lkpi_lsta_alloc(vap, mac, hw, ni);
+ if (lsta == NULL) {
+ if (lhw->ic_node_free != NULL)
+ lhw->ic_node_free(ni);
+ return (NULL);
+ }
+
+ return (ni);
+}
+
+static int
+lkpi_ic_node_init(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ int error;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ if (lhw->ic_node_init != NULL) {
+ error = lhw->ic_node_init(ni);
+ if (error != 0)
+ return (error);
+ }
+
+ /* XXX-BZ Sync other state over. */
+ IMPROVE();
+
+ return (0);
+}
+
+static void
+lkpi_ic_node_cleanup(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ /* XXX-BZ remove from driver, ... */
+ IMPROVE();
+
+ if (lhw->ic_node_cleanup != NULL)
+ lhw->ic_node_cleanup(ni);
+}
+
+static void
+lkpi_ic_node_free(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct lkpi_sta *lsta;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ lsta = ni->ni_drv_data;
+
+ /* KASSERT lsta is not NULL here. Print ni/ni_refcnt. */
+
+ /*
+ * Pass in the original ni just in case of error we could check that
+ * it is the same as lsta->ni.
+ */
+ lkpi_lsta_free(lsta, ni);
+
+ if (lhw->ic_node_free != NULL)
+ lhw->ic_node_free(ni);
+}
+
+/*
+ * lkpi_xmit() called from both the (*ic_raw_xmit) as well as the (*ic_transmit)
+ * call path.
+ * Unfortunately they have slightly different invariants. See
+ * ieee80211_raw_output() and ieee80211_parent_xmitpkt().
+ * Both take care of the ni reference in case of error, and otherwise during
+ * the callback after transmit.
+ * The difference is that in case of error (*ic_raw_xmit) needs us to release
+ * the mbuf, while (*ic_transmit) will free the mbuf itself.
+ */
+static int
+lkpi_xmit(struct ieee80211_node *ni, struct mbuf *m,
+ const struct ieee80211_bpf_params *params __unused,
+ bool freem)
+{
+ struct lkpi_sta *lsta;
+ int error;
+
+ lsta = ni->ni_drv_data;
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+#if 0
+ if (!lsta->added_to_drv || !lsta->txq_ready) {
+#else
+ /*
+ * Back out this part of 886653492945f, which breaks rtw88 and,
+ * in general, drivers without (*sta_state)() that only have the
+ * legacy (*sta_add)() fallback.
+ */
+ if (!lsta->txq_ready) {
+#endif
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+ if (freem)
+ m_free(m);
+ return (ENETDOWN);
+ }
+
+ /* Queue the packet and enqueue the task to handle it. */
+ error = mbufq_enqueue(&lsta->txq, m);
+ if (error != 0) {
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+ if (freem)
+ m_free(m);
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ ic_printf(ni->ni_ic, "%s: mbufq_enqueue failed: %d\n",
+ __func__, error);
+#endif
+ return (ENETDOWN);
+ }
+ taskqueue_enqueue(taskqueue_thread, &lsta->txq_task);
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d lsta %p ni %p %6D mbuf_qlen %d\n",
+ __func__, __LINE__, lsta, ni, ni->ni_macaddr, ":",
+ mbufq_len(&lsta->txq));
+#endif
+
+ return (0);
+}
+
+static int
+lkpi_ic_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
+ const struct ieee80211_bpf_params *params __unused)
+{
+ return (lkpi_xmit(ni, m, NULL, true));
+}
+
+#ifdef LKPI_80211_HW_CRYPTO
+/*
+ * This is a bit of a hack given we know we are operating on a
+ * single frame and we know that hardware will deal with it.
+ * But otherwise the enmic bit and the encrypt bit need to be
+ * decoupled.
+ */
+static int
+lkpi_hw_crypto_prepare_tkip(struct ieee80211_key *k,
+ struct ieee80211_key_conf *kc, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ uint32_t hlen, hdrlen;
+ uint8_t *p;
+
+ /*
+ * TKIP only happens on data.
+ */
+ hdr = (void *)skb->data;
+ if (!ieee80211_is_data_present(hdr->frame_control))
+ return (0);
+
+ /*
+ * "enmic" (though we do not do that).
+ */
+ /* any conditions to not apply this? */
+ if (skb_tailroom(skb) < k->wk_cipher->ic_miclen)
+ return (ENOBUFS);
+
+ p = skb_put(skb, k->wk_cipher->ic_miclen);
+ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) != 0)
+ goto encrypt;
+
+ /*
+ * (*enmic) which we hopefully do not have to do with hw accel.
+ * That means if we make it here we have a problem.
+ */
+ TODO("(*enmic)");
+ return (ENXIO);
+
+encrypt:
+ /*
+ * "encrypt" (though we do not do that).
+ */
+ /*
+ * Check if we have anything to do as requested by driver
+ * or if we are done?
+ */
+ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) == 0 &&
+ (kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV) == 0)
+ return (0);
+
+ hlen = k->wk_cipher->ic_header;
+ if (skb_headroom(skb) < hlen)
+ return (ENOBUFS);
+
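+ /*
+ * Make room for the IV: push hlen bytes and slide the 802.11
+ * header to the new front so the gap ends up right after the
+ * header.
+ */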
+ hdr = (void *)skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ p = skb_push(skb, hlen);
+ memmove(p, p + hlen, hdrlen);
+
+ /* If the driver only requested space we are done. */
+ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) != 0)
+ return (0);
+
+ p += hdrlen;
+ k->wk_cipher->ic_setiv(k, p);
+
+ /* If we make it here we need to do sw encryption. */
+ TODO("sw encrypt");
+ return (ENXIO);
+}
+
+static int
+lkpi_hw_crypto_prepare_ccmp(struct ieee80211_key *k,
+ struct ieee80211_key_conf *kc, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ uint32_t hlen, hdrlen;
+ uint8_t *p;
+
+ hdr = (void *)skb->data;
+
+ /*
+ * Check if we have anything to do as requested by driver
+ * or if we are done?
+ */
+ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) == 0 &&
+ (kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV) == 0 &&
+ /* MFP */
+ !((kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) != 0 &&
+ ieee80211_is_mgmt(hdr->frame_control)))
+ return (0);
+
+ hlen = k->wk_cipher->ic_header;
+ if (skb_headroom(skb) < hlen)
+ return (ENOBUFS);
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ p = skb_push(skb, hlen);
+ memmove(p, p + hlen, hdrlen);
+
+ /* If the driver only requested space we are done. */
+ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) != 0)
+ return (0);
+
+ p += hdrlen;
+ k->wk_cipher->ic_setiv(k, p);
+
+ return (0);
+}
+
+static int
+lkpi_hw_crypto_prepare(struct lkpi_sta *lsta, struct ieee80211_key *k,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info;
+ struct ieee80211_key_conf *kc;
+
+ KASSERT(lsta != NULL, ("%s: lsta is NULL", __func__));
+ KASSERT(k != NULL, ("%s: key is NULL", __func__));
+ KASSERT(skb != NULL, ("%s: skb is NULL", __func__));
+
+ kc = lsta->kc[k->wk_keyix];
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.hw_key = kc;
+
+ /* MUST NOT happen. KASSERT? */
+ if (kc == NULL) {
+ ic_printf(lsta->ni->ni_ic, "%s: lsta %p k %p skb %p, "
+ "kc is NULL on hw crypto offload\n", __func__, lsta, k, skb);
+ return (ENXIO);
+ }
+
+ switch (kc->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ return (lkpi_hw_crypto_prepare_tkip(k, kc, skb));
+ case WLAN_CIPHER_SUITE_CCMP:
+ return (lkpi_hw_crypto_prepare_ccmp(k, kc, skb));
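+ /* GCMP uses the same IV-space handling as CCMP, so reuse that path. */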
+ case WLAN_CIPHER_SUITE_GCMP:
+ return (lkpi_hw_crypto_prepare_ccmp(k, kc, skb));
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ default:
+ ic_printf(lsta->ni->ni_ic, "%s: lsta %p k %p kc %p skb %p, "
+ "unsupported cipher suite %u (%s)\n", __func__, lsta, k, kc,
+ skb, kc->cipher, lkpi_cipher_suite_to_name(kc->cipher));
+ return (EOPNOTSUPP);
+ }
+}
+
+static uint8_t
+lkpi_hw_crypto_tailroom(struct lkpi_sta *lsta, struct ieee80211_key *k)
+{
+ struct ieee80211_key_conf *kc;
+
+ kc = lsta->kc[k->wk_keyix];
+ if (kc == NULL)
+ return (0);
+
+ IMPROVE("which other flags need tailroom?");
+ if (kc->flags & (IEEE80211_KEY_FLAG_PUT_MIC_SPACE))
+ return (32); /* Large enough to hold everything and pow2. */
+
+ return (0);
+}
+#endif
+
+static void
+lkpi_80211_txq_tx_one(struct lkpi_sta *lsta, struct mbuf *m)
+{
+ struct ieee80211_node *ni;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *k;
+ struct sk_buff *skb;
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_channel *c;
+ struct ieee80211_tx_control control;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct lkpi_txq *ltxq;
+ void *buf;
+ ieee80211_keyix keyix;
+ uint8_t ac, tid, tailroom;
+
+ M_ASSERTPKTHDR(m);
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX_DUMP)
+ hexdump(mtod(m, const void *), m->m_len, "RAW TX (plain) ", 0);
+#endif
+
+ ni = lsta->ni;
+ k = NULL;
+ keyix = IEEE80211_KEYIX_NONE;
+ wh = mtod(m, struct ieee80211_frame *);
+ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
+
+#ifdef LKPI_80211_HW_CRYPTO
+ if (lkpi_hwcrypto) {
+ k = ieee80211_crypto_get_txkey(ni, m);
+ if (k != NULL && lsta->kc[k->wk_keyix] != NULL)
+ keyix = k->wk_keyix;
+ }
+#endif
+
+ /* Encrypt the frame if need be. */
+ if (keyix == IEEE80211_KEYIX_NONE) {
+ /* Retrieve key for TX && do software encryption. */
+ k = ieee80211_crypto_encap(ni, m);
+ if (k == NULL) {
+ ieee80211_free_node(ni);
+ m_freem(m);
+ return;
+ }
+ }
+ }
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ c = ni->ni_chan;
+
+ if (ieee80211_radiotap_active_vap(ni->ni_vap)) {
+ struct lkpi_radiotap_tx_hdr *rtap;
+
+ rtap = &lhw->rtap_tx;
+ rtap->wt_flags = 0;
+ if (k != NULL)
+ rtap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
+ if (m->m_flags & M_FRAG)
+ rtap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
+ IMPROVE();
+ rtap->wt_rate = 0;
+ if (c != NULL && c != IEEE80211_CHAN_ANYC) {
+ rtap->wt_chan_freq = htole16(c->ic_freq);
+ rtap->wt_chan_flags = htole16(c->ic_flags);
+ }
+
+ ieee80211_radiotap_tx(ni->ni_vap, m);
+ }
+
+#ifdef LKPI_80211_HW_CRYPTO
+ if (lkpi_hwcrypto && keyix != IEEE80211_KEYIX_NONE)
+ tailroom = lkpi_hw_crypto_tailroom(lsta, k);
+ else
+#endif
+ tailroom = 0;
+
+ /*
+ * net80211 should handle hw->extra_tx_headroom.
+ * Though for as long as we are copying we don't mind.
+ * XXX-BZ rtw88 asks for too much headroom for ipv6+tcp:
+ * https://lists.freebsd.org/archives/freebsd-transport/2022-February/000012.html
+ */
+ skb = dev_alloc_skb(hw->extra_tx_headroom + tailroom + m->m_pkthdr.len);
+ if (skb == NULL) {
+ static uint8_t skb_alloc_failures = 0;
+
+ if (skb_alloc_failures++ == 0) {
+ int tid;
+
+ sta = LSTA_TO_STA(lsta);
+ ic_printf(ic, "ERROR %s: skb alloc failed %d + %d, lsta %p sta %p ni %p\n",
+ __func__, hw->extra_tx_headroom, m->m_pkthdr.len, lsta, sta, ni);
+ for (tid = 0; tid < nitems(sta->txq); tid++) {
+ if (sta->txq[tid] == NULL)
+ continue;
+ ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
+ ic_printf(ic, " tid %d ltxq %p seen_dequeue %d stopped %d skb_queue_len %u\n",
+ tid, ltxq, ltxq->seen_dequeue, ltxq->stopped, skb_queue_len(&ltxq->skbq));
+ }
+ }
+ ieee80211_free_node(ni);
+ m_freem(m);
+ return;
+ }
+ skb_reserve(skb, hw->extra_tx_headroom);
+
+ /* XXX-BZ we need a SKB version understanding mbuf. */
+ /* Save the mbuf for ieee80211_tx_complete(). */
+ skb->m_free_func = lkpi_ieee80211_free_skb_mbuf;
+ skb->m = m;
+#if 0
+ skb_put_data(skb, m->m_data, m->m_pkthdr.len);
+#else
+ buf = skb_put(skb, m->m_pkthdr.len);
+ m_copydata(m, 0, m->m_pkthdr.len, buf);
+#endif
+ /* Save the ni. */
+ m->m_pkthdr.PH_loc.ptr = ni;
+
+ lvif = VAP_TO_LVIF(ni->ni_vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ hdr = (void *)skb->data;
+ tid = linuxkpi_ieee80211_get_tid(hdr, true);
+ if (tid == IEEE80211_NONQOS_TID) { /* == IEEE80211_NUM_TIDS */
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ /* MGMT and CTRL frames go on TID 7/VO. */
+ skb->priority = 7;
+ ac = IEEE80211_AC_VO;
+ } else {
+ /* Other non-QOS traffic goes to BE. */
+ /* Contrary to net80211 we MUST NOT promote M_EAPOL. */
+ skb->priority = 0;
+ ac = IEEE80211_AC_BE;
+ }
+ } else {
+ skb->priority = tid & IEEE80211_QOS_CTL_TID_MASK;
+ ac = ieee80211e_up_to_ac[tid & 7];
+ }
+ skb_set_queue_mapping(skb, ac);
+
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ /* Slight delay; probably only happens on scanning so fine? */
+ if (c == NULL || c == IEEE80211_CHAN_ANYC)
+ c = ic->ic_curchan;
+ info->band = lkpi_net80211_chan_to_nl80211_band(c);
+ info->hw_queue = vif->hw_queue[ac];
+ if (m->m_flags & M_EAPOL)
+ info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ info->control.vif = vif;
+ /* XXX-BZ info->control.rates */
+#ifdef __notyet__
+#ifdef LKPI_80211_HT
+ info->control.rts_cts_rate_idx=
+ info->control.use_rts= /* RTS */
+ info->control.use_cts_prot= /* RTS/CTS*/
+#endif
+#endif
+
+ sta = LSTA_TO_STA(lsta);
+#ifdef LKPI_80211_HW_CRYPTO
+ if (lkpi_hwcrypto && keyix != IEEE80211_KEYIX_NONE) {
+ int error;
+
+ error = lkpi_hw_crypto_prepare(lsta, k, skb);
+ if (error != 0) {
+ /*
+ * We only have to free the skb which will free the
+ * mbuf and release the reference on the ni.
+ */
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
+#endif
+
+ IMPROVE();
+
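+ /*
+ * Pick a mac80211 TXQ: non-data frames go to the per-STA management
+ * queue (slot IEEE80211_NUM_TIDS) on station vifs, data frames to
+ * the per-TID queue.  Without a suitable TXQ fall back to a direct
+ * (*tx)() call below.
+ */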
+ ltxq = NULL;
+ if (!ieee80211_is_data_present(hdr->frame_control)) {
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ lsta->added_to_drv &&
+ sta->txq[IEEE80211_NUM_TIDS] != NULL)
+ ltxq = TXQ_TO_LTXQ(sta->txq[IEEE80211_NUM_TIDS]);
+ } else if (lsta->added_to_drv &&
+ sta->txq[skb->priority] != NULL) {
+ ltxq = TXQ_TO_LTXQ(sta->txq[skb->priority]);
+ }
+ if (ltxq == NULL)
+ goto ops_tx;
+
+ KASSERT(ltxq != NULL, ("%s: lsta %p sta %p m %p skb %p "
+ "ltxq %p != NULL\n", __func__, lsta, sta, m, skb, ltxq));
+
+ LKPI_80211_LTXQ_LOCK(ltxq);
+ skb_queue_tail(&ltxq->skbq, skb);
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d mo_wake_tx_queue :: %d %lu lsta %p sta %p "
+ "ni %p %6D skb %p lxtq %p { qlen %u, ac %d tid %u } "
+ "WAKE_TX_Q ac %d prio %u qmap %u\n",
+ __func__, __LINE__,
+ curthread->td_tid, jiffies,
+ lsta, sta, ni, ni->ni_macaddr, ":", skb, ltxq,
+ skb_queue_len(&ltxq->skbq), ltxq->txq.ac,
+ ltxq->txq.tid, ac, skb->priority, skb->qmap);
+#endif
+ LKPI_80211_LTXQ_UNLOCK(ltxq);
+ wiphy_lock(hw->wiphy);
+ lkpi_80211_mo_wake_tx_queue(hw, &ltxq->txq);
+ wiphy_unlock(hw->wiphy);
+ return;
+
+ops_tx:
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d mo_tx :: lsta %p sta %p ni %p %6D skb %p "
+ "TX ac %d prio %u qmap %u\n",
+ __func__, __LINE__, lsta, sta, ni, ni->ni_macaddr, ":",
+ skb, ac, skb->priority, skb->qmap);
+#endif
+ memset(&control, 0, sizeof(control));
+ control.sta = sta;
+ wiphy_lock(hw->wiphy);
+ lkpi_80211_mo_tx(hw, &control, skb);
+ wiphy_unlock(hw->wiphy);
+}
+
+static void
+lkpi_80211_txq_task(void *ctx, int pending)
+{
+ struct lkpi_sta *lsta;
+ struct mbufq mq;
+ struct mbuf *m;
+ bool shall_tx;
+
+ lsta = ctx;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("%s:%d lsta %p ni %p %6D pending %d mbuf_qlen %d\n",
+ __func__, __LINE__, lsta, lsta->ni, lsta->ni->ni_macaddr, ":",
+ pending, mbufq_len(&lsta->txq));
+#endif
+
+ mbufq_init(&mq, IFQ_MAXLEN);
+
+ LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ /*
+ * Do not re-check lsta->txq_ready here; we may still have a pending
+ * disassoc/deauth frame. Conversely, if txq_ready is false we no
+ * longer have a valid sta in the firmware, so there is no point in
+ * trying to TX.
+ * We also use txq_ready as a semaphore and will drain the txq manually
+ * if needed on our way towards SCAN/INIT in the state machine.
+ */
+#if 0
+ shall_tx = lsta->added_to_drv && lsta->txq_ready;
+#else
+ /*
+ * Back out this part of 886653492945f, which breaks rtw88 and,
+ * in general, drivers without (*sta_state)() that only have the
+ * legacy (*sta_add)() fallback.
+ */
+ shall_tx = lsta->txq_ready;
+#endif
+ if (__predict_true(shall_tx))
+ mbufq_concat(&mq, &lsta->txq);
+ /*
+ * else a state change will push the packets out manually or
+ * lkpi_lsta_free() will drain the lsta->txq and free the mbufs.
+ */
+ LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+
+ m = mbufq_dequeue(&mq);
+ while (m != NULL) {
+ lkpi_80211_txq_tx_one(lsta, m);
+ m = mbufq_dequeue(&mq);
+ }
+}
+
+static int
+lkpi_ic_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+
+ /* XXX TODO */
+ IMPROVE();
+
+ /* Quick and dirty cheating hack. */
+ struct ieee80211_node *ni;
+
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ return (lkpi_xmit(ni, m, NULL, false));
+}
+
+#ifdef LKPI_80211_HT
+static int
+lkpi_ic_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT("recv_action called; nothing to do in lkpi; make debugging");
+
+ return (lhw->ic_recv_action(ni, wh, frm, efrm));
+}
+
+static int
+lkpi_ic_send_action(struct ieee80211_node *ni, int category, int action, void *sa)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT("send_action called; nothing to do in lkpi; make debugging");
+
+ return (lhw->ic_send_action(ni, category, action, sa));
+}
+
+
+static int
+lkpi_ic_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT("ieee80211_ampdu_enable called; nothing to do in lkpi for now; make debugging");
+
+ return (lhw->ic_ampdu_enable(ni, tap));
+}
+
+/*
+ * (*ic_addba_request)() is called by ieee80211_ampdu_request() before
+ * calling send_action(CAT_BA, BA_ADDBA_REQUEST).
+ *
+ * NB: returns 0 on ERROR!
+ */
+static int
+lkpi_ic_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int dialogtoken, int baparamset, int batimeout)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params = { };
+ int error;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ if (!lsta->added_to_drv) {
+ ic_printf(ic, "%s: lsta %p ni %p, sta %p not added to firmware\n",
+ __func__, lsta, ni, sta);
+ return (0);
+ }
+
+ params.sta = sta;
+ params.action = IEEE80211_AMPDU_TX_START;
+ /* Keep 0 here! */
+ params.buf_size = 0;
+ params.timeout = 0;
+ params.ssn = tap->txa_start & (IEEE80211_SEQ_RANGE-1);
+ params.tid = tap->txa_tid;
+ params.amsdu = false;
+
+ IEEE80211_UNLOCK(ic);
+ wiphy_lock(hw->wiphy);
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(ic);
+ if (error != 0) {
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p tap %p\n",
+ __func__, error, ni, tap);
+ return (0);
+ }
+
+ return (lhw->ic_addba_request(ni, tap, dialogtoken, baparamset, batimeout));
+}
+
+/*
+ * (*ic_addba_response)() is called from ht_recv_action_ba_addba_response()
+ * and calls the default ieee80211_addba_response() which always returns 1.
+ *
+ * NB: No error checking in net80211!
+ * Staying with 0 is an error.
+ */
+static int
+lkpi_ic_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int status, int baparamset, int batimeout)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params = { };
+ int error;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ if (!lsta->added_to_drv) {
+ ic_printf(ic, "%s: lsta %p ni %p, sta %p not added to firmware\n",
+ __func__, lsta, ni, sta);
+ return (0);
+ }
+
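+ /*
+ * On success move the session to TX_OPERATIONAL with the negotiated
+ * window size; on failure tear the driver-side session down again.
+ */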
+ if (status == IEEE80211_STATUS_SUCCESS) {
+ params.sta = sta;
+ params.action = IEEE80211_AMPDU_TX_OPERATIONAL;
+ params.buf_size = tap->txa_wnd;
+ params.timeout = 0;
+ params.ssn = 0;
+ params.tid = tap->txa_tid;
+ if ((tap->txa_flags & IEEE80211_AGGR_AMSDU) != 0)
+ params.amsdu = true;
+ else
+ params.amsdu = false;
+ } else {
+ /* We need to free the allocated resources. */
+ params.sta = sta;
+ switch (status) {
+ /* params.action = FLUSH, FLUSH_CONT */
+ default:
+ params.action = IEEE80211_AMPDU_TX_STOP_CONT;
+ break;
+ }
+ params.buf_size = 0;
+ params.timeout = 0;
+ params.ssn = 0;
+ params.tid = tap->txa_tid;
+ params.amsdu = false;
+ }
+
+ IEEE80211_UNLOCK(ic);
+ wiphy_lock(hw->wiphy);
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(ic);
+ if (error != 0) {
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p tap %p\n",
+ __func__, error, ni, tap);
+ return (0);
+ }
+
+ IMPROVE_HT("who unleashes the TXQ? and when?, do we need to ni->ni_txseqs[tid] = tap->txa_start & 0xfff;");
+
+ return (lhw->ic_addba_response(ni, tap, status, baparamset, batimeout));
+}
+
+/*
+ * (*ic_addba_stop)() is called from ampdu_tx_stop(), ht_recv_action_ba_delba(),
+ * and ieee80211_ampdu_stop() and calls the default ieee80211_addba_stop().
+ */
+static void
+lkpi_ic_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params = { };
+ int error;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ if (!lsta->added_to_drv) {
+ ic_printf(ic, "%s: lsta %p ni %p, sta %p not added to firmware\n",
+ __func__, lsta, ni, sta);
+ goto n80211;
+ }
+
+ /* We need to free the allocated resources. */
+ params.sta = sta;
+ IMPROVE("net80211 does not provide a reason to us");
+ params.action = IEEE80211_AMPDU_TX_STOP_CONT; /* params.action = FLUSH, FLUSH_CONT */
+ params.buf_size = 0;
+ params.timeout = 0;
+ params.ssn = 0;
+ params.tid = tap->txa_tid;
+ params.amsdu = false;
+
+ IEEE80211_UNLOCK(ic);
+ wiphy_lock(hw->wiphy);
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ wiphy_unlock(hw->wiphy);
+ IEEE80211_LOCK(ic);
+ if (error != 0) {
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p tap %p\n",
+ __func__, error, ni, tap);
+ goto n80211;
+ }
+
+ IMPROVE_HT("anyting else?");
+
+n80211:
+ lhw->ic_addba_stop(ni, tap);
+}
+
+static void
+lkpi_ic_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT();
+
+ lhw->ic_addba_response_timeout(ni, tap);
+}
+
+static void
+lkpi_ic_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int status)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ IMPROVE_HT();
+
+ lhw->ic_bar_response(ni, tap, status);
+}
+
+static int
+lkpi_ic_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
+ int baparamset, int batimeout, int baseqctl)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params = { };
+ int error;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ IEEE80211_UNLOCK_ASSERT(ic);
+
+ if (!lsta->added_to_drv) {
+ ic_printf(ic, "%s: lsta %p ni %p vap %p, sta %p not added to firmware\n",
+ __func__, lsta, ni, vap, sta);
+ return (-ENXIO);
+ }
+
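+ /*
+ * Translate the ADDBA request into mac80211 ampdu_params: the buffer
+ * size comes from the BA parameter set and is clamped to the HT
+ * maximum and the hardware RX aggregation limit; SSN and TID come
+ * from the sequence control and BA parameter set respectively.
+ */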
+ params.sta = sta;
+ params.action = IEEE80211_AMPDU_RX_START;
+ params.buf_size = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
+ if (params.buf_size == 0)
+ params.buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
+ else
+ params.buf_size = min(params.buf_size, IEEE80211_MAX_AMPDU_BUF_HT);
+ if (hw->max_rx_aggregation_subframes > 0 &&
+ params.buf_size > hw->max_rx_aggregation_subframes)
+ params.buf_size = hw->max_rx_aggregation_subframes;
+ params.timeout = le16toh(batimeout);
+ params.ssn = _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
+ params.tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
+
+ /* Based on net80211::ampdu_rx_start(). */
+ if ((vap->iv_htcaps & IEEE80211_HTC_RX_AMSDU_AMPDU) &&
+ (_IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_AMSDU)))
+ params.amsdu = true;
+ else
+ params.amsdu = false;
+
+ wiphy_lock(hw->wiphy);
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ wiphy_unlock(hw->wiphy);
+ if (error != 0) {
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p rap %p\n",
+ __func__, error, ni, rap);
+ return (error);
+ }
+
+ if (!ieee80211_hw_check(hw, SUPPORTS_REORDERING_BUFFER)) {
+ IMPROVE("%s: TODO: SUPPORTS_REORDERING_BUFFER not set; check net80211\n", __func__);
+ }
+
+ IMPROVE_HT("net80211 is missing the error check on return and assumes success");
+
+ error = lhw->ic_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
+ return (error);
+}
+
+static void
+lkpi_ic_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct ieee80211vap *vap;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+ struct ieee80211_ampdu_params params = { };
+ int error;
+ uint8_t tid;
+ bool ic_locked;
+
+ ic = ni->ni_ic;
+ lhw = ic->ic_softc;
+
+ /*
+ * We should not (cannot) call into mac80211 ops with AMPDU_RX_STOP
+ * if we did not START; some drivers pass it down to firmware which
+ * will simply barf. Yet net80211 calls ieee80211_ht_node_cleanup()
+ * from ieee80211_ht_node_init() (amongst others), which iterates
+ * over all TIDs and calls ic_ampdu_rx_stop() unconditionally.
+ * XXX net80211 should probably be more "gentle" in these cases and
+ * track some state itself.
+ */
+ if ((rap->rxa_flags & IEEE80211_AGGR_RUNNING) == 0)
+ goto net80211_only;
+
+ hw = LHW_TO_HW(lhw);
+ vap = ni->ni_vap;
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+ lsta = ni->ni_drv_data;
+ sta = LSTA_TO_STA(lsta);
+
+ IMPROVE_HT("This really should be passed from ht_recv_action_ba_delba.");
+ for (tid = 0; tid < WME_NUM_TID; tid++) {
+ if (&ni->ni_rx_ampdu[tid] == rap)
+ break;
+ }
+
+ params.sta = sta;
+ params.action = IEEE80211_AMPDU_RX_STOP;
+ params.buf_size = 0;
+ params.timeout = 0;
+ params.ssn = 0;
+ params.tid = tid;
+ params.amsdu = false;
+
+ ic_locked = IEEE80211_IS_LOCKED(ic);
+ if (ic_locked)
+ IEEE80211_UNLOCK(ic);
+ wiphy_lock(hw->wiphy);
+ error = lkpi_80211_mo_ampdu_action(hw, vif, &params);
+ wiphy_unlock(hw->wiphy);
+ if (ic_locked)
+ IEEE80211_LOCK(ic);
+ if (error != 0)
+ ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p rap %p\n",
+ __func__, error, ni, rap);
+
+net80211_only:
+ lhw->ic_ampdu_rx_stop(ni, rap);
+}
+#endif
+
+static void
+lkpi_ic_getradiocaps_ht(struct ieee80211com *ic, struct ieee80211_hw *hw,
+ uint8_t *bands, int *chan_flags, enum nl80211_band band)
+{
+#ifdef LKPI_80211_HT
+ struct ieee80211_sta_ht_cap *ht_cap;
+
+ ht_cap = &hw->wiphy->bands[band]->ht_cap;
+ if (!ht_cap->ht_supported)
+ return;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ setbit(bands, IEEE80211_MODE_11NG);
+ break;
+ case NL80211_BAND_5GHZ:
+ setbit(bands, IEEE80211_MODE_11NA);
+ break;
+ default:
+ IMPROVE("Unsupported band %d", band);
+ return;
+ }
+
+ ic->ic_htcaps = IEEE80211_HTC_HT; /* HT operation */
+
+ /*
+ * Rather than manually checking each flag and
+ * translating IEEE80211_HT_CAP_ to IEEE80211_HTCAP_,
+ * simply copy the 16bits.
+ */
+ ic->ic_htcaps |= ht_cap->cap;
+
+ /* Then deal with the other flags. */
+ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION))
+ ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
+#ifdef __notyet__
+ if (ieee80211_hw_check(hw, TX_AMSDU))
+ ic->ic_htcaps |= IEEE80211_HTC_AMSDU;
+ if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU))
+ ic->ic_htcaps |= (IEEE80211_HTC_RX_AMSDU_AMPDU |
+ IEEE80211_HTC_TX_AMSDU_AMPDU);
+#endif
+
+ IMPROVE("PS, ampdu_*, ht_cap.mcs.tx_params, ...");
+ ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_OFF;
+
+ /* Only add HT40 channels if supported. */
+ if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) != 0 &&
+ chan_flags != NULL)
+ *chan_flags |= NET80211_CBW_FLAG_HT40;
+#endif
+}
+
+static void
+lkpi_ic_getradiocaps(struct ieee80211com *ic, int maxchan,
+ int *n, struct ieee80211_channel *c)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct linuxkpi_ieee80211_channel *channels;
+ uint8_t bands[IEEE80211_MODE_BYTES];
+ int chan_flags, error, i, nchans;
+
+ /* Channels */
+ lhw = ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ /* NL80211_BAND_2GHZ */
+ nchans = 0;
+ if (hw->wiphy->bands[NL80211_BAND_2GHZ] != NULL)
+ nchans = hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
+ if (nchans > 0) {
+ memset(bands, 0, sizeof(bands));
+ chan_flags = 0;
+ setbit(bands, IEEE80211_MODE_11B);
+ /* XXX-BZ unclear how to check for 11g. */
+
+ IMPROVE("the bitrates may have flags?");
+ setbit(bands, IEEE80211_MODE_11G);
+
+ lkpi_ic_getradiocaps_ht(ic, hw, bands, &chan_flags,
+ NL80211_BAND_2GHZ);
+
+ channels = hw->wiphy->bands[NL80211_BAND_2GHZ]->channels;
+ for (i = 0; i < nchans && *n < maxchan; i++) {
+ uint32_t nflags = 0;
+ int cflags = chan_flags;
+
+ if (channels[i].flags & IEEE80211_CHAN_DISABLED) {
+ ic_printf(ic, "%s: Skipping disabled chan "
+ "[%u/%u/%#x]\n", __func__,
+ channels[i].hw_value,
+ channels[i].center_freq, channels[i].flags);
+ continue;
+ }
+ if (channels[i].flags & IEEE80211_CHAN_NO_IR)
+ nflags |= (IEEE80211_CHAN_NOADHOC|IEEE80211_CHAN_PASSIVE);
+ if (channels[i].flags & IEEE80211_CHAN_RADAR)
+ nflags |= IEEE80211_CHAN_DFS;
+ if (channels[i].flags & IEEE80211_CHAN_NO_160MHZ)
+ cflags &= ~(NET80211_CBW_FLAG_VHT160|NET80211_CBW_FLAG_VHT80P80);
+ if (channels[i].flags & IEEE80211_CHAN_NO_80MHZ)
+ cflags &= ~NET80211_CBW_FLAG_VHT80;
+ /* XXX how to map the remaining enum ieee80211_channel_flags? */
+ if (channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ cflags &= ~NET80211_CBW_FLAG_HT40;
+
+ error = ieee80211_add_channel_cbw(c, maxchan, n,
+ channels[i].hw_value, channels[i].center_freq,
+ channels[i].max_power,
+ nflags, bands, cflags);
+ /* net80211::ENOBUFS: *n >= maxchans */
+ if (error != 0 && error != ENOBUFS)
+ ic_printf(ic, "%s: Adding chan %u/%u/%#x/%#x/%#x/%#x "
+ "returned error %d\n",
+ __func__, channels[i].hw_value,
+ channels[i].center_freq, channels[i].flags,
+ nflags, chan_flags, cflags, error);
+ if (error != 0)
+ break;
+ }
+ }
+
+ /* NL80211_BAND_5GHZ */
+ nchans = 0;
+ if (hw->wiphy->bands[NL80211_BAND_5GHZ] != NULL)
+ nchans = hw->wiphy->bands[NL80211_BAND_5GHZ]->n_channels;
+ if (nchans > 0) {
+ memset(bands, 0, sizeof(bands));
+ chan_flags = 0;
+ setbit(bands, IEEE80211_MODE_11A);
+
+ lkpi_ic_getradiocaps_ht(ic, hw, bands, &chan_flags,
+ NL80211_BAND_5GHZ);
+
+#ifdef LKPI_80211_VHT
+ if (hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.vht_supported) {
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
+ ic->ic_vht_cap.vht_cap_info =
+ hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap;
+ ic->ic_vht_cap.supp_mcs =
+ hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.vht_mcs;
+
+ setbit(bands, IEEE80211_MODE_VHT_5GHZ);
+ chan_flags |= NET80211_CBW_FLAG_VHT80;
+ if (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_IS_160MHZ(
+ ic->ic_vht_cap.vht_cap_info))
+ chan_flags |= NET80211_CBW_FLAG_VHT160;
+ if (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_IS_160_80P80MHZ(
+ ic->ic_vht_cap.vht_cap_info))
+ chan_flags |= NET80211_CBW_FLAG_VHT80P80;
+ }
+#endif
+
+ channels = hw->wiphy->bands[NL80211_BAND_5GHZ]->channels;
+ for (i = 0; i < nchans && *n < maxchan; i++) {
+ uint32_t nflags = 0;
+ int cflags = chan_flags;
+
+ if (channels[i].flags & IEEE80211_CHAN_DISABLED) {
+ ic_printf(ic, "%s: Skipping disabled chan "
+ "[%u/%u/%#x]\n", __func__,
+ channels[i].hw_value,
+ channels[i].center_freq, channels[i].flags);
+ continue;
+ }
+ if (channels[i].flags & IEEE80211_CHAN_NO_IR)
+ nflags |= (IEEE80211_CHAN_NOADHOC|IEEE80211_CHAN_PASSIVE);
+ if (channels[i].flags & IEEE80211_CHAN_RADAR)
+ nflags |= IEEE80211_CHAN_DFS;
+ if (channels[i].flags & IEEE80211_CHAN_NO_160MHZ)
+ cflags &= ~(NET80211_CBW_FLAG_VHT160|NET80211_CBW_FLAG_VHT80P80);
+ if (channels[i].flags & IEEE80211_CHAN_NO_80MHZ)
+ cflags &= ~NET80211_CBW_FLAG_VHT80;
+ /* XXX how to map the remaining enum ieee80211_channel_flags? */
+ if (channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ cflags &= ~NET80211_CBW_FLAG_HT40;
+
+ error = ieee80211_add_channel_cbw(c, maxchan, n,
+ channels[i].hw_value, channels[i].center_freq,
+ channels[i].max_power,
+ nflags, bands, cflags);
+ /* net80211::ENOBUFS: *n >= maxchans */
+ if (error != 0 && error != ENOBUFS)
+ ic_printf(ic, "%s: Adding chan %u/%u/%#x/%#x/%#x/%#x "
+ "returned error %d\n",
+ __func__, channels[i].hw_value,
+ channels[i].center_freq, channels[i].flags,
+ nflags, chan_flags, cflags, error);
+ if (error != 0)
+ break;
+ }
+ }
+}
+
+static void *
+lkpi_ieee80211_ifalloc(void)
+{
+ struct ieee80211com *ic;
+
+ ic = malloc(sizeof(*ic), M_LKPI80211, M_WAITOK | M_ZERO);
+
+ /* Setting these happens later when we have device information. */
+ ic->ic_softc = NULL;
+ ic->ic_name = "linuxkpi";
+
+ return (ic);
+}
+
+struct ieee80211_hw *
+linuxkpi_ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops)
+{
+ struct ieee80211_hw *hw;
+ struct lkpi_hw *lhw;
+ struct wiphy *wiphy;
+ int ac;
+
+ /* Get us and the driver data also allocated. */
+ wiphy = wiphy_new(&linuxkpi_mac80211cfgops, sizeof(*lhw) + priv_len);
+ if (wiphy == NULL)
+ return (NULL);
+
+ lhw = wiphy_priv(wiphy);
+ lhw->ops = ops;
+
+ LKPI_80211_LHW_SCAN_LOCK_INIT(lhw);
+ LKPI_80211_LHW_TXQ_LOCK_INIT(lhw);
+ sx_init_flags(&lhw->lvif_sx, "lhw-lvif", SX_RECURSE | SX_DUPOK);
+ TAILQ_INIT(&lhw->lvif_head);
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ lhw->txq_generation[ac] = 1;
+ TAILQ_INIT(&lhw->scheduled_txqs[ac]);
+ }
+
+ /* Chanctx_conf */
+ INIT_LIST_HEAD(&lhw->lchanctx_list);
+
+ /* Deferred RX path. */
+ LKPI_80211_LHW_RXQ_LOCK_INIT(lhw);
+ TASK_INIT(&lhw->rxq_task, 0, lkpi_80211_lhw_rxq_task, lhw);
+ mbufq_init(&lhw->rxq, 32 * NAPI_POLL_WEIGHT);
+ lhw->rxq_stopped = false;
+
+ /*
+ * XXX-BZ TODO make sure there is a "_null" function for all ops
+ * not initialized.
+ */
+ hw = LHW_TO_HW(lhw);
+ hw->wiphy = wiphy;
+ hw->conf.flags |= IEEE80211_CONF_IDLE;
+ hw->priv = (void *)(lhw + 1);
+
+ /* BSD Specific. */
+ lhw->ic = lkpi_ieee80211_ifalloc();
+
+ IMPROVE();
+
+ return (hw);
+}
+
+void
+linuxkpi_ieee80211_iffree(struct ieee80211_hw *hw)
+{
+ struct lkpi_hw *lhw;
+ struct mbuf *m;
+
+ lhw = HW_TO_LHW(hw);
+ free(lhw->ic, M_LKPI80211);
+ lhw->ic = NULL;
+
+ /*
+ * Drain the deferred RX path.
+ */
+ LKPI_80211_LHW_RXQ_LOCK(lhw);
+ lhw->rxq_stopped = true;
+ LKPI_80211_LHW_RXQ_UNLOCK(lhw);
+
+ /* Drain taskq, won't be restarted due to rxq_stopped being set. */
+ while (taskqueue_cancel(taskqueue_thread, &lhw->rxq_task, NULL) != 0)
+ taskqueue_drain(taskqueue_thread, &lhw->rxq_task);
+
+ /* Flush mbufq (make sure to release ni refs!). */
+ m = mbufq_dequeue(&lhw->rxq);
+ while (m != NULL) {
+#ifdef LKPI_80211_USE_MTAG
+ struct m_tag *mtag;
+
+ mtag = m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL);
+ if (mtag != NULL) {
+ struct lkpi_80211_tag_rxni *rxni;
+
+ rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1);
+ ieee80211_free_node(rxni->ni);
+ }
+#else
+ if (m->m_pkthdr.PH_loc.ptr != NULL) {
+ struct ieee80211_node *ni;
+
+ ni = m->m_pkthdr.PH_loc.ptr;
+ ieee80211_free_node(ni);
+ }
+#endif
+ m_freem(m);
+ m = mbufq_dequeue(&lhw->rxq);
+ }
+ KASSERT(mbufq_empty(&lhw->rxq), ("%s: lhw %p has rxq len %d != 0\n",
+ __func__, lhw, mbufq_len(&lhw->rxq)));
+ LKPI_80211_LHW_RXQ_LOCK_DESTROY(lhw);
+
+ /* Chanctx_conf. */
+ if (!list_empty_careful(&lhw->lchanctx_list)) {
+ struct lkpi_chanctx *lchanctx, *next;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ list_for_each_entry_safe(lchanctx, next, &lhw->lchanctx_list, entry) {
+ if (lchanctx->added_to_drv) {
+ /* In reality we should panic? */
+ chanctx_conf = &lchanctx->chanctx_conf;
+ lkpi_80211_mo_remove_chanctx(hw, chanctx_conf);
+ }
+ list_del(&lchanctx->entry);
+ free(lchanctx, M_LKPI80211);
+ }
+ }
+
+ /* Cleanup more of lhw here or in wiphy_free()? */
+ LKPI_80211_LHW_TXQ_LOCK_DESTROY(lhw);
+ LKPI_80211_LHW_SCAN_LOCK_DESTROY(lhw);
+ sx_destroy(&lhw->lvif_sx);
+ IMPROVE();
+}
+
+void
+linuxkpi_set_ieee80211_dev(struct ieee80211_hw *hw, char *name)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211com *ic;
+
+ lhw = HW_TO_LHW(hw);
+ ic = lhw->ic;
+
+ /* Now set a proper name before ieee80211_ifattach(). */
+ ic->ic_softc = lhw;
+ ic->ic_name = name;
+
+ /* XXX-BZ do we also need to set wiphy name? */
+}
+
+struct ieee80211_hw *
+linuxkpi_wiphy_to_ieee80211_hw(struct wiphy *wiphy)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = wiphy_priv(wiphy);
+ return (LHW_TO_HW(lhw));
+}
+
+static void
+lkpi_radiotap_attach(struct lkpi_hw *lhw)
+{
+ struct ieee80211com *ic;
+
+ ic = lhw->ic;
+ ieee80211_radiotap_attach(ic,
+ &lhw->rtap_tx.wt_ihdr, sizeof(lhw->rtap_tx),
+ LKPI_RTAP_TX_FLAGS_PRESENT,
+ &lhw->rtap_rx.wr_ihdr, sizeof(lhw->rtap_rx),
+ LKPI_RTAP_RX_FLAGS_PRESENT);
+}
+
+int
+linuxkpi_ieee80211_ifattach(struct ieee80211_hw *hw)
+{
+ struct ieee80211com *ic;
+ struct lkpi_hw *lhw;
+ int band, i;
+
+ lhw = HW_TO_LHW(hw);
+ ic = lhw->ic;
+
+ /* We do it this late as wiphy->dev should be set for the name. */
+ lhw->workq = alloc_ordered_workqueue(wiphy_name(hw->wiphy), 0);
+ if (lhw->workq == NULL)
+ return (-EAGAIN);
+
+ /* XXX-BZ figure out how they count this ... */
+ if (!is_zero_ether_addr(hw->wiphy->perm_addr)) {
+ IEEE80211_ADDR_COPY(ic->ic_macaddr,
+ hw->wiphy->perm_addr);
+ } else if (hw->wiphy->n_addresses > 0) {
+ /* We take the first one. */
+ IEEE80211_ADDR_COPY(ic->ic_macaddr,
+ hw->wiphy->addresses[0].addr);
+ } else {
+ ic_printf(ic, "%s: warning, no hardware address!\n", __func__);
+ }
+
+#ifdef __not_yet__
+ /* See comment in lkpi_80211_txq_tx_one(). */
+ ic->ic_headroom = hw->extra_tx_headroom;
+#endif
+
+ ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
+ ic->ic_opmode = IEEE80211_M_STA;
+
+ /* Set device capabilities. */
+ /* XXX-BZ we need to get these from linux80211/drivers and convert. */
+ ic->ic_caps =
+ IEEE80211_C_STA |
+ IEEE80211_C_MONITOR |
+ IEEE80211_C_WPA | /* WPA/RSN */
+#ifdef LKPI_80211_WME
+ IEEE80211_C_WME |
+#endif
+#if 0
+ IEEE80211_C_PMGT |
+#endif
+ IEEE80211_C_SHSLOT | /* short slot time supported */
+ IEEE80211_C_SHPREAMBLE /* short preamble supported */
+ ;
+#if 0
+ /* Scanning is a different kind of beast to re-work. */
+ ic->ic_caps |= IEEE80211_C_BGSCAN;
+#endif
+ if (lhw->ops->hw_scan) {
+ /*
+ * Advertise full-offload scanning.
+ *
+ * Not limiting to SINGLE_SCAN_ON_ALL_BANDS here as otherwise
+ * we essentially disable hw_scan for all drivers not setting
+ * the flag.
+ */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ lhw->scan_flags |= LKPI_LHW_SCAN_HW;
+ }
+
+ /* Does HW support Fragmentation offload? */
+ if (ieee80211_hw_check(hw, SUPPORTS_TX_FRAG))
+ ic->ic_flags_ext |= IEEE80211_FEXT_FRAG_OFFLOAD;
+
+ /*
+ * The wiphy variables report bitmasks of avail antennas.
+ * (*get_antenna) gets the currently set bitmasks, which can be
+ * altered by (*set_antenna) for some drivers.
+ * XXX-BZ will the count alone do us much good long-term in net80211?
+ */
+ if (hw->wiphy->available_antennas_rx ||
+ hw->wiphy->available_antennas_tx) {
+ uint32_t rxs, txs;
+
+ if (lkpi_80211_mo_get_antenna(hw, &txs, &rxs) == 0) {
+ ic->ic_rxstream = bitcount32(rxs);
+ ic->ic_txstream = bitcount32(txs);
+ }
+ }
+
+ ic->ic_cryptocaps = 0;
+#ifdef LKPI_80211_HW_CRYPTO
+ if (lkpi_hwcrypto && hw->wiphy->n_cipher_suites > 0) {
+ uint32_t hwciphers;
+
+ hwciphers = 0;
+ for (i = 0; i < hw->wiphy->n_cipher_suites; i++) {
+ uint32_t cs;
+
+ cs = lkpi_l80211_to_net80211_cyphers(
+ ic, hw->wiphy->cipher_suites[i]);
+ if (cs == IEEE80211_CRYPTO_TKIP) {
+ /*
+ * We do set this here. We will only find out
+ * when doing a SET_KEY operation depending on
+ * what the driver returns.
+ * net80211::ieee80211_crypto_newkey()
+ * checks this so we will have to do flags
+ * surgery later.
+ */
+ cs |= IEEE80211_CRYPTO_TKIPMIC;
+ }
+ hwciphers |= cs;
+ }
+ /*
+ * (20250415) nothing anywhere in the path checks we actually
+ * support all these in net80211.
+ * net80211 supports _256 variants but the ioctl does not.
+ */
+ IMPROVE("as net80211 grows more support, enable them");
+ hwciphers &= (IEEE80211_CRYPTO_WEP |
+ IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_TKIPMIC |
+ IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_AES_GCM_128);
+ /*
+ * We only support CCMP here, so further filter.
+ * Also permit TKIP if turned on.
+ */
+ hwciphers &= (IEEE80211_CRYPTO_AES_CCM |
+ IEEE80211_CRYPTO_AES_GCM_128 |
+ (lkpi_hwcrypto_tkip ? (IEEE80211_CRYPTO_TKIP |
+ IEEE80211_CRYPTO_TKIPMIC) : 0));
+ ieee80211_set_hardware_ciphers(ic, hwciphers);
+ }
+#endif
+
+ lkpi_ic_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
+ ic->ic_channels);
+
+ ieee80211_ifattach(ic);
+
+ ic->ic_update_mcast = lkpi_ic_update_mcast;
+ ic->ic_update_promisc = lkpi_ic_update_promisc;
+ ic->ic_update_chw = lkpi_ic_update_chw;
+ ic->ic_parent = lkpi_ic_parent;
+ ic->ic_scan_start = lkpi_ic_scan_start;
+ ic->ic_scan_end = lkpi_ic_scan_end;
+ ic->ic_set_channel = lkpi_ic_set_channel;
+ ic->ic_transmit = lkpi_ic_transmit;
+ ic->ic_raw_xmit = lkpi_ic_raw_xmit;
+ ic->ic_vap_create = lkpi_ic_vap_create;
+ ic->ic_vap_delete = lkpi_ic_vap_delete;
+ ic->ic_getradiocaps = lkpi_ic_getradiocaps;
+ ic->ic_wme.wme_update = lkpi_ic_wme_update;
+
+ lhw->ic_scan_curchan = ic->ic_scan_curchan;
+ ic->ic_scan_curchan = lkpi_ic_scan_curchan;
+ lhw->ic_scan_mindwell = ic->ic_scan_mindwell;
+ ic->ic_scan_mindwell = lkpi_ic_scan_mindwell;
+
+ lhw->ic_node_alloc = ic->ic_node_alloc;
+ ic->ic_node_alloc = lkpi_ic_node_alloc;
+ lhw->ic_node_init = ic->ic_node_init;
+ ic->ic_node_init = lkpi_ic_node_init;
+ lhw->ic_node_cleanup = ic->ic_node_cleanup;
+ ic->ic_node_cleanup = lkpi_ic_node_cleanup;
+ lhw->ic_node_free = ic->ic_node_free;
+ ic->ic_node_free = lkpi_ic_node_free;
+
+#ifdef LKPI_80211_HT
+ /*
+ * Only attach if the driver/firmware supports (*ampdu_action)().
+ * Otherwise it is in the hands of net80211.
+ */
+ if (lhw->ops->ampdu_action != NULL) {
+ lhw->ic_recv_action = ic->ic_recv_action;
+ ic->ic_recv_action = lkpi_ic_recv_action;
+ lhw->ic_send_action = ic->ic_send_action;
+ ic->ic_send_action = lkpi_ic_send_action;
+
+ lhw->ic_ampdu_enable = ic->ic_ampdu_enable;
+ ic->ic_ampdu_enable = lkpi_ic_ampdu_enable;
+
+ lhw->ic_addba_request = ic->ic_addba_request;
+ ic->ic_addba_request = lkpi_ic_addba_request;
+ lhw->ic_addba_response = ic->ic_addba_response;
+ ic->ic_addba_response = lkpi_ic_addba_response;
+ lhw->ic_addba_stop = ic->ic_addba_stop;
+ ic->ic_addba_stop = lkpi_ic_addba_stop;
+ lhw->ic_addba_response_timeout = ic->ic_addba_response_timeout;
+ ic->ic_addba_response_timeout = lkpi_ic_addba_response_timeout;
+
+ lhw->ic_bar_response = ic->ic_bar_response;
+ ic->ic_bar_response = lkpi_ic_bar_response;
+
+ lhw->ic_ampdu_rx_start = ic->ic_ampdu_rx_start;
+ ic->ic_ampdu_rx_start = lkpi_ic_ampdu_rx_start;
+ lhw->ic_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
+ ic->ic_ampdu_rx_stop = lkpi_ic_ampdu_rx_stop;
+ }
+#endif
+
+ lkpi_radiotap_attach(lhw);
+
+ /*
+ * Assign the first possible channel for now; seems Realtek drivers
+ * expect one.
+	 * Also remember the number of bands we support and the largest number
+	 * of rates in any band so we can scale the [(ext) sup rates] IE(s)
+	 * accordingly.
+ */
+ lhw->supbands = lhw->max_rates = 0;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *supband;
+ struct linuxkpi_ieee80211_channel *channels;
+
+ supband = hw->wiphy->bands[band];
+ if (supband == NULL || supband->n_channels == 0)
+ continue;
+
+ lhw->supbands++;
+ lhw->max_rates = max(lhw->max_rates, supband->n_bitrates);
+
+		/* If we already have a channel, only keep counting supbands. */
+ if (hw->conf.chandef.chan != NULL)
+ continue;
+
+ channels = supband->channels;
+ for (i = 0; i < supband->n_channels; i++) {
+
+ if (channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ cfg80211_chandef_create(&hw->conf.chandef, &channels[i],
+#ifdef LKPI_80211_HT
+ (ic->ic_flags_ht & IEEE80211_FHT_HT) ? NL80211_CHAN_HT20 :
+#endif
+ NL80211_CHAN_NO_HT);
+ break;
+ }
+ }
+
+ IMPROVE("see net80211::ieee80211_chan_init vs. wiphy->bands[].bitrates possibly in lkpi_ic_getradiocaps?");
+
+ /* Make sure we do not support more than net80211 is willing to take. */
+ if (lhw->max_rates > IEEE80211_RATE_MAXSIZE) {
+ ic_printf(ic, "%s: limiting max_rates %d to %d!\n", __func__,
+ lhw->max_rates, IEEE80211_RATE_MAXSIZE);
+ lhw->max_rates = IEEE80211_RATE_MAXSIZE;
+ }
+
+ /*
+	 * The maximum number of supported bitrates on any band plus the size
+	 * of a DSSS Parameter Set give our per-band IE size.
+	 * The SSID is the responsibility of the driver and goes on the side.
+	 * The user-specified bits coming from the vap go into the
+ * "common ies" fields.
+ */
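+	/*
+	 * 2 + IEEE80211_RATE_SIZE covers the Supported Rates IE header (ID
+	 * and length) plus up to 8 rates; any further rates go into an
+	 * Extended Supported Rates IE with its own 2 byte header.
+	 */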
+ lhw->scan_ie_len = 2 + IEEE80211_RATE_SIZE;
+ if (lhw->max_rates > IEEE80211_RATE_SIZE)
+ lhw->scan_ie_len += 2 + (lhw->max_rates - IEEE80211_RATE_SIZE);
+
+ if (hw->wiphy->features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) {
+ /*
+		 * net80211 does not seem to support the DSSS Parameter Set but
+		 * some of the drivers insert it, so account for the extra
+		 * fixed space.
+ */
+ lhw->scan_ie_len += 2 + 1;
+ }
+
+#if defined(LKPI_80211_HT)
+ if ((ic->ic_htcaps & IEEE80211_HTC_HT) != 0)
+ lhw->scan_ie_len += sizeof(struct ieee80211_ie_htcap);
+#endif
+#if defined(LKPI_80211_VHT)
+ if (IEEE80211_CONF_VHT(ic))
+ lhw->scan_ie_len += 2 + sizeof(struct ieee80211_vht_cap);
+#endif
+
+	/* Reduce the max_scan_ie_len "left" by the amount we already consume. */
+ if (hw->wiphy->max_scan_ie_len > 0) {
+ if (lhw->scan_ie_len > hw->wiphy->max_scan_ie_len)
+ goto err;
+ hw->wiphy->max_scan_ie_len -= lhw->scan_ie_len;
+ }
+
+ if (bootverbose)
+ ieee80211_announce(ic);
+
+ return (0);
+err:
+ IMPROVE("TODO FIXME CLEANUP");
+ return (-EAGAIN);
+}
+
+void
+linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *hw)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211com *ic;
+
+ lhw = HW_TO_LHW(hw);
+ ic = lhw->ic;
+ ieee80211_ifdetach(ic);
+}
+
+void
+linuxkpi_ieee80211_iterate_interfaces(struct ieee80211_hw *hw,
+ enum ieee80211_iface_iter flags,
+ void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *),
+ void *arg)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ bool active, atomic, nin_drv;
+
+ lhw = HW_TO_LHW(hw);
+
+ if (flags & ~(IEEE80211_IFACE_ITER_NORMAL|
+ IEEE80211_IFACE_ITER_RESUME_ALL|
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER|
+ IEEE80211_IFACE_ITER_ACTIVE|IEEE80211_IFACE_ITER__ATOMIC)) {
+ ic_printf(lhw->ic, "XXX TODO %s flags(%#x) not yet supported.\n",
+ __func__, flags);
+ }
+
+ active = (flags & IEEE80211_IFACE_ITER_ACTIVE) != 0;
+ atomic = (flags & IEEE80211_IFACE_ITER__ATOMIC) != 0;
+ nin_drv = (flags & IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) != 0;
+
+ if (atomic)
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
+ struct ieee80211vap *vap;
+
+ vif = LVIF_TO_VIF(lvif);
+
+ /*
+		 * If we want "active" interfaces, we need to distinguish
+		 * whether the driver knows about them or not in order to
+		 * handle the "resume" case correctly.  Skip the ones the
+		 * driver does not know about.
+ */
+ if (active && !lvif->added_to_drv &&
+ (flags & IEEE80211_IFACE_ITER_RESUME_ALL) != 0)
+ continue;
+
+ /*
+		 * If we were asked to skip interfaces not added to the driver,
+		 * do so here if we did not already above.
+ */
+ if (nin_drv && !lvif->added_to_drv)
+ continue;
+
+ /*
+		 * Run the iterator function if we are either not asking
+		 * for active only or if the VAP is "running".
+ */
+ /* XXX-BZ probably should have state in the lvif as well. */
+ vap = LVIF_TO_VAP(lvif);
+ if (!active || (vap->iv_state != IEEE80211_S_INIT))
+ iterfunc(arg, vif->addr, vif);
+ }
+ if (atomic)
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+}
+
+static void
+lkpi_ieee80211_iterate_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ ieee80211_keyix keyix, struct lkpi_sta *lsta,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_key_conf *, void *),
+ void *arg)
+{
+ if (!lsta->added_to_drv)
+ return;
+
+ if (lsta->kc[keyix] == NULL)
+ return;
+
+ iterfunc(hw, vif, LSTA_TO_STA(lsta), lsta->kc[keyix], arg);
+}
+
+void
+linuxkpi_ieee80211_iterate_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_key_conf *, void *),
+ void *arg, bool rcu)
+{
+ struct lkpi_sta *lsta;
+ struct lkpi_vif *lvif;
+
+ lvif = VIF_TO_LVIF(vif);
+
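+	/*
+	 * Walk all stations on the vif and report every key installed for
+	 * them; keys are tracked per-station in lsta->kc[].
+	 */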
+ if (rcu) {
+ rcu_read_lock_held(); /* XXX-BZ is this correct? */
+
+ if (vif == NULL) {
+ TODO();
+ } else {
+ list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) {
+ for (ieee80211_keyix keyix = 0; keyix < nitems(lsta->kc);
+ keyix++)
+ lkpi_ieee80211_iterate_keys(hw, vif,
+ keyix, lsta, iterfunc, arg);
+ }
+ }
+ } else {
+ TODO("Used by suspend/resume; order of keys as installed to "
+ "firmware is important; we'll need to rewrite some code for that");
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (vif == NULL) {
+ TODO();
+ } else {
+ list_for_each_entry(lsta, &lvif->lsta_list, lsta_list) {
+ for (ieee80211_keyix keyix = 0; keyix < nitems(lsta->kc);
+ keyix++)
+ lkpi_ieee80211_iterate_keys(hw, vif,
+ keyix, lsta, iterfunc, arg);
+ }
+ }
+ }
+}
+
+void
+linuxkpi_ieee80211_iterate_chan_contexts(struct ieee80211_hw *hw,
+ void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *,
+ void *),
+ void *arg)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_chanctx *lchanctx;
+
+ KASSERT(hw != NULL && iterfunc != NULL,
+ ("%s: hw %p iterfunc %p arg %p\n", __func__, hw, iterfunc, arg));
+
+ lhw = HW_TO_LHW(hw);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(lchanctx, &lhw->lchanctx_list, entry) {
+ if (!lchanctx->added_to_drv)
+ continue;
+ iterfunc(hw, &lchanctx->chanctx_conf, arg);
+ }
+ rcu_read_unlock();
+}
+
+void
+linuxkpi_ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterfunc)(void *, struct ieee80211_sta *), void *arg)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+
+ KASSERT(hw != NULL && iterfunc != NULL,
+ ("%s: hw %p iterfunc %p arg %p\n", __func__, hw, iterfunc, arg));
+
+ lhw = HW_TO_LHW(hw);
+
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) {
+ if (!lsta->added_to_drv)
+ continue;
+ sta = LSTA_TO_STA(lsta);
+ iterfunc(arg, sta);
+ }
+ rcu_read_unlock();
+ }
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+}
+
+struct linuxkpi_ieee80211_regdomain *
+lkpi_get_linuxkpi_ieee80211_regdomain(size_t n)
+{
+ struct linuxkpi_ieee80211_regdomain *regd;
+
+ regd = kzalloc(sizeof(*regd) + n * sizeof(struct ieee80211_reg_rule),
+ GFP_KERNEL);
+ return (regd);
+}
+
+int
+linuxkpi_regulatory_set_wiphy_regd_sync(struct wiphy *wiphy,
+ struct linuxkpi_ieee80211_regdomain *regd)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211com *ic;
+ struct ieee80211_regdomain *rd;
+
+ lhw = wiphy_priv(wiphy);
+ ic = lhw->ic;
+
+ rd = &ic->ic_regdomain;
+ if (rd->isocc[0] == '\0') {
+ rd->isocc[0] = regd->alpha2[0];
+ rd->isocc[1] = regd->alpha2[1];
+ }
+
+ TODO();
+ /* XXX-BZ finish the rest. */
+
+ return (0);
+}
+
+void
+linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *hw,
+ struct cfg80211_scan_info *info)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211com *ic;
+ struct ieee80211_scan_state *ss;
+
+ lhw = wiphy_priv(hw->wiphy);
+ ic = lhw->ic;
+ ss = ic->ic_scan;
+
+ ieee80211_scan_done(ss->ss_vap);
+
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ free(lhw->hw_req, M_LKPI80211);
+ lhw->hw_req = NULL;
+ lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING;
+ wakeup(lhw);
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ return;
+}
+
+static void
+lkpi_80211_lhw_rxq_rx_one(struct lkpi_hw *lhw, struct mbuf *m)
+{
+ struct ieee80211_node *ni;
+#ifdef LKPI_80211_USE_MTAG
+ struct m_tag *mtag;
+#endif
+ int ok;
+
+ ni = NULL;
+#ifdef LKPI_80211_USE_MTAG
+ mtag = m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL);
+ if (mtag != NULL) {
+ struct lkpi_80211_tag_rxni *rxni;
+
+ rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1);
+ ni = rxni->ni;
+ }
+#else
+ if (m->m_pkthdr.PH_loc.ptr != NULL) {
+ ni = m->m_pkthdr.PH_loc.ptr;
+ m->m_pkthdr.PH_loc.ptr = NULL;
+ }
+#endif
+
+ if (ni != NULL) {
+ ok = ieee80211_input_mimo(ni, m);
+ ieee80211_free_node(ni); /* Release the reference. */
+ if (ok < 0)
+ m_freem(m);
+ } else {
+ ok = ieee80211_input_mimo_all(lhw->ic, m);
+ /* mbuf got consumed. */
+ }
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ printf("TRACE-RX: %s: handled frame type %#0x\n", __func__, ok);
+#endif
+}
+
+static void
+lkpi_80211_lhw_rxq_task(void *ctx, int pending)
+{
+ struct lkpi_hw *lhw;
+ struct mbufq mq;
+ struct mbuf *m;
+
+ lhw = ctx;
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ printf("TRACE-RX: %s: lhw %p pending %d mbuf_qlen %d\n",
+ __func__, lhw, pending, mbufq_len(&lhw->rxq));
+#endif
+
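+	/*
+	 * Move all pending mbufs onto a local queue under the lock and then
+	 * process them unlocked so new RX can be queued concurrently.
+	 */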
+ mbufq_init(&mq, IFQ_MAXLEN);
+
+ LKPI_80211_LHW_RXQ_LOCK(lhw);
+ mbufq_concat(&mq, &lhw->rxq);
+ LKPI_80211_LHW_RXQ_UNLOCK(lhw);
+
+ m = mbufq_dequeue(&mq);
+ while (m != NULL) {
+ lkpi_80211_lhw_rxq_rx_one(lhw, m);
+ m = mbufq_dequeue(&mq);
+ }
+}
+
+static void
+lkpi_convert_rx_status(struct ieee80211_hw *hw, struct lkpi_sta *lsta,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_rx_stats *rx_stats,
+ uint8_t *rssip)
+{
+ struct ieee80211_supported_band *supband;
+ struct rate_info rxrate;
+ int i;
+ uint8_t rssi;
+
+ memset(&rxrate, 0, sizeof(rxrate));
+ memset(rx_stats, 0, sizeof(*rx_stats));
+ rx_stats->r_flags = IEEE80211_R_NF | IEEE80211_R_RSSI;
+ /* XXX-BZ correct hardcoded noise floor, survey data? */
+ rx_stats->c_nf = -96;
+ if (ieee80211_hw_check(hw, SIGNAL_DBM) &&
+ !(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL))
+ rssi = rx_status->signal;
+ else
+ rssi = rx_stats->c_nf;
+ /*
+ * net80211 signal strength data are in .5 dBm units relative to
+ * the current noise floor (see comment in ieee80211_node.h).
+ */
+ rssi -= rx_stats->c_nf;
+ if (rssip != NULL)
+ *rssip = rssi;
+ rx_stats->c_rssi = rssi * 2;
+ rx_stats->r_flags |= IEEE80211_R_BAND;
+ rx_stats->c_band =
+ lkpi_nl80211_band_to_net80211_band(rx_status->band);
+ rx_stats->r_flags |= IEEE80211_R_FREQ | IEEE80211_R_IEEE;
+ rx_stats->c_freq = rx_status->freq;
+ rx_stats->c_ieee = ieee80211_mhz2ieee(rx_stats->c_freq, rx_stats->c_band);
+
+ rx_stats->c_rx_tsf = rx_status->mactime;
+
+ /* XXX RX_FLAG_MACTIME_IS_RTAP_TS64 ? */
+ if ((rx_status->flag & RX_FLAG_MACTIME) ==
+ (RX_FLAG_MACTIME_START|RX_FLAG_MACTIME_END)) {
+ rx_stats->r_flags |= IEEE80211_R_TSF64;
+ /* XXX RX_FLAG_MACTIME_PLCP_START ? */
+ if ((rx_status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_START)
+ rx_stats->r_flags |= IEEE80211_R_TSF_START;
+ if ((rx_status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_END)
+ rx_stats->r_flags |= IEEE80211_R_TSF_END;
+ /* XXX-BZ if TSF_END will net80211 do the unwind of time? */
+ }
+
+ if (rx_status->chains != 0) {
+ int cc;
+ int8_t crssi;
+
+ rx_stats->c_chain = rx_status->chains;
+ rx_stats->r_flags |= IEEE80211_R_C_CHAIN;
+
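+		/*
+		 * Convert each per-chain signal to net80211's convention of
+		 * .5 dBm steps above the noise floor, as done for the overall
+		 * RSSI above.
+		 */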
+ cc = 0;
+ for (i = 0; i < nitems(rx_status->chain_signal); i++) {
+ if (!(rx_status->chains & BIT(i)))
+ continue;
+ crssi = rx_status->chain_signal[i];
+ crssi -= rx_stats->c_nf;
+ rx_stats->c_rssi_ctl[i] = crssi * 2;
+ rx_stats->c_rssi_ext[i] = crssi * 2; /* XXX _ext ??? ATH thing? */
+ /* We currently only have the global noise floor value. */
+ rx_stats->c_nf_ctl[i] = rx_stats->c_nf;
+ rx_stats->c_nf_ext[i] = rx_stats->c_nf;
+ cc++;
+ }
+ if (cc > 0)
+ rx_stats->r_flags |= (IEEE80211_R_C_NF | IEEE80211_R_C_RSSI);
+ }
+
+ /* XXX-NET80211 We are not going to populate c_phytype! */
+
+ switch (rx_status->encoding) {
+ case RX_ENC_LEGACY:
+ {
+ uint32_t legacy = 0;
+
+ supband = hw->wiphy->bands[rx_status->band];
+ if (supband != NULL)
+ legacy = supband->bitrates[rx_status->rate_idx].bitrate;
+ rx_stats->c_rate = legacy;
+ rxrate.legacy = legacy;
+ /* Is there a LinuxKPI way of reporting IEEE80211_RX_F_CCK / _OFDM? */
+ break;
+ }
+ case RX_ENC_HT:
+ rx_stats->c_pktflags |= IEEE80211_RX_F_HT;
+ rx_stats->c_rate = rx_status->rate_idx; /* mcs */
+ rxrate.flags |= RATE_INFO_FLAGS_MCS;
+ rxrate.mcs = rx_status->rate_idx;
+ if ((rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI) != 0) {
+ rx_stats->c_pktflags |= IEEE80211_RX_F_SHORTGI;
+ rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ }
+ break;
+ case RX_ENC_VHT:
+ rx_stats->c_pktflags |= IEEE80211_RX_F_VHT;
+ rx_stats->c_rate = rx_status->rate_idx; /* mcs */
+ rx_stats->c_vhtnss = rx_status->nss;
+ rxrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rxrate.mcs = rx_status->rate_idx;
+ rxrate.nss = rx_status->nss;
+ if ((rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI) != 0) {
+ rx_stats->c_pktflags |= IEEE80211_RX_F_SHORTGI;
+ rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ }
+ break;
+ case RX_ENC_HE:
+ rxrate.flags |= RATE_INFO_FLAGS_HE_MCS;
+ rxrate.mcs = rx_status->rate_idx;
+ rxrate.nss = rx_status->nss;
+ /* XXX TODO */
+		TODO("net80211 has no matching encoding for %u", rx_status->encoding);
+ break;
+ case RX_ENC_EHT:
+ rxrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
+ rxrate.mcs = rx_status->rate_idx;
+ rxrate.nss = rx_status->nss;
+ /* XXX TODO */
+		TODO("net80211 has no matching encoding for %u", rx_status->encoding);
+ break;
+ }
+
+ rxrate.bw = rx_status->bw;
+ switch (rx_status->bw) {
+ case RATE_INFO_BW_20:
+ rx_stats->c_width = IEEE80211_RX_FW_20MHZ;
+ break;
+ case RATE_INFO_BW_40:
+ rx_stats->c_width = IEEE80211_RX_FW_40MHZ;
+ break;
+ case RATE_INFO_BW_80:
+ rx_stats->c_width = IEEE80211_RX_FW_80MHZ;
+ break;
+ case RATE_INFO_BW_160:
+ rx_stats->c_width = IEEE80211_RX_FW_160MHZ;
+ break;
+ case RATE_INFO_BW_320:
+ case RATE_INFO_BW_HE_RU:
+ case RATE_INFO_BW_EHT_RU:
+ case RATE_INFO_BW_5:
+ case RATE_INFO_BW_10:
+		TODO("net80211 has no matching bandwidth for %u", rx_status->bw);
+ break;
+ }
+
+ if ((rx_status->enc_flags & RX_ENC_FLAG_LDPC) != 0)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_LDPC;
+ if ((rx_status->enc_flags & RX_ENC_FLAG_STBC_MASK) != 0)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_STBC;
+
+ /*
+	 * We only need these for LKPI_80211_HW_CRYPTO in theory but in
+	 * case the hardware does something we do not expect, always leave
+	 * these enabled.  Leaving this comment as documentation for the || 1.
+ */
+#if defined(LKPI_80211_HW_CRYPTO) || 1
+ if (rx_status->flag & RX_FLAG_DECRYPTED) {
+ rx_stats->c_pktflags |= IEEE80211_RX_F_DECRYPTED;
+ /* Only valid if decrypted is set. */
+ if (rx_status->flag & RX_FLAG_PN_VALIDATED)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_PN_VALIDATED;
+ }
+ if (rx_status->flag & RX_FLAG_IV_STRIPPED)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_IV_STRIP;
+ if (rx_status->flag & RX_FLAG_ICV_STRIPPED)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_ICV_STRIP;
+ if (rx_status->flag & RX_FLAG_MIC_STRIPPED)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_MIC_STRIP;
+ if (rx_status->flag & RX_FLAG_MMIC_STRIPPED)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_MMIC_STRIP;
+ if (rx_status->flag & RX_FLAG_MMIC_ERROR)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_FAIL_MMIC;
+ if (rx_status->flag & RX_FLAG_FAILED_FCS_CRC)
+ rx_stats->c_pktflags |= IEEE80211_RX_F_FAIL_FCSCRC;
+#endif
+
+ if (lsta != NULL) {
+ memcpy(&lsta->sinfo.rxrate, &rxrate, sizeof(rxrate));
+ lsta->sinfo.filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+}
+
+/* For %list see comment towards the end of the function. */
+void
+linuxkpi_ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct napi_struct *napi __unused,
+ struct list_head *list __unused)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211com *ic;
+ struct mbuf *m;
+ struct skb_shared_info *shinfo;
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_rx_stats rx_stats;
+ struct ieee80211_node *ni;
+ struct ieee80211vap *vap;
+ struct ieee80211_hdr *hdr;
+ struct lkpi_sta *lsta;
+ int i, offset, ok, error;
+ uint8_t rssi;
+ bool is_beacon;
+
+ lhw = HW_TO_LHW(hw);
+ ic = lhw->ic;
+
+ if (skb->len < 2) {
+ /* Need 80211 stats here. */
+ counter_u64_add(ic->ic_ierrors, 1);
+ IMPROVE();
+ goto err;
+ }
+
+ /*
+ * For now do the data copy; we can later improve things. Might even
+ * have an mbuf backing the skb data then?
+ */
+ m = m_get3(skb->len, M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL) {
+ counter_u64_add(ic->ic_ierrors, 1);
+ goto err;
+ }
+ m_copyback(m, 0, skb->tail - skb->data, skb->data);
+
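+	/* Also linearize any paged skb fragments into the mbuf. */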
+ shinfo = skb_shinfo(skb);
+ offset = m->m_len;
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ m_copyback(m, offset, shinfo->frags[i].size,
+ (uint8_t *)linux_page_address(shinfo->frags[i].page) +
+ shinfo->frags[i].offset);
+ offset += shinfo->frags[i].size;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ hdr = (void *)skb->data;
+ is_beacon = ieee80211_is_beacon(hdr->frame_control);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (is_beacon && (linuxkpi_debug_80211 & D80211_TRACE_RX_BEACONS) == 0)
+ goto no_trace_beacons;
+
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ printf("TRACE-RX: %s: skb %p l/d/t-len (%u/%u/%u) "
+ "h %p d %p t %p e %p sh %p (%u) m %p plen %u len %u%s\n",
+ __func__, skb, skb->len, skb->data_len,
+ skb->truesize, skb->head, skb->data, skb->tail, skb->end,
+ shinfo, shinfo->nr_frags,
+ m, m->m_pkthdr.len, m->m_len, is_beacon ? " beacon" : "");
+
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX_DUMP)
+ hexdump(mtod(m, const void *), m->m_len, "RX (raw) ", 0);
+
+ /* Implement a dump_rxcb() !!! */
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ printf("TRACE-RX: %s: RXCB: %ju %ju %u, %b, %u, %#0x, %#0x, "
+ "%u band %u, %u { %d %d %d %d }, %d, %#x %#x %#x %#x %u %u %u\n",
+ __func__,
+ (uintmax_t)rx_status->boottime_ns,
+ (uintmax_t)rx_status->mactime,
+ rx_status->device_timestamp,
+ rx_status->flag, IEEE80211_RX_STATUS_FLAGS_BITS,
+ rx_status->freq,
+ rx_status->bw,
+ rx_status->encoding,
+ rx_status->ampdu_reference,
+ rx_status->band,
+ rx_status->chains,
+ rx_status->chain_signal[0],
+ rx_status->chain_signal[1],
+ rx_status->chain_signal[2],
+ rx_status->chain_signal[3],
+ rx_status->signal,
+ rx_status->enc_flags,
+ rx_status->he_dcm,
+ rx_status->he_gi,
+ rx_status->he_ru,
+ rx_status->zero_length_psdu_type,
+ rx_status->nss,
+ rx_status->rate_idx);
+no_trace_beacons:
+#endif
+
+ lsta = NULL;
+ if (sta != NULL) {
+ lsta = STA_TO_LSTA(sta);
+ ni = ieee80211_ref_node(lsta->ni);
+ } else {
+ struct ieee80211_frame_min *wh;
+
+ wh = mtod(m, struct ieee80211_frame_min *);
+ ni = ieee80211_find_rxnode(ic, wh);
+ if (ni != NULL)
+ lsta = ni->ni_drv_data;
+ }
+
+ rssi = 0;
+ lkpi_convert_rx_status(hw, lsta, rx_status, &rx_stats, &rssi);
+
+ ok = ieee80211_add_rx_params(m, &rx_stats);
+ if (ok == 0) {
+ m_freem(m);
+ counter_u64_add(ic->ic_ierrors, 1);
+ goto err;
+ }
+
+ if (ni != NULL)
+ vap = ni->ni_vap;
+ else
+ /*
+ * XXX-BZ can we improve this by looking at the frame hdr
+ * or other meta-data passed up?
+ */
+ vap = TAILQ_FIRST(&ic->ic_vaps);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ printf("TRACE-RX: %s: sta %p lsta %p state %d ni %p vap %p%s\n",
+ __func__, sta, lsta, (lsta != NULL) ? lsta->state : -1,
+ ni, vap, is_beacon ? " beacon" : "");
+#endif
+
+ if (ni != NULL && vap != NULL && is_beacon &&
+ rx_status->device_timestamp > 0 &&
+ m->m_pkthdr.len >= sizeof(struct ieee80211_frame)) {
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_frame *wh;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))
+ goto skip_device_ts;
+
+ lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ IMPROVE("TIMING_BEACON_ONLY?");
+ /* mac80211 specific (not net80211) so keep it here. */
+ vif->bss_conf.sync_device_ts = rx_status->device_timestamp;
+ /*
+ * net80211 should take care of the other information (sync_tsf,
+ * sync_dtim_count) as otherwise we need to parse the beacon.
+ */
+skip_device_ts:
+ ;
+ }
+
+ if (vap != NULL && vap->iv_state > IEEE80211_S_INIT &&
+ ieee80211_radiotap_active_vap(vap)) {
+ struct lkpi_radiotap_rx_hdr *rtap;
+
+ rtap = &lhw->rtap_rx;
+ rtap->wr_tsft = rx_status->device_timestamp;
+ rtap->wr_flags = 0;
+ if (rx_status->enc_flags & RX_ENC_FLAG_SHORTPRE)
+ rtap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+ if (rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI)
+ rtap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
+#if 0 /* .. or it does not given we strip it below. */
+ if (ieee80211_hw_check(hw, RX_INCLUDES_FCS))
+ rtap->wr_flags |= IEEE80211_RADIOTAP_F_FCS;
+#endif
+ if (rx_status->flag & RX_FLAG_FAILED_FCS_CRC)
+ rtap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
+ rtap->wr_rate = 0;
+ IMPROVE();
+ /* XXX TODO status->encoding / rate_index / bw */
+ rtap->wr_chan_freq = htole16(rx_stats.c_freq);
+ if (ic->ic_curchan->ic_ieee == rx_stats.c_ieee)
+ rtap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
+ rtap->wr_dbm_antsignal = rssi;
+ rtap->wr_dbm_antnoise = rx_stats.c_nf;
+ }
+
+ if (ieee80211_hw_check(hw, RX_INCLUDES_FCS))
+ m_adj(m, -IEEE80211_CRC_LEN);
+
+#if 0
+ if (list != NULL) {
+ /*
+ * Normally this would be queued up and delivered by
+ * netif_receive_skb_list(), napi_gro_receive(), or the like.
+ * See mt76::mac80211.c as only current possible consumer.
+ */
+ IMPROVE("we simply pass the packet to net80211 to deal with.");
+ }
+#endif
+
+ /* Attach meta-information to the mbuf for the deferred RX path. */
+ if (ni != NULL) {
+#ifdef LKPI_80211_USE_MTAG
+ struct m_tag *mtag;
+ struct lkpi_80211_tag_rxni *rxni;
+
+ mtag = m_tag_alloc(MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI,
+ sizeof(*rxni), IEEE80211_M_NOWAIT);
+ if (mtag == NULL) {
+ m_freem(m);
+ counter_u64_add(ic->ic_ierrors, 1);
+ goto err;
+ }
+ rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1);
+ rxni->ni = ni; /* We hold a reference. */
+ m_tag_prepend(m, mtag);
+#else
+ m->m_pkthdr.PH_loc.ptr = ni; /* We hold a reference. */
+#endif
+ }
+
+ LKPI_80211_LHW_RXQ_LOCK(lhw);
+ if (lhw->rxq_stopped) {
+ LKPI_80211_LHW_RXQ_UNLOCK(lhw);
+ m_freem(m);
+ counter_u64_add(ic->ic_ierrors, 1);
+ goto err;
+ }
+
+ error = mbufq_enqueue(&lhw->rxq, m);
+ if (error != 0) {
+ LKPI_80211_LHW_RXQ_UNLOCK(lhw);
+ m_freem(m);
+ counter_u64_add(ic->ic_ierrors, 1);
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ ic_printf(ni->ni_ic, "%s: mbufq_enqueue failed: %d\n",
+ __func__, error);
+#endif
+ goto err;
+ }
+ taskqueue_enqueue(taskqueue_thread, &lhw->rxq_task);
+ LKPI_80211_LHW_RXQ_UNLOCK(lhw);
+
+ IMPROVE();
+
+err:
+ /* The skb is ours so we can free it :-) */
+ kfree_skb(skb);
+}
+
+uint8_t
+linuxkpi_ieee80211_get_tid(struct ieee80211_hdr *hdr, bool nonqos_ok)
+{
+ const struct ieee80211_frame *wh;
+ uint8_t tid;
+
+ /* Linux seems to assume this is a QOS-Data-Frame */
+ KASSERT(nonqos_ok || ieee80211_is_data_qos(hdr->frame_control),
+ ("%s: hdr %p fc %#06x not qos_data\n", __func__, hdr,
+ hdr->frame_control));
+
+ wh = (const struct ieee80211_frame *)hdr;
+ tid = ieee80211_gettid(wh);
+ KASSERT(nonqos_ok || tid == (tid & IEEE80211_QOS_TID), ("%s: tid %u "
+ "not expected (%u?)\n", __func__, tid, IEEE80211_NONQOS_TID));
+
+ return (tid);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+lkpi_wiphy_work(struct work_struct *work)
+{
+ struct lkpi_wiphy *lwiphy;
+ struct wiphy *wiphy;
+ struct wiphy_work *wk;
+
+ lwiphy = container_of(work, struct lkpi_wiphy, wwk);
+ wiphy = LWIPHY_TO_WIPHY(lwiphy);
+
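+	/*
+	 * Run a single queued item per invocation with the wiphy lock held
+	 * over its callback; if more work remains we re-schedule ourselves
+	 * below.
+	 */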
+ wiphy_lock(wiphy);
+
+ LKPI_80211_LWIPHY_WORK_LOCK(lwiphy);
+ wk = list_first_entry_or_null(&lwiphy->wwk_list, struct wiphy_work, entry);
+ /* If there is nothing we do nothing. */
+ if (wk == NULL) {
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+ wiphy_unlock(wiphy);
+ return;
+ }
+ list_del_init(&wk->entry);
+
+ /* More work to do? */
+ if (!list_empty(&lwiphy->wwk_list))
+ schedule_work(work);
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+
+ /* Finally call the (*wiphy_work_fn)() function. */
+ wk->fn(wiphy, wk);
+
+ wiphy_unlock(wiphy);
+}
+
+void
+linuxkpi_wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *wwk)
+{
+ struct lkpi_wiphy *lwiphy;
+
+ lwiphy = WIPHY_TO_LWIPHY(wiphy);
+
+ LKPI_80211_LWIPHY_WORK_LOCK(lwiphy);
+ /* Do not double-queue. */
+ if (list_empty(&wwk->entry))
+ list_add_tail(&wwk->entry, &lwiphy->wwk_list);
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+
+ /*
+ * See how ieee80211_queue_work() work continues in Linux or if things
+ * migrate here over time?
+ * Use a system queue from linux/workqueue.h for now.
+ */
+ queue_work(system_wq, &lwiphy->wwk);
+}
+
+void
+linuxkpi_wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *wwk)
+{
+ struct lkpi_wiphy *lwiphy;
+
+ lwiphy = WIPHY_TO_LWIPHY(wiphy);
+
+ LKPI_80211_LWIPHY_WORK_LOCK(lwiphy);
+ /* Only cancel if queued. */
+ if (!list_empty(&wwk->entry))
+ list_del_init(&wwk->entry);
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+}
+
+void
+linuxkpi_wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *wwk)
+{
+ struct lkpi_wiphy *lwiphy;
+ struct wiphy_work *wk;
+
+ lwiphy = WIPHY_TO_LWIPHY(wiphy);
+ LKPI_80211_LWIPHY_WORK_LOCK(lwiphy);
+ /* If wwk is unset, flush everything; called when wiphy is shut down. */
+ if (wwk != NULL && list_empty(&wwk->entry)) {
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+ return;
+ }
+
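+	/*
+	 * Run the queued work in order, dropping the list lock around each
+	 * callback, and stop once the requested item has run (or the list
+	 * is empty in the flush-all case).
+	 */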
+ while (!list_empty(&lwiphy->wwk_list)) {
+
+ wk = list_first_entry(&lwiphy->wwk_list, struct wiphy_work,
+ entry);
+ list_del_init(&wk->entry);
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+ wk->fn(wiphy, wk);
+ LKPI_80211_LWIPHY_WORK_LOCK(lwiphy);
+ if (wk == wwk)
+ break;
+ }
+ LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy);
+}
+
+void
+lkpi_wiphy_delayed_work_timer(struct timer_list *tl)
+{
+ struct wiphy_delayed_work *wdwk;
+
+ wdwk = from_timer(wdwk, tl, timer);
+ wiphy_work_queue(wdwk->wiphy, &wdwk->work);
+}
+
+void
+linuxkpi_wiphy_delayed_work_queue(struct wiphy *wiphy,
+ struct wiphy_delayed_work *wdwk, unsigned long delay)
+{
+ if (delay == 0) {
+ /* Run right away. */
+ del_timer(&wdwk->timer);
+ wiphy_work_queue(wiphy, &wdwk->work);
+ } else {
+ wdwk->wiphy = wiphy;
+ mod_timer(&wdwk->timer, jiffies + delay);
+ }
+}
+
+void
+linuxkpi_wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *wdwk)
+{
+ del_timer_sync(&wdwk->timer);
+ wiphy_work_cancel(wiphy, &wdwk->work);
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct wiphy *
+linuxkpi_wiphy_new(const struct cfg80211_ops *ops, size_t priv_len)
+{
+ struct lkpi_wiphy *lwiphy;
+ struct wiphy *wiphy;
+
+ lwiphy = kzalloc(sizeof(*lwiphy) + priv_len, GFP_KERNEL);
+ if (lwiphy == NULL)
+ return (NULL);
+ lwiphy->ops = ops;
+
+ LKPI_80211_LWIPHY_WORK_LOCK_INIT(lwiphy);
+ INIT_LIST_HEAD(&lwiphy->wwk_list);
+ INIT_WORK(&lwiphy->wwk, lkpi_wiphy_work);
+
+ wiphy = LWIPHY_TO_WIPHY(lwiphy);
+
+ mutex_init(&wiphy->mtx);
+ TODO();
+
+ return (wiphy);
+}
+
+void
+linuxkpi_wiphy_free(struct wiphy *wiphy)
+{
+ struct lkpi_wiphy *lwiphy;
+
+ if (wiphy == NULL)
+ return;
+
+ linuxkpi_wiphy_work_flush(wiphy, NULL);
+ mutex_destroy(&wiphy->mtx);
+
+ lwiphy = WIPHY_TO_LWIPHY(wiphy);
+ LKPI_80211_LWIPHY_WORK_LOCK_DESTROY(lwiphy);
+
+ kfree(lwiphy);
+}
+
+static uint32_t
+lkpi_cfg80211_calculate_bitrate_ht(struct rate_info *rate)
+{
+ TODO("cfg80211_calculate_bitrate_ht");
+ return (rate->legacy);
+}
+
+static uint32_t
+lkpi_cfg80211_calculate_bitrate_vht(struct rate_info *rate)
+{
+ TODO("cfg80211_calculate_bitrate_vht");
+ return (rate->legacy);
+}
+
+uint32_t
+linuxkpi_cfg80211_calculate_bitrate(struct rate_info *rate)
+{
+
+ /* Beware: order! */
+ if (rate->flags & RATE_INFO_FLAGS_MCS)
+ return (lkpi_cfg80211_calculate_bitrate_ht(rate));
+
+ if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+ return (lkpi_cfg80211_calculate_bitrate_vht(rate));
+
+ IMPROVE("HE/EHT/...");
+
+ return (rate->legacy);
+}
+
+uint32_t
+linuxkpi_ieee80211_channel_to_frequency(uint32_t channel,
+ enum nl80211_band band)
+{
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return (ieee80211_ieee2mhz(channel, IEEE80211_CHAN_2GHZ));
+ break;
+ case NL80211_BAND_5GHZ:
+ return (ieee80211_ieee2mhz(channel, IEEE80211_CHAN_5GHZ));
+ break;
+ default:
+ /* XXX abort, retry, error, panic? */
+ break;
+ }
+
+ return (0);
+}
+
+uint32_t
+linuxkpi_ieee80211_frequency_to_channel(uint32_t freq, uint32_t flags __unused)
+{
+
+ return (ieee80211_mhz2ieee(freq, 0));
+}
+
+#if 0
+static struct lkpi_sta *
+lkpi_find_lsta_by_ni(struct lkpi_vif *lvif, struct ieee80211_node *ni)
+{
+ struct lkpi_sta *lsta, *temp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) {
+ if (lsta->ni == ni) {
+ rcu_read_unlock();
+ return (lsta);
+ }
+ }
+ rcu_read_unlock();
+
+ return (NULL);
+}
+#endif
+
+struct ieee80211_sta *
+linuxkpi_ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *peer)
+{
+ struct lkpi_vif *lvif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_sta *sta;
+
+ lvif = VIF_TO_LVIF(vif);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) {
+ sta = LSTA_TO_STA(lsta);
+ if (IEEE80211_ADDR_EQ(sta->addr, peer)) {
+ rcu_read_unlock();
+ return (sta);
+ }
+ }
+ rcu_read_unlock();
+ return (NULL);
+}
+
+struct ieee80211_sta *
+linuxkpi_ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
+ const uint8_t *addr, const uint8_t *ourvifaddr)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct lkpi_sta *lsta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+
+ lhw = wiphy_priv(hw->wiphy);
+ sta = NULL;
+
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
+
+ /* XXX-BZ check our address from the vif. */
+
+ vif = LVIF_TO_VIF(lvif);
+ if (ourvifaddr != NULL &&
+ !IEEE80211_ADDR_EQ(vif->addr, ourvifaddr))
+ continue;
+ sta = linuxkpi_ieee80211_find_sta(vif, addr);
+ if (sta != NULL)
+ break;
+ }
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+
+ if (sta != NULL) {
+ lsta = STA_TO_LSTA(sta);
+ if (!lsta->added_to_drv)
+ return (NULL);
+ }
+
+ return (sta);
+}
+
+struct sk_buff *
+linuxkpi_ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct lkpi_txq *ltxq;
+ struct lkpi_vif *lvif;
+ struct sk_buff *skb;
+
+ IMPROVE("wiphy_lock? or assert?");
+ skb = NULL;
+ ltxq = TXQ_TO_LTXQ(txq);
+ ltxq->seen_dequeue = true;
+
+ if (ltxq->stopped)
+ goto stopped;
+
+ lvif = VIF_TO_LVIF(ltxq->txq.vif);
+ if (lvif->hw_queue_stopped[ltxq->txq.ac]) {
+ ltxq->stopped = true;
+ goto stopped;
+ }
+
+ IMPROVE("hw(TX_FRAG_LIST)");
+
+ LKPI_80211_LTXQ_LOCK(ltxq);
+ skb = skb_dequeue(&ltxq->skbq);
+ LKPI_80211_LTXQ_UNLOCK(ltxq);
+
+stopped:
+ return (skb);
+}
+
+void
+linuxkpi_ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+ unsigned long *frame_cnt, unsigned long *byte_cnt)
+{
+ struct lkpi_txq *ltxq;
+ struct sk_buff *skb;
+ unsigned long fc, bc;
+
+ ltxq = TXQ_TO_LTXQ(txq);
+
+ fc = bc = 0;
+ LKPI_80211_LTXQ_LOCK(ltxq);
+ skb_queue_walk(&ltxq->skbq, skb) {
+ fc++;
+ bc += skb->len;
+ }
+ LKPI_80211_LTXQ_UNLOCK(ltxq);
+ if (frame_cnt)
+ *frame_cnt = fc;
+ if (byte_cnt)
+ *byte_cnt = bc;
+
+ /* Validate that this is doing the correct thing. */
+ /* Should we keep track on en/dequeue? */
+ IMPROVE();
+}
+
+/*
+ * We are called from ieee80211_free_txskb() or ieee80211_tx_status().
+ * The latter tries to derive the success status from the info flags
+ * passed back from the driver. rawx_mit() saves the ni on the m and the
+ * m on the skb for us to be able to give feedback to net80211.
+ */
+static void
+_lkpi_ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb,
+ int status)
+{
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+
+ m = skb->m;
+ skb->m = NULL;
+
+ if (m != NULL) {
+ ni = m->m_pkthdr.PH_loc.ptr;
+ /* Status: 0 is ok, != 0 is error. */
+ ieee80211_tx_complete(ni, m, status);
+ /* ni & mbuf were consumed. */
+ }
+}
+
+void
+linuxkpi_ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb,
+ int status)
+{
+
+ _lkpi_ieee80211_free_txskb(hw, skb, status);
+ kfree_skb(skb);
+}
+
+void
+linuxkpi_ieee80211_tx_status_ext(struct ieee80211_hw *hw,
+ struct ieee80211_tx_status *txstat)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_ratectl_tx_status txs;
+ struct ieee80211_node *ni;
+ int status;
+
+ skb = txstat->skb;
+ if (skb->m != NULL) {
+ struct mbuf *m;
+
+ m = skb->m;
+ ni = m->m_pkthdr.PH_loc.ptr;
+ memset(&txs, 0, sizeof(txs));
+ } else {
+ ni = NULL;
+ }
+
+ info = txstat->info;
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ status = 0; /* No error. */
+ txs.status = IEEE80211_RATECTL_TX_SUCCESS;
+ } else {
+ status = 1;
+ txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
+ }
+
+ if (ni != NULL) {
+ txs.pktlen = skb->len;
+ txs.flags |= IEEE80211_RATECTL_STATUS_PKTLEN;
+ if (info->status.rates[0].count > 1) {
+ txs.long_retries = info->status.rates[0].count - 1; /* 1 + retries in drivers. */
+ txs.flags |= IEEE80211_RATECTL_STATUS_LONG_RETRY;
+ }
+#if 0 /* Unused in net80211 currently. */
+ /* XXX-BZ convert check .flags for MCS/VHT/.. */
+ txs.final_rate = info->status.rates[0].idx;
+ txs.flags |= IEEE80211_RATECTL_STATUS_FINAL_RATE;
+#endif
+ if (info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID) {
+ txs.rssi = info->status.ack_signal; /* XXX-BZ CONVERT? */
+ txs.flags |= IEEE80211_RATECTL_STATUS_RSSI;
+ }
+
+ IMPROVE("only update rate if needed but that requires us to get a proper rate from mo_sta_statistics");
+ ieee80211_ratectl_tx_complete(ni, &txs);
+ ieee80211_ratectl_rate(ni->ni_vap->iv_bss, NULL, 0);
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX) {
+ printf("TX-RATE: %s: long_retries %d\n", __func__,
+ txs.long_retries);
+ }
+#endif
+ }
+
+#ifdef LINUXKPI_DEBUG_80211
+ if (linuxkpi_debug_80211 & D80211_TRACE_TX)
+ printf("TX-STATUS: %s: hw %p skb %p status %d : flags %#x "
+ "band %u hw_queue %u tx_time_est %d : "
+ "rates [ %u %u %#x, %u %u %#x, %u %u %#x, %u %u %#x ] "
+ "ack_signal %u ampdu_ack_len %u ampdu_len %u antenna %u "
+ "tx_time %u flags %#x "
+ "status_driver_data [ %p %p ]\n",
+ __func__, hw, skb, status, info->flags,
+ info->band, info->hw_queue, info->tx_time_est,
+ info->status.rates[0].idx, info->status.rates[0].count,
+ info->status.rates[0].flags,
+ info->status.rates[1].idx, info->status.rates[1].count,
+ info->status.rates[1].flags,
+ info->status.rates[2].idx, info->status.rates[2].count,
+ info->status.rates[2].flags,
+ info->status.rates[3].idx, info->status.rates[3].count,
+ info->status.rates[3].flags,
+ info->status.ack_signal, info->status.ampdu_ack_len,
+ info->status.ampdu_len, info->status.antenna,
+ info->status.tx_time, info->status.flags,
+ info->status.status_driver_data[0],
+ info->status.status_driver_data[1]);
+#endif
+
+ if (txstat->free_list) {
+ _lkpi_ieee80211_free_txskb(hw, skb, status);
+ list_add_tail(&skb->list, txstat->free_list);
+ } else {
+ linuxkpi_ieee80211_free_txskb(hw, skb, status);
+ }
+}
+
+void
+linuxkpi_ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_tx_status status;
+
+ memset(&status, 0, sizeof(status));
+ status.info = IEEE80211_SKB_CB(skb);
+ status.skb = skb;
+ /* sta, n_rates, rates, free_list? */
+
+ ieee80211_tx_status_ext(hw, &status);
+}
+
+/*
+ * This is an internal bandaid for the moment for the way we glue
+ * skbs and mbufs together for TX. Once we have skbs backed by
+ * mbufs this should go away.
+ * This is a public function but kept on the private KPI (lkpi_)
+ * and is not exposed by a header file.
+ */
+static void
+lkpi_ieee80211_free_skb_mbuf(void *p)
+{
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+
+ if (p == NULL)
+ return;
+
+ m = (struct mbuf *)p;
+ M_ASSERTPKTHDR(m);
+
+ ni = m->m_pkthdr.PH_loc.ptr;
+ m->m_pkthdr.PH_loc.ptr = NULL;
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ m_freem(m);
+}
+
+void
+linuxkpi_ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
+ struct delayed_work *w, int delay)
+{
+ struct lkpi_hw *lhw;
+
+ /* Need to make sure hw is in a stable (non-suspended) state. */
+ IMPROVE();
+
+ lhw = HW_TO_LHW(hw);
+ queue_delayed_work(lhw->workq, w, delay);
+}
+
+void
+linuxkpi_ieee80211_queue_work(struct ieee80211_hw *hw,
+ struct work_struct *w)
+{
+ struct lkpi_hw *lhw;
+
+ /* Need to make sure hw is in a stable (non-suspended) state. */
+ IMPROVE();
+
+ lhw = HW_TO_LHW(hw);
+ queue_work(lhw->workq, w);
+}
+
+struct sk_buff *
+linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr,
+ uint8_t *ssid, size_t ssid_len, size_t tailroom)
+{
+ struct sk_buff *skb;
+ struct ieee80211_frame *wh;
+ uint8_t *p;
+ size_t len;
+
+ len = sizeof(*wh);
+ len += 2 + ssid_len;
+
+ skb = dev_alloc_skb(hw->extra_tx_headroom + len + tailroom);
+ if (skb == NULL)
+ return (NULL);
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+
+ wh = skb_put_zero(skb, sizeof(*wh));
+ wh->i_fc[0] = IEEE80211_FC0_VERSION_0;
+ wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PROBE_REQ | IEEE80211_FC0_TYPE_MGT;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, addr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
+
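+	/* Append the SSID IE: element ID, length, SSID (empty == wildcard). */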
+ p = skb_put(skb, 2 + ssid_len);
+ *p++ = IEEE80211_ELEMID_SSID;
+ *p++ = ssid_len;
+ if (ssid_len > 0)
+ memcpy(p, ssid, ssid_len);
+
+ return (skb);
+}
+
+struct sk_buff *
+linuxkpi_ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct lkpi_vif *lvif;
+ struct ieee80211vap *vap;
+ struct sk_buff *skb;
+ struct ieee80211_frame_pspoll *psp;
+ uint16_t v;
+
+ skb = dev_alloc_skb(hw->extra_tx_headroom + sizeof(*psp));
+ if (skb == NULL)
+ return (NULL);
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+
+ lvif = VIF_TO_LVIF(vif);
+ vap = LVIF_TO_VAP(lvif);
+
+ psp = skb_put_zero(skb, sizeof(*psp));
+ psp->i_fc[0] = IEEE80211_FC0_VERSION_0;
+ psp->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PS_POLL | IEEE80211_FC0_TYPE_CTL;
+ v = htole16(vif->cfg.aid | 1<<15 | 1<<16);
+ memcpy(&psp->i_aid, &v, sizeof(v));
+ IEEE80211_ADDR_COPY(psp->i_bssid, vap->iv_bss->ni_macaddr);
+ IEEE80211_ADDR_COPY(psp->i_ta, vif->addr);
+
+ return (skb);
+}
+
+struct sk_buff *
+linuxkpi_ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int linkid, bool qos)
+{
+ struct lkpi_vif *lvif;
+ struct ieee80211vap *vap;
+ struct sk_buff *skb;
+ struct ieee80211_frame *nullf;
+
+ IMPROVE("linkid");
+
+ skb = dev_alloc_skb(hw->extra_tx_headroom + sizeof(*nullf));
+ if (skb == NULL)
+ return (NULL);
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+
+ lvif = VIF_TO_LVIF(vif);
+ vap = LVIF_TO_VAP(lvif);
+
+ nullf = skb_put_zero(skb, sizeof(*nullf));
+ nullf->i_fc[0] = IEEE80211_FC0_VERSION_0;
+ nullf->i_fc[0] |= IEEE80211_FC0_SUBTYPE_NODATA | IEEE80211_FC0_TYPE_DATA;
+ nullf->i_fc[1] = IEEE80211_FC1_DIR_TODS;
+
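+	/* To-DS addressing: addr1 = BSSID, addr2 = transmitter, addr3 = DA. */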
+ IEEE80211_ADDR_COPY(nullf->i_addr1, vap->iv_bss->ni_bssid);
+ IEEE80211_ADDR_COPY(nullf->i_addr2, vif->addr);
+ IEEE80211_ADDR_COPY(nullf->i_addr3, vap->iv_bss->ni_macaddr);
+
+ return (skb);
+}
+
+struct wireless_dev *
+linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
+{
+ struct lkpi_vif *lvif;
+
+ lvif = VIF_TO_LVIF(vif);
+ return (&lvif->wdev);
+}
+
+void
+linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+ struct lkpi_vif *lvif;
+ struct ieee80211vap *vap;
+ enum ieee80211_state nstate;
+ int arg;
+
+ lvif = VIF_TO_LVIF(vif);
+ vap = LVIF_TO_VAP(lvif);
+
+ /*
+ * Go to init; otherwise we need to elaborately check state and
+ * handle accordingly, e.g., if in RUN we could call iv_bmiss.
+	 * Let the state machine handle all necessary changes.
+ */
+ nstate = IEEE80211_S_INIT;
+ arg = 0; /* Not a valid reason. */
+
+ ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__,
+ vif, vap, ieee80211_state_name[vap->iv_state]);
+ ieee80211_new_state(vap, nstate, arg);
+}
+
+void
+linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *vif)
+{
+ struct lkpi_vif *lvif;
+ struct ieee80211vap *vap;
+
+ lvif = VIF_TO_LVIF(vif);
+ vap = LVIF_TO_VAP(lvif);
+
+ ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__,
+ vif, vap, ieee80211_state_name[vap->iv_state]);
+ ieee80211_beacon_miss(vap->iv_ic);
+}
+
+/* -------------------------------------------------------------------------- */
+
+void
+linuxkpi_ieee80211_stop_queue(struct ieee80211_hw *hw, int qnum)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ int ac_count, ac;
+
+ KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n",
+ __func__, qnum, hw->queues, hw));
+
+ lhw = wiphy_priv(hw->wiphy);
+
+ /* See lkpi_ic_vap_create(). */
+ if (hw->queues >= IEEE80211_NUM_ACS)
+ ac_count = IEEE80211_NUM_ACS;
+ else
+ ac_count = 1;
+
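+	/*
+	 * Mark the matching per-vif AC as stopped; the TX dequeue path
+	 * checks this flag before handing out skbs.
+	 */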
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
+
+ vif = LVIF_TO_VIF(lvif);
+ for (ac = 0; ac < ac_count; ac++) {
+ IMPROVE_TXQ("LOCKING");
+ if (qnum == vif->hw_queue[ac]) {
+#ifdef LINUXKPI_DEBUG_80211
+ /*
+ * For now log this to better understand
+ * how this is supposed to work.
+ */
+ if (lvif->hw_queue_stopped[ac] &&
+ (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) != 0)
+ ic_printf(lhw->ic, "%s:%d: lhw %p hw %p "
+ "lvif %p vif %p ac %d qnum %d already "
+ "stopped\n", __func__, __LINE__,
+ lhw, hw, lvif, vif, ac, qnum);
+#endif
+ lvif->hw_queue_stopped[ac] = true;
+ }
+ }
+ }
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+}
+
+void
+linuxkpi_ieee80211_stop_queues(struct ieee80211_hw *hw)
+{
+ int i;
+
+ IMPROVE_TXQ("Locking; do we need further info?");
+ for (i = 0; i < hw->queues; i++)
+ linuxkpi_ieee80211_stop_queue(hw, i);
+}
+
+static void
+lkpi_ieee80211_wake_queues(struct ieee80211_hw *hw, int hwq)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct lkpi_sta *lsta;
+ int ac_count, ac, tid;
+
+ /* See lkpi_ic_vap_create(). */
+ if (hw->queues >= IEEE80211_NUM_ACS)
+ ac_count = IEEE80211_NUM_ACS;
+ else
+ ac_count = 1;
+
+ lhw = wiphy_priv(hw->wiphy);
+
+ IMPROVE_TXQ("Locking");
+ LKPI_80211_LHW_LVIF_LOCK(lhw);
+ TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
+ struct ieee80211_vif *vif;
+
+ vif = LVIF_TO_VIF(lvif);
+ for (ac = 0; ac < ac_count; ac++) {
+
+ if (hwq == vif->hw_queue[ac]) {
+
+ /* XXX-BZ what about software scan? */
+
+#ifdef LINUXKPI_DEBUG_80211
+ /*
+ * For now log this to better understand
+ * how this is supposed to work.
+ */
+ if (!lvif->hw_queue_stopped[ac] &&
+ (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) != 0)
+ ic_printf(lhw->ic, "%s:%d: lhw %p hw %p "
+ "lvif %p vif %p ac %d hw_q not stopped\n",
+ __func__, __LINE__,
+ lhw, hw, lvif, vif, ac);
+#endif
+ lvif->hw_queue_stopped[ac] = false;
+
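+				/*
+				 * Kick any txq on this AC that still has
+				 * frames queued so the driver dequeues what
+				 * accumulated while stopped.
+				 */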
+ rcu_read_lock();
+ list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) {
+ struct ieee80211_sta *sta;
+
+ sta = LSTA_TO_STA(lsta);
+ for (tid = 0; tid < nitems(sta->txq); tid++) {
+ struct lkpi_txq *ltxq;
+
+ if (sta->txq[tid] == NULL)
+ continue;
+
+ if (sta->txq[tid]->ac != ac)
+ continue;
+
+ ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
+ if (!ltxq->stopped)
+ continue;
+
+ ltxq->stopped = false;
+
+ if (!skb_queue_empty(&ltxq->skbq))
+ lkpi_80211_mo_wake_tx_queue(hw, sta->txq[tid]);
+ }
+ }
+ rcu_read_unlock();
+ }
+ }
+ }
+ LKPI_80211_LHW_LVIF_UNLOCK(lhw);
+}
+
+static void
+lkpi_ieee80211_wake_queues_locked(struct ieee80211_hw *hw)
+{
+ int i;
+
+ IMPROVE_TXQ("Is this all/enough here?");
+ for (i = 0; i < hw->queues; i++)
+ lkpi_ieee80211_wake_queues(hw, i);
+}
+
+void
+linuxkpi_ieee80211_wake_queues(struct ieee80211_hw *hw)
+{
+ wiphy_lock(hw->wiphy);
+ lkpi_ieee80211_wake_queues_locked(hw);
+ wiphy_unlock(hw->wiphy);
+}
+
+void
+linuxkpi_ieee80211_wake_queue(struct ieee80211_hw *hw, int qnum)
+{
+
+ KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n",
+ __func__, qnum, hw->queues, hw));
+
+ wiphy_lock(hw->wiphy);
+ lkpi_ieee80211_wake_queues(hw, qnum);
+ wiphy_unlock(hw->wiphy);
+}
+
+/* This is just hardware queues. */
+void
+linuxkpi_ieee80211_txq_schedule_start(struct ieee80211_hw *hw, uint8_t ac)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+
+ IMPROVE_TXQ("Are there reasons why we wouldn't schedule?");
+ IMPROVE_TXQ("LOCKING");
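+	/* Generation 0 means "not scheduled"; skip it when wrapping. */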
+ if (++lhw->txq_generation[ac] == 0)
+ lhw->txq_generation[ac]++;
+}
+
+struct ieee80211_txq *
+linuxkpi_ieee80211_next_txq(struct ieee80211_hw *hw, uint8_t ac)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_txq *txq;
+ struct lkpi_txq *ltxq;
+
+ lhw = HW_TO_LHW(hw);
+ txq = NULL;
+
+ IMPROVE_TXQ("LOCKING");
+
+ /* Check that we are scheduled. */
+ if (lhw->txq_generation[ac] == 0)
+ goto out;
+
+ ltxq = TAILQ_FIRST(&lhw->scheduled_txqs[ac]);
+ if (ltxq == NULL)
+ goto out;
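+	/* Already handed out during this scheduling round?  Then stop. */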
+ if (ltxq->txq_generation == lhw->txq_generation[ac])
+ goto out;
+
+ ltxq->txq_generation = lhw->txq_generation[ac];
+ TAILQ_REMOVE(&lhw->scheduled_txqs[ac], ltxq, txq_entry);
+ txq = &ltxq->txq;
+ TAILQ_ELEM_INIT(ltxq, txq_entry);
+
+out:
+ return (txq);
+}
+
+void linuxkpi_ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq, bool withoutpkts)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_txq *ltxq;
+ bool ltxq_empty;
+
+ ltxq = TXQ_TO_LTXQ(txq);
+
+ IMPROVE_TXQ("LOCKING");
+
+ /* Only schedule if work to do or asked to anyway. */
+ LKPI_80211_LTXQ_LOCK(ltxq);
+ ltxq_empty = skb_queue_empty(&ltxq->skbq);
+ LKPI_80211_LTXQ_UNLOCK(ltxq);
+ if (!withoutpkts && ltxq_empty)
+ goto out;
+
+ /*
+ * Make sure we do not double-schedule. We do this by checking tqe_prev,
+ * the previous entry in our tailq. tqe_prev is always valid if this entry
+	 * is queued, tqe_next may be NULL if this is the last element in the list.
+ */
+ if (ltxq->txq_entry.tqe_prev != NULL)
+ goto out;
+
+ lhw = HW_TO_LHW(hw);
+ TAILQ_INSERT_TAIL(&lhw->scheduled_txqs[txq->ac], ltxq, txq_entry);
+out:
+ return;
+}
+
+void
+linuxkpi_ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_txq *ntxq;
+ struct ieee80211_tx_control control;
+ struct sk_buff *skb;
+
+ lhw = HW_TO_LHW(hw);
+
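+	/*
+	 * Drain every scheduled txq for this AC, pushing each dequeued skb
+	 * straight to the driver's (*tx)() method.
+	 */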
+ LKPI_80211_LHW_TXQ_LOCK(lhw);
+ ieee80211_txq_schedule_start(hw, txq->ac);
+ do {
+ ntxq = ieee80211_next_txq(hw, txq->ac);
+ if (ntxq == NULL)
+ break;
+
+ memset(&control, 0, sizeof(control));
+ control.sta = ntxq->sta;
+ do {
+ skb = linuxkpi_ieee80211_tx_dequeue(hw, ntxq);
+ if (skb == NULL)
+ break;
+ lkpi_80211_mo_tx(hw, &control, skb);
+		} while (1);
+
+ ieee80211_return_txq(hw, ntxq, false);
+ } while (1);
+ ieee80211_txq_schedule_end(hw, txq->ac);
+ LKPI_80211_LHW_TXQ_UNLOCK(lhw);
+}
+
+/* -------------------------------------------------------------------------- */
+
+struct lkpi_cfg80211_bss {
+ u_int refcnt;
+ struct cfg80211_bss bss;
+};
+
+struct lkpi_cfg80211_get_bss_iter_lookup {
+ struct wiphy *wiphy;
+ struct linuxkpi_ieee80211_channel *chan;
+ const uint8_t *bssid;
+ const uint8_t *ssid;
+ size_t ssid_len;
+ enum ieee80211_bss_type bss_type;
+ enum ieee80211_privacy privacy;
+
+ /*
+	 * Somewhere to store a copy of the result as the net80211 scan cache
+	 * is not refcounted, so a scan entry might go away at any time.
+ */
+ bool match;
+ struct cfg80211_bss *bss;
+};
+
+static void
+lkpi_cfg80211_get_bss_iterf(void *arg, const struct ieee80211_scan_entry *se)
+{
+ struct lkpi_cfg80211_get_bss_iter_lookup *lookup;
+ size_t ielen;
+
+ lookup = arg;
+
+ /* Do not try to find another match. */
+ if (lookup->match)
+ return;
+
+ /* Nothing to store result. */
+ if (lookup->bss == NULL)
+ return;
+
+ if (lookup->privacy != IEEE80211_PRIVACY_ANY) {
+ /* if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) */
+ /* We have no idea what to compare to as the drivers only request ANY */
+ return;
+ }
+
+ if (lookup->bss_type != IEEE80211_BSS_TYPE_ANY) {
+ /* if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS)) */
+ /* We have no idea what to compare to as the drivers only request ANY */
+ return;
+ }
+
+ if (lookup->chan != NULL) {
+ struct linuxkpi_ieee80211_channel *chan;
+
+ chan = linuxkpi_ieee80211_get_channel(lookup->wiphy,
+ se->se_chan->ic_freq);
+ if (chan == NULL || chan != lookup->chan)
+ return;
+ }
+
+ if (lookup->bssid && !IEEE80211_ADDR_EQ(lookup->bssid, se->se_bssid))
+ return;
+
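+	/* se_ssid[] holds a full SSID IE: element ID, length, then the data. */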
+ if (lookup->ssid) {
+ if (lookup->ssid_len != se->se_ssid[1] ||
+ se->se_ssid[1] == 0)
+ return;
+ if (memcmp(lookup->ssid, se->se_ssid+2, lookup->ssid_len) != 0)
+ return;
+ }
+
+ ielen = se->se_ies.len;
+
+ lookup->bss->ies = malloc(sizeof(*lookup->bss->ies) + ielen,
+ M_LKPI80211, M_NOWAIT | M_ZERO);
+ if (lookup->bss->ies == NULL)
+ return;
+
+ lookup->bss->ies->data = (uint8_t *)lookup->bss->ies + sizeof(*lookup->bss->ies);
+ lookup->bss->ies->len = ielen;
+ if (ielen)
+ memcpy(lookup->bss->ies->data, se->se_ies.data, ielen);
+
+ lookup->match = true;
+}
+
+struct cfg80211_bss *
+linuxkpi_cfg80211_get_bss(struct wiphy *wiphy, struct linuxkpi_ieee80211_channel *chan,
+ const uint8_t *bssid, const uint8_t *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy)
+{
+ struct lkpi_cfg80211_bss *lbss;
+ struct lkpi_cfg80211_get_bss_iter_lookup lookup;
+ struct lkpi_hw *lhw;
+ struct ieee80211vap *vap;
+
+ lhw = wiphy_priv(wiphy);
+
+ /* Let's hope we can alloc. */
+ lbss = malloc(sizeof(*lbss), M_LKPI80211, M_NOWAIT | M_ZERO);
+ if (lbss == NULL) {
+ ic_printf(lhw->ic, "%s: alloc failed.\n", __func__);
+ return (NULL);
+ }
+
+ lookup.wiphy = wiphy;
+ lookup.chan = chan;
+ lookup.bssid = bssid;
+ lookup.ssid = ssid;
+ lookup.ssid_len = ssid_len;
+ lookup.bss_type = bss_type;
+ lookup.privacy = privacy;
+ lookup.match = false;
+ lookup.bss = &lbss->bss;
+
+ IMPROVE("Iterate over all VAPs comparing perm_addr and addresses?");
+ vap = TAILQ_FIRST(&lhw->ic->ic_vaps);
+ ieee80211_scan_iterate(vap, lkpi_cfg80211_get_bss_iterf, &lookup);
+ if (!lookup.match) {
+ free(lbss, M_LKPI80211);
+ return (NULL);
+ }
+
+ refcount_init(&lbss->refcnt, 1);
+ return (&lbss->bss);
+}
+
+void
+linuxkpi_cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss)
+{
+ struct lkpi_cfg80211_bss *lbss;
+
+ lbss = container_of(bss, struct lkpi_cfg80211_bss, bss);
+
+ /* Free everything again on refcount ... */
+ if (refcount_release(&lbss->refcnt)) {
+ free(lbss->bss.ies, M_LKPI80211);
+ free(lbss, M_LKPI80211);
+ }
+}
+
+void
+linuxkpi_cfg80211_bss_flush(struct wiphy *wiphy)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211com *ic;
+ struct ieee80211vap *vap;
+
+ lhw = wiphy_priv(wiphy);
+ ic = lhw->ic;
+
+ /*
+ * If we haven't called ieee80211_ifattach() yet
+ * or there is no VAP, there are no scans to flush.
+ */
+ if (ic == NULL ||
+ (lhw->sc_flags & LKPI_MAC80211_DRV_STARTED) == 0)
+ return;
+
+ /* Should only happen on the current one? Not seen it late enough. */
+ IEEE80211_LOCK(ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ ieee80211_scan_flush(vap);
+ IEEE80211_UNLOCK(ic);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * hw->conf get initialized/set in various places for us:
+ * - linuxkpi_ieee80211_alloc_hw(): flags
+ * - linuxkpi_ieee80211_ifattach(): chandef
+ * - lkpi_ic_vap_create(): listen_interval
+ * - lkpi_ic_set_channel(): chandef, flags
+ */
+
+int lkpi_80211_update_chandef(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *new)
+{
+ struct cfg80211_chan_def *cd;
+ uint32_t changed;
+ int error;
+
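+	/* Only a channel pointer change currently triggers a (*config)() call. */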
+ changed = 0;
+ if (new == NULL || new->def.chan == NULL)
+ cd = NULL;
+ else
+ cd = &new->def;
+
+ if (cd && cd->chan != hw->conf.chandef.chan) {
+ /* Copy; the chan pointer is fine and will stay valid. */
+ hw->conf.chandef = *cd;
+ changed |= IEEE80211_CONF_CHANGE_CHANNEL;
+ }
+ IMPROVE("IEEE80211_CONF_CHANGE_PS, IEEE80211_CONF_CHANGE_POWER");
+
+ if (changed == 0)
+ return (0);
+
+ error = lkpi_80211_mo_config(hw, changed);
+ return (error);
+}
+
+/* -------------------------------------------------------------------------- */
+
+MODULE_VERSION(linuxkpi_wlan, 1);
+MODULE_DEPEND(linuxkpi_wlan, linuxkpi, 1, 1, 1);
+MODULE_DEPEND(linuxkpi_wlan, wlan, 1, 1, 1);
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.h b/sys/compat/linuxkpi/common/src/linux_80211.h
new file mode 100644
index 000000000000..89afec1235bd
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_80211.h
@@ -0,0 +1,454 @@
+/*-
+ * Copyright (c) 2020-2023 The FreeBSD Foundation
+ * Copyright (c) 2020-2021 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Public functions are called linuxkpi_*().
+ * Internal (static) functions are called lkpi_*().
+ *
+ * The internal structures holding metadata over public structures are also
+ * called lkpi_xxx (usually with a member at the end called xxx).
+ * Note: we do not replicate the full structure names but use the common
+ * variable names for these (e.g., struct ieee80211_hw -> struct lkpi_hw,
+ * struct ieee80211_sta -> struct lkpi_sta).
+ * There are macros to access one from the other.
+ * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta).
+ */
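+
+/*
+ * Illustrative sketch only (not compiled): with these conventions the
+ * wrapper and its embedded public structure convert into each other via
+ * the container_of()-based accessor macros defined further below, e.g.:
+ *
+ *	struct lkpi_hw *lhw = HW_TO_LHW(hw);		// public -> internal
+ *	struct ieee80211_hw *pub = LHW_TO_HW(lhw);	// internal -> public
+ *
+ * The same pattern applies to sta/lsta, vif/lvif, txq/ltxq and wiphy/lwiphy.
+ */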
+
+#ifndef _LKPI_SRC_LINUX_80211_H
+#define _LKPI_SRC_LINUX_80211_H
+
+#include "opt_wlan.h"
+
+#if defined(IEEE80211_DEBUG) && !defined(LINUXKPI_DEBUG_80211)
+#define LINUXKPI_DEBUG_80211
+#endif
+
+/* #define LINUXKPI_DEBUG_80211 */
+
+#ifndef D80211_TODO
+#define D80211_TODO 0x00000001
+#endif
+#ifndef D80211_IMPROVE
+#define D80211_IMPROVE 0x00000002
+#endif
+#define D80211_IMPROVE_TXQ 0x00000004
+#define D80211_TRACE 0x00000010
+#define D80211_TRACEOK 0x00000020
+#define D80211_TRACE_TX 0x00000100
+#define D80211_TRACE_TX_DUMP 0x00000200
+#define D80211_TRACE_RX 0x00001000
+#define D80211_TRACE_RX_DUMP 0x00002000
+#define D80211_TRACE_RX_BEACONS 0x00004000
+#define D80211_TRACEX (D80211_TRACE_TX|D80211_TRACE_RX)
+#define D80211_TRACEX_DUMP (D80211_TRACE_TX_DUMP|D80211_TRACE_RX_DUMP)
+#define D80211_TRACE_STA 0x00010000
+#define D80211_TRACE_HW_CRYPTO 0x00020000
+#define D80211_TRACE_MO 0x00100000
+#define D80211_TRACE_MODE 0x0f000000
+#define D80211_TRACE_MODE_HT 0x01000000
+#define D80211_TRACE_MODE_VHT 0x02000000
+#define D80211_TRACE_MODE_HE 0x04000000
+#define D80211_TRACE_MODE_EHT 0x08000000
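+
+/*
+ * Illustrative example only: the bits above are OR'ed into the
+ * linuxkpi_debug_80211 mask, e.g. to trace the TX and RX paths plus all
+ * mac80211 ops (MO) calls:
+ *
+ *	linuxkpi_debug_80211 = D80211_TRACEX | D80211_TRACE_MO;   // 0x00101100
+ *
+ * The mask is normally set at runtime via a debug sysctl/tunable; the exact
+ * OID lives in the implementation file and is not defined in this header.
+ */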
+
+#define IMPROVE_TXQ(...) \
+ if (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) \
+ printf("%s:%d: XXX LKPI80211 IMPROVE_TXQ\n", __func__, __LINE__)
+
+#define IMPROVE_HT(fmt, ...) \
+ if (linuxkpi_debug_80211 & D80211_TRACE_MODE_HT) \
+ printf("%s:%d: XXX LKPI80211 IMPROVE_HT " fmt "\n", \
+ __func__, __LINE__, ##__VA_ARGS__);
+
+#define MTAG_ABI_LKPI80211 1707696513 /* LinuxKPI 802.11 KBI */
+
+#ifdef LKPI_80211_USE_MTAG
+/*
+ * Deferred RX path.
+ * We need to pass *ni along (and possibly more in the future so
+ * we use a struct right from the start.
+ */
+#define LKPI80211_TAG_RXNI 0 /* deferred RX path */
+struct lkpi_80211_tag_rxni {
+ struct ieee80211_node *ni; /* MUST hold a reference to it. */
+};
+#endif
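+
+/*
+ * Illustrative sketch only: the tag above is carried on the mbuf using the
+ * standard m_tag(9) API, roughly (error handling omitted):
+ *
+ *	struct m_tag *mtag;
+ *	struct lkpi_80211_tag_rxni *rxni;
+ *
+ *	mtag = m_tag_alloc(MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI,
+ *	    sizeof(*rxni), M_NOWAIT);
+ *	rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1);
+ *	rxni->ni = ni;			// caller must hold a reference on ni
+ *	m_tag_prepend(m, mtag);
+ *
+ * and is recovered in the deferred RX task with
+ * m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL).
+ */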
+
+struct lkpi_radiotap_tx_hdr {
+ struct ieee80211_radiotap_header wt_ihdr;
+ uint8_t wt_flags;
+ uint8_t wt_rate;
+ uint16_t wt_chan_freq;
+ uint16_t wt_chan_flags;
+} __packed;
+#define LKPI_RTAP_TX_FLAGS_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL))
+
+struct lkpi_radiotap_rx_hdr {
+ struct ieee80211_radiotap_header wr_ihdr;
+ uint64_t wr_tsft;
+ uint8_t wr_flags;
+ uint8_t wr_rate;
+ uint16_t wr_chan_freq;
+ uint16_t wr_chan_flags;
+ int8_t wr_dbm_antsignal;
+ int8_t wr_dbm_antnoise;
+} __packed __aligned(8);
+#define LKPI_RTAP_RX_FLAGS_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_TSFT) | \
+ (1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
+
+struct lkpi_hw;
+
+struct lkpi_txq {
+ TAILQ_ENTRY(lkpi_txq) txq_entry;
+
+ struct mtx ltxq_mtx;
+ bool seen_dequeue;
+ bool stopped;
+ uint32_t txq_generation;
+ struct sk_buff_head skbq;
+
+ /* Must be last! */
+ struct ieee80211_txq txq __aligned(CACHE_LINE_SIZE);
+};
+#define TXQ_TO_LTXQ(_txq) container_of(_txq, struct lkpi_txq, txq)
+
+
+struct lkpi_sta {
+ struct list_head lsta_list;
+ struct ieee80211_node *ni;
+	struct ieee80211_hw		*hw;	/* Back pointer for locking. */
+
+ /* Deferred TX path. */
+ /* Eventually we might want to migrate this into net80211 entirely. */
+ /* XXX-BZ can we use sta->txq[] instead directly? */
+ struct task txq_task;
+ struct mbufq txq;
+ struct mtx txq_mtx;
+
+ struct ieee80211_key_conf *kc[IEEE80211_WEP_NKID];
+ enum ieee80211_sta_state state;
+ bool txq_ready; /* Can we run the taskq? */
+ bool added_to_drv; /* Driver knows; i.e. we called ...(). */
+ bool in_mgd; /* XXX-BZ should this be per-vif? */
+
+ struct station_info sinfo; /* statistics */
+
+ /* Must be last! */
+ struct ieee80211_sta sta __aligned(CACHE_LINE_SIZE);
+};
+#define STA_TO_LSTA(_sta) container_of(_sta, struct lkpi_sta, sta)
+#define LSTA_TO_STA(_lsta) (&(_lsta)->sta)
+
+/* Either protected by wiphy lock or rcu for the list. */
+struct lkpi_vif {
+ TAILQ_ENTRY(lkpi_vif) lvif_entry;
+ struct ieee80211vap iv_vap;
+ eventhandler_tag lvif_ifllevent;
+
+ struct sysctl_ctx_list sysctl_ctx;
+
+ struct mtx mtx;
+ struct wireless_dev wdev;
+
+ /* Other local stuff. */
+ int (*iv_newstate)(struct ieee80211vap *,
+ enum ieee80211_state, int);
+ struct ieee80211_node * (*iv_update_bss)(struct ieee80211vap *,
+ struct ieee80211_node *);
+ struct list_head lsta_list;
+
+ struct lkpi_sta *lvif_bss;
+
+ struct ieee80211_node *key_update_iv_bss;
+ int ic_unlocked; /* Count of ic unlocks pending (*mo_set_key) */
+ int nt_unlocked; /* Count of nt unlocks pending (*mo_set_key) */
+ bool lvif_bss_synched;
+ bool added_to_drv; /* Driver knows; i.e. we called add_interface(). */
+
+ bool hw_queue_stopped[IEEE80211_NUM_ACS];
+
+ /* Must be last! */
+ struct ieee80211_vif vif __aligned(CACHE_LINE_SIZE);
+};
+#define VAP_TO_LVIF(_vap) container_of(_vap, struct lkpi_vif, iv_vap)
+#define LVIF_TO_VAP(_lvif) (&(_lvif)->iv_vap)
+#define VIF_TO_LVIF(_vif) container_of(_vif, struct lkpi_vif, vif)
+#define LVIF_TO_VIF(_lvif) (&(_lvif)->vif)
+
+
+struct lkpi_hw { /* name it mac80211_sc? */
+ const struct ieee80211_ops *ops;
+ struct ieee80211_scan_request *hw_req;
+ struct workqueue_struct *workq;
+
+ /* FreeBSD specific compat. */
+ /* Linux device is in hw.wiphy->dev after SET_IEEE80211_DEV(). */
+ struct ieee80211com *ic;
+ struct lkpi_radiotap_tx_hdr rtap_tx;
+ struct lkpi_radiotap_rx_hdr rtap_rx;
+
+ TAILQ_HEAD(, lkpi_vif) lvif_head;
+ struct sx lvif_sx;
+
+ struct list_head lchanctx_list;
+
+ struct mtx txq_mtx;
+ uint32_t txq_generation[IEEE80211_NUM_ACS];
+ TAILQ_HEAD(, lkpi_txq) scheduled_txqs[IEEE80211_NUM_ACS];
+
+ /* Deferred RX path. */
+ struct task rxq_task;
+ struct mbufq rxq;
+ struct mtx rxq_mtx;
+
+ /* Scan functions we overload to handle depending on scan mode. */
+ void (*ic_scan_curchan)(struct ieee80211_scan_state *,
+ unsigned long);
+ void (*ic_scan_mindwell)(struct ieee80211_scan_state *);
+
+ /* Node functions we overload to sync state. */
+ struct ieee80211_node * (*ic_node_alloc)(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+ int (*ic_node_init)(struct ieee80211_node *);
+ void (*ic_node_cleanup)(struct ieee80211_node *);
+ void (*ic_node_free)(struct ieee80211_node *);
+
+ /* HT and later functions. */
+ int (*ic_recv_action)(struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const uint8_t *, const uint8_t *);
+ int (*ic_send_action)(struct ieee80211_node *,
+ int, int, void *);
+ int (*ic_ampdu_enable)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ int (*ic_addba_request)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ int (*ic_addba_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ void (*ic_addba_stop)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ void (*ic_addba_response_timeout)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ void (*ic_bar_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int);
+ int (*ic_ampdu_rx_start)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *, int, int, int);
+ void (*ic_ampdu_rx_stop)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *);
+
+#define LKPI_MAC80211_DRV_STARTED 0x00000001
+ uint32_t sc_flags;
+#define LKPI_LHW_SCAN_RUNNING 0x00000001
+#define LKPI_LHW_SCAN_HW 0x00000002
+ uint32_t scan_flags;
+ struct mtx scan_mtx;
+
+ int supbands; /* Number of supported bands. */
+ int max_rates; /* Maximum number of bitrates supported in any channel. */
+ int scan_ie_len; /* Length of common per-band scan IEs. */
+
+ bool update_mc;
+ bool update_wme;
+ bool rxq_stopped;
+
+ /* Must be last! */
+ struct ieee80211_hw hw __aligned(CACHE_LINE_SIZE);
+};
+#define LHW_TO_HW(_lhw) (&(_lhw)->hw)
+#define HW_TO_LHW(_hw) container_of(_hw, struct lkpi_hw, hw)
+
+struct lkpi_chanctx {
+ struct list_head entry;
+
+ bool added_to_drv; /* Managed by MO */
+
+ struct ieee80211_chanctx_conf chanctx_conf __aligned(CACHE_LINE_SIZE);
+};
+#define LCHANCTX_TO_CHANCTX_CONF(_lchanctx) \
+ (&(_lchanctx)->chanctx_conf)
+#define CHANCTX_CONF_TO_LCHANCTX(_conf) \
+ container_of(_conf, struct lkpi_chanctx, chanctx_conf)
+
+struct lkpi_wiphy {
+ const struct cfg80211_ops *ops;
+
+ struct work_struct wwk;
+ struct list_head wwk_list;
+ struct mtx wwk_mtx;
+
+ /* Must be last! */
+ struct wiphy wiphy __aligned(CACHE_LINE_SIZE);
+};
+#define WIPHY_TO_LWIPHY(_wiphy) container_of(_wiphy, struct lkpi_wiphy, wiphy)
+#define LWIPHY_TO_WIPHY(_lwiphy) (&(_lwiphy)->wiphy)
+
+#define LKPI_80211_LWIPHY_WORK_LOCK_INIT(_lwiphy) \
+ mtx_init(&(_lwiphy)->wwk_mtx, "lwiphy-work", NULL, MTX_DEF);
+#define LKPI_80211_LWIPHY_WORK_LOCK_DESTROY(_lwiphy) \
+ mtx_destroy(&(_lwiphy)->wwk_mtx)
+#define LKPI_80211_LWIPHY_WORK_LOCK(_lwiphy) \
+ mtx_lock(&(_lwiphy)->wwk_mtx)
+#define LKPI_80211_LWIPHY_WORK_UNLOCK(_lwiphy) \
+ mtx_unlock(&(_lwiphy)->wwk_mtx)
+#define LKPI_80211_LWIPHY_WORK_LOCK_ASSERT(_lwiphy) \
+ mtx_assert(&(_lwiphy)->wwk_mtx, MA_OWNED)
+#define LKPI_80211_LWIPHY_WORK_UNLOCK_ASSERT(_lwiphy) \
+ mtx_assert(&(_lwiphy)->wwk_mtx, MA_NOTOWNED)
+
+#define LKPI_80211_LHW_SCAN_LOCK_INIT(_lhw) \
+ mtx_init(&(_lhw)->scan_mtx, "lhw-scan", NULL, MTX_DEF | MTX_RECURSE);
+#define LKPI_80211_LHW_SCAN_LOCK_DESTROY(_lhw) \
+ mtx_destroy(&(_lhw)->scan_mtx);
+#define LKPI_80211_LHW_SCAN_LOCK(_lhw) \
+ mtx_lock(&(_lhw)->scan_mtx)
+#define LKPI_80211_LHW_SCAN_UNLOCK(_lhw) \
+ mtx_unlock(&(_lhw)->scan_mtx)
+#define LKPI_80211_LHW_SCAN_LOCK_ASSERT(_lhw) \
+ mtx_assert(&(_lhw)->scan_mtx, MA_OWNED)
+#define LKPI_80211_LHW_SCAN_UNLOCK_ASSERT(_lhw) \
+ mtx_assert(&(_lhw)->scan_mtx, MA_NOTOWNED)
+
+#define LKPI_80211_LHW_TXQ_LOCK_INIT(_lhw) \
+ mtx_init(&(_lhw)->txq_mtx, "lhw-txq", NULL, MTX_DEF | MTX_RECURSE);
+#define LKPI_80211_LHW_TXQ_LOCK_DESTROY(_lhw) \
+ mtx_destroy(&(_lhw)->txq_mtx);
+#define LKPI_80211_LHW_TXQ_LOCK(_lhw) \
+ mtx_lock(&(_lhw)->txq_mtx)
+#define LKPI_80211_LHW_TXQ_UNLOCK(_lhw) \
+ mtx_unlock(&(_lhw)->txq_mtx)
+#define LKPI_80211_LHW_TXQ_LOCK_ASSERT(_lhw) \
+ mtx_assert(&(_lhw)->txq_mtx, MA_OWNED)
+#define LKPI_80211_LHW_TXQ_UNLOCK_ASSERT(_lhw) \
+ mtx_assert(&(_lhw)->txq_mtx, MA_NOTOWNED)
+
+#define LKPI_80211_LHW_RXQ_LOCK_INIT(_lhw) \
+ mtx_init(&(_lhw)->rxq_mtx, "lhw-rxq", NULL, MTX_DEF | MTX_RECURSE);
+#define LKPI_80211_LHW_RXQ_LOCK_DESTROY(_lhw) \
+ mtx_destroy(&(_lhw)->rxq_mtx);
+#define LKPI_80211_LHW_RXQ_LOCK(_lhw) \
+ mtx_lock(&(_lhw)->rxq_mtx)
+#define LKPI_80211_LHW_RXQ_UNLOCK(_lhw) \
+ mtx_unlock(&(_lhw)->rxq_mtx)
+#define LKPI_80211_LHW_RXQ_LOCK_ASSERT(_lhw) \
+ mtx_assert(&(_lhw)->rxq_mtx, MA_OWNED)
+#define LKPI_80211_LHW_RXQ_UNLOCK_ASSERT(_lhw) \
+ mtx_assert(&(_lhw)->rxq_mtx, MA_NOTOWNED)
+
+#define LKPI_80211_LHW_LVIF_LOCK(_lhw) sx_xlock(&(_lhw)->lvif_sx)
+#define LKPI_80211_LHW_LVIF_UNLOCK(_lhw) sx_xunlock(&(_lhw)->lvif_sx)
+
+#define LKPI_80211_LVIF_LOCK(_lvif) mtx_lock(&(_lvif)->mtx)
+#define LKPI_80211_LVIF_UNLOCK(_lvif) mtx_unlock(&(_lvif)->mtx)
+
+#define LKPI_80211_LSTA_TXQ_LOCK_INIT(_lsta) \
+ mtx_init(&(_lsta)->txq_mtx, "lsta-txq", NULL, MTX_DEF);
+#define LKPI_80211_LSTA_TXQ_LOCK_DESTROY(_lsta) \
+ mtx_destroy(&(_lsta)->txq_mtx);
+#define LKPI_80211_LSTA_TXQ_LOCK(_lsta) \
+ mtx_lock(&(_lsta)->txq_mtx)
+#define LKPI_80211_LSTA_TXQ_UNLOCK(_lsta) \
+ mtx_unlock(&(_lsta)->txq_mtx)
+#define LKPI_80211_LSTA_TXQ_LOCK_ASSERT(_lsta) \
+ mtx_assert(&(_lsta)->txq_mtx, MA_OWNED)
+#define LKPI_80211_LSTA_TXQ_UNLOCK_ASSERT(_lsta) \
+ mtx_assert(&(_lsta)->txq_mtx, MA_NOTOWNED)
+
+#define LKPI_80211_LTXQ_LOCK_INIT(_ltxq) \
+ mtx_init(&(_ltxq)->ltxq_mtx, "ltxq", NULL, MTX_DEF);
+#define LKPI_80211_LTXQ_LOCK_DESTROY(_ltxq) \
+ mtx_destroy(&(_ltxq)->ltxq_mtx);
+#define LKPI_80211_LTXQ_LOCK(_ltxq) \
+ mtx_lock(&(_ltxq)->ltxq_mtx)
+#define LKPI_80211_LTXQ_UNLOCK(_ltxq) \
+ mtx_unlock(&(_ltxq)->ltxq_mtx)
+#define LKPI_80211_LTXQ_LOCK_ASSERT(_ltxq) \
+ mtx_assert(&(_ltxq)->ltxq_mtx, MA_OWNED)
+#define LKPI_80211_LTXQ_UNLOCK_ASSERT(_ltxq) \
+ mtx_assert(&(_ltxq)->ltxq_mtx, MA_NOTOWNED)
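+
+/*
+ * Illustrative usage of the lock macro families above, e.g. for the per-lsta
+ * deferred TX queue (a sketch, not taken from the implementation):
+ *
+ *	LKPI_80211_LSTA_TXQ_LOCK(lsta);
+ *	// ... enqueue to / dequeue from lsta->txq ...
+ *	LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
+ *
+ * Functions requiring the lock to be held assert this with
+ * LKPI_80211_LSTA_TXQ_LOCK_ASSERT(lsta).
+ */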
+
+int lkpi_80211_mo_start(struct ieee80211_hw *);
+void lkpi_80211_mo_stop(struct ieee80211_hw *, bool);
+int lkpi_80211_mo_get_antenna(struct ieee80211_hw *, u32 *, u32 *);
+int lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *, uint32_t);
+int lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *, uint32_t);
+int lkpi_80211_mo_add_interface(struct ieee80211_hw *, struct ieee80211_vif *);
+void lkpi_80211_mo_remove_interface(struct ieee80211_hw *, struct ieee80211_vif *);
+int lkpi_80211_mo_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_scan_request *);
+void lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *);
+void lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *, struct ieee80211_vif *);
+void lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *, struct ieee80211_vif *,
+ const u8 *);
+u64 lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *,
+ struct netdev_hw_addr_list *);
+void lkpi_80211_mo_configure_filter(struct ieee80211_hw *, unsigned int,
+ unsigned int *, u64);
+int lkpi_80211_mo_sta_state(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct lkpi_sta *, enum ieee80211_sta_state);
+int lkpi_80211_mo_config(struct ieee80211_hw *, uint32_t);
+int lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *);
+void lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_bss_conf *, struct ieee80211_chanctx_conf *);
+int lkpi_80211_mo_add_chanctx(struct ieee80211_hw *, struct ieee80211_chanctx_conf *);
+void lkpi_80211_mo_change_chanctx(struct ieee80211_hw *,
+ struct ieee80211_chanctx_conf *, uint32_t);
+void lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *,
+ struct ieee80211_chanctx_conf *);
+void lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_bss_conf *, uint64_t);
+int lkpi_80211_mo_conf_tx(struct ieee80211_hw *, struct ieee80211_vif *,
+ uint32_t, uint16_t, const struct ieee80211_tx_queue_params *);
+void lkpi_80211_mo_flush(struct ieee80211_hw *, struct ieee80211_vif *,
+ uint32_t, bool);
+void lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_prep_tx_info *);
+void lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_prep_tx_info *);
+void lkpi_80211_mo_tx(struct ieee80211_hw *, struct ieee80211_tx_control *,
+ struct sk_buff *);
+void lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *, struct ieee80211_txq *);
+void lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *);
+void lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *,
+ struct ieee80211_vif *, struct ieee80211_sta *);
+int lkpi_80211_mo_set_key(struct ieee80211_hw *, enum set_key_cmd,
+ struct ieee80211_vif *, struct ieee80211_sta *,
+ struct ieee80211_key_conf *);
+int lkpi_80211_mo_ampdu_action(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_ampdu_params *);
+int lkpi_80211_mo_sta_statistics(struct ieee80211_hw *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct station_info *);
+
+#endif /* _LKPI_SRC_LINUX_80211_H */
diff --git a/sys/compat/linuxkpi/common/src/linux_80211_macops.c b/sys/compat/linuxkpi/common/src/linux_80211_macops.c
new file mode 100644
index 000000000000..78b2120f2d8c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_80211_macops.c
@@ -0,0 +1,756 @@
+/*-
+ * Copyright (c) 2021-2022 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/errno.h>
+
+#define LINUXKPI_NET80211
+#include <net/mac80211.h>
+
+#include "linux_80211.h"
+
+/* Could be a different tracing framework later. */
+#ifdef LINUXKPI_DEBUG_80211
+#define LKPI_80211_TRACE_MO(fmt, ...) \
+ if (linuxkpi_debug_80211 & D80211_TRACE_MO) \
+ printf("LKPI_80211_TRACE_MO %s:%d: %d %d %lu: " fmt "\n", \
+ __func__, __LINE__, curcpu, curthread->td_tid, \
+ jiffies, __VA_ARGS__)
+#else
+#define	LKPI_80211_TRACE_MO(...)	do { } while (0)
+#endif
+
+int
+lkpi_80211_mo_start(struct ieee80211_hw *hw)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->start == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((lhw->sc_flags & LKPI_MAC80211_DRV_STARTED)) {
+ /* Trying to start twice is an error. */
+ error = EEXIST;
+ goto out;
+ }
+ LKPI_80211_TRACE_MO("hw %p", hw);
+ error = lhw->ops->start(hw);
+ if (error == 0)
+ lhw->sc_flags |= LKPI_MAC80211_DRV_STARTED;
+
+out:
+ return (error);
+}
+
+void
+lkpi_80211_mo_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->stop == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p suspend %d", hw, suspend);
+ lhw->ops->stop(hw, suspend);
+ lhw->sc_flags &= ~LKPI_MAC80211_DRV_STARTED;
+}
+
+int
+lkpi_80211_mo_get_antenna(struct ieee80211_hw *hw, u32 *txs, u32 *rxs)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->get_antenna == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p", hw);
+ error = lhw->ops->get_antenna(hw, txs, rxs);
+
+out:
+ return (error);
+}
+
+int
+lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *hw, uint32_t frag_th)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->set_frag_threshold == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p frag_th %u", hw, frag_th);
+ error = lhw->ops->set_frag_threshold(hw, frag_th);
+
+out:
+ return (error);
+}
+
+int
+lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *hw, uint32_t rts_th)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->set_rts_threshold == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p rts_th %u", hw, rts_th);
+ error = lhw->ops->set_rts_threshold(hw, rts_th);
+
+out:
+ return (error);
+}
+
+
+int
+lkpi_80211_mo_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->add_interface == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ lvif = VIF_TO_LVIF(vif);
+ LKPI_80211_LVIF_LOCK(lvif);
+ if (lvif->added_to_drv) {
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ /* Trying to add twice is an error. */
+ error = EEXIST;
+ goto out;
+ }
+ LKPI_80211_LVIF_UNLOCK(lvif);
+
+ LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif);
+ error = lhw->ops->add_interface(hw, vif);
+ if (error == 0) {
+ LKPI_80211_LVIF_LOCK(lvif);
+ lvif->added_to_drv = true;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ }
+
+out:
+ return (error);
+}
+
+void
+lkpi_80211_mo_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->remove_interface == NULL)
+ return;
+
+ lvif = VIF_TO_LVIF(vif);
+ LKPI_80211_LVIF_LOCK(lvif);
+ if (!lvif->added_to_drv) {
+ LKPI_80211_LVIF_UNLOCK(lvif);
+ return;
+ }
+ LKPI_80211_LVIF_UNLOCK(lvif);
+
+ LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif);
+ lhw->ops->remove_interface(hw, vif);
+ LKPI_80211_LVIF_LOCK(lvif);
+ lvif->added_to_drv = false;
+ LKPI_80211_LVIF_UNLOCK(lvif);
+}
+
+
+int
+lkpi_80211_mo_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *sr)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ /*
+	 * MUST NOT return EPERM: its value 1 doubles as a magic return value
+	 * (based on the rtw88 driver) indicating that hw_scan is not supported
+	 * even though the ops callback is available.
+ */
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->hw_scan == NULL) {
+ /* Return magic number to use sw scan. */
+ error = 1;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("CALLING hw %p vif %p sr %p", hw, vif, sr);
+ error = lhw->ops->hw_scan(hw, vif, sr);
+ LKPI_80211_TRACE_MO("RETURNING hw %p vif %p sr %p error %d", hw, vif, sr, error);
+
+out:
+ return (error);
+}
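+
+/*
+ * Illustrative caller pattern only (the real scan glue lives elsewhere):
+ * a return value of 1 means "fall back to software scan", anything else is
+ * the driver's regular error/success:
+ *
+ *	error = lkpi_80211_mo_hw_scan(hw, vif, sr);
+ *	if (error == 1) {
+ *		// No hw_scan support; run the net80211 sw scan instead.
+ *	} else if (error != 0) {
+ *		// Propagate/handle the driver error.
+ *	}
+ */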
+
+void
+lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->cancel_hw_scan == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif);
+ lhw->ops->cancel_hw_scan(hw, vif);
+}
+
+void
+lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sw_scan_complete == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif);
+ lhw->ops->sw_scan_complete(hw, vif);
+ lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING;
+}
+
+void
+lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *addr)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sw_scan_start == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif);
+ lhw->ops->sw_scan_start(hw, vif, addr);
+}
+
+
+/*
+ * We keep the Linux type here; it really is an uintptr_t.
+ */
+u64
+lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct lkpi_hw *lhw;
+ u64 ptr;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->prepare_multicast == NULL)
+ return (0);
+
+ LKPI_80211_TRACE_MO("hw %p mc_list %p", hw, mc_list);
+ ptr = lhw->ops->prepare_multicast(hw, mc_list);
+ return (ptr);
+}
+
+void
+lkpi_80211_mo_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 mc_ptr)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->configure_filter == NULL)
+ return;
+
+ if (mc_ptr == 0)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p changed_flags %#x total_flags %p mc_ptr %ju", hw, changed_flags, total_flags, (uintmax_t)mc_ptr);
+ lhw->ops->configure_filter(hw, changed_flags, total_flags, mc_ptr);
+}
+
+
+/*
+ * So far we have only called sta_{add,remove} as an alternative to sta_state.
+ * Keep the implementation simpler and hide sta_{add,remove} under the hood
+ * here, calling them if sta_state is not available from mo_sta_state.
+ */
+static int
+lkpi_80211_mo_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_sta *lsta;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sta_add == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ lsta = STA_TO_LSTA(sta);
+ if (lsta->added_to_drv) {
+ error = EEXIST;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta);
+ error = lhw->ops->sta_add(hw, vif, sta);
+ if (error == 0)
+ lsta->added_to_drv = true;
+
+out:
+	return (error);
+}
+
+static int
+lkpi_80211_mo_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_sta *lsta;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sta_remove == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ lsta = STA_TO_LSTA(sta);
+ if (!lsta->added_to_drv) {
+ /* If we never added the sta, do not complain on cleanup. */
+ error = 0;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta);
+ error = lhw->ops->sta_remove(hw, vif, sta);
+ if (error == 0)
+ lsta->added_to_drv = false;
+
+out:
+	return (error);
+}
+
+int
+lkpi_80211_mo_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct lkpi_sta *lsta, enum ieee80211_sta_state nstate)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_sta *sta;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ sta = LSTA_TO_STA(lsta);
+ if (lhw->ops->sta_state != NULL) {
+ LKPI_80211_TRACE_MO("hw %p vif %p sta %p nstate %d", hw, vif, sta, nstate);
+ error = lhw->ops->sta_state(hw, vif, sta, lsta->state, nstate);
+ if (error == 0) {
+ if (nstate == IEEE80211_STA_NOTEXIST)
+ lsta->added_to_drv = false;
+ else
+ lsta->added_to_drv = true;
+ lsta->state = nstate;
+ }
+ goto out;
+ }
+
+ /* XXX-BZ is the change state AUTH or ASSOC here? */
+ if (lsta->state < IEEE80211_STA_ASSOC && nstate == IEEE80211_STA_ASSOC) {
+ error = lkpi_80211_mo_sta_add(hw, vif, sta);
+ if (error == 0)
+ lsta->added_to_drv = true;
+ } else if (lsta->state >= IEEE80211_STA_ASSOC &&
+ nstate < IEEE80211_STA_ASSOC) {
+ error = lkpi_80211_mo_sta_remove(hw, vif, sta);
+ if (error == 0)
+ lsta->added_to_drv = false;
+ } else
+ /* Nothing to do. */
+ error = 0;
+ if (error == 0)
+ lsta->state = nstate;
+
+out:
+ /* XXX-BZ should we manage state in here? */
+ return (error);
+}
+
+int
+lkpi_80211_mo_config(struct ieee80211_hw *hw, uint32_t changed)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->config == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p changed %u", hw, changed);
+ error = lhw->ops->config(hw, changed);
+
+out:
+ return (error);
+}
+
+
+int
+lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf, struct ieee80211_chanctx_conf *chanctx_conf)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->assign_vif_chanctx == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p vif %p bss_conf %p chanctx_conf %p",
+ hw, vif, conf, chanctx_conf);
+ error = lhw->ops->assign_vif_chanctx(hw, vif, conf, chanctx_conf);
+ if (error == 0)
+ vif->bss_conf.chanctx_conf = chanctx_conf;
+
+out:
+ return (error);
+}
+
+void
+lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf, struct ieee80211_chanctx_conf *chanctx_conf)
+{
+ struct lkpi_hw *lhw;
+
+ might_sleep();
+ lockdep_assert_wiphy(hw->wiphy);
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->unassign_vif_chanctx == NULL)
+ return;
+
+ if (chanctx_conf == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p bss_conf %p chanctx_conf %p",
+ hw, vif, conf, chanctx_conf);
+ lhw->ops->unassign_vif_chanctx(hw, vif, conf, chanctx_conf);
+}
+
+
+int
+lkpi_80211_mo_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_chanctx *lchanctx;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->add_chanctx == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p chanctx_conf %p", hw, chanctx_conf);
+ error = lhw->ops->add_chanctx(hw, chanctx_conf);
+ if (error == 0) {
+ lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
+ lchanctx->added_to_drv = true;
+ }
+
+out:
+ return (error);
+}
+
+void
+lkpi_80211_mo_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf, uint32_t changed)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->change_chanctx == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p chanctx_conf %p changed %u", hw, chanctx_conf, changed);
+ lhw->ops->change_chanctx(hw, chanctx_conf, changed);
+}
+
+void
+lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_chanctx *lchanctx;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->remove_chanctx == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p chanctx_conf %p", hw, chanctx_conf);
+ lhw->ops->remove_chanctx(hw, chanctx_conf);
+ lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
+ lchanctx->added_to_drv = false;
+}
+
+void
+lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf, uint64_t changed)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->link_info_changed == NULL &&
+ lhw->ops->bss_info_changed == NULL)
+ return;
+
+ if (changed == 0)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p conf %p changed %#jx", hw, vif, conf, (uintmax_t)changed);
+ if (lhw->ops->link_info_changed != NULL)
+ lhw->ops->link_info_changed(hw, vif, conf, changed);
+ else
+ lhw->ops->bss_info_changed(hw, vif, conf, changed);
+}
+
+int
+lkpi_80211_mo_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ uint32_t link_id, uint16_t ac, const struct ieee80211_tx_queue_params *txqp)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->conf_tx == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p vif %p link_id %u ac %u txpq %p",
+ hw, vif, link_id, ac, txqp);
+ error = lhw->ops->conf_tx(hw, vif, link_id, ac, txqp);
+
+out:
+ return (error);
+}
+
+void
+lkpi_80211_mo_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ uint32_t nqueues, bool drop)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->flush == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p nqueues %u drop %d", hw, vif, nqueues, drop);
+ lhw->ops->flush(hw, vif, nqueues, drop);
+}
+
+void
+lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *txinfo)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->mgd_prepare_tx == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p txinfo %p", hw, vif, txinfo);
+ lhw->ops->mgd_prepare_tx(hw, vif, txinfo);
+}
+
+void
+lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *txinfo)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->mgd_complete_tx == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p txinfo %p", hw, vif, txinfo);
+ lhw->ops->mgd_complete_tx(hw, vif, txinfo);
+}
+
+void
+lkpi_80211_mo_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *txctrl,
+ struct sk_buff *skb)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->tx == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p txctrl %p skb %p", hw, txctrl, skb);
+ lhw->ops->tx(hw, txctrl, skb);
+}
+
+void
+lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->wake_tx_queue == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p txq %p", hw, txq);
+ lhw->ops->wake_tx_queue(hw, txq);
+}
+
+void
+lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sync_rx_queues == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p", hw);
+ lhw->ops->sync_rx_queues(hw);
+}
+
+void
+lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct lkpi_hw *lhw;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sta_pre_rcu_remove == NULL)
+ return;
+
+ LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta);
+ lhw->ops->sta_pre_rcu_remove(hw, vif, sta);
+}
+
+int
+lkpi_80211_mo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *kc)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->set_key == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p cmd %d vif %p sta %p kc %p", hw, cmd, vif, sta, kc);
+ error = lhw->ops->set_key(hw, cmd, vif, sta, kc);
+
+out:
+ return (error);
+}
+
+int
+lkpi_80211_mo_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct lkpi_hw *lhw;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->ampdu_action == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ LKPI_80211_TRACE_MO("hw %p vif %p params %p { %p, %d, %u, %u, %u, %u, %d }",
+ hw, vif, params, params->sta, params->action, params->buf_size,
+ params->timeout, params->ssn, params->tid, params->amsdu);
+ error = lhw->ops->ampdu_action(hw, vif, params);
+
+out:
+ return (error);
+}
+
+int
+lkpi_80211_mo_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct station_info *sinfo)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_sta *lsta;
+ int error;
+
+ lhw = HW_TO_LHW(hw);
+ if (lhw->ops->sta_statistics == NULL) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ lsta = STA_TO_LSTA(sta);
+ if (!lsta->added_to_drv) {
+ error = EEXIST;
+ goto out;
+ }
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ LKPI_80211_TRACE_MO("hw %p vif %p sta %p sinfo %p", hw, vif, sta, sinfo);
+ lhw->ops->sta_statistics(hw, vif, sta, sinfo);
+ error = 0;
+
+out:
+ return (error);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_acpi.c b/sys/compat/linuxkpi/common/src/linux_acpi.c
new file mode 100644
index 000000000000..d18c69d9210d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_acpi.c
@@ -0,0 +1,376 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2018 Johannes Lundberg <johalun@FreeBSD.org>
+ * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/uuid.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
+
+#define ACPI_AC_CLASS "ac_adapter"
+
+ACPI_MODULE_NAME("linux_acpi")
+
+enum {
+ LINUX_ACPI_ACAD,
+ LINUX_ACPI_VIDEO,
+ LINUX_ACPI_TAGS /* must be last */
+};
+_Static_assert(LINUX_ACPI_TAGS <= LINUX_NOTIFY_TAGS,
+ "Not enough space for tags in notifier_block structure");
+
+#ifdef DEV_ACPI
+
+suspend_state_t pm_suspend_target_state = PM_SUSPEND_ON;
+
+static uint32_t linux_acpi_target_sleep_state = ACPI_STATE_S0;
+
+static eventhandler_tag resume_tag;
+static eventhandler_tag suspend_tag;
+
+ACPI_HANDLE
+bsd_acpi_get_handle(device_t bsddev)
+{
+ return (acpi_get_handle(bsddev));
+}
+
+bool
+acpi_check_dsm(ACPI_HANDLE handle, const char *uuid, int rev, uint64_t funcs)
+{
+
+ if (funcs == 0)
+ return (false);
+
+ /*
+ * From ACPI 6.3 spec 9.1.1:
+ * Bit 0 indicates whether there is support for any functions other
+ * than function 0 for the specified UUID and Revision ID. If set to
+ * zero, no functions are supported (other than function zero) for the
+ * specified UUID and Revision ID.
+ */
+ funcs |= 1 << 0;
+
+ return ((acpi_DSMQuery(handle, uuid, rev) & funcs) == funcs);
+}
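+
+/*
+ * Illustrative example only (the UUID string below is hypothetical): a
+ * caller needing _DSM function 3 of revision 1 tests the matching bit;
+ * function 0 support is implied and OR'ed in above:
+ *
+ *	if (acpi_check_dsm(handle, "01234567-89ab-cdef-0123-456789abcdef",
+ *	    1, 1ULL << 3)) {
+ *		// Function 3 is supported; safe to evaluate it.
+ *	}
+ */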
+
+ACPI_OBJECT *
+acpi_evaluate_dsm_typed(ACPI_HANDLE handle, const char *uuid, int rev,
+ int func, ACPI_OBJECT *argv4, ACPI_OBJECT_TYPE type)
+{
+ ACPI_BUFFER buf;
+
+ return (ACPI_SUCCESS(acpi_EvaluateDSMTyped(handle, uuid, rev, func,
+ argv4, &buf, type)) ? (ACPI_OBJECT *)buf.Pointer : NULL);
+}
+
+union linuxkpi_acpi_object *
+acpi_evaluate_dsm(ACPI_HANDLE ObjHandle, const guid_t *guid,
+ UINT64 rev, UINT64 func, union linuxkpi_acpi_object *pkg)
+{
+ ACPI_BUFFER buf;
+
+ return (ACPI_SUCCESS(acpi_EvaluateDSM(ObjHandle, (const uint8_t *)guid,
+ rev, func, (ACPI_OBJECT *)pkg, &buf)) ?
+ (union linuxkpi_acpi_object *)buf.Pointer : NULL);
+}
+
+static void
+linux_handle_power_suspend_event(void *arg __unused)
+{
+ /*
+ * Only support S3 for now.
+ * acpi_sleep_event isn't always called so we use power_suspend_early
+ * instead which means we don't know what state we're switching to.
+ * TODO: Make acpi_sleep_event consistent
+ */
+ linux_acpi_target_sleep_state = ACPI_STATE_S3;
+ pm_suspend_target_state = PM_SUSPEND_MEM;
+}
+
+static void
+linux_handle_power_resume_event(void *arg __unused)
+{
+ linux_acpi_target_sleep_state = ACPI_STATE_S0;
+ pm_suspend_target_state = PM_SUSPEND_ON;
+}
+
+static void
+linux_handle_acpi_acad_event(void *arg, int data)
+{
+ struct notifier_block *nb = arg;
+ /*
+	 * Event type information is currently lost in the FreeBSD ACPI event
+	 * handler. Fortunately, drm-kmod does not distinguish between AC event
+	 * types either, so we can use any type that suits the notifier handler,
+	 * e.g. ACPI_NOTIFY_BUS_CHECK.
+ */
+ struct acpi_bus_event abe = {
+ .device_class = ACPI_AC_CLASS,
+ .type = ACPI_NOTIFY_BUS_CHECK,
+ .data = data,
+ };
+
+ nb->notifier_call(nb, 0, &abe);
+}
+
+static void
+linux_handle_acpi_video_event(void *arg, int type)
+{
+ struct notifier_block *nb = arg;
+ struct acpi_bus_event abe = {
+ .device_class = ACPI_VIDEO_CLASS,
+ .type = type,
+ .data = 0,
+ };
+
+ nb->notifier_call(nb, 0, &abe);
+}
+
+int
+register_acpi_notifier(struct notifier_block *nb)
+{
+ nb->tags[LINUX_ACPI_ACAD] = EVENTHANDLER_REGISTER(acpi_acad_event,
+ linux_handle_acpi_acad_event, nb, EVENTHANDLER_PRI_FIRST);
+ nb->tags[LINUX_ACPI_VIDEO] = EVENTHANDLER_REGISTER(acpi_video_event,
+ linux_handle_acpi_video_event, nb, EVENTHANDLER_PRI_FIRST);
+
+ return (0);
+}
+
+int
+unregister_acpi_notifier(struct notifier_block *nb)
+{
+ EVENTHANDLER_DEREGISTER(acpi_acad_event, nb->tags[LINUX_ACPI_ACAD]);
+ EVENTHANDLER_DEREGISTER(acpi_video_event, nb->tags[LINUX_ACPI_VIDEO]);
+
+ return (0);
+}
+
+uint32_t
+acpi_target_system_state(void)
+{
+ return (linux_acpi_target_sleep_state);
+}
+
+struct acpi_dev_present_ctx {
+ const char *hid;
+ const char *uid;
+ int64_t hrv;
+ struct acpi_device *dev;
+};
+
+static ACPI_STATUS
+acpi_dev_present_cb(ACPI_HANDLE handle, UINT32 level, void *context,
+ void **result)
+{
+ ACPI_DEVICE_INFO *devinfo;
+ struct acpi_device *dev;
+ struct acpi_dev_present_ctx *match = context;
+ bool present = false;
+ UINT32 sta, hrv;
+ int i;
+
+ if (handle == NULL)
+ return (AE_OK);
+
+ if (!ACPI_FAILURE(acpi_GetInteger(handle, "_STA", &sta)) &&
+ !ACPI_DEVICE_PRESENT(sta))
+ return (AE_OK);
+
+ if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &devinfo)))
+ return (AE_OK);
+
+ if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
+ strcmp(match->hid, devinfo->HardwareId.String) == 0) {
+ present = true;
+ } else if ((devinfo->Valid & ACPI_VALID_CID) != 0) {
+ for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
+ if (strcmp(match->hid,
+ devinfo->CompatibleIdList.Ids[i].String) == 0) {
+ present = true;
+ break;
+ }
+ }
+ }
+ if (present && match->uid != NULL &&
+ ((devinfo->Valid & ACPI_VALID_UID) == 0 ||
+ strcmp(match->uid, devinfo->UniqueId.String) != 0))
+ present = false;
+
+ AcpiOsFree(devinfo);
+ if (!present)
+ return (AE_OK);
+
+ if (match->hrv != -1) {
+ if (ACPI_FAILURE(acpi_GetInteger(handle, "_HRV", &hrv)))
+ return (AE_OK);
+ if (hrv != match->hrv)
+ return (AE_OK);
+ }
+
+ dev = acpi_get_device(handle);
+ if (dev == NULL)
+ return (AE_OK);
+ match->dev = dev;
+
+ return (AE_ERROR);
+}
+
+bool
+lkpi_acpi_dev_present(const char *hid, const char *uid, int64_t hrv)
+{
+ struct acpi_dev_present_ctx match;
+ int rv;
+
+ match.hid = hid;
+ match.uid = uid;
+ match.hrv = hrv;
+
+ rv = AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, acpi_dev_present_cb, NULL, &match, NULL);
+
+ return (rv == AE_ERROR);
+}
+
+struct acpi_device *
+lkpi_acpi_dev_get_first_match_dev(const char *hid, const char *uid,
+ int64_t hrv)
+{
+ struct acpi_dev_present_ctx match;
+ int rv;
+
+ match.hid = hid;
+ match.uid = uid;
+ match.hrv = hrv;
+ match.dev = NULL;
+
+ rv = AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, acpi_dev_present_cb, NULL, &match, NULL);
+
+ return (rv == AE_ERROR ? match.dev : NULL);
+}
+
+static void
+linux_register_acpi_event_handlers(void *arg __unused)
+{
+ /*
+	 * XXX johalun: acpi_{sleep,wakeup}_event can't be trusted; use
+	 * power_{suspend_early,resume} instead. 'acpiconf -s 3' or 'zzz' will not
+ * generate acpi_sleep_event... Lid open or wake on button generates
+ * acpi_wakeup_event on one of my Dell laptops but not the other
+ * (but it does power on)... is this a general thing?
+ */
+ resume_tag = EVENTHANDLER_REGISTER(power_resume,
+ linux_handle_power_resume_event, NULL, EVENTHANDLER_PRI_FIRST);
+ suspend_tag = EVENTHANDLER_REGISTER(power_suspend_early,
+ linux_handle_power_suspend_event, NULL, EVENTHANDLER_PRI_FIRST);
+}
+
+static void
+linux_deregister_acpi_event_handlers(void *arg __unused)
+{
+ EVENTHANDLER_DEREGISTER(power_resume, resume_tag);
+ EVENTHANDLER_DEREGISTER(power_suspend_early, suspend_tag);
+}
+
+SYSINIT(linux_acpi_events, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ linux_register_acpi_event_handlers, NULL);
+SYSUNINIT(linux_acpi_events, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ linux_deregister_acpi_event_handlers, NULL);
+
+#else /* !DEV_ACPI */
+
+ACPI_HANDLE
+bsd_acpi_get_handle(device_t bsddev)
+{
+ return (NULL);
+}
+
+bool
+acpi_check_dsm(ACPI_HANDLE handle, const char *uuid, int rev, uint64_t funcs)
+{
+ return (false);
+}
+
+ACPI_OBJECT *
+acpi_evaluate_dsm_typed(ACPI_HANDLE handle, const char *uuid, int rev,
+ int func, ACPI_OBJECT *argv4, ACPI_OBJECT_TYPE type)
+{
+ return (NULL);
+}
+
+union linuxkpi_acpi_object *
+acpi_evaluate_dsm(ACPI_HANDLE ObjHandle, const guid_t *guid,
+ UINT64 rev, UINT64 func, union linuxkpi_acpi_object *pkg)
+{
+ return (NULL);
+}
+
+int
+register_acpi_notifier(struct notifier_block *nb)
+{
+ return (0);
+}
+
+int
+unregister_acpi_notifier(struct notifier_block *nb)
+{
+ return (0);
+}
+
+uint32_t
+acpi_target_system_state(void)
+{
+ return (ACPI_STATE_S0);
+}
+
+bool
+lkpi_acpi_dev_present(const char *hid, const char *uid, int64_t hrv)
+{
+ return (false);
+}
+
+struct acpi_device *
+lkpi_acpi_dev_get_first_match_dev(const char *hid, const char *uid,
+ int64_t hrv)
+{
+ return (NULL);
+}
+
+#endif /* !DEV_ACPI */
diff --git a/sys/compat/linuxkpi/common/src/linux_aperture.c b/sys/compat/linuxkpi/common/src/linux_aperture.c
new file mode 100644
index 000000000000..21c7041fc851
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_aperture.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/aperture.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfb.h>
+#include <linux/types.h>
+#include <linux/vgaarb.h>
+
+#include <video/vga.h>
+
+/**
+ * DOC: overview
+ *
+ * A graphics device might be supported by different drivers, but only one
+ * driver can be active at any given time. Many systems load a generic
+ * graphics driver, such as EFI-GOP or VESA, early during the boot process.
+ * During later boot stages, they replace the generic driver with a dedicated,
+ * hardware-specific driver. To take over the device, the dedicated driver
+ * first has to remove the generic driver. Aperture functions manage
+ * ownership of framebuffer memory and hand-over between drivers.
+ *
+ * Graphics drivers should call aperture_remove_conflicting_devices()
+ * at the top of their probe function. The function removes any generic
+ * driver that is currently associated with the given framebuffer memory.
+ * An example for a graphics device on the platform bus is shown below.
+ *
+ * .. code-block:: c
+ *
+ * static int example_probe(struct platform_device *pdev)
+ * {
+ * struct resource *mem;
+ * resource_size_t base, size;
+ * int ret;
+ *
+ * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * if (!mem)
+ * return -ENODEV;
+ * base = mem->start;
+ * size = resource_size(mem);
+ *
+ * ret = aperture_remove_conflicting_devices(base, size, "example");
+ * if (ret)
+ * return ret;
+ *
+ * // Initialize the hardware
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ * static const struct platform_driver example_driver = {
+ * .probe = example_probe,
+ * ...
+ * };
+ *
+ * The given example reads the platform device's I/O-memory range from the
+ * device instance. An active framebuffer will be located within this range.
+ * The call to aperture_remove_conflicting_devices() releases drivers that
+ * have previously claimed ownership of the range and are currently driving
+ * output on the framebuffer. If successful, the new driver can take over
+ * the device.
+ *
+ * While the given example uses a platform device, the aperture helpers work
+ * with every bus that has an addressable framebuffer. In the case of PCI,
+ * device drivers can also call aperture_remove_conflicting_pci_devices() and
+ * let the function detect the apertures automatically. Device drivers without
+ * knowledge of the framebuffer's location can call
+ * aperture_remove_all_conflicting_devices(), which removes all known devices.
+ *
+ * Drivers that are susceptible to being removed by other drivers, such as
+ * generic EFI or VESA drivers, have to register themselves as owners of their
+ * framebuffer apertures. Ownership of the framebuffer memory is achieved
+ * by calling devm_aperture_acquire_for_platform_device(). If successful, the
+ * driver is the owner of the framebuffer range. The function fails if the
+ * framebuffer is already owned by another driver. See below for an example.
+ *
+ * .. code-block:: c
+ *
+ * static int generic_probe(struct platform_device *pdev)
+ * {
+ * struct resource *mem;
+ * resource_size_t base, size;
+ *
+ * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * if (!mem)
+ * return -ENODEV;
+ * base = mem->start;
+ * size = resource_size(mem);
+ *
+ * ret = devm_aperture_acquire_for_platform_device(pdev, base, size);
+ * if (ret)
+ * return ret;
+ *
+ * // Initialize the hardware
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ * static int generic_remove(struct platform_device *)
+ * {
+ * // Hot-unplug the device
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ * static const struct platform_driver generic_driver = {
+ * .probe = generic_probe,
+ * .remove = generic_remove,
+ * ...
+ * };
+ *
+ * Similar to the previous example, the generic driver claims ownership
+ * of the framebuffer memory from its probe function. This will fail if the
+ * memory range, or parts of it, is already owned by another driver.
+ *
+ * If successful, the generic driver is now subject to forced removal by
+ * another driver. This only works for platform drivers that support hot
+ * unplugging. When a driver calls aperture_remove_conflicting_devices()
+ * et al for the registered framebuffer range, the aperture helpers call
+ * platform_device_unregister() and the generic driver unloads itself. The
+ * generic driver also has to provide a remove function to make this work.
+ * Once hot unplugged from hardware, it may not access the device's
+ * registers, framebuffer memory, ROM, etc afterwards.
+ */
+
+struct aperture_range {
+ struct device *dev;
+ resource_size_t base;
+ resource_size_t size;
+ struct list_head lh;
+ void (*detach)(struct device *dev);
+};
+
+static LIST_HEAD(apertures);
+static DEFINE_MUTEX(apertures_lock);
+
+static bool overlap(resource_size_t base1, resource_size_t end1,
+ resource_size_t base2, resource_size_t end2)
+{
+ return (base1 < end2) && (end1 > base2);
+}
+
+static void devm_aperture_acquire_release(void *data)
+{
+ struct aperture_range *ap = data;
+ bool detached = !ap->dev;
+
+ if (detached)
+ return;
+
+ mutex_lock(&apertures_lock);
+ list_del(&ap->lh);
+ mutex_unlock(&apertures_lock);
+}
+
+static int devm_aperture_acquire(struct device *dev,
+ resource_size_t base, resource_size_t size,
+ void (*detach)(struct device *))
+{
+ size_t end = base + size;
+ struct list_head *pos;
+ struct aperture_range *ap;
+
+ mutex_lock(&apertures_lock);
+
+ list_for_each(pos, &apertures) {
+ ap = container_of(pos, struct aperture_range, lh);
+ if (overlap(base, end, ap->base, ap->base + ap->size)) {
+ mutex_unlock(&apertures_lock);
+ return -EBUSY;
+ }
+ }
+
+ ap = devm_kzalloc(dev, sizeof(*ap), GFP_KERNEL);
+ if (!ap) {
+ mutex_unlock(&apertures_lock);
+ return -ENOMEM;
+ }
+
+ ap->dev = dev;
+ ap->base = base;
+ ap->size = size;
+ ap->detach = detach;
+ INIT_LIST_HEAD(&ap->lh);
+
+ list_add(&ap->lh, &apertures);
+
+ mutex_unlock(&apertures_lock);
+
+ return devm_add_action_or_reset(dev, devm_aperture_acquire_release, ap);
+}
+
+static void aperture_detach_platform_device(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /*
+ * Remove the device from the device hierarchy. This is the right thing
+ * to do for firmware-based fb drivers, such as EFI, VESA or VGA. After
+ * the new driver takes over the hardware, the firmware device's state
+ * will be lost.
+ *
+ * For non-platform devices, a new callback would be required.
+ *
+ * If the aperture helpers ever need to handle native drivers, this call
+ * would only have to unplug the DRM device, so that the hardware device
+ * stays around after detachment.
+ */
+ platform_device_unregister(pdev);
+}
+
+/**
+ * devm_aperture_acquire_for_platform_device - Acquires ownership of an aperture
+ * on behalf of a platform device.
+ * @pdev: the platform device to own the aperture
+ * @base: the aperture's byte offset in physical memory
+ * @size: the aperture size in bytes
+ *
+ * Installs the given device as the new owner of the aperture. The function
+ * expects the aperture to be provided by a platform device. If another
+ * driver takes over ownership of the aperture, aperture helpers will then
+ * unregister the platform device automatically. All acquired apertures are
+ * released automatically when the underlying device goes away.
+ *
+ * The function fails if the aperture, or parts of it, is currently
+ * owned by another device. To evict current owners, callers should use
+ * remove_conflicting_devices() et al. before calling this function.
+ *
+ * Returns:
+ * 0 on success, or a negative errno value otherwise.
+ */
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return devm_aperture_acquire(&pdev->dev, base, size, aperture_detach_platform_device);
+}
+EXPORT_SYMBOL(devm_aperture_acquire_for_platform_device);
+
+static void aperture_detach_devices(resource_size_t base, resource_size_t size)
+{
+ resource_size_t end = base + size;
+ struct list_head *pos, *n;
+
+ mutex_lock(&apertures_lock);
+
+ list_for_each_safe(pos, n, &apertures) {
+ struct aperture_range *ap = container_of(pos, struct aperture_range, lh);
+ struct device *dev = ap->dev;
+
+ if (WARN_ON_ONCE(!dev))
+ continue;
+
+ if (!overlap(base, end, ap->base, ap->base + ap->size))
+ continue;
+
+ ap->dev = NULL; /* detach from device */
+ list_del(&ap->lh);
+
+ ap->detach(dev);
+ }
+
+ mutex_unlock(&apertures_lock);
+}
+
+/**
+ * aperture_remove_conflicting_devices - remove devices in the given range
+ * @base: the aperture's base address in physical memory
+ * @size: aperture size in bytes
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes devices that own apertures within @base and @size.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ /*
+ * If a driver asked to unregister a platform device registered by
+	 * sysfb, it can be assumed that this is a driver for a display
+ * that is set up by the system firmware and has a generic driver.
+ *
+ * Drivers for devices that don't have a generic driver will never
+ * ask for this, so let's assume that a real driver for the display
+	 * was already probed and prevent sysfb from registering devices later.
+ */
+#ifdef __linux__
+ sysfb_disable();
+#endif
+
+ aperture_detach_devices(base, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(aperture_remove_conflicting_devices);
+
+/**
+ * __aperture_remove_legacy_vga_devices - remove legacy VGA devices of a PCI device
+ * @pdev: PCI device
+ *
+ * This function removes VGA devices provided by @pdev, such as a VGA
+ * framebuffer or a console. This is useful if you have a VGA-compatible
+ * PCI graphics device with framebuffers in non-BAR locations. Drivers
+ * should acquire ownership of those memory areas and afterwards call
+ * this helper to release remaining VGA devices.
+ *
+ * If your hardware has its framebuffers accessible via PCI BARs, use
+ * aperture_remove_conflicting_pci_devices() instead. The function will
+ * release any VGA devices automatically.
+ *
+ * WARNING: Apparently we must remove graphics drivers before calling
+ * this helper. Otherwise the vga fbdev driver falls over if
+ * we have vgacon configured.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev)
+{
+ /* VGA framebuffer */
+ aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
+
+ /* VGA textmode console */
+#ifdef __linux__
+ return vga_remove_vgacon(pdev);
+#elif defined(__FreeBSD__)
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(__aperture_remove_legacy_vga_devices);
+
+/**
+ * aperture_remove_conflicting_pci_devices - remove existing framebuffers for PCI devices
+ * @pdev: PCI device
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes devices that own apertures within any of @pdev's
+ * memory BARs. The function assumes that a PCI device with a shadowed ROM
+ * drives the primary display and therefore kicks out vga16fb as well.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ bool primary = false;
+ resource_size_t base, size;
+ int bar, ret = 0;
+
+#ifdef __linux__
+ if (pdev == vga_default_device())
+ primary = true;
+
+ if (primary)
+ sysfb_disable();
+#endif
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+
+ base = pci_resource_start(pdev, bar);
+ size = pci_resource_len(pdev, bar);
+ aperture_detach_devices(base, size);
+ }
+
+ /*
+ * If this is the primary adapter, there could be a VGA device
+ * that consumes the VGA framebuffer I/O range. Remove this
+ * device as well.
+ */
+ if (primary)
+ ret = __aperture_remove_legacy_vga_devices(pdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(aperture_remove_conflicting_pci_devices);
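+
+/*
+ * Illustrative usage sketch (not part of this change; the probe routine and
+ * driver name are hypothetical): a PCI graphics driver would call this
+ * helper early in its probe path, before taking over the device:
+ *
+ *	ret = aperture_remove_conflicting_pci_devices(pdev, "exampledrm");
+ *	if (ret != 0)
+ *		return (ret);
+ *
+ * This detaches any generic framebuffer bound to one of @pdev's memory
+ * BARs and, for the primary adapter, the legacy VGA range as well.
+ */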
diff --git a/sys/compat/linuxkpi/common/src/linux_cmdline.c b/sys/compat/linuxkpi/common/src/linux_cmdline.c
new file mode 100644
index 000000000000..0cfa1d56ee6a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_cmdline.c
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <video/cmdline.h>
+
+const char *
+video_get_options(const char *connector_name)
+{
+ char tunable[64];
+ const char *options;
+
+ /*
+ * A user may use loader tunables to set a specific mode for the
+ * console. Tunables are read in the following order:
+ * 1. kern.vt.fb.modes.$connector_name
+ * 2. kern.vt.fb.default_mode
+ *
+ * Example of a mode specific to the LVDS connector:
+ * kern.vt.fb.modes.LVDS="1024x768"
+ *
+ * Example of a mode applied to all connectors not having a
+ * connector-specific mode:
+ * kern.vt.fb.default_mode="640x480"
+ */
+ snprintf(tunable, sizeof(tunable), "kern.vt.fb.modes.%s",
+ connector_name);
+ if (bootverbose) {
+ printf("[drm] Connector %s: get mode from tunables:\n", connector_name);
+ printf("[drm] - %s\n", tunable);
+ printf("[drm] - kern.vt.fb.default_mode\n");
+ }
+ options = kern_getenv(tunable);
+ if (options == NULL)
+ options = kern_getenv("kern.vt.fb.default_mode");
+
+ return (options);
+}
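+
+/*
+ * Illustrative usage sketch (not part of this change; the connector name is
+ * hypothetical): a caller looking up the mode for a connector named "LVDS"
+ * would do
+ *
+ *	options = video_get_options("LVDS");
+ *
+ * which returns the value of kern.vt.fb.modes.LVDS if set, otherwise the
+ * value of kern.vt.fb.default_mode, or NULL when neither tunable exists.
+ */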
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
new file mode 100644
index 000000000000..dcdec0dfcc78
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -0,0 +1,3020 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include "opt_global.h"
+#include "opt_stack.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/proc.h>
+#include <sys/sglist.h>
+#include <sys/sleepqueue.h>
+#include <sys/refcount.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filio.h>
+#include <sys/rwlock.h>
+#include <sys/mman.h>
+#include <sys/stack.h>
+#include <sys/stdarg.h>
+#include <sys/sysent.h>
+#include <sys/time.h>
+#include <sys/user.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_radix.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#endif
+
+#include <linux/kobject.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/io-mapping.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/wait_bit.h>
+#include <linux/rcupdate.h>
+#include <linux/interval_tree.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/printk.h>
+#include <linux/seq_file.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <asm/smp.h>
+#include <asm/processor.h>
+#endif
+
+#include <xen/xen.h>
+#ifdef XENHVM
+#undef xen_pv_domain
+#undef xen_initial_domain
+/* xen/xen-os.h redefines __must_check */
+#undef __must_check
+#include <xen/xen-os.h>
+#endif
+
+SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "LinuxKPI parameters");
+
+int linuxkpi_debug;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");
+
+int linuxkpi_rcu_debug;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, rcu_debug, CTLFLAG_RWTUN,
+ &linuxkpi_rcu_debug, 0, "Set to enable RCU warning. Clear to disable.");
+
+int linuxkpi_warn_dump_stack = 0;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN,
+ &linuxkpi_warn_dump_stack, 0,
+ "Set to enable stack traces from WARN_ON(). Clear to disable.");
+
+static struct timeval lkpi_net_lastlog;
+static int lkpi_net_curpps;
+static int lkpi_net_maxpps = 99;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
+ &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");
+
+MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat");
+
+#include <linux/rbtree.h>
+/* Undo Linux compat changes. */
+#undef RB_ROOT
+#undef file
+#undef cdev
+#define RB_ROOT(head) (head)->rbh_root
+
+static void linux_destroy_dev(struct linux_cdev *);
+static void linux_cdev_deref(struct linux_cdev *ldev);
+static struct vm_area_struct *linux_cdev_handle_find(void *handle);
+
+cpumask_t cpu_online_mask;
+static cpumask_t **static_single_cpu_mask;
+static cpumask_t *static_single_cpu_mask_lcs;
+struct kobject linux_class_root;
+struct device linux_root_device;
+struct class linux_class_misc;
+struct list_head pci_drivers;
+struct list_head pci_devices;
+spinlock_t pci_lock;
+struct uts_namespace init_uts_ns;
+
+unsigned long linux_timer_hz_mask;
+
+wait_queue_head_t linux_bit_waitq;
+wait_queue_head_t linux_var_waitq;
+
+int
+panic_cmp(struct rb_node *one, struct rb_node *two)
+{
+ panic("no cmp");
+}
+
+RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START,
+ LAST,, lkpi_interval_tree)
+
+static void
+linux_device_release(struct device *dev)
+{
+ pr_debug("linux_device_release: %s\n", dev_name(dev));
+ kfree(dev);
+}
+
+static ssize_t
+linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct class_attribute *dattr;
+ ssize_t error;
+
+ dattr = container_of(attr, struct class_attribute, attr);
+ error = -EIO;
+ if (dattr->show)
+ error = dattr->show(container_of(kobj, struct class, kobj),
+ dattr, buf);
+ return (error);
+}
+
+static ssize_t
+linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ struct class_attribute *dattr;
+ ssize_t error;
+
+ dattr = container_of(attr, struct class_attribute, attr);
+ error = -EIO;
+ if (dattr->store)
+ error = dattr->store(container_of(kobj, struct class, kobj),
+ dattr, buf, count);
+ return (error);
+}
+
+static void
+linux_class_release(struct kobject *kobj)
+{
+ struct class *class;
+
+ class = container_of(kobj, struct class, kobj);
+ if (class->class_release)
+ class->class_release(class);
+}
+
+static const struct sysfs_ops linux_class_sysfs = {
+ .show = linux_class_show,
+ .store = linux_class_store,
+};
+
+const struct kobj_type linux_class_ktype = {
+ .release = linux_class_release,
+ .sysfs_ops = &linux_class_sysfs
+};
+
+static void
+linux_dev_release(struct kobject *kobj)
+{
+ struct device *dev;
+
+ dev = container_of(kobj, struct device, kobj);
+	/* This is the precedence defined by Linux. */
+ if (dev->release)
+ dev->release(dev);
+ else if (dev->class && dev->class->dev_release)
+ dev->class->dev_release(dev);
+}
+
+static ssize_t
+linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct device_attribute *dattr;
+ ssize_t error;
+
+ dattr = container_of(attr, struct device_attribute, attr);
+ error = -EIO;
+ if (dattr->show)
+ error = dattr->show(container_of(kobj, struct device, kobj),
+ dattr, buf);
+ return (error);
+}
+
+static ssize_t
+linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ struct device_attribute *dattr;
+ ssize_t error;
+
+ dattr = container_of(attr, struct device_attribute, attr);
+ error = -EIO;
+ if (dattr->store)
+ error = dattr->store(container_of(kobj, struct device, kobj),
+ dattr, buf, count);
+ return (error);
+}
+
+static const struct sysfs_ops linux_dev_sysfs = {
+ .show = linux_dev_show,
+ .store = linux_dev_store,
+};
+
+const struct kobj_type linux_dev_ktype = {
+ .release = linux_dev_release,
+ .sysfs_ops = &linux_dev_sysfs
+};
+
+struct device *
+device_create(struct class *class, struct device *parent, dev_t devt,
+ void *drvdata, const char *fmt, ...)
+{
+ struct device *dev;
+ va_list args;
+
+ dev = kzalloc(sizeof(*dev), M_WAITOK);
+ dev->parent = parent;
+ dev->class = class;
+ dev->devt = devt;
+ dev->driver_data = drvdata;
+ dev->release = linux_device_release;
+ va_start(args, fmt);
+ kobject_set_name_vargs(&dev->kobj, fmt, args);
+ va_end(args);
+ device_register(dev);
+
+ return (dev);
+}
+
+struct device *
+device_create_groups_vargs(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const struct attribute_group **groups,
+ const char *fmt, va_list args)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ if (class == NULL || IS_ERR(class))
+ goto error;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dev->devt = devt;
+ dev->class = class;
+ dev->parent = parent;
+ dev->groups = groups;
+ dev->release = device_create_release;
+ /* device_initialize() needs the class and parent to be set */
+ device_initialize(dev);
+ dev_set_drvdata(dev, drvdata);
+
+ retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+struct class *
+lkpi_class_create(const char *name)
+{
+ struct class *class;
+ int error;
+
+ class = kzalloc(sizeof(*class), M_WAITOK);
+ class->name = name;
+ class->class_release = linux_class_kfree;
+ error = class_register(class);
+ if (error) {
+ kfree(class);
+ return (NULL);
+ }
+
+ return (class);
+}
+
+static void
+linux_kq_lock(void *arg)
+{
+ spinlock_t *s = arg;
+
+ spin_lock(s);
+}
+static void
+linux_kq_unlock(void *arg)
+{
+ spinlock_t *s = arg;
+
+ spin_unlock(s);
+}
+
+static void
+linux_kq_assert_lock(void *arg, int what)
+{
+#ifdef INVARIANTS
+ spinlock_t *s = arg;
+
+ if (what == LA_LOCKED)
+ mtx_assert(s, MA_OWNED);
+ else
+ mtx_assert(s, MA_NOTOWNED);
+#endif
+}
+
+static void
+linux_file_kqfilter_poll(struct linux_file *, int);
+
+struct linux_file *
+linux_file_alloc(void)
+{
+ struct linux_file *filp;
+
+ filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+
+ /* set initial refcount */
+ filp->f_count = 1;
+
+ /* setup fields needed by kqueue support */
+ spin_lock_init(&filp->f_kqlock);
+ knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
+ linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);
+
+ return (filp);
+}
+
+void
+linux_file_free(struct linux_file *filp)
+{
+ if (filp->_file == NULL) {
+ if (filp->f_op != NULL && filp->f_op->release != NULL)
+ filp->f_op->release(filp->f_vnode, filp);
+ if (filp->f_shmem != NULL)
+ vm_object_deallocate(filp->f_shmem);
+ kfree_rcu(filp, rcu);
+ } else {
+ /*
+ * The close method of the character device or file
+ * will free the linux_file structure:
+ */
+ _fdrop(filp->_file, curthread);
+ }
+}
+
+struct linux_cdev *
+cdev_alloc(void)
+{
+ struct linux_cdev *cdev;
+
+ cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK);
+ kobject_init(&cdev->kobj, &linux_cdev_ktype);
+ cdev->refs = 1;
+ return (cdev);
+}
+
+static int
+linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
+ vm_page_t *mres)
+{
+ struct vm_area_struct *vmap;
+
+ vmap = linux_cdev_handle_find(vm_obj->handle);
+
+ MPASS(vmap != NULL);
+ MPASS(vmap->vm_private_data == vm_obj->handle);
+
+ if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
+ vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
+ vm_page_t page;
+
+ if (((*mres)->flags & PG_FICTITIOUS) != 0) {
+ /*
+ * If the passed in result page is a fake
+ * page, update it with the new physical
+ * address.
+ */
+ page = *mres;
+ vm_page_updatefake(page, paddr, vm_obj->memattr);
+ } else {
+ /*
+ * Replace the passed in "mres" page with our
+ * own fake page and free up the all of the
+ * original pages.
+ */
+ VM_OBJECT_WUNLOCK(vm_obj);
+ page = vm_page_getfake(paddr, vm_obj->memattr);
+ VM_OBJECT_WLOCK(vm_obj);
+
+ vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
+ *mres = page;
+ }
+ vm_page_valid(page);
+ return (VM_PAGER_OK);
+ }
+ return (VM_PAGER_FAIL);
+}
+
+static int
+linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
+ vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
+{
+ struct vm_area_struct *vmap;
+ int err;
+
+ /* get VM area structure */
+ vmap = linux_cdev_handle_find(vm_obj->handle);
+ MPASS(vmap != NULL);
+ MPASS(vmap->vm_private_data == vm_obj->handle);
+
+ VM_OBJECT_WUNLOCK(vm_obj);
+
+ linux_set_current(curthread);
+
+ down_write(&vmap->vm_mm->mmap_sem);
+ if (unlikely(vmap->vm_ops == NULL)) {
+ err = VM_FAULT_SIGBUS;
+ } else {
+ struct vm_fault vmf;
+
+ /* fill out VM fault structure */
+ vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
+ vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
+ vmf.pgoff = 0;
+ vmf.page = NULL;
+ vmf.vma = vmap;
+
+ vmap->vm_pfn_count = 0;
+ vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
+ vmap->vm_obj = vm_obj;
+
+ err = vmap->vm_ops->fault(&vmf);
+
+ while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
+ kern_yield(PRI_USER);
+ err = vmap->vm_ops->fault(&vmf);
+ }
+ }
+
+ /* translate return code */
+ switch (err) {
+ case VM_FAULT_OOM:
+ err = VM_PAGER_AGAIN;
+ break;
+ case VM_FAULT_SIGBUS:
+ err = VM_PAGER_BAD;
+ break;
+ case VM_FAULT_NOPAGE:
+ /*
+ * By contract the fault handler will return having
+ * busied all the pages itself. If pidx is already
+ * found in the object, it will simply xbusy the first
+ * page and return with vm_pfn_count set to 1.
+ */
+ *first = vmap->vm_pfn_first;
+ *last = *first + vmap->vm_pfn_count - 1;
+ err = VM_PAGER_OK;
+ break;
+ default:
+ err = VM_PAGER_ERROR;
+ break;
+ }
+ up_write(&vmap->vm_mm->mmap_sem);
+ VM_OBJECT_WLOCK(vm_obj);
+ return (err);
+}
+
+static struct rwlock linux_vma_lock;
+static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
+ TAILQ_HEAD_INITIALIZER(linux_vma_head);
+
+static void
+linux_cdev_handle_free(struct vm_area_struct *vmap)
+{
+ /* Drop reference on vm_file */
+ if (vmap->vm_file != NULL)
+ fput(vmap->vm_file);
+
+ /* Drop reference on mm_struct */
+ mmput(vmap->vm_mm);
+
+ kfree(vmap);
+}
+
+static void
+linux_cdev_handle_remove(struct vm_area_struct *vmap)
+{
+ rw_wlock(&linux_vma_lock);
+ TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
+ rw_wunlock(&linux_vma_lock);
+}
+
+static struct vm_area_struct *
+linux_cdev_handle_find(void *handle)
+{
+ struct vm_area_struct *vmap;
+
+ rw_rlock(&linux_vma_lock);
+ TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
+ if (vmap->vm_private_data == handle)
+ break;
+ }
+ rw_runlock(&linux_vma_lock);
+ return (vmap);
+}
+
+static int
+linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+
+ MPASS(linux_cdev_handle_find(handle) != NULL);
+ *color = 0;
+ return (0);
+}
+
+static void
+linux_cdev_pager_dtor(void *handle)
+{
+ const struct vm_operations_struct *vm_ops;
+ struct vm_area_struct *vmap;
+
+ vmap = linux_cdev_handle_find(handle);
+ MPASS(vmap != NULL);
+
+ /*
+ * Remove handle before calling close operation to prevent
+ * other threads from reusing the handle pointer.
+ */
+ linux_cdev_handle_remove(vmap);
+
+ down_write(&vmap->vm_mm->mmap_sem);
+ vm_ops = vmap->vm_ops;
+ if (likely(vm_ops != NULL))
+ vm_ops->close(vmap);
+ up_write(&vmap->vm_mm->mmap_sem);
+
+ linux_cdev_handle_free(vmap);
+}
+
+static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
+ {
+ /* OBJT_MGTDEVICE */
+ .cdev_pg_populate = linux_cdev_pager_populate,
+ .cdev_pg_ctor = linux_cdev_pager_ctor,
+ .cdev_pg_dtor = linux_cdev_pager_dtor
+ },
+ {
+ /* OBJT_DEVICE */
+ .cdev_pg_fault = linux_cdev_pager_fault,
+ .cdev_pg_ctor = linux_cdev_pager_ctor,
+ .cdev_pg_dtor = linux_cdev_pager_dtor
+ },
+};
+
+int
+zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size)
+{
+ struct pctrie_iter pages;
+ vm_object_t obj;
+ vm_page_t m;
+
+ obj = vma->vm_obj;
+ if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
+ return (-ENOTSUP);
+ VM_OBJECT_RLOCK(obj);
+ vm_page_iter_limit_init(&pages, obj, OFF_TO_IDX(address + size));
+ VM_RADIX_FOREACH_FROM(m, &pages, OFF_TO_IDX(address))
+ pmap_remove_all(m);
+ VM_OBJECT_RUNLOCK(obj);
+ return (0);
+}
+
+void
+vma_set_file(struct vm_area_struct *vma, struct linux_file *file)
+{
+ struct linux_file *tmp;
+
+ /* Changing an anonymous vma with this is illegal */
+ get_file(file);
+ tmp = vma->vm_file;
+ vma->vm_file = file;
+ fput(tmp);
+}
+
+static struct file_operations dummy_ldev_ops = {
+ /* XXXKIB */
+};
+
+static struct linux_cdev dummy_ldev = {
+ .ops = &dummy_ldev_ops,
+};
+
+#define LDEV_SI_DTR 0x0001
+#define LDEV_SI_REF 0x0002
+
+static void
+linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
+ struct linux_cdev **dev)
+{
+ struct linux_cdev *ldev;
+ u_int siref;
+
+ ldev = filp->f_cdev;
+ *fop = filp->f_op;
+ if (ldev != NULL) {
+ if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
+ refcount_acquire(&ldev->refs);
+ } else {
+ for (siref = ldev->siref;;) {
+ if ((siref & LDEV_SI_DTR) != 0) {
+ ldev = &dummy_ldev;
+ *fop = ldev->ops;
+ siref = ldev->siref;
+ MPASS((ldev->siref & LDEV_SI_DTR) == 0);
+ } else if (atomic_fcmpset_int(&ldev->siref,
+ &siref, siref + LDEV_SI_REF)) {
+ break;
+ }
+ }
+ }
+ }
+ *dev = ldev;
+}
+
+static void
+linux_drop_fop(struct linux_cdev *ldev)
+{
+
+ if (ldev == NULL)
+ return;
+ if (ldev->kobj.ktype == &linux_cdev_static_ktype) {
+ linux_cdev_deref(ldev);
+ } else {
+ MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
+ MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
+ atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
+ }
+}
+
+#define OPW(fp,td,code) ({ \
+ struct file *__fpop; \
+ __typeof(code) __retval; \
+ \
+ __fpop = (td)->td_fpop; \
+ (td)->td_fpop = (fp); \
+ __retval = (code); \
+ (td)->td_fpop = __fpop; \
+ __retval; \
+})
+
+static int
+linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
+ struct file *file)
+{
+ struct linux_cdev *ldev;
+ struct linux_file *filp;
+ const struct file_operations *fop;
+ int error;
+
+ ldev = dev->si_drv1;
+
+ filp = linux_file_alloc();
+ filp->f_dentry = &filp->f_dentry_store;
+ filp->f_op = ldev->ops;
+ filp->f_mode = file->f_flag;
+ filp->f_flags = file->f_flag;
+ filp->f_vnode = file->f_vnode;
+ filp->_file = file;
+ refcount_acquire(&ldev->refs);
+ filp->f_cdev = ldev;
+
+ linux_set_current(td);
+ linux_get_fop(filp, &fop, &ldev);
+
+ if (fop->open != NULL) {
+ error = -fop->open(file->f_vnode, filp);
+ if (error != 0) {
+ linux_drop_fop(ldev);
+ linux_cdev_deref(filp->f_cdev);
+ kfree(filp);
+ return (error);
+ }
+ }
+
+ /* hold on to the vnode - used for fstat() */
+ vref(filp->f_vnode);
+
+ /* release the file from devfs */
+ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
+ linux_drop_fop(ldev);
+ return (ENXIO);
+}
+
+#define LINUX_IOCTL_MIN_PTR 0x10000UL
+#define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)
+
+static inline int
+linux_remap_address(void **uaddr, size_t len)
+{
+ uintptr_t uaddr_val = (uintptr_t)(*uaddr);
+
+ if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
+ uaddr_val < LINUX_IOCTL_MAX_PTR)) {
+ struct task_struct *pts = current;
+ if (pts == NULL) {
+ *uaddr = NULL;
+ return (1);
+ }
+
+ /* compute data offset */
+ uaddr_val -= LINUX_IOCTL_MIN_PTR;
+
+ /* check that length is within bounds */
+ if ((len > IOCPARM_MAX) ||
+ (uaddr_val + len) > pts->bsd_ioctl_len) {
+ *uaddr = NULL;
+ return (1);
+ }
+
+ /* re-add kernel buffer address */
+ uaddr_val += (uintptr_t)pts->bsd_ioctl_data;
+
+ /* update address location */
+ *uaddr = (void *)uaddr_val;
+ return (1);
+ }
+ return (0);
+}
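+
+/*
+ * Worked example (illustrative, assuming an in-range hint pointer): if a
+ * Linux ioctl handler passes LINUX_IOCTL_MIN_PTR + 0x20 to linux_copyin()
+ * with len = 8, the code above strips LINUX_IOCTL_MIN_PTR (leaving offset
+ * 0x20), checks that 0x20 + 8 does not exceed bsd_ioctl_len, and then adds
+ * bsd_ioctl_data, so the copy is served from the kernel buffer staged by
+ * linux_file_ioctl_sub() below instead of touching user space.
+ */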
+
+int
+linux_copyin(const void *uaddr, void *kaddr, size_t len)
+{
+ if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
+ if (uaddr == NULL)
+ return (-EFAULT);
+ memcpy(kaddr, uaddr, len);
+ return (0);
+ }
+ return (-copyin(uaddr, kaddr, len));
+}
+
+int
+linux_copyout(const void *kaddr, void *uaddr, size_t len)
+{
+ if (linux_remap_address(&uaddr, len)) {
+ if (uaddr == NULL)
+ return (-EFAULT);
+ memcpy(uaddr, kaddr, len);
+ return (0);
+ }
+ return (-copyout(kaddr, uaddr, len));
+}
+
+size_t
+linux_clear_user(void *_uaddr, size_t _len)
+{
+ uint8_t *uaddr = _uaddr;
+ size_t len = _len;
+
+ /* make sure uaddr is aligned before going into the fast loop */
+ while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
+ if (subyte(uaddr, 0))
+ return (_len);
+ uaddr++;
+ len--;
+ }
+
+ /* zero 8 bytes at a time */
+ while (len > 7) {
+#ifdef __LP64__
+ if (suword64(uaddr, 0))
+ return (_len);
+#else
+ if (suword32(uaddr, 0))
+ return (_len);
+ if (suword32(uaddr + 4, 0))
+ return (_len);
+#endif
+ uaddr += 8;
+ len -= 8;
+ }
+
+ /* zero fill end, if any */
+ while (len > 0) {
+ if (subyte(uaddr, 0))
+ return (_len);
+ uaddr++;
+ len--;
+ }
+ return (0);
+}
+
+int
+linux_access_ok(const void *uaddr, size_t len)
+{
+ uintptr_t saddr;
+ uintptr_t eaddr;
+
+ /* get start and end address */
+ saddr = (uintptr_t)uaddr;
+ eaddr = (uintptr_t)uaddr + len;
+
+ /* verify addresses are valid for userspace */
+ return ((saddr == eaddr) ||
+ (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
+}
+
+/*
+ * This function should return either EINTR or ERESTART depending on
+ * the signal type sent to this thread:
+ */
+static int
+linux_get_error(struct task_struct *task, int error)
+{
+ /* check for signal type interrupt code */
+ if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
+ error = -linux_schedule_get_interrupt_value(task);
+ if (error == 0)
+ error = EINTR;
+ }
+ return (error);
+}
+
+static int
+linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
+ const struct file_operations *fop, u_long cmd, caddr_t data,
+ struct thread *td)
+{
+ struct task_struct *task = current;
+ unsigned size;
+ int error;
+
+ size = IOCPARM_LEN(cmd);
+ /* refer to logic in sys_ioctl() */
+ if (size > 0) {
+ /*
+		 * Set up a hint for linux_copyin() and linux_copyout().
+ *
+ * Background: Linux code expects a user-space address
+ * while FreeBSD supplies a kernel-space address.
+ */
+ task->bsd_ioctl_data = data;
+ task->bsd_ioctl_len = size;
+ data = (void *)LINUX_IOCTL_MIN_PTR;
+ } else {
+ /* fetch user-space pointer */
+ data = *(void **)data;
+ }
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ /* try the compat IOCTL handler first */
+ if (fop->compat_ioctl != NULL) {
+ error = -OPW(fp, td, fop->compat_ioctl(filp,
+ cmd, (u_long)data));
+ } else {
+ error = ENOTTY;
+ }
+
+ /* fallback to the regular IOCTL handler, if any */
+ if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
+ error = -OPW(fp, td, fop->unlocked_ioctl(filp,
+ cmd, (u_long)data));
+ }
+ } else
+#endif
+ {
+ if (fop->unlocked_ioctl != NULL) {
+ error = -OPW(fp, td, fop->unlocked_ioctl(filp,
+ cmd, (u_long)data));
+ } else {
+ error = ENOTTY;
+ }
+ }
+ if (size > 0) {
+ task->bsd_ioctl_data = NULL;
+ task->bsd_ioctl_len = 0;
+ }
+
+ if (error == EWOULDBLOCK) {
+ /* update kqfilter status, if any */
+ linux_file_kqfilter_poll(filp,
+ LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
+ } else {
+ error = linux_get_error(task, error);
+ }
+ return (error);
+}
+
+#define LINUX_POLL_TABLE_NORMAL ((poll_table *)1)
+
+/*
+ * This function atomically updates the poll wakeup state and returns
+ * the previous state at the time of update.
+ */
+static uint8_t
+linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
+{
+ int c, old;
+
+ c = v->counter;
+
+ while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
+ c = old;
+
+ return (c);
+}
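+
+/*
+ * Illustrative note: with the transition table used by
+ * linux_poll_wakeup_callback() below, the compare-and-swap loop above
+ * atomically turns LINUX_FWQ_STATE_QUEUED into LINUX_FWQ_STATE_READY and
+ * leaves every other state unchanged; the returned previous state lets the
+ * caller decide whether a wakeup actually needs to be delivered.
+ */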
+
+static int
+linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
+{
+ static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
+ [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */
+ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
+ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
+ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */
+ };
+ struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);
+
+ switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
+ case LINUX_FWQ_STATE_QUEUED:
+ linux_poll_wakeup(filp);
+ return (1);
+ default:
+ return (0);
+ }
+}
+
+void
+linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
+{
+ static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
+ [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
+ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
+ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */
+ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
+ };
+
+ /* check if we are called inside the select system call */
+ if (p == LINUX_POLL_TABLE_NORMAL)
+ selrecord(curthread, &filp->f_selinfo);
+
+ switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
+ case LINUX_FWQ_STATE_INIT:
+ /* NOTE: file handles can only belong to one wait-queue */
+ filp->f_wait_queue.wqh = wqh;
+ filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
+ add_wait_queue(wqh, &filp->f_wait_queue.wq);
+ atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+linux_poll_wait_dequeue(struct linux_file *filp)
+{
+ static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
+ [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */
+ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
+ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
+ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
+ };
+
+ seldrain(&filp->f_selinfo);
+
+ switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
+ case LINUX_FWQ_STATE_NOT_READY:
+ case LINUX_FWQ_STATE_QUEUED:
+ case LINUX_FWQ_STATE_READY:
+ remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+linux_poll_wakeup(struct linux_file *filp)
+{
+ /* this function should be NULL-safe */
+ if (filp == NULL)
+ return;
+
+ selwakeup(&filp->f_selinfo);
+
+ spin_lock(&filp->f_kqlock);
+ filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
+ LINUX_KQ_FLAG_NEED_WRITE;
+
+ /* make sure the "knote" gets woken up */
+ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
+ spin_unlock(&filp->f_kqlock);
+}
+
+static struct linux_file *
+__get_file_rcu(struct linux_file **f)
+{
+ struct linux_file *file1, *file2;
+
+ file1 = READ_ONCE(*f);
+ if (file1 == NULL)
+ return (NULL);
+
+ if (!refcount_acquire_if_not_zero(
+ file1->_file == NULL ? &file1->f_count : &file1->_file->f_count))
+ return (ERR_PTR(-EAGAIN));
+
+ file2 = READ_ONCE(*f);
+ if (file2 == file1)
+ return (file2);
+
+ fput(file1);
+ return (ERR_PTR(-EAGAIN));
+}
+
+struct linux_file *
+linux_get_file_rcu(struct linux_file **f)
+{
+ struct linux_file *file1;
+
+ for (;;) {
+ file1 = __get_file_rcu(f);
+ if (file1 == NULL)
+ return (NULL);
+
+ if (IS_ERR(file1))
+ continue;
+
+ return (file1);
+ }
+}
+
+struct linux_file *
+get_file_active(struct linux_file **f)
+{
+ struct linux_file *file1;
+
+ rcu_read_lock();
+ file1 = __get_file_rcu(f);
+ rcu_read_unlock();
+ if (IS_ERR(file1))
+ file1 = NULL;
+
+ return (file1);
+}
+
+static void
+linux_file_kqfilter_detach(struct knote *kn)
+{
+ struct linux_file *filp = kn->kn_hook;
+
+ spin_lock(&filp->f_kqlock);
+ knlist_remove(&filp->f_selinfo.si_note, kn, 1);
+ spin_unlock(&filp->f_kqlock);
+}
+
+static int
+linux_file_kqfilter_read_event(struct knote *kn, long hint)
+{
+ struct linux_file *filp = kn->kn_hook;
+
+ mtx_assert(&filp->f_kqlock, MA_OWNED);
+
+ return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
+}
+
+static int
+linux_file_kqfilter_write_event(struct knote *kn, long hint)
+{
+ struct linux_file *filp = kn->kn_hook;
+
+ mtx_assert(&filp->f_kqlock, MA_OWNED);
+
+ return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
+}
+
+static const struct filterops linux_dev_kqfiltops_read = {
+ .f_isfd = 1,
+ .f_detach = linux_file_kqfilter_detach,
+ .f_event = linux_file_kqfilter_read_event,
+};
+
+static const struct filterops linux_dev_kqfiltops_write = {
+ .f_isfd = 1,
+ .f_detach = linux_file_kqfilter_detach,
+ .f_event = linux_file_kqfilter_write_event,
+};
+
+static void
+linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
+{
+ struct thread *td;
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ int temp;
+
+ if ((filp->f_kqflags & kqflags) == 0)
+ return;
+
+ td = curthread;
+
+ linux_get_fop(filp, &fop, &ldev);
+ /* get the latest polling state */
+ temp = OPW(filp->_file, td, fop->poll(filp, NULL));
+ linux_drop_fop(ldev);
+
+ spin_lock(&filp->f_kqlock);
+ /* clear kqflags */
+ filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
+ LINUX_KQ_FLAG_NEED_WRITE);
+ /* update kqflags */
+ if ((temp & (POLLIN | POLLOUT)) != 0) {
+ if ((temp & POLLIN) != 0)
+ filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
+ if ((temp & POLLOUT) != 0)
+ filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;
+
+ /* make sure the "knote" gets woken up */
+ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
+ }
+ spin_unlock(&filp->f_kqlock);
+}
+
+static int
+linux_file_kqfilter(struct file *file, struct knote *kn)
+{
+ struct linux_file *filp;
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ if (filp->f_op->poll == NULL)
+ return (EINVAL);
+
+ spin_lock(&filp->f_kqlock);
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
+ kn->kn_fop = &linux_dev_kqfiltops_read;
+ kn->kn_hook = filp;
+ knlist_add(&filp->f_selinfo.si_note, kn, 1);
+ error = 0;
+ break;
+ case EVFILT_WRITE:
+ filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
+ kn->kn_fop = &linux_dev_kqfiltops_write;
+ kn->kn_hook = filp;
+ knlist_add(&filp->f_selinfo.si_note, kn, 1);
+ error = 0;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ spin_unlock(&filp->f_kqlock);
+
+ if (error == 0) {
+ linux_set_current(td);
+
+ /* update kqfilter status, if any */
+ linux_file_kqfilter_poll(filp,
+ LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
+ }
+ return (error);
+}
+
+static int
+linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
+ vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
+ int nprot, bool is_shared, struct thread *td)
+{
+ struct task_struct *task;
+ struct vm_area_struct *vmap;
+ struct mm_struct *mm;
+ struct linux_file *filp;
+ vm_memattr_t attr;
+ int error;
+
+ filp = (struct linux_file *)fp->f_data;
+ filp->f_flags = fp->f_flag;
+
+ if (fop->mmap == NULL)
+ return (EOPNOTSUPP);
+
+ linux_set_current(td);
+
+ /*
+ * The same VM object might be shared by multiple processes
+ * and the mm_struct is usually freed when a process exits.
+ *
+ * The atomic reference below makes sure the mm_struct is
+ * available as long as the vmap is in the linux_vma_head.
+ */
+ task = current;
+ mm = task->mm;
+ if (atomic_inc_not_zero(&mm->mm_users) == 0)
+ return (EINVAL);
+
+ vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
+ vmap->vm_start = 0;
+ vmap->vm_end = size;
+ vmap->vm_pgoff = *offset / PAGE_SIZE;
+ vmap->vm_pfn = 0;
+ vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
+ if (is_shared)
+ vmap->vm_flags |= VM_SHARED;
+ vmap->vm_ops = NULL;
+ vmap->vm_file = get_file(filp);
+ vmap->vm_mm = mm;
+
+ if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
+ error = linux_get_error(task, EINTR);
+ } else {
+ error = -OPW(fp, td, fop->mmap(filp, vmap));
+ error = linux_get_error(task, error);
+ up_write(&vmap->vm_mm->mmap_sem);
+ }
+
+ if (error != 0) {
+ linux_cdev_handle_free(vmap);
+ return (error);
+ }
+
+ attr = pgprot2cachemode(vmap->vm_page_prot);
+
+ if (vmap->vm_ops != NULL) {
+ struct vm_area_struct *ptr;
+ void *vm_private_data;
+ bool vm_no_fault;
+
+ if (vmap->vm_ops->open == NULL ||
+ vmap->vm_ops->close == NULL ||
+ vmap->vm_private_data == NULL) {
+ /* free allocated VM area struct */
+ linux_cdev_handle_free(vmap);
+ return (EINVAL);
+ }
+
+ vm_private_data = vmap->vm_private_data;
+
+ rw_wlock(&linux_vma_lock);
+ TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
+ if (ptr->vm_private_data == vm_private_data)
+ break;
+ }
+ /* check if there is an existing VM area struct */
+ if (ptr != NULL) {
+ /* check if the VM area structure is invalid */
+ if (ptr->vm_ops == NULL ||
+ ptr->vm_ops->open == NULL ||
+ ptr->vm_ops->close == NULL) {
+ error = ESTALE;
+ vm_no_fault = 1;
+ } else {
+ error = EEXIST;
+ vm_no_fault = (ptr->vm_ops->fault == NULL);
+ }
+ } else {
+ /* insert VM area structure into list */
+ TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
+ error = 0;
+ vm_no_fault = (vmap->vm_ops->fault == NULL);
+ }
+ rw_wunlock(&linux_vma_lock);
+
+ if (error != 0) {
+ /* free allocated VM area struct */
+ linux_cdev_handle_free(vmap);
+ /* check for stale VM area struct */
+ if (error != EEXIST)
+ return (error);
+ }
+
+ /* check if there is no fault handler */
+ if (vm_no_fault) {
+ *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
+ &linux_cdev_pager_ops[1], size, nprot, *offset,
+ td->td_ucred);
+ } else {
+ *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
+ &linux_cdev_pager_ops[0], size, nprot, *offset,
+ td->td_ucred);
+ }
+
+ /* check if allocating the VM object failed */
+ if (*object == NULL) {
+ if (error == 0) {
+ /* remove VM area struct from list */
+ linux_cdev_handle_remove(vmap);
+ /* free allocated VM area struct */
+ linux_cdev_handle_free(vmap);
+ }
+ return (EINVAL);
+ }
+ } else {
+ struct sglist *sg;
+
+ sg = sglist_alloc(1, M_WAITOK);
+ sglist_append_phys(sg,
+ (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);
+
+ *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
+ nprot, 0, td->td_ucred);
+
+ linux_cdev_handle_free(vmap);
+
+ if (*object == NULL) {
+ sglist_free(sg);
+ return (EINVAL);
+ }
+ }
+
+ if (attr != VM_MEMATTR_DEFAULT) {
+ VM_OBJECT_WLOCK(*object);
+ vm_object_set_memattr(*object, attr);
+ VM_OBJECT_WUNLOCK(*object);
+ }
+ *offset = 0;
+ return (0);
+}
+
+struct cdevsw linuxcdevsw = {
+ .d_version = D_VERSION,
+ .d_fdopen = linux_dev_fdopen,
+ .d_name = "lkpidev",
+};
+
+static int
+linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
+ int flags, struct thread *td)
+{
+ struct linux_file *filp;
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ ssize_t bytes;
+ int error;
+
+ error = 0;
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ /* XXX no support for I/O vectors currently */
+ if (uio->uio_iovcnt != 1)
+ return (EOPNOTSUPP);
+ if (uio->uio_resid > DEVFS_IOSIZE_MAX)
+ return (EINVAL);
+ linux_set_current(td);
+ linux_get_fop(filp, &fop, &ldev);
+ if (fop->read != NULL) {
+ bytes = OPW(file, td, fop->read(filp,
+ uio->uio_iov->iov_base,
+ uio->uio_iov->iov_len, &uio->uio_offset));
+ if (bytes >= 0) {
+ uio->uio_iov->iov_base =
+ ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+ uio->uio_iov->iov_len -= bytes;
+ uio->uio_resid -= bytes;
+ } else {
+ error = linux_get_error(current, -bytes);
+ }
+ } else
+ error = ENXIO;
+
+ /* update kqfilter status, if any */
+ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
+ linux_drop_fop(ldev);
+
+ return (error);
+}
+
+static int
+linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
+ int flags, struct thread *td)
+{
+ struct linux_file *filp;
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ ssize_t bytes;
+ int error;
+
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ /* XXX no support for I/O vectors currently */
+ if (uio->uio_iovcnt != 1)
+ return (EOPNOTSUPP);
+ if (uio->uio_resid > DEVFS_IOSIZE_MAX)
+ return (EINVAL);
+ linux_set_current(td);
+ linux_get_fop(filp, &fop, &ldev);
+ if (fop->write != NULL) {
+ bytes = OPW(file, td, fop->write(filp,
+ uio->uio_iov->iov_base,
+ uio->uio_iov->iov_len, &uio->uio_offset));
+ if (bytes >= 0) {
+ uio->uio_iov->iov_base =
+ ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+ uio->uio_iov->iov_len -= bytes;
+ uio->uio_resid -= bytes;
+ error = 0;
+ } else {
+ error = linux_get_error(current, -bytes);
+ }
+ } else
+ error = ENXIO;
+
+ /* update kqfilter status, if any */
+ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);
+
+ linux_drop_fop(ldev);
+
+ return (error);
+}
+
+static int
+linux_file_poll(struct file *file, int events, struct ucred *active_cred,
+ struct thread *td)
+{
+ struct linux_file *filp;
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ int revents;
+
+ filp = (struct linux_file *)file->f_data;
+ filp->f_flags = file->f_flag;
+ linux_set_current(td);
+ linux_get_fop(filp, &fop, &ldev);
+ if (fop->poll != NULL) {
+ revents = OPW(file, td, fop->poll(filp,
+ LINUX_POLL_TABLE_NORMAL)) & events;
+ } else {
+ revents = 0;
+ }
+ linux_drop_fop(ldev);
+ return (revents);
+}
+
+static int
+linux_file_close(struct file *file, struct thread *td)
+{
+ struct linux_file *filp;
+ int (*release)(struct inode *, struct linux_file *);
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ int error;
+
+ filp = (struct linux_file *)file->f_data;
+
+ KASSERT(file_count(filp) == 0,
+ ("File refcount(%d) is not zero", file_count(filp)));
+
+ if (td == NULL)
+ td = curthread;
+
+ error = 0;
+ filp->f_flags = file->f_flag;
+ linux_set_current(td);
+ linux_poll_wait_dequeue(filp);
+ linux_get_fop(filp, &fop, &ldev);
+ /*
+ * Always use the real release function, if any, to avoid
+ * leaking device resources:
+ */
+ release = filp->f_op->release;
+ if (release != NULL)
+ error = -OPW(file, td, release(filp->f_vnode, filp));
+ funsetown(&filp->f_sigio);
+ if (filp->f_vnode != NULL)
+ vrele(filp->f_vnode);
+ linux_drop_fop(ldev);
+ ldev = filp->f_cdev;
+ if (ldev != NULL)
+ linux_cdev_deref(ldev);
+ linux_synchronize_rcu(RCU_TYPE_REGULAR);
+ kfree(filp);
+
+ return (error);
+}
+
+static int
+linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
+ struct thread *td)
+{
+ struct linux_file *filp;
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ struct fiodgname_arg *fgn;
+ const char *p;
+ int error, i;
+
+ error = 0;
+ filp = (struct linux_file *)fp->f_data;
+ filp->f_flags = fp->f_flag;
+ linux_get_fop(filp, &fop, &ldev);
+
+ linux_set_current(td);
+ switch (cmd) {
+ case FIONBIO:
+ break;
+ case FIOASYNC:
+ if (fop->fasync == NULL)
+ break;
+ error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
+ break;
+ case FIOSETOWN:
+ error = fsetown(*(int *)data, &filp->f_sigio);
+ if (error == 0) {
+ if (fop->fasync == NULL)
+ break;
+ error = -OPW(fp, td, fop->fasync(0, filp,
+ fp->f_flag & FASYNC));
+ }
+ break;
+ case FIOGETOWN:
+ *(int *)data = fgetown(&filp->f_sigio);
+ break;
+ case FIODGNAME:
+#ifdef COMPAT_FREEBSD32
+ case FIODGNAME_32:
+#endif
+ if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) {
+ error = ENXIO;
+ break;
+ }
+ fgn = data;
+ p = devtoname(filp->f_cdev->cdev);
+ i = strlen(p) + 1;
+ if (i > fgn->len) {
+ error = EINVAL;
+ break;
+ }
+ error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i);
+ break;
+ default:
+ error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
+ break;
+ }
+ linux_drop_fop(ldev);
+ return (error);
+}
+
+static int
+linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
+ vm_prot_t maxprot, int flags, struct file *fp,
+ vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
+{
+ /*
+ * Character devices do not provide private mappings
+ * of any kind:
+ */
+ if ((maxprot & VM_PROT_WRITE) == 0 &&
+ (prot & VM_PROT_WRITE) != 0)
+ return (EACCES);
+ if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0)
+ return (EINVAL);
+
+ return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
+ (int)prot, (flags & MAP_SHARED) ? true : false, td));
+}
+
+static int
+linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
+ vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
+ struct thread *td)
+{
+ struct linux_file *filp;
+ const struct file_operations *fop;
+ struct linux_cdev *ldev;
+ struct mount *mp;
+ struct vnode *vp;
+ vm_object_t object;
+ vm_prot_t maxprot;
+ int error;
+
+ filp = (struct linux_file *)fp->f_data;
+
+ vp = filp->f_vnode;
+ if (vp == NULL)
+ return (EOPNOTSUPP);
+
+ /*
+ * Ensure that file and memory protections are
+ * compatible.
+ */
+ mp = vp->v_mount;
+ if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
+ maxprot = VM_PROT_NONE;
+ if ((prot & VM_PROT_EXECUTE) != 0)
+ return (EACCES);
+ } else
+ maxprot = VM_PROT_EXECUTE;
+ if ((fp->f_flag & FREAD) != 0)
+ maxprot |= VM_PROT_READ;
+ else if ((prot & VM_PROT_READ) != 0)
+ return (EACCES);
+
+ /*
+ * If we are sharing potential changes via MAP_SHARED and we
+ * are trying to get write permission although we opened it
+ * without asking for it, bail out.
+ *
+ * Note that most character devices always share mappings.
+ *
+ * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
+ * requests rather than doing it here.
+ */
+ if ((flags & MAP_SHARED) != 0) {
+ if ((fp->f_flag & FWRITE) != 0)
+ maxprot |= VM_PROT_WRITE;
+ else if ((prot & VM_PROT_WRITE) != 0)
+ return (EACCES);
+ }
+ maxprot &= cap_maxprot;
+
+ linux_get_fop(filp, &fop, &ldev);
+ error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp,
+ &foff, fop, &object);
+ if (error != 0)
+ goto out;
+
+ error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
+ foff, FALSE, td);
+ if (error != 0)
+ vm_object_deallocate(object);
+out:
+ linux_drop_fop(ldev);
+ return (error);
+}
+
+static int
+linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
+{
+ struct linux_file *filp;
+ struct vnode *vp;
+ int error;
+
+ filp = (struct linux_file *)fp->f_data;
+ if (filp->f_vnode == NULL)
+ return (EOPNOTSUPP);
+
+ vp = filp->f_vnode;
+
+ vn_lock(vp, LK_SHARED | LK_RETRY);
+ error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED);
+ VOP_UNLOCK(vp);
+
+ return (error);
+}
+
+static int
+linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
+ struct filedesc *fdp)
+{
+ struct linux_file *filp;
+ struct vnode *vp;
+ int error;
+
+ filp = fp->f_data;
+ vp = filp->f_vnode;
+ if (vp == NULL) {
+ error = 0;
+ kif->kf_type = KF_TYPE_DEV;
+ } else {
+ vref(vp);
+ FILEDESC_SUNLOCK(fdp);
+ error = vn_fill_kinfo_vnode(vp, kif);
+ vrele(vp);
+ kif->kf_type = KF_TYPE_VNODE;
+ FILEDESC_SLOCK(fdp);
+ }
+ return (error);
+}
+
+unsigned int
+linux_iminor(struct inode *inode)
+{
+ struct linux_cdev *ldev;
+
+ if (inode == NULL || inode->v_rdev == NULL ||
+ inode->v_rdev->si_devsw != &linuxcdevsw)
+ return (-1U);
+ ldev = inode->v_rdev->si_drv1;
+ if (ldev == NULL)
+ return (-1U);
+
+ return (minor(ldev->dev));
+}
+
+static int
+linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td)
+{
+ struct linux_file *filp1, *filp2;
+
+ if (fp2->f_type != DTYPE_DEV)
+ return (3);
+
+ filp1 = fp1->f_data;
+ filp2 = fp2->f_data;
+ return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev));
+}
+
+const struct fileops linuxfileops = {
+ .fo_read = linux_file_read,
+ .fo_write = linux_file_write,
+ .fo_truncate = invfo_truncate,
+ .fo_kqfilter = linux_file_kqfilter,
+ .fo_stat = linux_file_stat,
+ .fo_fill_kinfo = linux_file_fill_kinfo,
+ .fo_poll = linux_file_poll,
+ .fo_close = linux_file_close,
+ .fo_ioctl = linux_file_ioctl,
+ .fo_mmap = linux_file_mmap,
+ .fo_chmod = invfo_chmod,
+ .fo_chown = invfo_chown,
+ .fo_sendfile = invfo_sendfile,
+ .fo_cmp = linux_file_kcmp,
+ .fo_flags = DFLAG_PASSABLE,
+};
+
+/*
+ * Hash of vmmap addresses. This is infrequently accessed and does not
+ * need to be particularly large. This is done because we must store the
+ * caller's idea of the map size to properly unmap.
+ */
+struct vmmap {
+ LIST_ENTRY(vmmap) vm_next;
+ void *vm_addr;
+ unsigned long vm_size;
+};
+
+struct vmmaphd {
+ struct vmmap *lh_first;
+};
+#define VMMAP_HASH_SIZE 64
+#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
+#define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
+static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
+static struct mtx vmmaplock;
+
+static void
+vmmap_add(void *addr, unsigned long size)
+{
+ struct vmmap *vmmap;
+
+ vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
+ mtx_lock(&vmmaplock);
+ vmmap->vm_size = size;
+ vmmap->vm_addr = addr;
+ LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
+ mtx_unlock(&vmmaplock);
+}
+
+static struct vmmap *
+vmmap_remove(void *addr)
+{
+ struct vmmap *vmmap;
+
+ mtx_lock(&vmmaplock);
+ LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
+ if (vmmap->vm_addr == addr)
+ break;
+ if (vmmap)
+ LIST_REMOVE(vmmap, vm_next);
+ mtx_unlock(&vmmaplock);
+
+ return (vmmap);
+}
+
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
+void *
+_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
+{
+ void *addr;
+
+ addr = pmap_mapdev_attr(phys_addr, size, attr);
+ if (addr == NULL)
+ return (NULL);
+ vmmap_add(addr, size);
+
+ return (addr);
+}
+#endif
+
+void
+iounmap(void *addr)
+{
+ struct vmmap *vmmap;
+
+ vmmap = vmmap_remove(addr);
+ if (vmmap == NULL)
+ return;
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
+ pmap_unmapdev(addr, vmmap->vm_size);
+#endif
+ kfree(vmmap);
+}
+
+void *
+vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
+{
+ vm_offset_t off;
+ size_t size;
+
+ size = count * PAGE_SIZE;
+ off = kva_alloc(size);
+ if (off == 0)
+ return (NULL);
+ vmmap_add((void *)off, size);
+ pmap_qenter(off, pages, count);
+
+ return ((void *)off);
+}
+
+void
+vunmap(void *addr)
+{
+ struct vmmap *vmmap;
+
+ vmmap = vmmap_remove(addr);
+ if (vmmap == NULL)
+ return;
+ pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
+ kva_free((vm_offset_t)addr, vmmap->vm_size);
+ kfree(vmmap);
+}
+
+static char *
+devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ if (dev != NULL)
+ p = devm_kmalloc(dev, len + 1, gfp);
+ else
+ p = kmalloc(len + 1, gfp);
+ if (p != NULL)
+ vsnprintf(p, len + 1, fmt, ap);
+
+ return (p);
+}
+
+char *
+kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
+{
+
+ return (devm_kvasprintf(NULL, gfp, fmt, ap));
+}
+
+char *
+lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = devm_kvasprintf(dev, gfp, fmt, ap);
+ va_end(ap);
+
+ return (p);
+}
+
+char *
+kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(gfp, fmt, ap);
+ va_end(ap);
+
+ return (p);
+}
+
+int
+__lkpi_hexdump_printf(void *arg1 __unused, const char *fmt, ...)
+{
+ va_list ap;
+ int result;
+
+ va_start(ap, fmt);
+ result = vprintf(fmt, ap);
+ va_end(ap);
+ return (result);
+}
+
+int
+__lkpi_hexdump_sbuf_printf(void *arg1, const char *fmt, ...)
+{
+ va_list ap;
+ int result;
+
+ va_start(ap, fmt);
+ result = sbuf_vprintf(arg1, fmt, ap);
+ va_end(ap);
+ return (result);
+}
+
+void
+lkpi_hex_dump(int(*_fpf)(void *, const char *, ...), void *arg1,
+ const char *level, const char *prefix_str,
+ const int prefix_type, const int rowsize, const int groupsize,
+ const void *buf, size_t len, const bool ascii)
+{
+ typedef const struct { long long value; } __packed *print_64p_t;
+ typedef const struct { uint32_t value; } __packed *print_32p_t;
+ typedef const struct { uint16_t value; } __packed *print_16p_t;
+ const void *buf_old = buf;
+ int row;
+
+ while (len > 0) {
+ if (level != NULL)
+ _fpf(arg1, "%s", level);
+ if (prefix_str != NULL)
+ _fpf(arg1, "%s ", prefix_str);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ _fpf(arg1, "[%p] ", buf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ _fpf(arg1, "[%#tx] ", ((const char *)buf -
+ (const char *)buf_old));
+ break;
+ default:
+ break;
+ }
+ for (row = 0; row != rowsize; row++) {
+ if (groupsize == 8 && len > 7) {
+ _fpf(arg1, "%016llx ", ((print_64p_t)buf)->value);
+ buf = (const uint8_t *)buf + 8;
+ len -= 8;
+ } else if (groupsize == 4 && len > 3) {
+ _fpf(arg1, "%08x ", ((print_32p_t)buf)->value);
+ buf = (const uint8_t *)buf + 4;
+ len -= 4;
+ } else if (groupsize == 2 && len > 1) {
+ _fpf(arg1, "%04x ", ((print_16p_t)buf)->value);
+ buf = (const uint8_t *)buf + 2;
+ len -= 2;
+ } else if (len > 0) {
+ _fpf(arg1, "%02x ", *(const uint8_t *)buf);
+ buf = (const uint8_t *)buf + 1;
+ len--;
+ } else {
+ break;
+ }
+ }
+ _fpf(arg1, "\n");
+ }
+}
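+
+/*
+ * Illustrative example (not part of this change): the call
+ *
+ *	lkpi_hex_dump(__lkpi_hexdump_printf, NULL, NULL, "dump",
+ *	    DUMP_PREFIX_OFFSET, 16, 1, "abcd", 4, false);
+ *
+ * prints a single row of the form
+ *
+ *	dump [0] 61 62 63 64
+ *
+ * since a group size of 1 falls through to the byte-wise branch and the
+ * four input bytes fit within one row of 16.
+ */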
+
+static void
+linux_timer_callback_wrapper(void *context)
+{
+ struct timer_list *timer;
+
+ timer = context;
+
+	/* the timer is about to be shut down permanently */
+ if (timer->function == NULL)
+ return;
+
+ if (linux_set_current_flags(curthread, M_NOWAIT)) {
+ /* try again later */
+ callout_reset(&timer->callout, 1,
+ &linux_timer_callback_wrapper, timer);
+ return;
+ }
+
+ timer->function(timer->data);
+}
+
+static int
+linux_timer_jiffies_until(unsigned long expires)
+{
+ unsigned long delta = expires - jiffies;
+
+ /*
+ * Guard against already expired values and make sure that the value can
+ * be used as a tick count, rather than a jiffies count.
+ */
+ if ((long)delta < 1)
+ delta = 1;
+ else if (delta > INT_MAX)
+ delta = INT_MAX;
+ return ((int)delta);
+}
+
+int
+mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ int ret;
+
+ timer->expires = expires;
+ ret = callout_reset(&timer->callout,
+ linux_timer_jiffies_until(expires),
+ &linux_timer_callback_wrapper, timer);
+
+ MPASS(ret == 0 || ret == 1);
+
+ return (ret == 1);
+}
+
+void
+add_timer(struct timer_list *timer)
+{
+
+ callout_reset(&timer->callout,
+ linux_timer_jiffies_until(timer->expires),
+ &linux_timer_callback_wrapper, timer);
+}
+
+void
+add_timer_on(struct timer_list *timer, int cpu)
+{
+
+ callout_reset_on(&timer->callout,
+ linux_timer_jiffies_until(timer->expires),
+ &linux_timer_callback_wrapper, timer, cpu);
+}
+
+int
+del_timer(struct timer_list *timer)
+{
+
+ if (callout_stop(&(timer)->callout) == -1)
+ return (0);
+ return (1);
+}
+
+int
+del_timer_sync(struct timer_list *timer)
+{
+
+ if (callout_drain(&(timer)->callout) == -1)
+ return (0);
+ return (1);
+}
+
+int
+timer_delete_sync(struct timer_list *timer)
+{
+
+ return (del_timer_sync(timer));
+}
+
+int
+timer_shutdown_sync(struct timer_list *timer)
+{
+
+ timer->function = NULL;
+ return (del_timer_sync(timer));
+}
+
+/* greatest common divisor, Euclid's algorithm */
+static uint64_t
+lkpi_gcd_64(uint64_t a, uint64_t b)
+{
+ uint64_t an;
+ uint64_t bn;
+
+ while (b != 0) {
+ an = b;
+ bn = a % b;
+ a = an;
+ b = bn;
+ }
+ return (a);
+}
+
+uint64_t lkpi_nsec2hz_rem;
+uint64_t lkpi_nsec2hz_div = 1000000000ULL;
+uint64_t lkpi_nsec2hz_max;
+
+uint64_t lkpi_usec2hz_rem;
+uint64_t lkpi_usec2hz_div = 1000000ULL;
+uint64_t lkpi_usec2hz_max;
+
+uint64_t lkpi_msec2hz_rem;
+uint64_t lkpi_msec2hz_div = 1000ULL;
+uint64_t lkpi_msec2hz_max;
+
+static void
+linux_timer_init(void *arg)
+{
+ uint64_t gcd;
+
+ /*
+ * Compute an internal HZ value which can divide 2**32 to
+ * avoid timer rounding problems when the tick value wraps
+ * around 2**32:
+ */
+ linux_timer_hz_mask = 1;
+ while (linux_timer_hz_mask < (unsigned long)hz)
+ linux_timer_hz_mask *= 2;
+ linux_timer_hz_mask--;
+
+ /* compute some internal constants */
+
+ lkpi_nsec2hz_rem = hz;
+ lkpi_usec2hz_rem = hz;
+ lkpi_msec2hz_rem = hz;
+
+ gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div);
+ lkpi_nsec2hz_rem /= gcd;
+ lkpi_nsec2hz_div /= gcd;
+ lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem;
+
+ gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div);
+ lkpi_usec2hz_rem /= gcd;
+ lkpi_usec2hz_div /= gcd;
+ lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem;
+
+ gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div);
+ lkpi_msec2hz_rem /= gcd;
+ lkpi_msec2hz_div /= gcd;
+ lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem;
+}
+SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
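+
+/*
+ * Worked example (illustrative): with hz = 1000, lkpi_gcd_64(1000,
+ * 1000000000) is 1000, so the initialization above leaves
+ * lkpi_nsec2hz_rem = 1 and lkpi_nsec2hz_div = 1000000, while
+ * lkpi_msec2hz_rem and lkpi_msec2hz_div both reduce to 1 (milliseconds map
+ * 1:1 to ticks). lkpi_nsec2hz_max then bounds the largest value that may be
+ * multiplied by the remainder without overflowing 64 bits.
+ */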
+
+void
+linux_complete_common(struct completion *c, int all)
+{
+ sleepq_lock(c);
+ if (all) {
+ c->done = UINT_MAX;
+ sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+ } else {
+ if (c->done != UINT_MAX)
+ c->done++;
+ sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+ }
+ sleepq_release(c);
+}
+
+/*
+ * Indefinite wait for done != 0 with or without signals.
+ */
+int
+linux_wait_for_common(struct completion *c, int flags)
+{
+ struct task_struct *task;
+ int error;
+
+ if (SCHEDULER_STOPPED())
+ return (0);
+
+ task = current;
+
+ if (flags != 0)
+ flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
+ else
+ flags = SLEEPQ_SLEEP;
+ error = 0;
+ for (;;) {
+ sleepq_lock(c);
+ if (c->done)
+ break;
+ sleepq_add(c, NULL, "completion", flags, 0);
+ if (flags & SLEEPQ_INTERRUPTIBLE) {
+ DROP_GIANT();
+ error = -sleepq_wait_sig(c, 0);
+ PICKUP_GIANT();
+ if (error != 0) {
+ linux_schedule_save_interrupt_value(task, error);
+ error = -ERESTARTSYS;
+ goto intr;
+ }
+ } else {
+ DROP_GIANT();
+ sleepq_wait(c, 0);
+ PICKUP_GIANT();
+ }
+ }
+ if (c->done != UINT_MAX)
+ c->done--;
+ sleepq_release(c);
+
+intr:
+ return (error);
+}
+
+/*
+ * Time limited wait for done != 0 with or without signals.
+ */
+unsigned long
+linux_wait_for_timeout_common(struct completion *c, unsigned long timeout,
+ int flags)
+{
+ struct task_struct *task;
+ unsigned long end = jiffies + timeout, error;
+
+ if (SCHEDULER_STOPPED())
+ return (0);
+
+ task = current;
+
+ if (flags != 0)
+ flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
+ else
+ flags = SLEEPQ_SLEEP;
+
+ for (;;) {
+ sleepq_lock(c);
+ if (c->done)
+ break;
+ sleepq_add(c, NULL, "completion", flags, 0);
+ sleepq_set_timeout(c, linux_timer_jiffies_until(end));
+
+ DROP_GIANT();
+ if (flags & SLEEPQ_INTERRUPTIBLE)
+ error = -sleepq_timedwait_sig(c, 0);
+ else
+ error = -sleepq_timedwait(c, 0);
+ PICKUP_GIANT();
+
+ if (error != 0) {
+ /* check for timeout */
+ if (error == -EWOULDBLOCK) {
+ error = 0; /* timeout */
+ } else {
+ /* signal happened */
+ linux_schedule_save_interrupt_value(task, error);
+ error = -ERESTARTSYS;
+ }
+ goto done;
+ }
+ }
+ if (c->done != UINT_MAX)
+ c->done--;
+ sleepq_release(c);
+
+ /* return how many jiffies are left */
+ error = linux_timer_jiffies_until(end);
+done:
+ return (error);
+}
+
+int
+linux_try_wait_for_completion(struct completion *c)
+{
+ int isdone;
+
+ sleepq_lock(c);
+ isdone = (c->done != 0);
+ if (c->done != 0 && c->done != UINT_MAX)
+ c->done--;
+ sleepq_release(c);
+ return (isdone);
+}
+
+int
+linux_completion_done(struct completion *c)
+{
+ int isdone;
+
+ sleepq_lock(c);
+ isdone = (c->done != 0);
+ sleepq_release(c);
+ return (isdone);
+}
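These routines back the Linux completion API; the LinuxKPI completion.h wrappers map complete(), complete_all() and the wait_for_completion*() family onto them. A minimal consumer sketch (illustrative only, not part of the patch; the names are made up):

static struct completion work_done;

static void
example_worker_finished(void)
{
	complete(&work_done);			/* wake one waiter */
}

static int
example_wait_for_worker(void)
{
	init_completion(&work_done);
	/* ... kick off the worker ... */
	if (wait_for_completion_interruptible(&work_done) != 0)
		return (-ERESTARTSYS);		/* interrupted by a signal */
	return (0);
}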
+
+static void
+linux_cdev_deref(struct linux_cdev *ldev)
+{
+ if (refcount_release(&ldev->refs) &&
+ ldev->kobj.ktype == &linux_cdev_ktype)
+ kfree(ldev);
+}
+
+static void
+linux_cdev_release(struct kobject *kobj)
+{
+ struct linux_cdev *cdev;
+ struct kobject *parent;
+
+ cdev = container_of(kobj, struct linux_cdev, kobj);
+ parent = kobj->parent;
+ linux_destroy_dev(cdev);
+ linux_cdev_deref(cdev);
+ kobject_put(parent);
+}
+
+static void
+linux_cdev_static_release(struct kobject *kobj)
+{
+ struct cdev *cdev;
+ struct linux_cdev *ldev;
+
+ ldev = container_of(kobj, struct linux_cdev, kobj);
+ cdev = ldev->cdev;
+ if (cdev != NULL) {
+ destroy_dev(cdev);
+ ldev->cdev = NULL;
+ }
+ kobject_put(kobj->parent);
+}
+
+int
+linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev)
+{
+ int ret;
+
+ if (dev->devt != 0) {
+ /* Set parent kernel object. */
+ ldev->kobj.parent = &dev->kobj;
+
+ /*
+ * Unlike Linux we require the kobject of the
+ * character device structure to have a valid name
+ * before calling this function:
+ */
+ if (ldev->kobj.name == NULL)
+ return (-EINVAL);
+
+ ret = cdev_add(ldev, dev->devt, 1);
+ if (ret)
+ return (ret);
+ }
+ ret = device_add(dev);
+ if (ret != 0 && dev->devt != 0)
+ cdev_del(ldev);
+ return (ret);
+}
+
+void
+linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev)
+{
+ device_del(dev);
+
+ if (dev->devt != 0)
+ cdev_del(ldev);
+}
+
+static void
+linux_destroy_dev(struct linux_cdev *ldev)
+{
+
+ if (ldev->cdev == NULL)
+ return;
+
+ MPASS((ldev->siref & LDEV_SI_DTR) == 0);
+ MPASS(ldev->kobj.ktype == &linux_cdev_ktype);
+
+ atomic_set_int(&ldev->siref, LDEV_SI_DTR);
+ while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
+ pause("ldevdtr", hz / 4);
+
+ destroy_dev(ldev->cdev);
+ ldev->cdev = NULL;
+}
+
+const struct kobj_type linux_cdev_ktype = {
+ .release = linux_cdev_release,
+};
+
+const struct kobj_type linux_cdev_static_ktype = {
+ .release = linux_cdev_static_release,
+};
+
+static void
+linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
+{
+ struct notifier_block *nb;
+ struct netdev_notifier_info ni;
+
+ nb = arg;
+ ni.ifp = ifp;
+ ni.dev = (struct net_device *)ifp;
+ if (linkstate == LINK_STATE_UP)
+ nb->notifier_call(nb, NETDEV_UP, &ni);
+ else
+ nb->notifier_call(nb, NETDEV_DOWN, &ni);
+}
+
+static void
+linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
+{
+ struct notifier_block *nb;
+ struct netdev_notifier_info ni;
+
+ nb = arg;
+ ni.ifp = ifp;
+ ni.dev = (struct net_device *)ifp;
+ nb->notifier_call(nb, NETDEV_REGISTER, &ni);
+}
+
+static void
+linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
+{
+ struct notifier_block *nb;
+ struct netdev_notifier_info ni;
+
+ nb = arg;
+ ni.ifp = ifp;
+ ni.dev = (struct net_device *)ifp;
+ nb->notifier_call(nb, NETDEV_UNREGISTER, &ni);
+}
+
+static void
+linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
+{
+ struct notifier_block *nb;
+ struct netdev_notifier_info ni;
+
+ nb = arg;
+ ni.ifp = ifp;
+ ni.dev = (struct net_device *)ifp;
+ nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni);
+}
+
+static void
+linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
+{
+ struct notifier_block *nb;
+ struct netdev_notifier_info ni;
+
+ nb = arg;
+ ni.ifp = ifp;
+ ni.dev = (struct net_device *)ifp;
+ nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni);
+}
+
+int
+register_netdevice_notifier(struct notifier_block *nb)
+{
+
+ nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
+ ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
+ nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
+ ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
+ nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
+ ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
+ nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
+ iflladdr_event, linux_handle_iflladdr_event, nb, 0);
+
+ return (0);
+}
+
+int
+register_inetaddr_notifier(struct notifier_block *nb)
+{
+
+ nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
+ ifaddr_event, linux_handle_ifaddr_event, nb, 0);
+ return (0);
+}
+
+int
+unregister_netdevice_notifier(struct notifier_block *nb)
+{
+
+ EVENTHANDLER_DEREGISTER(ifnet_link_event,
+ nb->tags[NETDEV_UP]);
+ EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
+ nb->tags[NETDEV_REGISTER]);
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event,
+ nb->tags[NETDEV_UNREGISTER]);
+ EVENTHANDLER_DEREGISTER(iflladdr_event,
+ nb->tags[NETDEV_CHANGEADDR]);
+
+ return (0);
+}
+
+int
+unregister_inetaddr_notifier(struct notifier_block *nb)
+{
+
+ EVENTHANDLER_DEREGISTER(ifaddr_event,
+ nb->tags[NETDEV_CHANGEIFADDR]);
+
+ return (0);
+}
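A minimal sketch of a notifier consumer for the shims above (illustrative only, not part of the patch; the callback body is made up, and NOTIFY_DONE follows the Linux notifier convention mirrored by the LinuxKPI header):

static int
example_netdev_event(struct notifier_block *nb, unsigned long event,
    void *data)
{
	if (event == NETDEV_UP)
		printf("lkpi example: an interface came up\n");
	return (NOTIFY_DONE);
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/*
 * register_netdevice_notifier(&example_nb) wires up the EVENTHANDLERs above;
 * unregister_netdevice_notifier(&example_nb) removes them again.
 */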
+
+struct list_sort_thunk {
+ int (*cmp)(void *, struct list_head *, struct list_head *);
+ void *priv;
+};
+
+static inline int
+linux_le_cmp(const void *d1, const void *d2, void *priv)
+{
+ struct list_head *le1, *le2;
+ struct list_sort_thunk *thunk;
+
+ thunk = priv;
+ le1 = *(__DECONST(struct list_head **, d1));
+ le2 = *(__DECONST(struct list_head **, d2));
+ return ((thunk->cmp)(thunk->priv, le1, le2));
+}
+
+void
+list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
+ struct list_head *a, struct list_head *b))
+{
+ struct list_sort_thunk thunk;
+ struct list_head **ar, *le;
+ size_t count, i;
+
+ count = 0;
+ list_for_each(le, head)
+ count++;
+ ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
+ i = 0;
+ list_for_each(le, head)
+ ar[i++] = le;
+ thunk.cmp = cmp;
+ thunk.priv = priv;
+ qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk);
+ INIT_LIST_HEAD(head);
+ for (i = 0; i < count; i++)
+ list_add_tail(ar[i], head);
+ free(ar, M_KMALLOC);
+}
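list_sort() flattens the list into an array, sorts it with qsort_r(9) through the thunk, and relinks the nodes in order. A usage sketch (illustrative only, not part of the patch; struct item and its list are made up):

struct item {
	int			key;
	struct list_head	entry;
};

static int
example_item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = list_entry(a, struct item, entry);
	struct item *ib = list_entry(b, struct item, entry);

	return (ia->key - ib->key);		/* ascending by key */
}

/* list_sort(NULL, &example_item_list, example_item_cmp); */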
+
+#if defined(__i386__) || defined(__amd64__)
+int
+linux_wbinvd_on_all_cpus(void)
+{
+
+ pmap_invalidate_cache();
+ return (0);
+}
+#endif
+
+int
+linux_on_each_cpu(void callback(void *), void *data)
+{
+
+ smp_rendezvous(smp_no_rendezvous_barrier, callback,
+ smp_no_rendezvous_barrier, data);
+ return (0);
+}
+
+int
+linux_in_atomic(void)
+{
+
+ return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
+}
+
+struct linux_cdev *
+linux_find_cdev(const char *name, unsigned major, unsigned minor)
+{
+ dev_t dev = MKDEV(major, minor);
+ struct cdev *cdev;
+
+ dev_lock();
+ LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
+ struct linux_cdev *ldev = cdev->si_drv1;
+ if (ldev->dev == dev &&
+ strcmp(kobject_name(&ldev->kobj), name) == 0) {
+ break;
+ }
+ }
+ dev_unlock();
+
+ return (cdev != NULL ? cdev->si_drv1 : NULL);
+}
+
+int
+__register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops)
+{
+ struct linux_cdev *cdev;
+ int ret = 0;
+ int i;
+
+ for (i = baseminor; i < baseminor + count; i++) {
+ cdev = cdev_alloc();
+ cdev->ops = fops;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, makedev(major, i), 1);
+ if (ret != 0)
+ break;
+ }
+ return (ret);
+}
+
+int
+__register_chrdev_p(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops, uid_t uid,
+ gid_t gid, int mode)
+{
+ struct linux_cdev *cdev;
+ int ret = 0;
+ int i;
+
+ for (i = baseminor; i < baseminor + count; i++) {
+ cdev = cdev_alloc();
+ cdev->ops = fops;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
+ if (ret != 0)
+ break;
+ }
+ return (ret);
+}
+
+void
+__unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name)
+{
+ struct linux_cdev *cdevp;
+ int i;
+
+ for (i = baseminor; i < baseminor + count; i++) {
+ cdevp = linux_find_cdev(name, major, i);
+ if (cdevp != NULL)
+ cdev_del(cdevp);
+ }
+}
+
+void
+linux_dump_stack(void)
+{
+#ifdef STACK
+ struct stack st;
+
+ stack_save(&st);
+ stack_print(&st);
+#endif
+}
+
+int
+linuxkpi_net_ratelimit(void)
+{
+
+ return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps,
+ lkpi_net_maxpps));
+}
+
+struct io_mapping *
+io_mapping_create_wc(resource_size_t base, unsigned long size)
+{
+ struct io_mapping *mapping;
+
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ if (mapping == NULL)
+ return (NULL);
+ return (io_mapping_init_wc(mapping, base, size));
+}
+
+/* We likely want a linuxkpi_device.c at some point. */
+bool
+device_can_wakeup(struct device *dev)
+{
+
+ if (dev == NULL)
+ return (false);
+ /*
+ * XXX-BZ iwlwifi queries it as part of enabling WoWLAN.
+ * Normally this would be based on a bool in dev->power.XXX.
+	 * Check something such as the PCI PCIM_PCAP_*PME capability bits. We
+	 * have no way to enable this yet. We may get away with calling
+	 * directly into bsddev for as long as we can assume PCI only,
+	 * avoiding a change to struct device that would break the KBI.
+ */
+ pr_debug("%s:%d: not enabled; see comment.\n", __func__, __LINE__);
+ return (false);
+}
+
+static void
+devm_device_group_remove(struct device *dev, void *p)
+{
+ const struct attribute_group **dr = p;
+ const struct attribute_group *group = *dr;
+
+ sysfs_remove_group(&dev->kobj, group);
+}
+
+int
+lkpi_devm_device_add_group(struct device *dev,
+ const struct attribute_group *group)
+{
+ const struct attribute_group **dr;
+ int ret;
+
+ dr = devres_alloc(devm_device_group_remove, sizeof(*dr), GFP_KERNEL);
+ if (dr == NULL)
+ return (-ENOMEM);
+
+ ret = sysfs_create_group(&dev->kobj, group);
+ if (ret == 0) {
+ *dr = group;
+ devres_add(dev, dr);
+ } else
+ devres_free(dr);
+
+ return (ret);
+}
+
+#if defined(__i386__) || defined(__amd64__)
+bool linux_cpu_has_clflush;
+struct cpuinfo_x86 boot_cpu_data;
+struct cpuinfo_x86 *__cpu_data;
+#endif
+
+cpumask_t *
+lkpi_get_static_single_cpu_mask(int cpuid)
+{
+
+ KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n",
+ __func__, cpuid));
+ KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n",
+ __func__, cpuid));
+
+ return (static_single_cpu_mask[cpuid]);
+}
+
+bool
+lkpi_xen_initial_domain(void)
+{
+#ifdef XENHVM
+ return (xen_initial_domain());
+#else
+ return (false);
+#endif
+}
+
+bool
+lkpi_xen_pv_domain(void)
+{
+#ifdef XENHVM
+ return (xen_pv_domain());
+#else
+ return (false);
+#endif
+}
+
+static void
+linux_compat_init(void *arg)
+{
+ struct sysctl_oid *rootoid;
+ int i;
+
+#if defined(__i386__) || defined(__amd64__)
+ static const uint32_t x86_vendors[X86_VENDOR_NUM] = {
+ [X86_VENDOR_INTEL] = CPU_VENDOR_INTEL,
+ [X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX,
+ [X86_VENDOR_AMD] = CPU_VENDOR_AMD,
+ [X86_VENDOR_UMC] = CPU_VENDOR_UMC,
+ [X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR,
+ [X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA,
+ [X86_VENDOR_NSC] = CPU_VENDOR_NSC,
+ [X86_VENDOR_HYGON] = CPU_VENDOR_HYGON,
+ };
+ uint8_t x86_vendor = X86_VENDOR_UNKNOWN;
+
+ for (i = 0; i < X86_VENDOR_NUM; i++) {
+ if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) {
+ x86_vendor = i;
+ break;
+ }
+ }
+ linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
+ boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
+ boot_cpu_data.x86_max_cores = mp_ncpus;
+ boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id);
+ boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);
+ boot_cpu_data.x86_vendor = x86_vendor;
+
+ __cpu_data = kmalloc_array(mp_maxid + 1,
+ sizeof(*__cpu_data), M_WAITOK | M_ZERO);
+ CPU_FOREACH(i) {
+ __cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
+ __cpu_data[i].x86_max_cores = mp_ncpus;
+ __cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id);
+ __cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id);
+ __cpu_data[i].x86_vendor = x86_vendor;
+ }
+#endif
+ rw_init(&linux_vma_lock, "lkpi-vma-lock");
+
+ rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
+ OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
+ kobject_init(&linux_class_root, &linux_class_ktype);
+ kobject_set_name(&linux_class_root, "class");
+ linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
+ OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
+ kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
+ kobject_set_name(&linux_root_device.kobj, "device");
+ linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
+ SYSCTL_CHILDREN(rootoid), OID_AUTO, "device",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device");
+ linux_root_device.bsddev = root_bus;
+ linux_class_misc.name = "misc";
+ class_register(&linux_class_misc);
+ INIT_LIST_HEAD(&pci_drivers);
+ INIT_LIST_HEAD(&pci_devices);
+ spin_lock_init(&pci_lock);
+ mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
+ for (i = 0; i < VMMAP_HASH_SIZE; i++)
+ LIST_INIT(&vmmaphead[i]);
+ init_waitqueue_head(&linux_bit_waitq);
+ init_waitqueue_head(&linux_var_waitq);
+
+ CPU_COPY(&all_cpus, &cpu_online_mask);
+ /*
+ * Generate a single-CPU cpumask_t for each CPU (possibly) in the system.
+ * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only
+	 * have itself in the cpumask, cpuid 1 only itself on entry 1, and so on.
+ * This is used by cpumask_of() (and possibly others in the future) for,
+ * e.g., drivers to pass hints to irq_set_affinity_hint().
+ */
+ static_single_cpu_mask = kmalloc_array(mp_maxid + 1,
+ sizeof(static_single_cpu_mask), M_WAITOK | M_ZERO);
+
+ /*
+	 * When the number of CPUs reaches a threshold, we start to save memory,
+	 * given the sets are static, by overlapping those having their single
+	 * bit set at the same position in a bitset word. Asymptotically, this
+ * regular scheme is in O(n²) whereas the overlapping one is in O(n)
+ * only with n being the maximum number of CPUs, so the gain will become
+ * huge quite quickly. The threshold for 64-bit architectures is 128
+ * CPUs.
+ */
+ if (mp_ncpus < (2 * _BITSET_BITS)) {
+ cpumask_t *sscm_ptr;
+
+ /*
+ * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) *
+ * (_BITSET_BITS / 8)' bytes (for comparison with the
+ * overlapping scheme).
+ */
+ static_single_cpu_mask_lcs = kmalloc_array(mp_ncpus,
+ sizeof(*static_single_cpu_mask_lcs),
+ M_WAITOK | M_ZERO);
+
+ sscm_ptr = static_single_cpu_mask_lcs;
+ CPU_FOREACH(i) {
+ static_single_cpu_mask[i] = sscm_ptr++;
+ CPU_SET(i, static_single_cpu_mask[i]);
+ }
+ } else {
+ /* Pointer to a bitset word. */
+ __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp;
+
+ /*
+ * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t'
+ * really) with a single bit set that can be reused for all
+ * single CPU masks by making them start at different offsets.
+ * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before
+ * the word having its single bit set, and the same amount
+ * after.
+ */
+ static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS,
+ (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8),
+ M_KMALLOC, M_WAITOK | M_ZERO);
+
+ /*
+ * We rely below on cpuset_t and the bitset generic
+ * implementation assigning words in the '__bits' array in the
+ * same order of bits (i.e., little-endian ordering, not to be
+ * confused with machine endianness, which concerns bits in
+ * words and other integers). This is an imperfect test, but it
+ * will detect a change to big-endian ordering.
+ */
+ _Static_assert(
+ __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1,
+ "Assumes a bitset implementation that is little-endian "
+ "on its words");
+
+ /* Initialize the single bit of each static span. */
+ bwp = (__typeof(bwp))static_single_cpu_mask_lcs +
+ (__bitset_words(CPU_SETSIZE) - 1);
+ for (i = 0; i < _BITSET_BITS; i++) {
+ CPU_SET(i, (cpuset_t *)bwp);
+ bwp += (2 * __bitset_words(CPU_SETSIZE) - 1);
+ }
+
+ /*
+ * Finally set all CPU masks to the proper word in their
+ * relevant span.
+ */
+ CPU_FOREACH(i) {
+ bwp = (__typeof(bwp))static_single_cpu_mask_lcs;
+ /* Find the non-zero word of the relevant span. */
+ bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) *
+ (i % _BITSET_BITS) +
+ __bitset_words(CPU_SETSIZE) - 1;
+ /* Shift to find the CPU mask start. */
+ bwp -= (i / _BITSET_BITS);
+ static_single_cpu_mask[i] = (cpuset_t *)bwp;
+ }
+ }
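A worked example of the saving, using made-up figures: with 64-bit bitset words and CPU_SETSIZE = 256, __bitset_words(CPU_SETSIZE) is 4, so per-CPU copies for 256 CPUs would take 256 * 4 * 8 = 8192 bytes, while the overlapping spans take 64 * (2 * 4 - 1) * 8 = 3584 bytes no matter how many CPUs are present; as CPU_SETSIZE and the CPU count grow together, the first figure grows quadratically and the second only linearly.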
+
+ strlcpy(init_uts_ns.name.release, osrelease, sizeof(init_uts_ns.name.release));
+}
+SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
+
+static void
+linux_compat_uninit(void *arg)
+{
+ linux_kobject_kfree_name(&linux_class_root);
+ linux_kobject_kfree_name(&linux_root_device.kobj);
+ linux_kobject_kfree_name(&linux_class_misc.kobj);
+
+ free(static_single_cpu_mask_lcs, M_KMALLOC);
+ free(static_single_cpu_mask, M_KMALLOC);
+#if defined(__i386__) || defined(__amd64__)
+ free(__cpu_data, M_KMALLOC);
+#endif
+
+ mtx_destroy(&vmmaplock);
+ spin_lock_destroy(&pci_lock);
+ rw_destroy(&linux_vma_lock);
+}
+SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
+
+/*
+ * NOTE: Linux frequently uses "unsigned long" for pointer to integer
+ * conversion and vice versa, where in FreeBSD "uintptr_t" would be
+ * used. Assert these types have the same size, else some parts of the
+ * LinuxKPI may not work like expected:
+ */
+CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
diff --git a/sys/compat/linuxkpi/common/src/linux_current.c b/sys/compat/linuxkpi/common/src/linux_current.c
new file mode 100644
index 000000000000..c342eb279caa
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_current.c
@@ -0,0 +1,343 @@
+/*-
+ * Copyright (c) 2017 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#ifdef __amd64__
+#define DEV_APIC
+#elif defined(__i386__)
+#include "opt_apic.h"
+#endif
+
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/mm.h>
+#include <linux/kthread.h>
+#include <linux/moduleparam.h>
+
+#include <sys/kernel.h>
+#include <sys/eventhandler.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <vm/uma.h>
+
+#ifdef DEV_APIC
+extern u_int first_msi_irq, num_msi_irqs;
+#endif
+
+static eventhandler_tag linuxkpi_thread_dtor_tag;
+
+static uma_zone_t linux_current_zone;
+static uma_zone_t linux_mm_zone;
+
+/* check if another thread already has a mm_struct */
+static struct mm_struct *
+find_other_mm(struct proc *p)
+{
+ struct thread *td;
+ struct task_struct *ts;
+ struct mm_struct *mm;
+
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ FOREACH_THREAD_IN_PROC(p, td) {
+ ts = td->td_lkpi_task;
+ if (ts == NULL)
+ continue;
+ mm = ts->mm;
+ if (mm == NULL)
+ continue;
+ /* try to share other mm_struct */
+ if (atomic_inc_not_zero(&mm->mm_users))
+ return (mm);
+ }
+ return (NULL);
+}
+
+int
+linux_alloc_current(struct thread *td, int flags)
+{
+ struct proc *proc;
+ struct task_struct *ts;
+ struct mm_struct *mm, *mm_other;
+
+ MPASS(td->td_lkpi_task == NULL);
+
+ if ((td->td_pflags & TDP_ITHREAD) != 0 || !THREAD_CAN_SLEEP()) {
+ flags &= ~M_WAITOK;
+ flags |= M_NOWAIT | M_USE_RESERVE;
+ }
+
+ ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
+ if (ts == NULL) {
+ if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
+ panic("linux_alloc_current: failed to allocate task");
+ return (ENOMEM);
+ }
+ mm = NULL;
+
+ /* setup new task structure */
+ atomic_set(&ts->kthread_flags, 0);
+ ts->task_thread = td;
+ ts->comm = td->td_name;
+ ts->pid = td->td_tid;
+ ts->group_leader = ts;
+ atomic_set(&ts->usage, 1);
+ atomic_set(&ts->state, TASK_RUNNING);
+ init_completion(&ts->parked);
+ init_completion(&ts->exited);
+
+ proc = td->td_proc;
+
+ PROC_LOCK(proc);
+ mm_other = find_other_mm(proc);
+
+ /* use allocated mm_struct as a fallback */
+ if (mm_other == NULL) {
+ PROC_UNLOCK(proc);
+ mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
+ if (mm == NULL) {
+ if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
+ panic(
+ "linux_alloc_current: failed to allocate mm");
+			/* Free the task struct allocated above; mm is NULL here. */
+			uma_zfree(linux_current_zone, ts);
+ return (ENOMEM);
+ }
+
+ PROC_LOCK(proc);
+ mm_other = find_other_mm(proc);
+ if (mm_other == NULL) {
+ /* setup new mm_struct */
+ init_rwsem(&mm->mmap_sem);
+ atomic_set(&mm->mm_count, 1);
+ atomic_set(&mm->mm_users, 1);
+ /* set mm_struct pointer */
+ ts->mm = mm;
+ /* clear pointer to not free memory */
+ mm = NULL;
+ } else {
+ ts->mm = mm_other;
+ }
+ } else {
+ ts->mm = mm_other;
+ }
+
+ /* store pointer to task struct */
+ td->td_lkpi_task = ts;
+ PROC_UNLOCK(proc);
+
+ /* free mm_struct pointer, if any */
+ uma_zfree(linux_mm_zone, mm);
+
+ return (0);
+}
+
+struct mm_struct *
+linux_get_task_mm(struct task_struct *task)
+{
+ struct mm_struct *mm;
+
+ mm = task->mm;
+ if (mm != NULL) {
+ atomic_inc(&mm->mm_users);
+ return (mm);
+ }
+ return (NULL);
+}
+
+void
+linux_mm_dtor(struct mm_struct *mm)
+{
+ uma_zfree(linux_mm_zone, mm);
+}
+
+void
+linux_free_current(struct task_struct *ts)
+{
+ mmput(ts->mm);
+ uma_zfree(linux_current_zone, ts);
+}
+
+static void
+linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
+{
+ struct task_struct *ts;
+
+ ts = td->td_lkpi_task;
+ if (ts == NULL)
+ return;
+
+ td->td_lkpi_task = NULL;
+ put_task_struct(ts);
+}
+
+static struct task_struct *
+linux_get_pid_task_int(pid_t pid, const bool do_get)
+{
+ struct thread *td;
+ struct proc *p;
+ struct task_struct *ts;
+
+ if (pid > PID_MAX) {
+ /* try to find corresponding thread */
+ td = tdfind(pid, -1);
+ if (td != NULL) {
+ ts = td->td_lkpi_task;
+ if (do_get && ts != NULL)
+ get_task_struct(ts);
+ PROC_UNLOCK(td->td_proc);
+ return (ts);
+ }
+ } else {
+		/* try to find corresponding process */
+ p = pfind(pid);
+ if (p != NULL) {
+ FOREACH_THREAD_IN_PROC(p, td) {
+ ts = td->td_lkpi_task;
+ if (ts != NULL) {
+ if (do_get)
+ get_task_struct(ts);
+ PROC_UNLOCK(p);
+ return (ts);
+ }
+ }
+ PROC_UNLOCK(p);
+ }
+ }
+ return (NULL);
+}
+
+struct task_struct *
+linux_pid_task(pid_t pid)
+{
+ return (linux_get_pid_task_int(pid, false));
+}
+
+struct task_struct *
+linux_get_pid_task(pid_t pid)
+{
+ return (linux_get_pid_task_int(pid, true));
+}
+
+bool
+linux_task_exiting(struct task_struct *task)
+{
+ struct thread *td;
+ struct proc *p;
+ bool ret;
+
+ ret = false;
+
+ /* try to find corresponding thread */
+ td = tdfind(task->pid, -1);
+ if (td != NULL) {
+ p = td->td_proc;
+ } else {
+		/* try to find corresponding process */
+ p = pfind(task->pid);
+ }
+
+ if (p != NULL) {
+ if ((p->p_flag & P_WEXIT) != 0)
+ ret = true;
+ PROC_UNLOCK(p);
+ }
+ return (ret);
+}
+
+static int lkpi_task_resrv;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, task_struct_reserve,
+ CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lkpi_task_resrv, 0,
+ "Number of struct task and struct mm to reserve for non-sleepable "
+ "allocations");
+
+static void
+linux_current_init(void *arg __unused)
+{
+ TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve",
+ &lkpi_task_resrv);
+ if (lkpi_task_resrv == 0) {
+#ifdef DEV_APIC
+ /*
+ * Number of interrupt threads plus per-cpu callout
+ * SWI threads.
+ */
+ lkpi_task_resrv = first_msi_irq + num_msi_irqs + MAXCPU;
+#else
+ lkpi_task_resrv = 1024; /* XXXKIB arbitrary */
+#endif
+ }
+ linux_current_zone = uma_zcreate("lkpicurr",
+ sizeof(struct task_struct), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ uma_zone_reserve(linux_current_zone, lkpi_task_resrv);
+ uma_prealloc(linux_current_zone, lkpi_task_resrv);
+ linux_mm_zone = uma_zcreate("lkpimm",
+ sizeof(struct mm_struct), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ uma_zone_reserve(linux_mm_zone, lkpi_task_resrv);
+ uma_prealloc(linux_mm_zone, lkpi_task_resrv);
+
+ atomic_thread_fence_seq_cst();
+
+ linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
+ linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
+ lkpi_alloc_current = linux_alloc_current;
+}
+SYSINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
+ linux_current_init, NULL);
+
+static void
+linux_current_uninit(void *arg __unused)
+{
+ struct proc *p;
+ struct task_struct *ts;
+ struct thread *td;
+
+ lkpi_alloc_current = linux_alloc_current_noop;
+
+ atomic_thread_fence_seq_cst();
+
+ sx_slock(&allproc_lock);
+ FOREACH_PROC_IN_SYSTEM(p) {
+ PROC_LOCK(p);
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if ((ts = td->td_lkpi_task) != NULL) {
+ td->td_lkpi_task = NULL;
+ put_task_struct(ts);
+ }
+ }
+ PROC_UNLOCK(p);
+ }
+ sx_sunlock(&allproc_lock);
+
+ thread_reap_barrier();
+
+ EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);
+
+ uma_zdestroy(linux_current_zone);
+ uma_zdestroy(linux_mm_zone);
+}
+SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
+ linux_current_uninit, NULL);
diff --git a/sys/compat/linuxkpi/common/src/linux_devres.c b/sys/compat/linuxkpi/common/src/linux_devres.c
new file mode 100644
index 000000000000..84f03ba0dd7d
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_devres.c
@@ -0,0 +1,267 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+/*
+ * Linux devres KPI implementation.
+ */
+
+struct devres {
+ struct list_head entry;
+ void (*release)(struct device *, void *);
+
+ /* Must come last. */
+ uint8_t __drdata[0] __aligned(CACHE_LINE_SIZE);
+};
+
+void *
+lkpi_devres_alloc(void(*release)(struct device *, void *),
+ size_t size, gfp_t gfp)
+{
+ void *p;
+ struct devres *dr;
+ size_t total;
+
+ if (size == 0)
+ return (NULL);
+
+ total = sizeof(*dr) + size;
+ dr = kmalloc(total, gfp);
+ if (dr == NULL)
+ return (NULL);
+
+ INIT_LIST_HEAD(&dr->entry);
+ dr->release = release;
+ p = (void *)(dr+1);
+
+ return (p);
+}
+
+static void
+lkpi_devres_free_dr(struct devres *dr)
+{
+
+ /*
+ * We have no dev, so cannot lock. This means someone else has
+ * to do this prior to us if devres_add() had been called.
+ */
+ KASSERT(list_empty_careful(&dr->entry),
+ ("%s: dr %p still on devres_head\n", __func__, dr));
+ kfree(dr);
+}
+
+void
+lkpi_devres_free(void *p)
+{
+ struct devres *dr;
+
+ if (p == NULL)
+ return;
+
+ dr = container_of(p, struct devres, __drdata);
+ lkpi_devres_free_dr(dr);
+}
+
+void
+lkpi_devres_add(struct device *dev, void *p)
+{
+ struct devres *dr;
+
+ KASSERT(dev != NULL && p != NULL, ("%s: dev %p p %p\n",
+ __func__, dev, p));
+
+ dr = container_of(p, struct devres, __drdata);
+ spin_lock(&dev->devres_lock);
+ list_add(&dr->entry, &dev->devres_head);
+ spin_unlock(&dev->devres_lock);
+}
+
+static struct devres *
+lkpi_devres_find_dr(struct device *dev, void(*release)(struct device *, void *),
+ int (*match)(struct device *, void *, void *), void *mp)
+{
+ struct devres *dr, *next;
+ void *p;
+
+ KASSERT(dev != NULL, ("%s: dev %p\n", __func__, dev));
+ assert_spin_locked(&dev->devres_lock);
+
+ list_for_each_entry_safe(dr, next, &dev->devres_head, entry) {
+ if (dr->release != release)
+ continue;
+ p = (void *)(dr+1);
+ if (match != NULL && match(dev, p, mp) == false)
+ continue;
+ return (dr);
+ }
+
+ return (NULL);
+}
+
+void *
+lkpi_devres_find(struct device *dev, void(*release)(struct device *, void *),
+ int (*match)(struct device *, void *, void *), void *mp)
+{
+ struct devres *dr;
+
+ KASSERT(dev != NULL, ("%s: dev %p\n", __func__, dev));
+
+ spin_lock(&dev->devres_lock);
+ dr = lkpi_devres_find_dr(dev, release, match, mp);
+ spin_unlock(&dev->devres_lock);
+
+ if (dr == NULL)
+ return (NULL);
+
+ return ((void *)(dr + 1));
+}
+
+static void
+lkpi_devres_unlink_locked(struct device *dev, struct devres *dr)
+{
+ KASSERT(dev != NULL, ("%s: dev %p\n", __func__, dev));
+ KASSERT(dr != NULL, ("%s: dr %p\n", __func__, dr));
+ assert_spin_locked(&dev->devres_lock);
+
+ list_del_init(&dr->entry);
+}
+
+void
+lkpi_devres_unlink(struct device *dev, void *p)
+{
+ struct devres *dr;
+
+ KASSERT(dev != NULL && p != NULL, ("%s: dev %p p %p\n",
+ __func__, dev, p));
+
+ dr = container_of(p, struct devres, __drdata);
+ spin_lock(&dev->devres_lock);
+ lkpi_devres_unlink_locked(dev, dr);
+ spin_unlock(&dev->devres_lock);
+}
+
+/* This is called on device free. */
+void
+lkpi_devres_release_free_list(struct device *dev)
+{
+ struct devres *dr, *next;
+ void *p;
+
+ /* Free any resources allocated on the device. */
+ /* No need to lock anymore. */
+ list_for_each_entry_safe(dr, next, &dev->devres_head, entry) {
+ p = (void *)(dr+1);
+ if (dr->release != NULL)
+ dr->release(dev, p);
+ /* This should probably be a function of some kind. */
+ list_del_init(&dr->entry);
+ lkpi_devres_free(p);
+ }
+}
+
+int
+lkpi_devres_destroy(struct device *dev, void(*release)(struct device *, void *),
+ int (*match)(struct device *, void *, void *), void *mp)
+{
+ struct devres *dr;
+
+ spin_lock(&dev->devres_lock);
+ dr = lkpi_devres_find_dr(dev, release, match, mp);
+ if (dr != NULL)
+ lkpi_devres_unlink_locked(dev, dr);
+ spin_unlock(&dev->devres_lock);
+
+ if (dr == NULL)
+ return (-ENOENT);
+ lkpi_devres_free_dr(dr);
+
+ return (0);
+}
+
+/*
+ * Devres release function for k*malloc().
+ * While there is nothing to do here, adding, e.g., tracing would be
+ * possible, so we keep this empty function as a hook.
+ * It is also good for documentation, as it is the simplest example.
+ */
+void
+lkpi_devm_kmalloc_release(struct device *dev __unused, void *p __unused)
+{
+
+ /* Nothing to do. Freed with the devres. */
+}
+
+struct devres_action {
+ void *data;
+ void (*action)(void *);
+};
+
+static void
+lkpi_devm_action_release(struct device *dev, void *res)
+{
+ struct devres_action *devres;
+
+ devres = (struct devres_action *)res;
+ devres->action(devres->data);
+}
+
+int
+lkpi_devm_add_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct devres_action *devres;
+
+ KASSERT(action != NULL, ("%s: action is NULL\n", __func__));
+ devres = lkpi_devres_alloc(lkpi_devm_action_release,
+ sizeof(struct devres_action), GFP_KERNEL);
+ if (devres == NULL)
+ return (-ENOMEM);
+ devres->data = data;
+ devres->action = action;
+ devres_add(dev, devres);
+
+ return (0);
+}
+
+int
+lkpi_devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data)
+{
+ int rv;
+
+ rv = lkpi_devm_add_action(dev, action, data);
+ if (rv != 0)
+ action(data);
+
+ return (rv);
+}
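A minimal sketch of a driver tying an allocation to a device's lifetime with the primitives above (illustrative only, not part of the patch; the names and buffer size are made up):

static void
example_buf_release(struct device *dev, void *p)
{
	/* Nothing extra to do; the memory is freed with the devres itself. */
}

static void *
example_attach_buf(struct device *dev)
{
	void *buf;

	buf = lkpi_devres_alloc(example_buf_release, 1024, GFP_KERNEL);
	if (buf == NULL)
		return (NULL);
	lkpi_devres_add(dev, buf);	/* released on device teardown */
	return (buf);
}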
diff --git a/sys/compat/linuxkpi/common/src/linux_dmi.c b/sys/compat/linuxkpi/common/src/linux_dmi.c
new file mode 100644
index 000000000000..9e3faaeddeb9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_dmi.c
@@ -0,0 +1,147 @@
+/*-
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Emmanuel Vadot under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+
+#include <linux/dmi.h>
+
+static char *dmi_data[DMI_STRING_MAX];
+
+static void
+linux_dmi_preload(void *arg)
+{
+
+ dmi_data[DMI_BIOS_VENDOR] = kern_getenv("smbios.bios.vendor");
+ dmi_data[DMI_BIOS_VERSION] = kern_getenv("smbios.bios.version");
+ dmi_data[DMI_BIOS_DATE] = kern_getenv("smbios.bios.reldate");
+ dmi_data[DMI_SYS_VENDOR] = kern_getenv("smbios.system.maker");
+ dmi_data[DMI_PRODUCT_NAME] = kern_getenv("smbios.system.product");
+ dmi_data[DMI_PRODUCT_VERSION] = kern_getenv("smbios.system.version");
+ dmi_data[DMI_PRODUCT_SERIAL] = kern_getenv("smbios.system.serial");
+ dmi_data[DMI_PRODUCT_UUID] = kern_getenv("smbios.system.uuid");
+ dmi_data[DMI_BOARD_VENDOR] = kern_getenv("smbios.planar.maker");
+ dmi_data[DMI_BOARD_NAME] = kern_getenv("smbios.planar.product");
+ dmi_data[DMI_BOARD_VERSION] = kern_getenv("smbios.planar.version");
+ dmi_data[DMI_BOARD_SERIAL] = kern_getenv("smbios.planar.serial");
+ dmi_data[DMI_BOARD_ASSET_TAG] = kern_getenv("smbios.planar.tag");
+ dmi_data[DMI_CHASSIS_VENDOR] = kern_getenv("smbios.chassis.maker");
+ dmi_data[DMI_CHASSIS_TYPE] = kern_getenv("smbios.chassis.type");
+ dmi_data[DMI_CHASSIS_VERSION] = kern_getenv("smbios.chassis.version");
+ dmi_data[DMI_CHASSIS_SERIAL] = kern_getenv("smbios.chassis.serial");
+ dmi_data[DMI_CHASSIS_ASSET_TAG] = kern_getenv("smbios.chassis.tag");
+}
+SYSINIT(linux_dmi_preload, SI_SUB_DRIVERS, SI_ORDER_ANY, linux_dmi_preload, NULL);
+
+/* Match a system against a field */
+bool
+linux_dmi_match(enum dmi_field f, const char *str)
+{
+
+ if (f < DMI_STRING_MAX &&
+ dmi_data[f] != NULL &&
+ strcmp(dmi_data[f], str) == 0)
+ return(true);
+ return (false);
+}
+
+/* Match a system against the struct, all matches must be ok */
+static bool
+linux_dmi_matches(const struct dmi_system_id *dsi)
+{
+ enum dmi_field slot;
+ int i;
+
+ for (i = 0; i < nitems(dsi->matches); i++) {
+ slot = dsi->matches[i].slot;
+ if (slot == DMI_NONE)
+ break;
+ if (slot >= DMI_STRING_MAX ||
+ dmi_data[slot] == NULL)
+ return (false);
+ if (dsi->matches[i].exact_match) {
+ if (dmi_match(slot, dsi->matches[i].substr))
+ continue;
+ } else if (strstr(dmi_data[slot],
+ dsi->matches[i].substr) != NULL) {
+ continue;
+ }
+ return (false);
+ }
+ return (true);
+}
+
+/* Return the string matching the field */
+const char *
+linux_dmi_get_system_info(int field)
+{
+
+ if (field < DMI_STRING_MAX)
+ return (dmi_data[field]);
+ return (NULL);
+}
+
+/*
+ * Match a system against the structs list
+ * If a match is found return the corresponding structure.
+ */
+const struct dmi_system_id *
+linux_dmi_first_match(const struct dmi_system_id *list)
+{
+ const struct dmi_system_id *dsi;
+
+ for (dsi = list; dsi->matches[0].slot != 0; dsi++) {
+ if (linux_dmi_matches(dsi))
+ return (dsi);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Match a system against the structs list
+ * For each match call the callback with the corresponding data
+ * Return the number of matches.
+ */
+int
+linux_dmi_check_system(const struct dmi_system_id *sysid)
+{
+ const struct dmi_system_id *dsi;
+ int matches = 0;
+
+ for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
+ if (linux_dmi_matches(dsi)) {
+ matches++;
+ if (dsi->callback && dsi->callback(dsi))
+ break;
+ }
+ }
+
+ return (matches);
+}
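A minimal sketch of a quirk table consumed by the matcher above (illustrative only, not part of the patch; the vendor/product strings are made up and the field layout follows the Linux dmi_system_id convention mirrored by the LinuxKPI header):

static int
example_dmi_callback(const struct dmi_system_id *dsi)
{
	printf("lkpi example: matched %s\n", dsi->ident);
	return (1);		/* non-zero stops dmi_check_system() early */
}

static const struct dmi_system_id example_quirks[] = {
	{
		.callback = example_dmi_callback,
		.ident = "Example Laptop 1234",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 1234"),
		},
	},
	{ }			/* slot DMI_NONE terminates the scan */
};

/* dmi_check_system(example_quirks); */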
diff --git a/sys/compat/linuxkpi/common/src/linux_domain.c b/sys/compat/linuxkpi/common/src/linux_domain.c
new file mode 100644
index 000000000000..8e936aac4719
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_domain.c
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2021 NVIDIA Networking
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/domainset.h>
+#include <sys/bus.h>
+
+#include <linux/compat.h>
+#include <linux/device.h>
+
+struct domainset *
+linux_get_vm_domain_set(int node)
+{
+ KASSERT(node < MAXMEMDOM, ("Invalid VM domain %d", node));
+
+ if (node < 0)
+ return (DOMAINSET_RR());
+ else
+ return (DOMAINSET_PREF(node));
+}
+
+int
+linux_dev_to_node(struct device *dev)
+{
+ int numa_domain;
+
+ if (dev == NULL || dev->bsddev == NULL ||
+ bus_get_domain(dev->bsddev, &numa_domain) != 0)
+ return (-1);
+ else
+ return (numa_domain);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_firmware.c b/sys/compat/linuxkpi/common/src/linux_firmware.c
new file mode 100644
index 000000000000..12658df5ce83
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_firmware.c
@@ -0,0 +1,247 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020-2021 The FreeBSD Foundation
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/firmware.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include <linux/firmware.h>
+#undef firmware
+
+MALLOC_DEFINE(M_LKPI_FW, "lkpifw", "LinuxKPI firmware");
+
+struct lkpi_fw_task {
+ /* Task and arguments for the "nowait" callback. */
+ struct task fw_task;
+ gfp_t gfp;
+ const char *fw_name;
+ struct device *dev;
+ void *drv;
+ void(*cont)(const struct linuxkpi_firmware *, void *);
+};
+
+static int
+_linuxkpi_request_firmware(const char *fw_name, const struct linuxkpi_firmware **fw,
+ struct device *dev, gfp_t gfp __unused, bool enoentok, bool warn)
+{
+ const struct firmware *fbdfw;
+ struct linuxkpi_firmware *lfw;
+ const char *fwimg;
+ char *p;
+ uint32_t flags;
+
+ if (fw_name == NULL || fw == NULL || dev == NULL) {
+ *fw = NULL;
+ return (-EINVAL);
+ }
+
+	/* Set independently of "warn"; to debug, bootverbose is available. */
+ flags = FIRMWARE_GET_NOWARN;
+
+ KASSERT(gfp == GFP_KERNEL, ("%s: gfp %#x\n", __func__, gfp));
+ lfw = malloc(sizeof(*lfw), M_LKPI_FW, M_WAITOK | M_ZERO);
+
+ /*
+	 * Linux firmware names can include a path, which is hard to replicate
+	 * for automatic firmware-module loading.
+	 * On FreeBSD, depending on how the firmware module was built, the
+	 * firmware will be called "fw", "dir_fw", or "modname_dir_fw"; the
+	 * latter the driver author has to handle herself (by requesting the
+	 * special name).
+	 * We also optionally flatten '/', '.' and '-' to '_', as some firmware
+	 * modules do.
+	 * We probe in least-work-first order, avoiding memory operations.
+	 * Building the firmware .ko with a well-matching name is preferred over
+	 * adding more name-mangling hacks here in the future (though we could
+	 * if needed).
+ */
+ /* (1) Try the original name. */
+ fbdfw = firmware_get_flags(fw_name, flags);
+ /* (2) Try any name removed of path, if we have not yet. */
+ if (fbdfw == NULL) {
+ fwimg = strrchr(fw_name, '/');
+ if (fwimg != NULL)
+ fwimg++;
+ if (fwimg == NULL || *fwimg == '\0')
+ fwimg = fw_name;
+ if (fwimg != fw_name)
+ fbdfw = firmware_get_flags(fwimg, flags);
+ }
+ /* (3) Flatten '/', '.' and '-' to '_' and try with adjusted name. */
+ if (fbdfw == NULL &&
+ (strchr(fw_name, '/') != NULL || strchr(fw_name, '.') != NULL ||
+ strchr(fw_name, '-'))) {
+ fwimg = strdup(fw_name, M_LKPI_FW);
+ if (fwimg != NULL) {
+ while ((p = strchr(fwimg, '/')) != NULL)
+ *p = '_';
+ fbdfw = firmware_get_flags(fwimg, flags);
+ if (fbdfw == NULL) {
+ while ((p = strchr(fwimg, '.')) != NULL)
+ *p = '_';
+ fbdfw = firmware_get_flags(fwimg, flags);
+ }
+ if (fbdfw == NULL) {
+ while ((p = strchr(fwimg, '-')) != NULL)
+ *p = '_';
+ fbdfw = firmware_get_flags(fwimg, flags);
+ }
+ free(__DECONST(void *, fwimg), M_LKPI_FW);
+ }
+ }
+ if (fbdfw == NULL) {
+ if (enoentok)
+ *fw = lfw;
+ else {
+ free(lfw, M_LKPI_FW);
+ *fw = NULL;
+ }
+ if (warn)
+ device_printf(dev->bsddev, "could not load firmware "
+ "image '%s'\n", fw_name);
+ return (-ENOENT);
+ }
+
+	device_printf(dev->bsddev, "successfully loaded firmware image '%s'\n",
+ fw_name);
+ lfw->fbdfw = fbdfw;
+ lfw->data = (const uint8_t *)fbdfw->data;
+ lfw->size = fbdfw->datasize;
+ *fw = lfw;
+ return (0);
+}
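To illustrate the probe order with a made-up name, a request for "vendor/fw-1.0.bin" tries, in turn: "vendor/fw-1.0.bin" verbatim, the path-stripped "fw-1.0.bin", and then the flattened candidates "vendor_fw-1.0.bin", "vendor_fw-1_0_bin" and "vendor_fw_1_0_bin"; the first name registered with firmware(9) wins.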
+
+static void
+lkpi_fw_task(void *ctx, int pending)
+{
+ struct lkpi_fw_task *lfwt;
+ const struct linuxkpi_firmware *fw;
+
+ KASSERT(ctx != NULL && pending == 1, ("%s: lfwt %p, pending %d\n",
+ __func__, ctx, pending));
+
+ lfwt = ctx;
+ if (lfwt->cont == NULL)
+ goto out;
+
+ _linuxkpi_request_firmware(lfwt->fw_name, &fw, lfwt->dev,
+ lfwt->gfp, true, true);
+
+ /*
+ * Linux seems to run the callback if it cannot find the firmware.
+ * We call it in all cases as it is the only feedback to the requester.
+ */
+ lfwt->cont(fw, lfwt->drv);
+ /* Do not assume fw is still valid! */
+
+out:
+ free(lfwt, M_LKPI_FW);
+}
+
+int
+linuxkpi_request_firmware_nowait(struct module *mod __unused, bool _t __unused,
+ const char *fw_name, struct device *dev, gfp_t gfp, void *drv,
+ void(*cont)(const struct linuxkpi_firmware *, void *))
+{
+ struct lkpi_fw_task *lfwt;
+ int error;
+
+ lfwt = malloc(sizeof(*lfwt), M_LKPI_FW, M_WAITOK | M_ZERO);
+ lfwt->gfp = gfp;
+ lfwt->fw_name = fw_name;
+ lfwt->dev = dev;
+ lfwt->drv = drv;
+ lfwt->cont = cont;
+ TASK_INIT(&lfwt->fw_task, 0, lkpi_fw_task, lfwt);
+ error = taskqueue_enqueue(taskqueue_thread, &lfwt->fw_task);
+
+ if (error)
+ return (-error);
+ return (0);
+}
+
+int
+linuxkpi_request_firmware(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev)
+{
+
+ return (_linuxkpi_request_firmware(fw_name, fw, dev, GFP_KERNEL, false,
+ true));
+}
+
+int
+linuxkpi_firmware_request_nowarn(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev)
+{
+
+ return (_linuxkpi_request_firmware(fw_name, fw, dev, GFP_KERNEL, false,
+ false));
+}
+
+void
+linuxkpi_release_firmware(const struct linuxkpi_firmware *fw)
+{
+
+ if (fw == NULL)
+ return;
+
+ if (fw->fbdfw)
+ firmware_put(fw->fbdfw, FIRMWARE_UNLOAD);
+ free(__DECONST(void *, fw), M_LKPI_FW);
+}
+
+int
+linuxkpi_request_partial_firmware_into_buf(const struct linuxkpi_firmware **fw,
+ const char *fw_name, struct device *dev, uint8_t *buf, size_t buflen,
+ size_t offset)
+{
+ const struct linuxkpi_firmware *lfw;
+ int error;
+
+ error = linuxkpi_request_firmware(fw, fw_name, dev);
+ if (error != 0)
+ return (error);
+
+ lfw = *fw;
+ if ((offset + buflen) >= lfw->size) {
+ linuxkpi_release_firmware(lfw);
+ return (-ERANGE);
+ }
+
+ memcpy(buf, lfw->data + offset, buflen);
+
+ return (0);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_folio.c b/sys/compat/linuxkpi/common/src/linux_folio.c
new file mode 100644
index 000000000000..c2af7792be04
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_folio.c
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ * Copyright (c) 2024-2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/page.h>
+#include <linux/pagevec.h>
+
+struct folio *
+folio_alloc(gfp_t gfp, unsigned int order)
+{
+ struct page *page;
+ struct folio *folio;
+
+ /*
+ * Allocated pages are wired already. There is no need to increase a
+ * refcount here.
+ */
+ page = alloc_pages(gfp | __GFP_COMP, order);
+ folio = (struct folio *)page;
+
+ return (folio);
+}
+
+void
+__folio_batch_release(struct folio_batch *fbatch)
+{
+ release_pages(fbatch->folios, folio_batch_count(fbatch));
+
+ folio_batch_reinit(fbatch);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_fpu.c b/sys/compat/linuxkpi/common/src/linux_fpu.c
new file mode 100644
index 000000000000..4e40a2b004bb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_fpu.c
@@ -0,0 +1,99 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Val Packett <val@packett.cool>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+
+#include <linux/compat.h>
+#include <linux/sched.h>
+
+#include <asm/fpu/api.h>
+
+#if defined(__aarch64__) || defined(__arm__) || defined(__amd64__) || \
+ defined(__i386__) || defined(__powerpc64__)
+
+#include <machine/fpu.h>
+
+/*
+ * Technically the Linux API isn't supposed to allow nesting sections
+ * either, but currently used versions of GPU drivers rely on nesting
+ * working, so we only enter the section on the outermost level.
+ */
+
+void
+lkpi_kernel_fpu_begin(void)
+{
+ if ((current->fpu_ctx_level)++ == 0)
+ fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
+}
+
+void
+lkpi_kernel_fpu_end(void)
+{
+ if (--(current->fpu_ctx_level) == 0)
+ fpu_kern_leave(curthread, NULL);
+}
+
+void
+lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx)
+{
+ unsigned int save_fpu_level;
+
+ save_fpu_level =
+ __current_unallocated(curthread) ? 0 : current->fpu_ctx_level;
+ if (__predict_false(save_fpu_level != 0)) {
+ current->fpu_ctx_level = 1;
+ kernel_fpu_end();
+ }
+ func(ctx);
+ if (__predict_false(save_fpu_level != 0)) {
+ kernel_fpu_begin();
+ current->fpu_ctx_level = save_fpu_level;
+ }
+}
+
+#else
+
+void
+lkpi_kernel_fpu_begin(void)
+{
+}
+
+void
+lkpi_kernel_fpu_end(void)
+{
+}
+
+void
+lkpi_fpu_safe_exec(fpu_safe_exec_cb_t func, void *ctx)
+{
+ func(ctx);
+}
+
+#endif
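A minimal sketch of a caller of the section guards above, using the kernel_fpu_begin()/kernel_fpu_end() wrappers that the LinuxKPI header maps to these functions (illustrative only, not part of the patch; the memcpy() is a stand-in for real SIMD code):

static void
example_simd_work(void *dst, const void *src, size_t len)
{
	kernel_fpu_begin();	/* outermost level enters FPU_KERN_NOCTX */
	memcpy(dst, src, len);	/* stand-in for SSE/AVX/NEON work */
	kernel_fpu_end();
}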
diff --git a/sys/compat/linuxkpi/common/src/linux_hdmi.c b/sys/compat/linuxkpi/common/src/linux_hdmi.c
new file mode 100644
index 000000000000..fc47693e913c
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_hdmi.c
@@ -0,0 +1,1959 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef __linux__
+#include <drm/display/drm_dp.h>
+#endif
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hdmi.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#define hdmi_log(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
+
+static u8 hdmi_infoframe_checksum(const u8 *ptr, size_t size)
+{
+ u8 csum = 0;
+ size_t i;
+
+ /* compute checksum */
+ for (i = 0; i < size; i++)
+ csum += ptr[i];
+
+ return 256 - csum;
+}
+
+static void hdmi_infoframe_set_checksum(void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+
+ ptr[3] = hdmi_infoframe_checksum(buffer, size);
+}
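The net effect is that the byte sum of the finished infoframe, checksum included, is 0 modulo 256, as required by section 5.3.5 of the HDMI 1.4 specification. A made-up worked example: if the header and payload bytes sum to 0x137, the low byte is 0x37, hdmi_infoframe_checksum() returns 0xc9 (256 - 0x37), and 0x37 + 0xc9 = 0x100, i.e. 0 modulo 256.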
+
+/**
+ * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ */
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AVI;
+ frame->version = 2;
+ frame->length = HDMI_AVI_INFOFRAME_SIZE;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+
+static int hdmi_avi_infoframe_check_only(const struct hdmi_avi_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_AVI ||
+ frame->version != 2 ||
+ frame->length != HDMI_AVI_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_avi_infoframe_check() - check a HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame)
+{
+ return hdmi_avi_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_check);
+
+/**
+ * hdmi_avi_infoframe_pack_only() - write HDMI AVI infoframe to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int ret;
+
+ ret = hdmi_avi_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
+
+ /*
+ * Data byte 1, bit 4 has to be set if we provide the active format
+ * aspect ratio
+ */
+ if (frame->active_aspect & 0xf)
+ ptr[0] |= BIT(4);
+
+ /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
+ if (frame->top_bar || frame->bottom_bar)
+ ptr[0] |= BIT(3);
+
+ if (frame->left_bar || frame->right_bar)
+ ptr[0] |= BIT(2);
+
+ ptr[1] = ((frame->colorimetry & 0x3) << 6) |
+ ((frame->picture_aspect & 0x3) << 4) |
+ (frame->active_aspect & 0xf);
+
+ ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
+ ((frame->quantization_range & 0x3) << 2) |
+ (frame->nups & 0x3);
+
+ if (frame->itc)
+ ptr[2] |= BIT(7);
+
+ ptr[3] = frame->video_code & 0x7f;
+
+ ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
+ ((frame->content_type & 0x3) << 4) |
+ (frame->pixel_repeat & 0xf);
+
+ ptr[5] = frame->top_bar & 0xff;
+ ptr[6] = (frame->top_bar >> 8) & 0xff;
+ ptr[7] = frame->bottom_bar & 0xff;
+ ptr[8] = (frame->bottom_bar >> 8) & 0xff;
+ ptr[9] = frame->left_bar & 0xff;
+ ptr[10] = (frame->left_bar >> 8) & 0xff;
+ ptr[11] = frame->right_bar & 0xff;
+ ptr[12] = (frame->right_bar >> 8) & 0xff;
+
+ hdmi_infoframe_set_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack_only);
+
+/**
+ * hdmi_avi_infoframe_pack() - check a HDMI AVI infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_avi_infoframe_pack_only(frame, buffer, size);
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
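+
+/*
+ * A minimal usage sketch for the AVI helpers above; the field values are
+ * illustrative only:
+ *
+ *	struct hdmi_avi_infoframe frame;
+ *	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+ *	ssize_t len;
+ *
+ *	hdmi_avi_infoframe_init(&frame);
+ *	frame.colorspace = HDMI_COLORSPACE_RGB;
+ *	frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
+ *	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
+ *	if (len < 0)
+ *		return len;
+ */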
+
+/**
+ * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ * @vendor: vendor string
+ * @product: product string
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product)
+{
+ size_t len;
+
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_SPD;
+ frame->version = 1;
+ frame->length = HDMI_SPD_INFOFRAME_SIZE;
+
+ len = strlen(vendor);
+ memcpy(frame->vendor, vendor, min(len, sizeof(frame->vendor)));
+ len = strlen(product);
+ memcpy(frame->product, product, min(len, sizeof(frame->product)));
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+
+static int hdmi_spd_infoframe_check_only(const struct hdmi_spd_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_SPD ||
+ frame->version != 1 ||
+ frame->length != HDMI_SPD_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_spd_infoframe_check() - check a HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ *
+ * Validates that the infoframe is consistent.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame)
+{
+ return hdmi_spd_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_check);
+
+/**
+ * hdmi_spd_infoframe_pack_only() - write HDMI SPD infoframe to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int ret;
+
+ ret = hdmi_spd_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ memcpy(ptr, frame->vendor, sizeof(frame->vendor));
+ memcpy(ptr + 8, frame->product, sizeof(frame->product));
+
+ ptr[24] = frame->sdi;
+
+ hdmi_infoframe_set_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack_only);
+
+/**
+ * hdmi_spd_infoframe_pack() - check a HDMI SPD infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_spd_infoframe_pack_only(frame, buffer, size);
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
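+
+/*
+ * Usage sketch for the SPD helpers above; the vendor and product strings
+ * are illustrative and are truncated to 8 and 16 bytes respectively by
+ * hdmi_spd_infoframe_init():
+ *
+ *	struct hdmi_spd_infoframe frame;
+ *	u8 buf[HDMI_INFOFRAME_SIZE(SPD)];
+ *	ssize_t len;
+ *
+ *	hdmi_spd_infoframe_init(&frame, "Vendor", "Product");
+ *	frame.sdi = HDMI_SPD_SDI_PC;
+ *	len = hdmi_spd_infoframe_pack(&frame, buf, sizeof(buf));
+ */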
+
+/**
+ * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
+ frame->version = 1;
+ frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+
+static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_AUDIO ||
+ frame->version != 1 ||
+ frame->length != HDMI_AUDIO_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_audio_infoframe_check() - check a HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Validates that the infoframe is consistent.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame)
+{
+ return hdmi_audio_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_check);
+
+static void
+hdmi_audio_infoframe_pack_payload(const struct hdmi_audio_infoframe *frame,
+ u8 *buffer)
+{
+ u8 channels;
+
+ if (frame->channels >= 2)
+ channels = frame->channels - 1;
+ else
+ channels = 0;
+
+ buffer[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
+ buffer[1] = ((frame->sample_frequency & 0x7) << 2) |
+ (frame->sample_size & 0x3);
+ buffer[2] = frame->coding_type_ext & 0x1f;
+ buffer[3] = frame->channel_allocation;
+ buffer[4] = (frame->level_shift_value & 0xf) << 3;
+
+ if (frame->downmix_inhibit)
+ buffer[4] |= BIT(7);
+}
+
+/**
+ * hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer
+ * @frame: HDMI audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int ret;
+
+ ret = hdmi_audio_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ hdmi_audio_infoframe_pack_payload(frame,
+ ptr + HDMI_INFOFRAME_HEADER_SIZE);
+
+ hdmi_infoframe_set_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_only);
+
+/**
+ * hdmi_audio_infoframe_pack() - check a HDMI Audio infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI Audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_audio_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_audio_infoframe_pack_only(frame, buffer, size);
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
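+
+/*
+ * Usage sketch for the audio helpers above, describing a two-channel PCM
+ * stream (values illustrative):
+ *
+ *	struct hdmi_audio_infoframe frame;
+ *	u8 buf[HDMI_INFOFRAME_SIZE(AUDIO)];
+ *	ssize_t len;
+ *
+ *	hdmi_audio_infoframe_init(&frame);
+ *	frame.channels = 2;
+ *	frame.coding_type = HDMI_AUDIO_CODING_TYPE_PCM;
+ *	frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ *	frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_48000;
+ *	len = hdmi_audio_infoframe_pack(&frame, buf, sizeof(buf));
+ */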
+
+#ifdef __linux__
+/**
+ * hdmi_audio_infoframe_pack_for_dp - Pack a HDMI Audio infoframe for DisplayPort
+ *
+ * @frame: HDMI Audio infoframe
+ * @sdp: Secondary data packet for DisplayPort.
+ * @dp_version: DisplayPort version to be encoded in the header
+ *
+ * Packs a HDMI Audio Infoframe to be sent over DisplayPort. This function
+ * fills the secondary data packet to be used for DisplayPort.
+ *
+ * Return: Number of total written bytes or a negative errno on failure.
+ */
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version)
+{
+ int ret;
+
+ ret = hdmi_audio_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ memset(sdp->db, 0, sizeof(sdp->db));
+
+ /* Secondary-data packet header */
+ sdp->sdp_header.HB0 = 0;
+ sdp->sdp_header.HB1 = frame->type;
+ sdp->sdp_header.HB2 = DP_SDP_AUDIO_INFOFRAME_HB2;
+ sdp->sdp_header.HB3 = (dp_version & 0x3f) << 2;
+
+ hdmi_audio_infoframe_pack_payload(frame, sdp->db);
+
+ /* Return size = frame length + four HB for sdp_header */
+ return frame->length + 4;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_for_dp);
+#endif
+
+/**
+ * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
+ * @frame: HDMI vendor infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
+ frame->version = 1;
+
+ frame->oui = HDMI_IEEE_OUI;
+
+ /*
+ * 0 is a valid value for s3d_struct, so we use a special "not set"
+ * value
+ */
+ frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
+ frame->length = HDMI_VENDOR_INFOFRAME_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
+
+static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame)
+{
+ /* for side by side (half) we also need to provide 3D_Ext_Data */
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ return 6;
+ else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return 5;
+ else
+ return 4;
+}
+
+static int hdmi_vendor_infoframe_check_only(const struct hdmi_vendor_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_VENDOR ||
+ frame->version != 1 ||
+ frame->oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ /* only one of those can be supplied */
+ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ if (frame->length != hdmi_vendor_infoframe_length(frame))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_vendor_infoframe_check() - check a HDMI vendor infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame)
+{
+ frame->length = hdmi_vendor_infoframe_length(frame);
+
+ return hdmi_vendor_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_check);
+
+/**
+ * hdmi_vendor_infoframe_pack_only() - write a HDMI vendor infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int ret;
+
+ ret = hdmi_vendor_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* HDMI OUI */
+ ptr[4] = 0x03;
+ ptr[5] = 0x0c;
+ ptr[6] = 0x00;
+
+ if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
+ ptr[7] = 0x2 << 5; /* video format */
+ ptr[8] = (frame->s3d_struct & 0xf) << 4;
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
+ } else if (frame->vic) {
+ ptr[7] = 0x1 << 5; /* video format */
+ ptr[8] = frame->vic;
+ } else {
+ ptr[7] = 0x0 << 5; /* video format */
+ }
+
+ hdmi_infoframe_set_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack_only);
+
+/**
+ * hdmi_vendor_infoframe_pack() - check a HDMI Vendor infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI Vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_vendor_infoframe_pack_only(frame, buffer, size);
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
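+
+/*
+ * Usage sketch for the vendor helpers above.  Only one of @vic and
+ * @s3d_struct may be set; hdmi_vendor_infoframe_check() derives the
+ * payload length (5 bytes when a VIC is given).  The VIC value below is
+ * illustrative:
+ *
+ *	struct hdmi_vendor_infoframe frame;
+ *	u8 buf[HDMI_INFOFRAME_HEADER_SIZE + 6];
+ *	ssize_t len;
+ *
+ *	hdmi_vendor_infoframe_init(&frame);
+ *	frame.vic = 1;
+ *	len = hdmi_vendor_infoframe_pack(&frame, buf, sizeof(buf));
+ */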
+
+static int
+hdmi_vendor_any_infoframe_check_only(const union hdmi_vendor_any_infoframe *frame)
+{
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_VENDOR ||
+ frame->any.version != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_drm_infoframe_init() - initialize an HDMI Dynamic Range and
+ * Mastering infoframe
+ * @frame: HDMI DRM infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_DRM;
+ frame->version = 1;
+ frame->length = HDMI_DRM_INFOFRAME_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_init);
+
+static int hdmi_drm_infoframe_check_only(const struct hdmi_drm_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_DRM ||
+ frame->version != 1)
+ return -EINVAL;
+
+ if (frame->length != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_drm_infoframe_check() - check a HDMI DRM infoframe
+ * @frame: HDMI DRM infoframe
+ *
+ * Validates that the infoframe is consistent.
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame)
+{
+ return hdmi_drm_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_check);
+
+/**
+ * hdmi_drm_infoframe_pack_only() - write HDMI DRM infoframe to binary buffer
+ * @frame: HDMI DRM infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int i;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ *ptr++ = frame->eotf;
+ *ptr++ = frame->metadata_type;
+
+ for (i = 0; i < 3; i++) {
+ *ptr++ = frame->display_primaries[i].x;
+ *ptr++ = frame->display_primaries[i].x >> 8;
+ *ptr++ = frame->display_primaries[i].y;
+ *ptr++ = frame->display_primaries[i].y >> 8;
+ }
+
+ *ptr++ = frame->white_point.x;
+ *ptr++ = frame->white_point.x >> 8;
+
+ *ptr++ = frame->white_point.y;
+ *ptr++ = frame->white_point.y >> 8;
+
+ *ptr++ = frame->max_display_mastering_luminance;
+ *ptr++ = frame->max_display_mastering_luminance >> 8;
+
+ *ptr++ = frame->min_display_mastering_luminance;
+ *ptr++ = frame->min_display_mastering_luminance >> 8;
+
+ *ptr++ = frame->max_cll;
+ *ptr++ = frame->max_cll >> 8;
+
+ *ptr++ = frame->max_fall;
+ *ptr++ = frame->max_fall >> 8;
+
+ hdmi_infoframe_set_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_pack_only);
+
+/**
+ * hdmi_drm_infoframe_pack() - check a HDMI DRM infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI DRM infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_drm_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_drm_infoframe_pack_only(frame, buffer, size);
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_pack);
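+
+/*
+ * Usage sketch for the DRM (Dynamic Range and Mastering) helpers above;
+ * the luminance and content-light values are illustrative:
+ *
+ *	struct hdmi_drm_infoframe frame;
+ *	u8 buf[HDMI_INFOFRAME_SIZE(DRM)];
+ *	ssize_t len;
+ *
+ *	hdmi_drm_infoframe_init(&frame);
+ *	frame.max_display_mastering_luminance = 1000;
+ *	frame.min_display_mastering_luminance = 1;
+ *	frame.max_cll = 1000;
+ *	frame.max_fall = 400;
+ *	len = hdmi_drm_infoframe_pack(&frame, buf, sizeof(buf));
+ */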
+
+/*
+ * hdmi_vendor_any_infoframe_check() - check a vendor infoframe
+ */
+static int
+hdmi_vendor_any_infoframe_check(union hdmi_vendor_any_infoframe *frame)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_check(&frame->hdmi);
+}
+
+/*
+ * hdmi_vendor_any_infoframe_pack_only() - write a vendor infoframe to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack_only(const union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_pack_only(&frame->hdmi, buffer, size);
+}
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - check a vendor infoframe,
+ * and write it to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_vendor_any_infoframe_pack_only(frame, buffer, size);
+}
+
+/**
+ * hdmi_infoframe_check() - check a HDMI infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+hdmi_infoframe_check(union hdmi_infoframe *frame)
+{
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return hdmi_avi_infoframe_check(&frame->avi);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return hdmi_spd_infoframe_check(&frame->spd);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return hdmi_audio_infoframe_check(&frame->audio);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return hdmi_vendor_any_infoframe_check(&frame->vendor);
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(hdmi_infoframe_check);
+
+/**
+ * hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack_only(&frame->avi,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ length = hdmi_drm_infoframe_pack_only(&frame->drm,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack_only(&frame->spd,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack_only(&frame->audio,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack_only(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack_only);
+
+/**
+ * hdmi_infoframe_pack() - check a HDMI infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (eg. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame,
+ void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ length = hdmi_drm_infoframe_pack(&frame->drm, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack);
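+
+/*
+ * The union entry points above dispatch on @frame->any.type, so callers
+ * that handle several infoframe kinds can keep a single code path.  A
+ * minimal sketch:
+ *
+ *	union hdmi_infoframe u;
+ *	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+ *	ssize_t len;
+ *
+ *	hdmi_avi_infoframe_init(&u.avi);
+ *	len = hdmi_infoframe_pack(&u, buf, sizeof(buf));
+ */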
+
+static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type)
+{
+ if (type < 0x80 || type > 0x9f)
+ return "Invalid";
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return "Vendor";
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return "Auxiliary Video Information (AVI)";
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return "Source Product Description (SPD)";
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return "Audio";
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return "Dynamic Range and Mastering";
+ }
+ return "Reserved";
+}
+
+static void hdmi_infoframe_log_header(const char *level,
+ struct device *dev,
+ const struct hdmi_any_infoframe *frame)
+{
+ hdmi_log("HDMI infoframe: %s, version %u, length %u\n",
+ hdmi_infoframe_type_get_name(frame->type),
+ frame->version, frame->length);
+}
+
+static const char *hdmi_colorspace_get_name(enum hdmi_colorspace colorspace)
+{
+ switch (colorspace) {
+ case HDMI_COLORSPACE_RGB:
+ return "RGB";
+ case HDMI_COLORSPACE_YUV422:
+ return "YCbCr 4:2:2";
+ case HDMI_COLORSPACE_YUV444:
+ return "YCbCr 4:4:4";
+ case HDMI_COLORSPACE_YUV420:
+ return "YCbCr 4:2:0";
+ case HDMI_COLORSPACE_RESERVED4:
+ return "Reserved (4)";
+ case HDMI_COLORSPACE_RESERVED5:
+ return "Reserved (5)";
+ case HDMI_COLORSPACE_RESERVED6:
+ return "Reserved (6)";
+ case HDMI_COLORSPACE_IDO_DEFINED:
+ return "IDO Defined";
+ }
+ return "Invalid";
+}
+
+static const char *hdmi_scan_mode_get_name(enum hdmi_scan_mode scan_mode)
+{
+ switch (scan_mode) {
+ case HDMI_SCAN_MODE_NONE:
+ return "No Data";
+ case HDMI_SCAN_MODE_OVERSCAN:
+ return "Overscan";
+ case HDMI_SCAN_MODE_UNDERSCAN:
+ return "Underscan";
+ case HDMI_SCAN_MODE_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *hdmi_colorimetry_get_name(enum hdmi_colorimetry colorimetry)
+{
+ switch (colorimetry) {
+ case HDMI_COLORIMETRY_NONE:
+ return "No Data";
+ case HDMI_COLORIMETRY_ITU_601:
+ return "ITU601";
+ case HDMI_COLORIMETRY_ITU_709:
+ return "ITU709";
+ case HDMI_COLORIMETRY_EXTENDED:
+ return "Extended";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
+{
+ switch (picture_aspect) {
+ case HDMI_PICTURE_ASPECT_NONE:
+ return "No Data";
+ case HDMI_PICTURE_ASPECT_4_3:
+ return "4:3";
+ case HDMI_PICTURE_ASPECT_16_9:
+ return "16:9";
+ case HDMI_PICTURE_ASPECT_64_27:
+ return "64:27";
+ case HDMI_PICTURE_ASPECT_256_135:
+ return "256:135";
+ case HDMI_PICTURE_ASPECT_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_active_aspect_get_name(enum hdmi_active_aspect active_aspect)
+{
+ if (active_aspect < 0 || active_aspect > 0xf)
+ return "Invalid";
+
+ switch (active_aspect) {
+ case HDMI_ACTIVE_ASPECT_16_9_TOP:
+ return "16:9 Top";
+ case HDMI_ACTIVE_ASPECT_14_9_TOP:
+ return "14:9 Top";
+ case HDMI_ACTIVE_ASPECT_16_9_CENTER:
+ return "16:9 Center";
+ case HDMI_ACTIVE_ASPECT_PICTURE:
+ return "Same as Picture";
+ case HDMI_ACTIVE_ASPECT_4_3:
+ return "4:3";
+ case HDMI_ACTIVE_ASPECT_16_9:
+ return "16:9";
+ case HDMI_ACTIVE_ASPECT_14_9:
+ return "14:9";
+ case HDMI_ACTIVE_ASPECT_4_3_SP_14_9:
+ return "4:3 SP 14:9";
+ case HDMI_ACTIVE_ASPECT_16_9_SP_14_9:
+ return "16:9 SP 14:9";
+ case HDMI_ACTIVE_ASPECT_16_9_SP_4_3:
+ return "16:9 SP 4:3";
+ }
+ return "Reserved";
+}
+
+static const char *
+hdmi_extended_colorimetry_get_name(enum hdmi_extended_colorimetry ext_col)
+{
+ switch (ext_col) {
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601:
+ return "xvYCC 601";
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709:
+ return "xvYCC 709";
+ case HDMI_EXTENDED_COLORIMETRY_S_YCC_601:
+ return "sYCC 601";
+ case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
+ return "opYCC 601";
+ case HDMI_EXTENDED_COLORIMETRY_OPRGB:
+ return "opRGB";
+ case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM:
+ return "BT.2020 Constant Luminance";
+ case HDMI_EXTENDED_COLORIMETRY_BT2020:
+ return "BT.2020";
+ case HDMI_EXTENDED_COLORIMETRY_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_quantization_range_get_name(enum hdmi_quantization_range qrange)
+{
+ switch (qrange) {
+ case HDMI_QUANTIZATION_RANGE_DEFAULT:
+ return "Default";
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ return "Limited";
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ return "Full";
+ case HDMI_QUANTIZATION_RANGE_RESERVED:
+ return "Reserved";
+ }
+ return "Invalid";
+}
+
+static const char *hdmi_nups_get_name(enum hdmi_nups nups)
+{
+ switch (nups) {
+ case HDMI_NUPS_UNKNOWN:
+ return "Unknown Non-uniform Scaling";
+ case HDMI_NUPS_HORIZONTAL:
+ return "Horizontally Scaled";
+ case HDMI_NUPS_VERTICAL:
+ return "Vertically Scaled";
+ case HDMI_NUPS_BOTH:
+ return "Horizontally and Vertically Scaled";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_ycc_quantization_range_get_name(enum hdmi_ycc_quantization_range qrange)
+{
+ switch (qrange) {
+ case HDMI_YCC_QUANTIZATION_RANGE_LIMITED:
+ return "Limited";
+ case HDMI_YCC_QUANTIZATION_RANGE_FULL:
+ return "Full";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_content_type_get_name(enum hdmi_content_type content_type)
+{
+ switch (content_type) {
+ case HDMI_CONTENT_TYPE_GRAPHICS:
+ return "Graphics";
+ case HDMI_CONTENT_TYPE_PHOTO:
+ return "Photo";
+ case HDMI_CONTENT_TYPE_CINEMA:
+ return "Cinema";
+ case HDMI_CONTENT_TYPE_GAME:
+ return "Game";
+ }
+ return "Invalid";
+}
+
+static void hdmi_avi_infoframe_log(const char *level,
+ struct device *dev,
+ const struct hdmi_avi_infoframe *frame)
+{
+ hdmi_infoframe_log_header(level, dev,
+ (const struct hdmi_any_infoframe *)frame);
+
+ hdmi_log(" colorspace: %s\n",
+ hdmi_colorspace_get_name(frame->colorspace));
+ hdmi_log(" scan mode: %s\n",
+ hdmi_scan_mode_get_name(frame->scan_mode));
+ hdmi_log(" colorimetry: %s\n",
+ hdmi_colorimetry_get_name(frame->colorimetry));
+ hdmi_log(" picture aspect: %s\n",
+ hdmi_picture_aspect_get_name(frame->picture_aspect));
+ hdmi_log(" active aspect: %s\n",
+ hdmi_active_aspect_get_name(frame->active_aspect));
+ hdmi_log(" itc: %s\n", frame->itc ? "IT Content" : "No Data");
+ hdmi_log(" extended colorimetry: %s\n",
+ hdmi_extended_colorimetry_get_name(frame->extended_colorimetry));
+ hdmi_log(" quantization range: %s\n",
+ hdmi_quantization_range_get_name(frame->quantization_range));
+ hdmi_log(" nups: %s\n", hdmi_nups_get_name(frame->nups));
+ hdmi_log(" video code: %u\n", frame->video_code);
+ hdmi_log(" ycc quantization range: %s\n",
+ hdmi_ycc_quantization_range_get_name(frame->ycc_quantization_range));
+ hdmi_log(" hdmi content type: %s\n",
+ hdmi_content_type_get_name(frame->content_type));
+ hdmi_log(" pixel repeat: %u\n", frame->pixel_repeat);
+ hdmi_log(" bar top %u, bottom %u, left %u, right %u\n",
+ frame->top_bar, frame->bottom_bar,
+ frame->left_bar, frame->right_bar);
+}
+
+static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi)
+{
+ if (sdi < 0 || sdi > 0xff)
+ return "Invalid";
+ switch (sdi) {
+ case HDMI_SPD_SDI_UNKNOWN:
+ return "Unknown";
+ case HDMI_SPD_SDI_DSTB:
+ return "Digital STB";
+ case HDMI_SPD_SDI_DVDP:
+ return "DVD Player";
+ case HDMI_SPD_SDI_DVHS:
+ return "D-VHS";
+ case HDMI_SPD_SDI_HDDVR:
+ return "HDD Videorecorder";
+ case HDMI_SPD_SDI_DVC:
+ return "DVC";
+ case HDMI_SPD_SDI_DSC:
+ return "DSC";
+ case HDMI_SPD_SDI_VCD:
+ return "Video CD";
+ case HDMI_SPD_SDI_GAME:
+ return "Game";
+ case HDMI_SPD_SDI_PC:
+ return "PC General";
+ case HDMI_SPD_SDI_BD:
+ return "Blu-Ray Disc (BD)";
+ case HDMI_SPD_SDI_SACD:
+ return "Super Audio CD";
+ case HDMI_SPD_SDI_HDDVD:
+ return "HD DVD";
+ case HDMI_SPD_SDI_PMP:
+ return "PMP";
+ }
+ return "Reserved";
+}
+
+static void hdmi_spd_infoframe_log(const char *level,
+ struct device *dev,
+ const struct hdmi_spd_infoframe *frame)
+{
+ u8 buf[17];
+
+ hdmi_infoframe_log_header(level, dev,
+ (const struct hdmi_any_infoframe *)frame);
+
+ memset(buf, 0, sizeof(buf));
+
+ strncpy(buf, frame->vendor, 8);
+ hdmi_log(" vendor: %s\n", buf);
+ strncpy(buf, frame->product, 16);
+ hdmi_log(" product: %s\n", buf);
+ hdmi_log(" source device information: %s (0x%x)\n",
+ hdmi_spd_sdi_get_name(frame->sdi), frame->sdi);
+}
+
+static const char *
+hdmi_audio_coding_type_get_name(enum hdmi_audio_coding_type coding_type)
+{
+ switch (coding_type) {
+ case HDMI_AUDIO_CODING_TYPE_STREAM:
+ return "Refer to Stream Header";
+ case HDMI_AUDIO_CODING_TYPE_PCM:
+ return "PCM";
+ case HDMI_AUDIO_CODING_TYPE_AC3:
+ return "AC-3";
+ case HDMI_AUDIO_CODING_TYPE_MPEG1:
+ return "MPEG1";
+ case HDMI_AUDIO_CODING_TYPE_MP3:
+ return "MP3";
+ case HDMI_AUDIO_CODING_TYPE_MPEG2:
+ return "MPEG2";
+ case HDMI_AUDIO_CODING_TYPE_AAC_LC:
+ return "AAC";
+ case HDMI_AUDIO_CODING_TYPE_DTS:
+ return "DTS";
+ case HDMI_AUDIO_CODING_TYPE_ATRAC:
+ return "ATRAC";
+ case HDMI_AUDIO_CODING_TYPE_DSD:
+ return "One Bit Audio";
+ case HDMI_AUDIO_CODING_TYPE_EAC3:
+ return "Dolby Digital +";
+ case HDMI_AUDIO_CODING_TYPE_DTS_HD:
+ return "DTS-HD";
+ case HDMI_AUDIO_CODING_TYPE_MLP:
+ return "MAT (MLP)";
+ case HDMI_AUDIO_CODING_TYPE_DST:
+ return "DST";
+ case HDMI_AUDIO_CODING_TYPE_WMA_PRO:
+ return "WMA PRO";
+ case HDMI_AUDIO_CODING_TYPE_CXT:
+ return "Refer to CXT";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_audio_sample_size_get_name(enum hdmi_audio_sample_size sample_size)
+{
+ switch (sample_size) {
+ case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
+ return "Refer to Stream Header";
+ case HDMI_AUDIO_SAMPLE_SIZE_16:
+ return "16 bit";
+ case HDMI_AUDIO_SAMPLE_SIZE_20:
+ return "20 bit";
+ case HDMI_AUDIO_SAMPLE_SIZE_24:
+ return "24 bit";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_audio_sample_frequency_get_name(enum hdmi_audio_sample_frequency freq)
+{
+ switch (freq) {
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM:
+ return "Refer to Stream Header";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_32000:
+ return "32 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_44100:
+ return "44.1 kHz (CD)";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_48000:
+ return "48 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_88200:
+ return "88.2 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_96000:
+ return "96 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_176400:
+ return "176.4 kHz";
+ case HDMI_AUDIO_SAMPLE_FREQUENCY_192000:
+ return "192 kHz";
+ }
+ return "Invalid";
+}
+
+static const char *
+hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx)
+{
+ if (ctx < 0 || ctx > 0x1f)
+ return "Invalid";
+
+ switch (ctx) {
+ case HDMI_AUDIO_CODING_TYPE_EXT_CT:
+ return "Refer to CT";
+ case HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC:
+ return "HE AAC";
+ case HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2:
+ return "HE AAC v2";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND:
+ return "MPEG SURROUND";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC:
+ return "MPEG-4 HE AAC";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2:
+ return "MPEG-4 HE AAC v2";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC:
+ return "MPEG-4 AAC LC";
+ case HDMI_AUDIO_CODING_TYPE_EXT_DRA:
+ return "DRA";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND:
+ return "MPEG-4 HE AAC + MPEG Surround";
+ case HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND:
+ return "MPEG-4 AAC LC + MPEG Surround";
+ }
+ return "Reserved";
+}
+
+static void hdmi_audio_infoframe_log(const char *level,
+ struct device *dev,
+ const struct hdmi_audio_infoframe *frame)
+{
+ hdmi_infoframe_log_header(level, dev,
+ (const struct hdmi_any_infoframe *)frame);
+
+ if (frame->channels)
+ hdmi_log(" channels: %u\n", frame->channels - 1);
+ else
+ hdmi_log(" channels: Refer to stream header\n");
+ hdmi_log(" coding type: %s\n",
+ hdmi_audio_coding_type_get_name(frame->coding_type));
+ hdmi_log(" sample size: %s\n",
+ hdmi_audio_sample_size_get_name(frame->sample_size));
+ hdmi_log(" sample frequency: %s\n",
+ hdmi_audio_sample_frequency_get_name(frame->sample_frequency));
+ hdmi_log(" coding type ext: %s\n",
+ hdmi_audio_coding_type_ext_get_name(frame->coding_type_ext));
+ hdmi_log(" channel allocation: 0x%x\n",
+ frame->channel_allocation);
+ hdmi_log(" level shift value: %u dB\n",
+ frame->level_shift_value);
+ hdmi_log(" downmix inhibit: %s\n",
+ frame->downmix_inhibit ? "Yes" : "No");
+}
+
+static void hdmi_drm_infoframe_log(const char *level,
+ struct device *dev,
+ const struct hdmi_drm_infoframe *frame)
+{
+ int i;
+
+ hdmi_infoframe_log_header(level, dev,
+ (struct hdmi_any_infoframe *)frame);
+ hdmi_log("length: %d\n", frame->length);
+ hdmi_log("metadata type: %d\n", frame->metadata_type);
+ hdmi_log("eotf: %d\n", frame->eotf);
+ for (i = 0; i < 3; i++) {
+ hdmi_log("x[%d]: %d\n", i, frame->display_primaries[i].x);
+ hdmi_log("y[%d]: %d\n", i, frame->display_primaries[i].y);
+ }
+
+ hdmi_log("white point x: %d\n", frame->white_point.x);
+ hdmi_log("white point y: %d\n", frame->white_point.y);
+
+ hdmi_log("max_display_mastering_luminance: %d\n",
+ frame->max_display_mastering_luminance);
+ hdmi_log("min_display_mastering_luminance: %d\n",
+ frame->min_display_mastering_luminance);
+
+ hdmi_log("max_cll: %d\n", frame->max_cll);
+ hdmi_log("max_fall: %d\n", frame->max_fall);
+}
+
+static const char *
+hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
+{
+ if (s3d_struct < 0 || s3d_struct > 0xf)
+ return "Invalid";
+
+ switch (s3d_struct) {
+ case HDMI_3D_STRUCTURE_FRAME_PACKING:
+ return "Frame Packing";
+ case HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE:
+ return "Field Alternative";
+ case HDMI_3D_STRUCTURE_LINE_ALTERNATIVE:
+ return "Line Alternative";
+ case HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL:
+ return "Side-by-side (Full)";
+ case HDMI_3D_STRUCTURE_L_DEPTH:
+ return "L + Depth";
+ case HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH:
+ return "L + Depth + Graphics + Graphics-depth";
+ case HDMI_3D_STRUCTURE_TOP_AND_BOTTOM:
+ return "Top-and-Bottom";
+ case HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF:
+ return "Side-by-side (Half)";
+ default:
+ break;
+ }
+ return "Reserved";
+}
+
+static void
+hdmi_vendor_any_infoframe_log(const char *level,
+ struct device *dev,
+ const union hdmi_vendor_any_infoframe *frame)
+{
+ const struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+
+ hdmi_infoframe_log_header(level, dev,
+ (const struct hdmi_any_infoframe *)frame);
+
+ if (frame->any.oui != HDMI_IEEE_OUI) {
+ hdmi_log(" not a HDMI vendor infoframe\n");
+ return;
+ }
+ if (hvf->vic == 0 && hvf->s3d_struct == HDMI_3D_STRUCTURE_INVALID) {
+ hdmi_log(" empty frame\n");
+ return;
+ }
+
+ if (hvf->vic)
+ hdmi_log(" HDMI VIC: %u\n", hvf->vic);
+ if (hvf->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
+ hdmi_log(" 3D structure: %s\n",
+ hdmi_3d_structure_get_name(hvf->s3d_struct));
+ if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ hdmi_log(" 3D extension data: %d\n",
+ hvf->s3d_ext_data);
+ }
+}
+
+/**
+ * hdmi_infoframe_log() - log info of HDMI infoframe
+ * @level: logging level
+ * @dev: device
+ * @frame: HDMI infoframe
+ */
+void hdmi_infoframe_log(const char *level,
+ struct device *dev,
+ const union hdmi_infoframe *frame)
+{
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ hdmi_avi_infoframe_log(level, dev, &frame->avi);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ hdmi_spd_infoframe_log(level, dev, &frame->spd);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ hdmi_audio_infoframe_log(level, dev, &frame->audio);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ hdmi_vendor_any_infoframe_log(level, dev, &frame->vendor);
+ break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ hdmi_drm_infoframe_log(level, dev, &frame->drm);
+ break;
+ }
+}
+EXPORT_SYMBOL(hdmi_infoframe_log);
+
+/**
+ * hdmi_avi_infoframe_unpack() - unpack binary buffer to a HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Auxiliary Video (AVI) information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+
+ if (size < HDMI_INFOFRAME_SIZE(AVI))
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_AVI ||
+ ptr[1] != 2 ||
+ ptr[2] != HDMI_AVI_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
+ return -EINVAL;
+
+ hdmi_avi_infoframe_init(frame);
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ frame->colorspace = (ptr[0] >> 5) & 0x3;
+ if (ptr[0] & 0x10)
+ frame->active_aspect = ptr[1] & 0xf;
+ if (ptr[0] & 0x8) {
+ frame->top_bar = (ptr[6] << 8) | ptr[5];
+ frame->bottom_bar = (ptr[8] << 8) | ptr[7];
+ }
+ if (ptr[0] & 0x4) {
+ frame->left_bar = (ptr[10] << 8) | ptr[9];
+ frame->right_bar = (ptr[12] << 8) | ptr[11];
+ }
+ frame->scan_mode = ptr[0] & 0x3;
+
+ frame->colorimetry = (ptr[1] >> 6) & 0x3;
+ frame->picture_aspect = (ptr[1] >> 4) & 0x3;
+ frame->active_aspect = ptr[1] & 0xf;
+
+ frame->itc = ptr[2] & 0x80 ? true : false;
+ frame->extended_colorimetry = (ptr[2] >> 4) & 0x7;
+ frame->quantization_range = (ptr[2] >> 2) & 0x3;
+ frame->nups = ptr[2] & 0x3;
+
+ frame->video_code = ptr[3] & 0x7f;
+ frame->ycc_quantization_range = (ptr[4] >> 6) & 0x3;
+ frame->content_type = (ptr[4] >> 4) & 0x3;
+
+ frame->pixel_repeat = ptr[4] & 0xf;
+
+ return 0;
+}
+
+/**
+ * hdmi_spd_infoframe_unpack() - unpack binary buffer to a HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Source Product Description (SPD) information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ int ret;
+
+ if (size < HDMI_INFOFRAME_SIZE(SPD))
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_SPD ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_SPD_INFOFRAME_SIZE) {
+ return -EINVAL;
+ }
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(SPD)) != 0)
+ return -EINVAL;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ ret = hdmi_spd_infoframe_init(frame, ptr, ptr + 8);
+ if (ret)
+ return ret;
+
+ frame->sdi = ptr[24];
+
+ return 0;
+}
+
+/**
+ * hdmi_audio_infoframe_unpack() - unpack binary buffer to a HDMI AUDIO infoframe
+ * @frame: HDMI Audio infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Audio information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ int ret;
+
+ if (size < HDMI_INFOFRAME_SIZE(AUDIO))
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_AUDIO ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_AUDIO_INFOFRAME_SIZE) {
+ return -EINVAL;
+ }
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AUDIO)) != 0)
+ return -EINVAL;
+
+ ret = hdmi_audio_infoframe_init(frame);
+ if (ret)
+ return ret;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ frame->channels = ptr[0] & 0x7;
+ frame->coding_type = (ptr[0] >> 4) & 0xf;
+ frame->sample_size = ptr[1] & 0x3;
+ frame->sample_frequency = (ptr[1] >> 2) & 0x7;
+ frame->coding_type_ext = ptr[2] & 0x1f;
+ frame->channel_allocation = ptr[3];
+ frame->level_shift_value = (ptr[4] >> 3) & 0xf;
+ frame->downmix_inhibit = ptr[4] & 0x80 ? true : false;
+
+ return 0;
+}
+
+/**
+ * hdmi_vendor_any_infoframe_unpack() - unpack binary buffer to a HDMI
+ * vendor infoframe
+ * @frame: HDMI Vendor infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Vendor information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int
+hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ size_t length;
+ int ret;
+ u8 hdmi_video_format;
+ struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+
+ if (size < HDMI_INFOFRAME_HEADER_SIZE)
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
+ ptr[1] != 1 ||
+ (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
+ return -EINVAL;
+
+ length = ptr[2];
+
+ if (size < HDMI_INFOFRAME_HEADER_SIZE + length)
+ return -EINVAL;
+
+ if (hdmi_infoframe_checksum(buffer,
+ HDMI_INFOFRAME_HEADER_SIZE + length) != 0)
+ return -EINVAL;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ /* HDMI OUI */
+ if ((ptr[0] != 0x03) ||
+ (ptr[1] != 0x0c) ||
+ (ptr[2] != 0x00))
+ return -EINVAL;
+
+ hdmi_video_format = ptr[3] >> 5;
+
+ if (hdmi_video_format > 0x2)
+ return -EINVAL;
+
+ ret = hdmi_vendor_infoframe_init(hvf);
+ if (ret)
+ return ret;
+
+ hvf->length = length;
+
+ if (hdmi_video_format == 0x2) {
+ if (length != 5 && length != 6)
+ return -EINVAL;
+ hvf->s3d_struct = ptr[4] >> 4;
+ if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
+ if (length != 6)
+ return -EINVAL;
+ hvf->s3d_ext_data = ptr[5] >> 4;
+ }
+ } else if (hdmi_video_format == 0x1) {
+ if (length != 5)
+ return -EINVAL;
+ hvf->vic = ptr[4];
+ } else {
+ if (length != 4)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hdmi_drm_infoframe_unpack_only() - unpack binary buffer of CTA-861-G DRM
+ * infoframe DataBytes to a HDMI DRM
+ * infoframe
+ * @frame: HDMI DRM infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks CTA-861-G DRM infoframe DataBytes contained in the binary @buffer
+ * into a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
+ * infoframe.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ const u8 *temp;
+ u8 x_lsb, x_msb;
+ u8 y_lsb, y_msb;
+ int ret;
+ int i;
+
+ if (size < HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_init(frame);
+ if (ret)
+ return ret;
+
+ frame->eotf = ptr[0] & 0x7;
+ frame->metadata_type = ptr[1] & 0x7;
+
+ temp = ptr + 2;
+ for (i = 0; i < 3; i++) {
+ x_lsb = *temp++;
+ x_msb = *temp++;
+ frame->display_primaries[i].x = (x_msb << 8) | x_lsb;
+ y_lsb = *temp++;
+ y_msb = *temp++;
+ frame->display_primaries[i].y = (y_msb << 8) | y_lsb;
+ }
+
+ frame->white_point.x = (ptr[15] << 8) | ptr[14];
+ frame->white_point.y = (ptr[17] << 8) | ptr[16];
+
+ frame->max_display_mastering_luminance = (ptr[19] << 8) | ptr[18];
+ frame->min_display_mastering_luminance = (ptr[21] << 8) | ptr[20];
+ frame->max_cll = (ptr[23] << 8) | ptr[22];
+ frame->max_fall = (ptr[25] << 8) | ptr[24];
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_unpack_only);
+
+/**
+ * hdmi_drm_infoframe_unpack() - unpack binary buffer to a HDMI DRM infoframe
+ * @frame: HDMI DRM infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the CTA-861-G DRM infoframe contained in the binary @buffer into
+ * a structured @frame of the HDMI Dynamic Range and Mastering (DRM)
+ * infoframe. It also verifies the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ int ret;
+
+ if (size < HDMI_INFOFRAME_SIZE(DRM))
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_unpack_only(frame, ptr + HDMI_INFOFRAME_HEADER_SIZE,
+ size - HDMI_INFOFRAME_HEADER_SIZE);
+ return ret;
+}
+
+/**
+ * hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
+ * @frame: HDMI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the information contained in binary buffer @buffer into a structured
+ * @frame of a HDMI infoframe.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ int ret;
+ const u8 *ptr = buffer;
+
+ if (size < HDMI_INFOFRAME_HEADER_SIZE)
+ return -EINVAL;
+
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ ret = hdmi_drm_infoframe_unpack(&frame->drm, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer, size);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(hdmi_infoframe_unpack);
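+
+/*
+ * A sketch of consuming a received infoframe with the unpack and log
+ * helpers above; @buf and @len hold the raw frame, @dev is an assumed
+ * device pointer, and KERN_DEBUG is the usual printk level:
+ *
+ *	union hdmi_infoframe u;
+ *
+ *	if (hdmi_infoframe_unpack(&u, buf, len) == 0)
+ *		hdmi_infoframe_log(KERN_DEBUG, dev, &u);
+ */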
diff --git a/sys/compat/linuxkpi/common/src/linux_hrtimer.c b/sys/compat/linuxkpi/common/src/linux_hrtimer.c
new file mode 100644
index 000000000000..dca5d5cf709b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_hrtimer.c
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/time.h>
+
+#include <machine/cpu.h>
+
+#include <linux/hrtimer.h>
+
+static void
+hrtimer_call_handler(void *arg)
+{
+ struct hrtimer *hrtimer;
+ enum hrtimer_restart ret;
+
+ hrtimer = arg;
+ ret = hrtimer->function(hrtimer);
+
+ if (ret == HRTIMER_RESTART) {
+ callout_schedule_sbt(&hrtimer->callout,
+ nstosbt(hrtimer->expires), nstosbt(hrtimer->precision), 0);
+ } else {
+ callout_deactivate(&hrtimer->callout);
+ }
+}
+
+bool
+linux_hrtimer_active(struct hrtimer *hrtimer)
+{
+ bool ret;
+
+ mtx_lock(&hrtimer->mtx);
+ ret = callout_active(&hrtimer->callout);
+ mtx_unlock(&hrtimer->mtx);
+
+ return (ret);
+}
+
+/*
+ * Try to cancel active hrtimer.
+ * Return 1 if timer was active and cancellation succeeded, 0 if timer was
+ * inactive, or -1 if the timer is being serviced and can't be cancelled.
+ */
+int
+linux_hrtimer_try_to_cancel(struct hrtimer *hrtimer)
+{
+ int ret;
+
+ mtx_lock(&hrtimer->mtx);
+ ret = callout_stop(&hrtimer->callout);
+ mtx_unlock(&hrtimer->mtx);
+ if (ret > 0) {
+ return (1);
+ } else if (ret < 0) {
+ return (0);
+ } else {
+ return (-1);
+ }
+}
+
+/*
+ * Cancel active hrtimer.
+ * Return 1 if timer was active and cancellation succeeded, or 0 otherwise.
+ */
+int
+linux_hrtimer_cancel(struct hrtimer *hrtimer)
+{
+
+ return (callout_drain(&hrtimer->callout) > 0);
+}
+
+void
+linux_hrtimer_init(struct hrtimer *hrtimer)
+{
+
+ memset(hrtimer, 0, sizeof(*hrtimer));
+ mtx_init(&hrtimer->mtx, "hrtimer", NULL,
+ MTX_DEF | MTX_RECURSE | MTX_NOWITNESS);
+ callout_init_mtx(&hrtimer->callout, &hrtimer->mtx, 0);
+}
+
+void
+linux_hrtimer_set_expires(struct hrtimer *hrtimer, ktime_t time)
+{
+ hrtimer->expires = ktime_to_ns(time);
+}
+
+void
+linux_hrtimer_start(struct hrtimer *hrtimer, ktime_t time)
+{
+
+ linux_hrtimer_start_range_ns(hrtimer, time, 0);
+}
+
+void
+linux_hrtimer_start_range_ns(struct hrtimer *hrtimer, ktime_t time,
+ int64_t nsec)
+{
+
+ mtx_lock(&hrtimer->mtx);
+ hrtimer->precision = nsec;
+ callout_reset_sbt(&hrtimer->callout, nstosbt(ktime_to_ns(time)),
+ nstosbt(nsec), hrtimer_call_handler, hrtimer, 0);
+ mtx_unlock(&hrtimer->mtx);
+}
+
+void
+linux_hrtimer_forward_now(struct hrtimer *hrtimer, ktime_t interval)
+{
+
+ mtx_lock(&hrtimer->mtx);
+ callout_reset_sbt(&hrtimer->callout, nstosbt(ktime_to_ns(interval)),
+ nstosbt(hrtimer->precision), hrtimer_call_handler, hrtimer, 0);
+ mtx_unlock(&hrtimer->mtx);
+}
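+
+/*
+ * Usage sketch, assuming the Linux-side wrappers in <linux/hrtimer.h>
+ * (hrtimer_init(), hrtimer_start(), hrtimer_cancel()) map onto the
+ * linux_* helpers above; the callback and timeout are illustrative:
+ *
+ *	static enum hrtimer_restart
+ *	my_timeout(struct hrtimer *t)
+ *	{
+ *		return (HRTIMER_NORESTART);
+ *	}
+ *
+ *	struct hrtimer t;
+ *
+ *	hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ *	t.function = my_timeout;
+ *	hrtimer_start(&t, ktime_set(0, 1000000), HRTIMER_MODE_REL);
+ *	...
+ *	hrtimer_cancel(&t);
+ */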
diff --git a/sys/compat/linuxkpi/common/src/linux_i2c.c b/sys/compat/linuxkpi/common/src/linux_i2c.c
new file mode 100644
index 000000000000..f18570202f74
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_i2c.c
@@ -0,0 +1,381 @@
+/*-
+ * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+
+#include <dev/iicbus/iicbus.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "iicbus_if.h"
+#include "iicbb_if.h"
+#include "lkpi_iic_if.h"
+
+static int lkpi_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs);
+static int lkpi_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr);
+
+struct lkpi_iic_softc {
+ device_t iicbus;
+ struct i2c_adapter *adapter;
+};
+
+static struct sx lkpi_sx_i2c;
+
+static void
+lkpi_sysinit_i2c(void *arg __unused)
+{
+
+ sx_init(&lkpi_sx_i2c, "lkpi-i2c");
+}
+
+static void
+lkpi_sysuninit_i2c(void *arg __unused)
+{
+
+ sx_destroy(&lkpi_sx_i2c);
+}
+
+SYSINIT(lkpi_i2c, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ lkpi_sysinit_i2c, NULL);
+SYSUNINIT(lkpi_i2c, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ lkpi_sysuninit_i2c, NULL);
+
+static int
+lkpi_iic_probe(device_t dev)
+{
+
+ device_set_desc(dev, "LinuxKPI I2C");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+lkpi_iic_attach(device_t dev)
+{
+ struct lkpi_iic_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
+ if (sc->iicbus == NULL) {
+ device_printf(dev, "Couldn't add iicbus child, aborting\n");
+ return (ENXIO);
+ }
+ bus_attach_children(dev);
+ return (0);
+}
+
+static int
+lkpi_iic_detach(device_t dev)
+{
+ struct lkpi_iic_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (sc->iicbus)
+ device_delete_child(dev, sc->iicbus);
+ return (0);
+}
+
+static int
+lkpi_iic_add_adapter(device_t dev, struct i2c_adapter *adapter)
+{
+ struct lkpi_iic_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->adapter = adapter;
+
+ return (0);
+}
+
+static struct i2c_adapter *
+lkpi_iic_get_adapter(device_t dev)
+{
+ struct lkpi_iic_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (sc->adapter);
+}
+
+static device_method_t lkpi_iic_methods[] = {
+ /* device interface */
+ DEVMETHOD(device_probe, lkpi_iic_probe),
+ DEVMETHOD(device_attach, lkpi_iic_attach),
+ DEVMETHOD(device_detach, lkpi_iic_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* iicbus interface */
+ DEVMETHOD(iicbus_transfer, lkpi_i2c_transfer),
+ DEVMETHOD(iicbus_reset, lkpi_i2c_reset),
+ DEVMETHOD(iicbus_callback, iicbus_null_callback),
+
+ /* lkpi_iic interface */
+ DEVMETHOD(lkpi_iic_add_adapter, lkpi_iic_add_adapter),
+ DEVMETHOD(lkpi_iic_get_adapter, lkpi_iic_get_adapter),
+
+ DEVMETHOD_END
+};
+
+driver_t lkpi_iic_driver = {
+ "lkpi_iic",
+ lkpi_iic_methods,
+ sizeof(struct lkpi_iic_softc),
+};
+
+DRIVER_MODULE(lkpi_iic, drmn, lkpi_iic_driver, 0, 0);
+DRIVER_MODULE(lkpi_iic, drm, lkpi_iic_driver, 0, 0);
+DRIVER_MODULE(iicbus, lkpi_iic, iicbus_driver, 0, 0);
+MODULE_DEPEND(linuxkpi, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
+
+static int
+lkpi_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
+{
+
+	/* This does not seem to be supported in Linux. */
+ return (0);
+}
+
+static int i2c_check_for_quirks(struct i2c_adapter *adapter,
+ struct iic_msg *msgs, uint32_t nmsgs)
+{
+ const struct i2c_adapter_quirks *quirks;
+ device_t dev;
+ int i, max_nmsgs;
+ bool check_len;
+
+ dev = adapter->dev.parent->bsddev;
+ quirks = adapter->quirks;
+ if (quirks == NULL)
+ return (0);
+
+ check_len = true;
+ max_nmsgs = quirks->max_num_msgs;
+
+ if (quirks->flags & I2C_AQ_COMB) {
+ max_nmsgs = 2;
+
+ if (nmsgs == 2) {
+ if (quirks->flags & I2C_AQ_COMB_WRITE_FIRST &&
+ msgs[0].flags & IIC_M_RD) {
+ device_printf(dev,
+ "Error: "
+ "first combined message must be write\n");
+ return (EOPNOTSUPP);
+ }
+ if (quirks->flags & I2C_AQ_COMB_READ_SECOND &&
+ !(msgs[1].flags & IIC_M_RD)) {
+ device_printf(dev,
+ "Error: "
+ "second combined message must be read\n");
+ return (EOPNOTSUPP);
+ }
+
+ if (quirks->flags & I2C_AQ_COMB_SAME_ADDR &&
+ msgs[0].slave != msgs[1].slave) {
+				device_printf(dev,
+				    "Error: "
+				    "combined messages must use the same "
+				    "address\n");
+ return (EOPNOTSUPP);
+ }
+
+ if (quirks->max_comb_1st_msg_len &&
+ msgs[0].len > quirks->max_comb_1st_msg_len) {
+ device_printf(dev,
+ "Error: "
+ "message too long: %hu > %hu max\n",
+ msgs[0].len,
+ quirks->max_comb_1st_msg_len);
+ return (EOPNOTSUPP);
+ }
+ if (quirks->max_comb_2nd_msg_len &&
+ msgs[1].len > quirks->max_comb_2nd_msg_len) {
+ device_printf(dev,
+ "Error: "
+ "message too long: %hu > %hu max\n",
+ msgs[1].len,
+ quirks->max_comb_2nd_msg_len);
+ return (EOPNOTSUPP);
+ }
+
+ check_len = false;
+ }
+ }
+
+ if (max_nmsgs && nmsgs > max_nmsgs) {
+ device_printf(dev,
+ "Error: too many messages: %d > %d max\n",
+ nmsgs, max_nmsgs);
+ return (EOPNOTSUPP);
+ }
+
+ for (i = 0; i < nmsgs; i++) {
+ if (msgs[i].flags & IIC_M_RD) {
+ if (check_len && quirks->max_read_len &&
+ msgs[i].len > quirks->max_read_len) {
+ device_printf(dev,
+ "Error: "
+ "message %d too long: %hu > %hu max\n",
+ i, msgs[i].len, quirks->max_read_len);
+ return (EOPNOTSUPP);
+ }
+ if (quirks->flags & I2C_AQ_NO_ZERO_LEN_READ &&
+ msgs[i].len == 0) {
+ device_printf(dev,
+ "Error: message %d of length 0\n", i);
+ return (EOPNOTSUPP);
+ }
+ } else {
+ if (check_len && quirks->max_write_len &&
+ msgs[i].len > quirks->max_write_len) {
+ device_printf(dev,
+ "Message %d too long: %hu > %hu max\n",
+ i, msgs[i].len, quirks->max_write_len);
+ return (EOPNOTSUPP);
+ }
+ if (quirks->flags & I2C_AQ_NO_ZERO_LEN_WRITE &&
+ msgs[i].len == 0) {
+ device_printf(dev,
+ "Error: message %d of length 0\n", i);
+ return (EOPNOTSUPP);
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+lkpi_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
+{
+ struct lkpi_iic_softc *sc;
+ struct i2c_msg *linux_msgs;
+ int i, ret = 0;
+
+ sc = device_get_softc(dev);
+ if (sc->adapter == NULL)
+ return (ENXIO);
+ ret = i2c_check_for_quirks(sc->adapter, msgs, nmsgs);
+ if (ret != 0)
+ return (ret);
+ linux_set_current(curthread);
+
+ linux_msgs = malloc(sizeof(struct i2c_msg) * nmsgs,
+ M_DEVBUF, M_WAITOK | M_ZERO);
+
+ for (i = 0; i < nmsgs; i++) {
+ linux_msgs[i].addr = msgs[i].slave >> 1;
+ linux_msgs[i].len = msgs[i].len;
+ linux_msgs[i].buf = msgs[i].buf;
+ if (msgs[i].flags & IIC_M_RD) {
+ linux_msgs[i].flags |= I2C_M_RD;
+ for (int j = 0; j < msgs[i].len; j++)
+ msgs[i].buf[j] = 0;
+ }
+ if (msgs[i].flags & IIC_M_NOSTART)
+ linux_msgs[i].flags |= I2C_M_NOSTART;
+ }
+ ret = i2c_transfer(sc->adapter, linux_msgs, nmsgs);
+ free(linux_msgs, M_DEVBUF);
+
+ if (ret < 0)
+ return (-ret);
+ return (0);
+}
+
+int
+lkpi_i2c_add_adapter(struct i2c_adapter *adapter)
+{
+ device_t lkpi_iic;
+
+ if (adapter->name[0] == '\0')
+ return (-EINVAL);
+ if (bootverbose)
+ device_printf(adapter->dev.parent->bsddev,
+ "Adding i2c adapter %s\n", adapter->name);
+ sx_xlock(&lkpi_sx_i2c);
+ lkpi_iic = device_add_child(adapter->dev.parent->bsddev, "lkpi_iic",
+ DEVICE_UNIT_ANY);
+ if (lkpi_iic == NULL) {
+ device_printf(adapter->dev.parent->bsddev, "Couldn't add lkpi_iic\n");
+ sx_xunlock(&lkpi_sx_i2c);
+ return (ENXIO);
+ }
+
+ bus_topo_lock();
+ bus_attach_children(adapter->dev.parent->bsddev);
+ bus_topo_unlock();
+ LKPI_IIC_ADD_ADAPTER(lkpi_iic, adapter);
+ sx_xunlock(&lkpi_sx_i2c);
+ return (0);
+}
+
+int
+lkpi_i2c_del_adapter(struct i2c_adapter *adapter)
+{
+ device_t child;
+ int unit, rv;
+
+ if (adapter == NULL)
+ return (-EINVAL);
+ if (bootverbose)
+ device_printf(adapter->dev.parent->bsddev,
+ "Removing i2c adapter %s\n", adapter->name);
+ sx_xlock(&lkpi_sx_i2c);
+ unit = 0;
+ while ((child = device_find_child(adapter->dev.parent->bsddev, "lkpi_iic", unit++)) != NULL) {
+
+ if (adapter == LKPI_IIC_GET_ADAPTER(child)) {
+ bus_topo_lock();
+ device_delete_child(adapter->dev.parent->bsddev, child);
+ bus_topo_unlock();
+ rv = 0;
+ goto out;
+ }
+ }
+
+ unit = 0;
+ while ((child = device_find_child(adapter->dev.parent->bsddev, "lkpi_iicbb", unit++)) != NULL) {
+
+ if (adapter == LKPI_IIC_GET_ADAPTER(child)) {
+ bus_topo_lock();
+ device_delete_child(adapter->dev.parent->bsddev, child);
+ bus_topo_unlock();
+ rv = 0;
+ goto out;
+ }
+ }
+ rv = -EINVAL;
+out:
+ sx_xunlock(&lkpi_sx_i2c);
+ return (rv);
+}
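
A rough registration sketch for the adapter glue above (not part of this diff): it calls lkpi_i2c_add_adapter()/lkpi_i2c_del_adapter() directly and assumes the caller has already wired up adapter->dev.parent->bsddev, since the code above dereferences it unconditionally; all example_* names are hypothetical.

	/* Illustrative sketch: registering a hardware adapter with lkpi_iic. */
	static struct i2c_adapter example_adapter;

	static int
	example_i2c_register(struct device *parent, const struct i2c_algorithm *algo)
	{
		example_adapter.dev.parent = parent;	/* parent->bsddev must be valid */
		example_adapter.algo = algo;
		strlcpy(example_adapter.name, "example-i2c",
		    sizeof(example_adapter.name));

		/* Returns -EINVAL for an empty name, ENXIO if no child was added. */
		return (lkpi_i2c_add_adapter(&example_adapter));
	}

	static void
	example_i2c_unregister(void)
	{
		(void)lkpi_i2c_del_adapter(&example_adapter);
	}
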
diff --git a/sys/compat/linuxkpi/common/src/linux_i2cbb.c b/sys/compat/linuxkpi/common/src/linux_i2cbb.c
new file mode 100644
index 000000000000..48a018ec2533
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_i2cbb.c
@@ -0,0 +1,325 @@
+/*-
+ * Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+
+#include <dev/iicbus/iicbus.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "iicbus_if.h"
+#include "iicbb_if.h"
+#include "lkpi_iic_if.h"
+
+static void lkpi_iicbb_setsda(device_t dev, int val);
+static void lkpi_iicbb_setscl(device_t dev, int val);
+static int lkpi_iicbb_getscl(device_t dev);
+static int lkpi_iicbb_getsda(device_t dev);
+static int lkpi_iicbb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr);
+static int lkpi_iicbb_pre_xfer(device_t dev);
+static void lkpi_iicbb_post_xfer(device_t dev);
+
+struct lkpi_iicbb_softc {
+ device_t iicbb;
+ struct i2c_adapter *adapter;
+};
+
+static struct sx lkpi_sx_i2cbb;
+
+static void
+lkpi_sysinit_i2cbb(void *arg __unused)
+{
+
+ sx_init(&lkpi_sx_i2cbb, "lkpi-i2cbb");
+}
+
+static void
+lkpi_sysuninit_i2cbb(void *arg __unused)
+{
+
+ sx_destroy(&lkpi_sx_i2cbb);
+}
+
+SYSINIT(lkpi_i2cbb, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ lkpi_sysinit_i2cbb, NULL);
+SYSUNINIT(lkpi_i2cbb, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ lkpi_sysuninit_i2cbb, NULL);
+
+static int
+lkpi_iicbb_probe(device_t dev)
+{
+
+ device_set_desc(dev, "LinuxKPI I2CBB");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+lkpi_iicbb_attach(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->iicbb = device_add_child(dev, "iicbb", DEVICE_UNIT_ANY);
+ if (sc->iicbb == NULL) {
+ device_printf(dev, "Couldn't add iicbb child, aborting\n");
+ return (ENXIO);
+ }
+ bus_attach_children(dev);
+ return (0);
+}
+
+static int
+lkpi_iicbb_detach(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (sc->iicbb)
+ device_delete_child(dev, sc->iicbb);
+ return (0);
+}
+
+static int
+lkpi_iicbb_add_adapter(device_t dev, struct i2c_adapter *adapter)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+
+ sc = device_get_softc(dev);
+ sc->adapter = adapter;
+
+ /*
+ * Set iicbb timing parameters deriving speed from the protocol delay.
+ */
+ algo_data = adapter->algo_data;
+ if (algo_data->udelay != 0)
+ IICBUS_RESET(sc->iicbb, 1000000 / algo_data->udelay, 0, NULL);
+ return (0);
+}
+
+static struct i2c_adapter *
+lkpi_iicbb_get_adapter(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (sc->adapter);
+}
+
+static device_method_t lkpi_iicbb_methods[] = {
+ /* device interface */
+ DEVMETHOD(device_probe, lkpi_iicbb_probe),
+ DEVMETHOD(device_attach, lkpi_iicbb_attach),
+ DEVMETHOD(device_detach, lkpi_iicbb_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* iicbb interface */
+ DEVMETHOD(iicbb_setsda, lkpi_iicbb_setsda),
+ DEVMETHOD(iicbb_setscl, lkpi_iicbb_setscl),
+ DEVMETHOD(iicbb_getsda, lkpi_iicbb_getsda),
+ DEVMETHOD(iicbb_getscl, lkpi_iicbb_getscl),
+ DEVMETHOD(iicbb_reset, lkpi_iicbb_reset),
+ DEVMETHOD(iicbb_pre_xfer, lkpi_iicbb_pre_xfer),
+ DEVMETHOD(iicbb_post_xfer, lkpi_iicbb_post_xfer),
+
+ /* lkpi_iicbb interface */
+ DEVMETHOD(lkpi_iic_add_adapter, lkpi_iicbb_add_adapter),
+ DEVMETHOD(lkpi_iic_get_adapter, lkpi_iicbb_get_adapter),
+
+ DEVMETHOD_END
+};
+
+driver_t lkpi_iicbb_driver = {
+ "lkpi_iicbb",
+ lkpi_iicbb_methods,
+ sizeof(struct lkpi_iicbb_softc),
+};
+
+DRIVER_MODULE(lkpi_iicbb, drmn, lkpi_iicbb_driver, 0, 0);
+DRIVER_MODULE(lkpi_iicbb, drm, lkpi_iicbb_driver, 0, 0);
+DRIVER_MODULE(iicbb, lkpi_iicbb, iicbb_driver, 0, 0);
+MODULE_DEPEND(linuxkpi, iicbb, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
+
+static void
+lkpi_iicbb_setsda(device_t dev, int val)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+
+ sc = device_get_softc(dev);
+ algo_data = sc->adapter->algo_data;
+ algo_data->setsda(algo_data->data, val);
+}
+
+static void
+lkpi_iicbb_setscl(device_t dev, int val)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+
+ sc = device_get_softc(dev);
+ algo_data = sc->adapter->algo_data;
+ algo_data->setscl(algo_data->data, val);
+}
+
+static int
+lkpi_iicbb_getscl(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+ int ret;
+
+ sc = device_get_softc(dev);
+ algo_data = sc->adapter->algo_data;
+ ret = algo_data->getscl(algo_data->data);
+ return (ret);
+}
+
+static int
+lkpi_iicbb_getsda(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+ int ret;
+
+ sc = device_get_softc(dev);
+ algo_data = sc->adapter->algo_data;
+ ret = algo_data->getsda(algo_data->data);
+ return (ret);
+}
+
+static int
+lkpi_iicbb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
+{
+
+	/* This does not seem to be supported in Linux. */
+ return (0);
+}
+
+static int
+lkpi_iicbb_pre_xfer(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+ int rc = 0;
+
+ sc = device_get_softc(dev);
+ algo_data = sc->adapter->algo_data;
+ if (algo_data->pre_xfer != 0)
+ rc = algo_data->pre_xfer(sc->adapter);
+ return (rc);
+}
+
+static void
+lkpi_iicbb_post_xfer(device_t dev)
+{
+ struct lkpi_iicbb_softc *sc;
+ struct i2c_algo_bit_data *algo_data;
+
+ sc = device_get_softc(dev);
+ algo_data = sc->adapter->algo_data;
+ if (algo_data->post_xfer != NULL)
+ algo_data->post_xfer(sc->adapter);
+}
+
+int
+lkpi_i2cbb_transfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int nmsgs)
+{
+ struct iic_msg *bsd_msgs;
+ int ret = ENXIO;
+
+ linux_set_current(curthread);
+
+ bsd_msgs = malloc(sizeof(struct iic_msg) * nmsgs,
+ M_DEVBUF, M_WAITOK | M_ZERO);
+
+ for (int i = 0; i < nmsgs; i++) {
+ bsd_msgs[i].slave = msgs[i].addr << 1;
+ bsd_msgs[i].len = msgs[i].len;
+ bsd_msgs[i].buf = msgs[i].buf;
+ if (msgs[i].flags & I2C_M_RD)
+ bsd_msgs[i].flags |= IIC_M_RD;
+ if (msgs[i].flags & I2C_M_NOSTART)
+ bsd_msgs[i].flags |= IIC_M_NOSTART;
+ }
+
+ for (int unit = 0; ; unit++) {
+ device_t child;
+ struct lkpi_iicbb_softc *sc;
+
+ child = device_find_child(adapter->dev.parent->bsddev,
+ "lkpi_iicbb", unit);
+ if (child == NULL)
+ break;
+ if (adapter == LKPI_IIC_GET_ADAPTER(child)) {
+ sc = device_get_softc(child);
+ ret = IICBUS_TRANSFER(sc->iicbb, bsd_msgs, nmsgs);
+ ret = iic2errno(ret);
+ break;
+ }
+ }
+
+ free(bsd_msgs, M_DEVBUF);
+
+ if (ret != 0)
+ return (-ret);
+ return (nmsgs);
+}
+
+int
+lkpi_i2c_bit_add_bus(struct i2c_adapter *adapter)
+{
+ device_t lkpi_iicbb;
+
+ if (bootverbose)
+ device_printf(adapter->dev.parent->bsddev,
+ "Adding i2c adapter %s\n", adapter->name);
+ sx_xlock(&lkpi_sx_i2cbb);
+ lkpi_iicbb = device_add_child(adapter->dev.parent->bsddev, "lkpi_iicbb",
+ DEVICE_UNIT_ANY);
+ if (lkpi_iicbb == NULL) {
+ device_printf(adapter->dev.parent->bsddev, "Couldn't add lkpi_iicbb\n");
+ sx_xunlock(&lkpi_sx_i2cbb);
+ return (ENXIO);
+ }
+
+ bus_topo_lock();
+ bus_attach_children(adapter->dev.parent->bsddev);
+ bus_topo_unlock();
+ LKPI_IIC_ADD_ADAPTER(lkpi_iicbb, adapter);
+ sx_xunlock(&lkpi_sx_i2cbb);
+ return (0);
+}
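
A sketch of the bit-banging side (not part of this diff), assuming a driver fills in the struct i2c_algo_bit_data callbacks that the code above dereferences (setsda, setscl, getsda, getscl, udelay, data); all example_* names and the GPIO stubs are hypothetical.

	/* Illustrative sketch: a bit-banged adapter for lkpi_i2c_bit_add_bus(). */
	static struct example_softc {
		int dummy;
	} example_softc;

	static void example_setsda(void *data, int state) { /* drive SDA pin */ }
	static void example_setscl(void *data, int state) { /* drive SCL pin */ }
	static int  example_getsda(void *data) { return (1); /* sample SDA */ }
	static int  example_getscl(void *data) { return (1); /* sample SCL */ }

	static struct i2c_algo_bit_data example_bit_data = {
		.setsda = example_setsda,
		.setscl = example_setscl,
		.getsda = example_getsda,
		.getscl = example_getscl,
		.udelay = 5,	/* 1000000 / udelay is passed to IICBUS_RESET() as speed */
		.data = &example_softc,
	};

	static struct i2c_adapter example_bb_adapter = {
		.algo_data = &example_bit_data,
	};

	static int
	example_bitbang_register(struct device *parent)
	{
		example_bb_adapter.dev.parent = parent;
		strlcpy(example_bb_adapter.name, "example-bb",
		    sizeof(example_bb_adapter.name));
		return (lkpi_i2c_bit_add_bus(&example_bb_adapter));
	}
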
diff --git a/sys/compat/linuxkpi/common/src/linux_idr.c b/sys/compat/linuxkpi/common/src/linux_idr.c
new file mode 100644
index 000000000000..664177835c85
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_idr.c
@@ -0,0 +1,813 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/stdarg.h>
+
+#include <linux/bitmap.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+
+#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
+#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
+
+struct linux_idr_cache {
+ spinlock_t lock;
+ struct idr_layer *head;
+ unsigned count;
+};
+
+DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);
+
+/*
+ * IDR Implementation.
+ *
+ * This is quick and dirty and not as re-entrant as the Linux version;
+ * however, it should be fairly fast. It is basically a radix tree with
+ * a built-in bitmap for allocation.
+ */
+static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");
+
+static struct idr_layer *
+idr_preload_dequeue_locked(struct linux_idr_cache *lic)
+{
+ struct idr_layer *retval;
+
+ /* check if wrong thread is trying to dequeue */
+ if (mtx_owned(&lic->lock) == 0)
+ return (NULL);
+
+ retval = lic->head;
+ if (likely(retval != NULL)) {
+ lic->head = retval->ary[0];
+ lic->count--;
+ retval->ary[0] = NULL;
+ }
+ return (retval);
+}
+
+static void
+idr_preload_init(void *arg)
+{
+ int cpu;
+
+ CPU_FOREACH(cpu) {
+ struct linux_idr_cache *lic =
+ DPCPU_ID_PTR(cpu, linux_idr_cache);
+
+ spin_lock_init(&lic->lock);
+ }
+}
+SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);
+
+static void
+idr_preload_uninit(void *arg)
+{
+ int cpu;
+
+ CPU_FOREACH(cpu) {
+ struct idr_layer *cacheval;
+ struct linux_idr_cache *lic =
+ DPCPU_ID_PTR(cpu, linux_idr_cache);
+
+ while (1) {
+ spin_lock(&lic->lock);
+ cacheval = idr_preload_dequeue_locked(lic);
+ spin_unlock(&lic->lock);
+
+ if (cacheval == NULL)
+ break;
+ free(cacheval, M_IDR);
+ }
+ spin_lock_destroy(&lic->lock);
+ }
+}
+SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);
+
+void
+idr_preload(gfp_t gfp_mask)
+{
+ struct linux_idr_cache *lic;
+ struct idr_layer *cacheval;
+
+ sched_pin();
+
+ lic = &DPCPU_GET(linux_idr_cache);
+
+ /* fill up cache */
+ spin_lock(&lic->lock);
+ while (lic->count < MAX_IDR_FREE) {
+ spin_unlock(&lic->lock);
+ cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
+ spin_lock(&lic->lock);
+ if (cacheval == NULL)
+ break;
+ cacheval->ary[0] = lic->head;
+ lic->head = cacheval;
+ lic->count++;
+ }
+}
+
+void
+idr_preload_end(void)
+{
+ struct linux_idr_cache *lic;
+
+ lic = &DPCPU_GET(linux_idr_cache);
+ spin_unlock(&lic->lock);
+ sched_unpin();
+}
+
+static inline int
+idr_max(struct idr *idr)
+{
+ return (1 << (idr->layers * IDR_BITS)) - 1;
+}
+
+static inline int
+idr_pos(int id, int layer)
+{
+ return (id >> (IDR_BITS * layer)) & IDR_MASK;
+}
+
+void
+idr_init(struct idr *idr)
+{
+ bzero(idr, sizeof(*idr));
+ mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
+}
+
+/* Only frees cached pages. */
+void
+idr_destroy(struct idr *idr)
+{
+ struct idr_layer *il, *iln;
+
+ /*
+	 * This idr can be reused, and this function might be called multiple
+	 * times without an intervening idr_init(). Check for that case here;
+	 * otherwise the mutex code would panic while asserting that it is valid.
+ */
+ if (mtx_initialized(&idr->lock) == 0)
+ return;
+
+ idr_remove_all(idr);
+ mtx_lock(&idr->lock);
+ for (il = idr->free; il != NULL; il = iln) {
+ iln = il->ary[0];
+ free(il, M_IDR);
+ }
+ mtx_unlock(&idr->lock);
+ mtx_destroy(&idr->lock);
+}
+
+static void
+idr_remove_layer(struct idr_layer *il, int layer)
+{
+ int i;
+
+ if (il == NULL)
+ return;
+ if (layer == 0) {
+ free(il, M_IDR);
+ return;
+ }
+ for (i = 0; i < IDR_SIZE; i++)
+ if (il->ary[i])
+ idr_remove_layer(il->ary[i], layer - 1);
+}
+
+void
+idr_remove_all(struct idr *idr)
+{
+
+ mtx_lock(&idr->lock);
+ idr_remove_layer(idr->top, idr->layers - 1);
+ idr->top = NULL;
+ idr->layers = 0;
+ mtx_unlock(&idr->lock);
+}
+
+static void *
+idr_remove_locked(struct idr *idr, int id)
+{
+ struct idr_layer *il;
+ void *res;
+ int layer;
+ int idx;
+
+ id &= MAX_ID_MASK;
+ il = idr->top;
+ layer = idr->layers - 1;
+ if (il == NULL || id > idr_max(idr))
+ return (NULL);
+ /*
+ * Walk down the tree to this item setting bitmaps along the way
+ * as we know at least one item will be free along this path.
+ */
+ while (layer && il) {
+ idx = idr_pos(id, layer);
+ il->bitmap |= 1 << idx;
+ il = il->ary[idx];
+ layer--;
+ }
+ idx = id & IDR_MASK;
+ /*
+ * At this point we've set free space bitmaps up the whole tree.
+	 * We could make this non-fatal and unwind, but Linux dumps a stack
+	 * trace and a warning here, so that does not seem necessary.
+ */
+ if (il == NULL || (il->bitmap & (1 << idx)) != 0)
+ panic("idr_remove: Item %d not allocated (%p, %p)\n",
+ id, idr, il);
+ res = il->ary[idx];
+ il->ary[idx] = NULL;
+ il->bitmap |= 1 << idx;
+
+ return (res);
+}
+
+void *
+idr_remove(struct idr *idr, int id)
+{
+ void *res;
+
+ mtx_lock(&idr->lock);
+ res = idr_remove_locked(idr, id);
+ mtx_unlock(&idr->lock);
+
+ return (res);
+}
+
+static inline struct idr_layer *
+idr_find_layer_locked(struct idr *idr, int id)
+{
+ struct idr_layer *il;
+ int layer;
+
+ id &= MAX_ID_MASK;
+ il = idr->top;
+ layer = idr->layers - 1;
+ if (il == NULL || id > idr_max(idr))
+ return (NULL);
+ while (layer && il) {
+ il = il->ary[idr_pos(id, layer)];
+ layer--;
+ }
+ return (il);
+}
+
+void *
+idr_replace(struct idr *idr, void *ptr, int id)
+{
+ struct idr_layer *il;
+ void *res;
+ int idx;
+
+ mtx_lock(&idr->lock);
+ il = idr_find_layer_locked(idr, id);
+ idx = id & IDR_MASK;
+
+ /* Replace still returns an error if the item was not allocated. */
+ if (il == NULL || (il->bitmap & (1 << idx))) {
+ res = ERR_PTR(-ENOENT);
+ } else {
+ res = il->ary[idx];
+ il->ary[idx] = ptr;
+ }
+ mtx_unlock(&idr->lock);
+ return (res);
+}
+
+static inline void *
+idr_find_locked(struct idr *idr, int id)
+{
+ struct idr_layer *il;
+ void *res;
+
+ mtx_assert(&idr->lock, MA_OWNED);
+ il = idr_find_layer_locked(idr, id);
+ if (il != NULL)
+ res = il->ary[id & IDR_MASK];
+ else
+ res = NULL;
+ return (res);
+}
+
+void *
+idr_find(struct idr *idr, int id)
+{
+ void *res;
+
+ mtx_lock(&idr->lock);
+ res = idr_find_locked(idr, id);
+ mtx_unlock(&idr->lock);
+ return (res);
+}
+
+void *
+idr_get_next(struct idr *idr, int *nextidp)
+{
+ void *res = NULL;
+ int id = *nextidp;
+
+ mtx_lock(&idr->lock);
+ for (; id <= idr_max(idr); id++) {
+ res = idr_find_locked(idr, id);
+ if (res == NULL)
+ continue;
+ *nextidp = id;
+ break;
+ }
+ mtx_unlock(&idr->lock);
+ return (res);
+}
+
+int
+idr_pre_get(struct idr *idr, gfp_t gfp_mask)
+{
+ struct idr_layer *il, *iln;
+ struct idr_layer *head;
+ int need;
+
+ mtx_lock(&idr->lock);
+ for (;;) {
+ need = idr->layers + 1;
+ for (il = idr->free; il != NULL; il = il->ary[0])
+ need--;
+ mtx_unlock(&idr->lock);
+ if (need <= 0)
+ break;
+ for (head = NULL; need; need--) {
+ iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
+ if (iln == NULL)
+ break;
+ bitmap_fill(&iln->bitmap, IDR_SIZE);
+ if (head != NULL) {
+ il->ary[0] = iln;
+ il = iln;
+ } else
+ head = il = iln;
+ }
+ if (head == NULL)
+ return (0);
+ mtx_lock(&idr->lock);
+ il->ary[0] = idr->free;
+ idr->free = head;
+ }
+ return (1);
+}
+
+static struct idr_layer *
+idr_free_list_get(struct idr *idp)
+{
+ struct idr_layer *il;
+
+ if ((il = idp->free) != NULL) {
+ idp->free = il->ary[0];
+ il->ary[0] = NULL;
+ }
+ return (il);
+}
+
+static inline struct idr_layer *
+idr_get(struct idr *idp)
+{
+ struct idr_layer *il;
+
+ if ((il = idr_free_list_get(idp)) != NULL) {
+ MPASS(il->bitmap != 0);
+ } else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
+ bitmap_fill(&il->bitmap, IDR_SIZE);
+ } else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
+ bitmap_fill(&il->bitmap, IDR_SIZE);
+ } else {
+ return (NULL);
+ }
+ return (il);
+}
+
+/*
+ * Could be implemented as get_new_above(idr, ptr, 0, idp), but written
+ * separately for simplicity's sake.
+ */
+static int
+idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
+{
+ struct idr_layer *stack[MAX_LEVEL];
+ struct idr_layer *il;
+ int error;
+ int layer;
+ int idx;
+ int id;
+
+ mtx_assert(&idr->lock, MA_OWNED);
+
+ error = -EAGAIN;
+ /*
+ * Expand the tree until there is free space.
+ */
+ if (idr->top == NULL || idr->top->bitmap == 0) {
+ if (idr->layers == MAX_LEVEL + 1) {
+ error = -ENOSPC;
+ goto out;
+ }
+ il = idr_get(idr);
+ if (il == NULL)
+ goto out;
+ il->ary[0] = idr->top;
+ if (idr->top)
+ il->bitmap &= ~1;
+ idr->top = il;
+ idr->layers++;
+ }
+ il = idr->top;
+ id = 0;
+ /*
+ * Walk the tree following free bitmaps, record our path.
+ */
+ for (layer = idr->layers - 1;; layer--) {
+ stack[layer] = il;
+ idx = ffsl(il->bitmap);
+ if (idx == 0)
+ panic("idr_get_new: Invalid leaf state (%p, %p)\n",
+ idr, il);
+ idx--;
+ id |= idx << (layer * IDR_BITS);
+ if (layer == 0)
+ break;
+ if (il->ary[idx] == NULL) {
+ il->ary[idx] = idr_get(idr);
+ if (il->ary[idx] == NULL)
+ goto out;
+ }
+ il = il->ary[idx];
+ }
+ /*
+ * Allocate the leaf to the consumer.
+ */
+ il->bitmap &= ~(1 << idx);
+ il->ary[idx] = ptr;
+ *idp = id;
+ /*
+ * Clear bitmaps potentially up to the root.
+ */
+ while (il->bitmap == 0 && ++layer < idr->layers) {
+ il = stack[layer];
+ il->bitmap &= ~(1 << idr_pos(id, layer));
+ }
+ error = 0;
+out:
+#ifdef INVARIANTS
+ if (error == 0 && idr_find_locked(idr, id) != ptr) {
+ panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
+ idr, id, ptr);
+ }
+#endif
+ return (error);
+}
+
+int
+idr_get_new(struct idr *idr, void *ptr, int *idp)
+{
+ int retval;
+
+ mtx_lock(&idr->lock);
+ retval = idr_get_new_locked(idr, ptr, idp);
+ mtx_unlock(&idr->lock);
+ return (retval);
+}
+
+static int
+idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
+{
+ struct idr_layer *stack[MAX_LEVEL];
+ struct idr_layer *il;
+ int error;
+ int layer;
+ int idx, sidx;
+ int id;
+
+ mtx_assert(&idr->lock, MA_OWNED);
+
+ error = -EAGAIN;
+ /*
+ * Compute the layers required to support starting_id and the mask
+ * at the top layer.
+ */
+restart:
+ idx = starting_id;
+ layer = 0;
+ while (idx & ~IDR_MASK) {
+ layer++;
+ idx >>= IDR_BITS;
+ }
+ if (layer == MAX_LEVEL + 1) {
+ error = -ENOSPC;
+ goto out;
+ }
+ /*
+ * Expand the tree until there is free space at or beyond starting_id.
+ */
+ while (idr->layers <= layer ||
+ idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
+ if (idr->layers == MAX_LEVEL + 1) {
+ error = -ENOSPC;
+ goto out;
+ }
+ il = idr_get(idr);
+ if (il == NULL)
+ goto out;
+ il->ary[0] = idr->top;
+ if (idr->top && idr->top->bitmap == 0)
+ il->bitmap &= ~1;
+ idr->top = il;
+ idr->layers++;
+ }
+ il = idr->top;
+ id = 0;
+ /*
+ * Walk the tree following free bitmaps, record our path.
+ */
+ for (layer = idr->layers - 1;; layer--) {
+ stack[layer] = il;
+ sidx = idr_pos(starting_id, layer);
+ /* Returns index numbered from 0 or size if none exists. */
+ idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
+ if (idx == IDR_SIZE && sidx == 0)
+ panic("idr_get_new: Invalid leaf state (%p, %p)\n",
+ idr, il);
+ /*
+ * We may have walked a path where there was a free bit but
+ * it was lower than what we wanted. Restart the search with
+ * a larger starting id. id contains the progress we made so
+ * far. Search the leaf one above this level. This may
+ * restart as many as MAX_LEVEL times but that is expected
+ * to be rare.
+ */
+ if (idx == IDR_SIZE) {
+ starting_id = id + (1 << ((layer + 1) * IDR_BITS));
+ goto restart;
+ }
+ if (idx > sidx)
+ starting_id = 0; /* Search the whole subtree. */
+ id |= idx << (layer * IDR_BITS);
+ if (layer == 0)
+ break;
+ if (il->ary[idx] == NULL) {
+ il->ary[idx] = idr_get(idr);
+ if (il->ary[idx] == NULL)
+ goto out;
+ }
+ il = il->ary[idx];
+ }
+ /*
+ * Allocate the leaf to the consumer.
+ */
+ il->bitmap &= ~(1 << idx);
+ il->ary[idx] = ptr;
+ *idp = id;
+ /*
+ * Clear bitmaps potentially up to the root.
+ */
+ while (il->bitmap == 0 && ++layer < idr->layers) {
+ il = stack[layer];
+ il->bitmap &= ~(1 << idr_pos(id, layer));
+ }
+ error = 0;
+out:
+#ifdef INVARIANTS
+ if (error == 0 && idr_find_locked(idr, id) != ptr) {
+ panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
+ idr, id, ptr);
+ }
+#endif
+ return (error);
+}
+
+int
+idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
+{
+ int retval;
+
+ mtx_lock(&idr->lock);
+ retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
+ mtx_unlock(&idr->lock);
+ return (retval);
+}
+
+int
+ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+{
+ return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
+}
+
+static int
+idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
+{
+ int max = end > 0 ? end - 1 : INT_MAX;
+ int error;
+ int id;
+
+ mtx_assert(&idr->lock, MA_OWNED);
+
+ if (unlikely(start < 0))
+ return (-EINVAL);
+ if (unlikely(max < start))
+ return (-ENOSPC);
+
+ if (start == 0)
+ error = idr_get_new_locked(idr, ptr, &id);
+ else
+ error = idr_get_new_above_locked(idr, ptr, start, &id);
+
+ if (unlikely(error < 0))
+ return (error);
+ if (unlikely(id > max)) {
+ idr_remove_locked(idr, id);
+ return (-ENOSPC);
+ }
+ return (id);
+}
+
+int
+idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+ int retval;
+
+ mtx_lock(&idr->lock);
+ retval = idr_alloc_locked(idr, ptr, start, end);
+ mtx_unlock(&idr->lock);
+ return (retval);
+}
+
+int
+idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+ int retval;
+
+ mtx_lock(&idr->lock);
+ retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
+ if (unlikely(retval == -ENOSPC))
+ retval = idr_alloc_locked(idr, ptr, start, end);
+ if (likely(retval >= 0))
+ idr->next_cyclic_id = retval + 1;
+ mtx_unlock(&idr->lock);
+ return (retval);
+}
+
+static int
+idr_for_each_layer(struct idr_layer *il, int offset, int layer,
+ int (*f)(int id, void *p, void *data), void *data)
+{
+ int i, err;
+
+ if (il == NULL)
+ return (0);
+ if (layer == 0) {
+ for (i = 0; i < IDR_SIZE; i++) {
+ if (il->ary[i] == NULL)
+ continue;
+ err = f(i + offset, il->ary[i], data);
+ if (err)
+ return (err);
+ }
+ return (0);
+ }
+ for (i = 0; i < IDR_SIZE; i++) {
+ if (il->ary[i] == NULL)
+ continue;
+ err = idr_for_each_layer(il->ary[i],
+ (i + offset) * IDR_SIZE, layer - 1, f, data);
+ if (err)
+ return (err);
+ }
+ return (0);
+}
+
+/* NOTE: It is not allowed to modify the IDR tree while it is being iterated */
+int
+idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
+{
+ return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
+}
+
+static int
+idr_has_entry(int id, void *p, void *data)
+{
+
+ return (1);
+}
+
+bool
+idr_is_empty(struct idr *idp)
+{
+
+ return (idr_for_each(idp, idr_has_entry, NULL) == 0);
+}
+
+int
+ida_pre_get(struct ida *ida, gfp_t flags)
+{
+ if (idr_pre_get(&ida->idr, flags) == 0)
+ return (0);
+
+ if (ida->free_bitmap == NULL) {
+ ida->free_bitmap =
+ malloc(sizeof(struct ida_bitmap), M_IDR, flags);
+ }
+ return (ida->free_bitmap != NULL);
+}
+
+int
+ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+ gfp_t flags)
+{
+ int ret, id;
+ unsigned int max;
+
+ MPASS((int)start >= 0);
+
+ if ((int)end <= 0)
+ max = INT_MAX;
+ else {
+ MPASS(end > start);
+ max = end - 1;
+ }
+again:
+ if (!ida_pre_get(ida, flags))
+ return (-ENOMEM);
+
+ if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
+ if (id > max) {
+ ida_remove(ida, id);
+ ret = -ENOSPC;
+ } else {
+ ret = id;
+ }
+ }
+ if (__predict_false(ret == -EAGAIN))
+ goto again;
+
+ return (ret);
+}
+
+void
+ida_simple_remove(struct ida *ida, unsigned int id)
+{
+ idr_remove(&ida->idr, id);
+}
+
+void
+ida_remove(struct ida *ida, int id)
+{
+ idr_remove(&ida->idr, id);
+}
+
+void
+ida_init(struct ida *ida)
+{
+ idr_init(&ida->idr);
+}
+
+void
+ida_destroy(struct ida *ida)
+{
+ idr_destroy(&ida->idr);
+ free(ida->free_bitmap, M_IDR);
+ ida->free_bitmap = NULL;
+}
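
A usage sketch against the radix-tree IDR above (not part of this diff), using only functions defined in this file; the example_* names are hypothetical.

	/* Illustrative sketch: ID <-> pointer mapping with the IDR above. */
	static struct idr example_idr;

	static int
	example_idr_use(void *object)
	{
		int id;

		idr_init(&example_idr);

		/* Allocate an ID in [1, 100); a negative return is -errno. */
		id = idr_alloc(&example_idr, object, 1, 100, GFP_KERNEL);
		if (id < 0) {
			idr_destroy(&example_idr);
			return (id);
		}

		MPASS(idr_find(&example_idr, id) == object);

		idr_remove(&example_idr, id);
		idr_destroy(&example_idr);
		return (0);
	}
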
diff --git a/sys/compat/linuxkpi/common/src/linux_interrupt.c b/sys/compat/linuxkpi/common/src/linux_interrupt.c
new file mode 100644
index 000000000000..378088246f21
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_interrupt.c
@@ -0,0 +1,251 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/interrupt.h>
+
+struct irq_ent {
+ struct list_head links;
+ struct device *dev;
+ struct resource *res;
+ void *arg;
+ irqreturn_t (*handler)(int, void *);
+ irqreturn_t (*thread_handler)(int, void *);
+ void *tag;
+ unsigned int irq;
+};
+
+static inline int
+lkpi_irq_rid(struct device *dev, unsigned int irq)
+{
+	/* check for an MSI or MSI-X interrupt */
+ if (irq >= dev->irq_start && irq < dev->irq_end)
+ return (irq - dev->irq_start + 1);
+ else
+ return (0);
+}
+
+static inline struct irq_ent *
+lkpi_irq_ent(struct device *dev, unsigned int irq)
+{
+ struct irq_ent *irqe;
+
+ list_for_each_entry(irqe, &dev->irqents, links)
+ if (irqe->irq == irq)
+ return (irqe);
+
+ return (NULL);
+}
+
+static void
+lkpi_irq_handler(void *ent)
+{
+ struct irq_ent *irqe;
+
+ if (linux_set_current_flags(curthread, M_NOWAIT))
+ return;
+
+ irqe = ent;
+ if (irqe->handler(irqe->irq, irqe->arg) == IRQ_WAKE_THREAD &&
+ irqe->thread_handler != NULL) {
+ THREAD_SLEEPING_OK();
+ irqe->thread_handler(irqe->irq, irqe->arg);
+ THREAD_NO_SLEEPING();
+ }
+}
+
+static inline void
+lkpi_irq_release(struct device *dev, struct irq_ent *irqe)
+{
+ if (irqe->tag != NULL)
+ bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag);
+ if (irqe->res != NULL)
+ bus_release_resource(dev->bsddev, SYS_RES_IRQ,
+ rman_get_rid(irqe->res), irqe->res);
+ list_del(&irqe->links);
+}
+
+static void
+lkpi_devm_irq_release(struct device *dev, void *p)
+{
+ struct irq_ent *irqe;
+
+ if (dev == NULL || p == NULL)
+ return;
+
+ irqe = p;
+ lkpi_irq_release(dev, irqe);
+}
+
+int
+lkpi_request_irq(struct device *xdev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_handler,
+ unsigned long flags, const char *name, void *arg)
+{
+ struct resource *res;
+ struct irq_ent *irqe;
+ struct device *dev;
+ unsigned resflags;
+ int error;
+ int rid;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return -ENXIO;
+ if (xdev != NULL && xdev != dev)
+ return -ENXIO;
+ rid = lkpi_irq_rid(dev, irq);
+ resflags = RF_ACTIVE;
+ if ((flags & IRQF_SHARED) != 0)
+ resflags |= RF_SHAREABLE;
+ res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid, resflags);
+ if (res == NULL)
+ return (-ENXIO);
+ if (xdev != NULL)
+ irqe = lkpi_devres_alloc(lkpi_devm_irq_release, sizeof(*irqe),
+ GFP_KERNEL | __GFP_ZERO);
+ else
+ irqe = kzalloc(sizeof(*irqe), GFP_KERNEL);
+ irqe->dev = dev;
+ irqe->res = res;
+ irqe->arg = arg;
+ irqe->handler = handler;
+ irqe->thread_handler = thread_handler;
+ irqe->irq = irq;
+
+ error = bus_setup_intr(dev->bsddev, res, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, lkpi_irq_handler, irqe, &irqe->tag);
+ if (error)
+ goto errout;
+ list_add(&irqe->links, &dev->irqents);
+ if (xdev != NULL)
+ devres_add(xdev, irqe);
+
+ return 0;
+
+errout:
+ bus_release_resource(dev->bsddev, SYS_RES_IRQ, rid, irqe->res);
+ if (xdev != NULL)
+ devres_free(irqe);
+ else
+ kfree(irqe);
+ return (-error);
+}
+
+int
+lkpi_enable_irq(unsigned int irq)
+{
+ struct irq_ent *irqe;
+ struct device *dev;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return -EINVAL;
+ irqe = lkpi_irq_ent(dev, irq);
+ if (irqe == NULL || irqe->tag != NULL)
+ return -EINVAL;
+ return -bus_setup_intr(dev->bsddev, irqe->res, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, lkpi_irq_handler, irqe, &irqe->tag);
+}
+
+void
+lkpi_disable_irq(unsigned int irq)
+{
+ struct irq_ent *irqe;
+ struct device *dev;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return;
+ irqe = lkpi_irq_ent(dev, irq);
+ if (irqe == NULL)
+ return;
+ if (irqe->tag != NULL)
+ bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag);
+ irqe->tag = NULL;
+}
+
+int
+lkpi_bind_irq_to_cpu(unsigned int irq, int cpu_id)
+{
+ struct irq_ent *irqe;
+ struct device *dev;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return (-ENOENT);
+
+ irqe = lkpi_irq_ent(dev, irq);
+ if (irqe == NULL)
+ return (-ENOENT);
+
+ return (-bus_bind_intr(dev->bsddev, irqe->res, cpu_id));
+}
+
+void
+lkpi_free_irq(unsigned int irq, void *device __unused)
+{
+ struct irq_ent *irqe;
+ struct device *dev;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return;
+ irqe = lkpi_irq_ent(dev, irq);
+ if (irqe == NULL)
+ return;
+ lkpi_irq_release(dev, irqe);
+ kfree(irqe);
+}
+
+void
+lkpi_devm_free_irq(struct device *xdev, unsigned int irq, void *p __unused)
+{
+ struct device *dev;
+ struct irq_ent *irqe;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return;
+ if (xdev != dev)
+ return;
+ irqe = lkpi_irq_ent(dev, irq);
+ if (irqe == NULL)
+ return;
+ lkpi_irq_release(dev, irqe);
+ lkpi_devres_unlink(dev, irqe);
+ lkpi_devres_free(irqe);
+ return;
+}
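
A hookup sketch for the interrupt glue above (not part of this diff): it calls lkpi_request_irq()/lkpi_free_irq() directly and assumes the irq number is either the device's legacy line or falls inside its MSI/MSI-X range so that lkpi_irq_rid() resolves it; the example_* names are hypothetical.

	/* Illustrative sketch: installing and removing an interrupt handler. */
	static irqreturn_t
	example_intr(int irq, void *arg)
	{
		/* Fast handler; return IRQ_WAKE_THREAD to run a threaded handler. */
		return (IRQ_HANDLED);
	}

	static int
	example_setup_irq(struct device *dev, unsigned int irq, void *softc)
	{
		/* Returns 0 or a negative errno value. */
		return (lkpi_request_irq(dev, irq, example_intr, NULL,
		    IRQF_SHARED, "example", softc));
	}

	static void
	example_teardown_irq(unsigned int irq, void *softc)
	{
		lkpi_free_irq(irq, softc);
	}
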
diff --git a/sys/compat/linuxkpi/common/src/linux_kmod.c b/sys/compat/linuxkpi/common/src/linux_kmod.c
new file mode 100644
index 000000000000..04ae20915cb6
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_kmod.c
@@ -0,0 +1,33 @@
+/*-
+ * Copyright (c) 2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/module.h>
+
+MODULE_VERSION(linuxkpi, 1);
+MODULE_DEPEND(linuxkpi, firmware, 1, 1, 1);
+MODULE_DEPEND(linuxkpi, backlight, 1, 1, 1);
+MODULE_DEPEND(linuxkpi, pci, 1, 1, 1);
diff --git a/sys/compat/linuxkpi/common/src/linux_kobject.c b/sys/compat/linuxkpi/common/src/linux_kobject.c
new file mode 100644
index 000000000000..2b9d3dcffa4b
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_kobject.c
@@ -0,0 +1,354 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+static void kset_join(struct kobject *kobj);
+static void kset_leave(struct kobject *kobj);
+static void kset_kfree(struct kobject *kobj);
+
+struct kobject *
+kobject_create(void)
+{
+ struct kobject *kobj;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (kobj == NULL)
+ return (NULL);
+ kobject_init(kobj, &linux_kfree_type);
+
+ return (kobj);
+}
+
+
+int
+kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
+{
+ va_list tmp_va;
+ int len;
+ char *old;
+ char *name;
+ char dummy;
+
+ old = kobj->name;
+
+ if (old && fmt == NULL)
+ return (0);
+
+ /* compute length of string */
+ va_copy(tmp_va, args);
+ len = vsnprintf(&dummy, 0, fmt, tmp_va);
+ va_end(tmp_va);
+
+ /* account for zero termination */
+ len++;
+
+ /* check for error */
+ if (len < 1)
+ return (-EINVAL);
+
+ /* allocate memory for string */
+ name = kzalloc(len, GFP_KERNEL);
+ if (name == NULL)
+ return (-ENOMEM);
+ vsnprintf(name, len, fmt, args);
+ kobj->name = name;
+
+ /* free old string */
+ kfree(old);
+
+ /* filter new string */
+ for (; *name != '\0'; name++)
+ if (*name == '/')
+ *name = '!';
+ return (0);
+}
+
+int
+kobject_set_name(struct kobject *kobj, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+
+ va_start(args, fmt);
+ error = kobject_set_name_vargs(kobj, fmt, args);
+ va_end(args);
+
+ return (error);
+}
+
+static int
+kobject_add_complete(struct kobject *kobj)
+{
+ const struct kobj_type *t;
+ int error;
+
+ if (kobj->kset != NULL) {
+ kset_join(kobj);
+ kobj->parent = &kobj->kset->kobj;
+ }
+
+ error = sysfs_create_dir(kobj);
+ if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
+ struct attribute **attr;
+ t = kobj->ktype;
+
+ for (attr = t->default_attrs; *attr != NULL; attr++) {
+ error = sysfs_create_file(kobj, *attr);
+ if (error != 0)
+ break;
+ }
+ if (error != 0)
+ sysfs_remove_dir(kobj);
+ }
+
+ if (error != 0)
+ kset_leave(kobj);
+
+ return (error);
+}
+
+int
+kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+
+ kobj->parent = parent;
+
+ va_start(args, fmt);
+ error = kobject_set_name_vargs(kobj, fmt, args);
+ va_end(args);
+ if (error)
+ return (error);
+
+ return kobject_add_complete(kobj);
+}
+
+int
+kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
+ struct kobject *parent, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+
+ kobject_init(kobj, ktype);
+ kobj->ktype = ktype;
+ kobj->parent = parent;
+ kobj->name = NULL;
+
+ va_start(args, fmt);
+ error = kobject_set_name_vargs(kobj, fmt, args);
+ va_end(args);
+ if (error)
+ return (error);
+ return kobject_add_complete(kobj);
+}
+
+void
+linux_kobject_release(struct kref *kref)
+{
+ struct kobject *kobj;
+ char *name;
+
+ kobj = container_of(kref, struct kobject, kref);
+ sysfs_remove_dir(kobj);
+ kset_leave(kobj);
+ name = kobj->name;
+ if (kobj->ktype && kobj->ktype->release)
+ kobj->ktype->release(kobj);
+ kfree(name);
+}
+
+static void
+linux_kobject_kfree(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+const struct kobj_type linux_kfree_type = {
+ .release = linux_kobject_kfree
+};
+
+void
+linux_kobject_kfree_name(struct kobject *kobj)
+{
+ if (kobj) {
+ kfree(kobj->name);
+ }
+}
+
+static ssize_t
+lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct kobj_attribute *ka =
+ container_of(attr, struct kobj_attribute, attr);
+
+ if (ka->show == NULL)
+ return (-EIO);
+
+ return (ka->show(kobj, ka, buf));
+}
+
+static ssize_t
+lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kobj_attribute *ka =
+ container_of(attr, struct kobj_attribute, attr);
+
+ if (ka->store == NULL)
+ return (-EIO);
+
+ return (ka->store(kobj, ka, buf, count));
+}
+
+const struct sysfs_ops kobj_sysfs_ops = {
+ .show = lkpi_kobj_attr_show,
+ .store = lkpi_kobj_attr_store,
+};
+
+const struct kobj_type linux_kset_kfree_type = {
+ .release = kset_kfree
+};
+
+static struct kset *
+kset_create(const char *name,
+ const struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
+{
+ struct kset *kset;
+
+ kset = kzalloc(sizeof(*kset), GFP_KERNEL);
+ if (kset == NULL)
+ return (NULL);
+
+ kset->uevent_ops = uevent_ops;
+
+ kobject_set_name(&kset->kobj, "%s", name);
+ kset->kobj.parent = parent_kobj;
+ kset->kobj.kset = NULL;
+
+ return (kset);
+}
+
+void
+kset_init(struct kset *kset)
+{
+ kobject_init(&kset->kobj, &linux_kset_kfree_type);
+ INIT_LIST_HEAD(&kset->list);
+ spin_lock_init(&kset->list_lock);
+}
+
+static void
+kset_join(struct kobject *kobj)
+{
+ struct kset *kset;
+
+ kset = kobj->kset;
+ if (kset == NULL)
+ return;
+
+ kset_get(kobj->kset);
+
+ spin_lock(&kset->list_lock);
+ list_add_tail(&kobj->entry, &kset->list);
+ spin_unlock(&kset->list_lock);
+}
+
+static void
+kset_leave(struct kobject *kobj)
+{
+ struct kset *kset;
+
+ kset = kobj->kset;
+ if (kset == NULL)
+ return;
+
+ spin_lock(&kset->list_lock);
+ list_del_init(&kobj->entry);
+ spin_unlock(&kset->list_lock);
+
+ kset_put(kobj->kset);
+}
+
+struct kset *
+kset_create_and_add(const char *name, const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj)
+{
+ int ret;
+ struct kset *kset;
+
+ kset = kset_create(name, u, parent_kobj);
+ if (kset == NULL)
+ return (NULL);
+
+ ret = kset_register(kset);
+ if (ret != 0) {
+ linux_kobject_kfree_name(&kset->kobj);
+ kfree(kset);
+ return (NULL);
+ }
+
+ return (kset);
+}
+
+int
+kset_register(struct kset *kset)
+{
+ int ret;
+
+ if (kset == NULL)
+ return -EINVAL;
+
+ kset_init(kset);
+ ret = kobject_add_complete(&kset->kobj);
+
+ return ret;
+}
+
+void
+kset_unregister(struct kset *kset)
+{
+ if (kset == NULL)
+ return;
+
+ kobject_del(&kset->kobj);
+ kobject_put(&kset->kobj);
+}
+
+static void
+kset_kfree(struct kobject *kobj)
+{
+ struct kset *kset;
+
+ kset = to_kset(kobj);
+ kfree(kset);
+}
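
A consumer sketch for the kobject code above (not part of this diff): it embeds a kobject in a driver structure and uses a release callback to free it; kobject_put() and the sysfs_ops member of struct kobj_type are assumed to come from the LinuxKPI headers, and the example_* names are hypothetical.

	/* Illustrative sketch: embedding and registering a kobject. */
	struct example_obj {
		struct kobject kobj;
		int value;
	};

	static void
	example_release(struct kobject *kobj)
	{
		struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

		kfree(obj);
	}

	static const struct kobj_type example_ktype = {
		.release = example_release,
		.sysfs_ops = &kobj_sysfs_ops,
	};

	static struct example_obj *
	example_obj_create(struct kobject *parent)
	{
		struct example_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (obj == NULL)
			return (NULL);
		if (kobject_init_and_add(&obj->kobj, &example_ktype, parent,
		    "example%d", 0) != 0) {
			/* Dropping the reference runs example_release(). */
			kobject_put(&obj->kobj);
			return (NULL);
		}
		return (obj);
	}
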
diff --git a/sys/compat/linuxkpi/common/src/linux_kthread.c b/sys/compat/linuxkpi/common/src/linux_kthread.c
new file mode 100644
index 000000000000..2fba700fa283
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_kthread.c
@@ -0,0 +1,181 @@
+/*-
+ * Copyright (c) 2017 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <linux/compat.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/priority.h>
+
+enum {
+ KTHREAD_SHOULD_STOP_MASK = (1 << 0),
+ KTHREAD_SHOULD_PARK_MASK = (1 << 1),
+ KTHREAD_IS_PARKED_MASK = (1 << 2),
+};
+
+bool
+linux_kthread_should_stop_task(struct task_struct *task)
+{
+
+ return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
+}
+
+bool
+linux_kthread_should_stop(void)
+{
+
+ return (atomic_read(&current->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
+}
+
+int
+linux_kthread_stop(struct task_struct *task)
+{
+ int retval;
+
+ /*
+	 * Assume the task is still alive; otherwise the caller should not
+	 * have called kthread_stop():
+ */
+ atomic_or(KTHREAD_SHOULD_STOP_MASK, &task->kthread_flags);
+ kthread_unpark(task);
+ wake_up_process(task);
+ wait_for_completion(&task->exited);
+
+ /*
+ * Get return code and free task structure:
+ */
+ retval = task->task_ret;
+ put_task_struct(task);
+
+ return (retval);
+}
+
+int
+linux_kthread_park(struct task_struct *task)
+{
+
+ atomic_or(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
+ wake_up_process(task);
+ wait_for_completion(&task->parked);
+ return (0);
+}
+
+void
+linux_kthread_parkme(void)
+{
+ struct task_struct *task;
+
+ task = current;
+ set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
+ while (linux_kthread_should_park()) {
+ while ((atomic_fetch_or(KTHREAD_IS_PARKED_MASK,
+ &task->kthread_flags) & KTHREAD_IS_PARKED_MASK) == 0)
+ complete(&task->parked);
+ schedule();
+ set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
+ }
+ atomic_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags);
+ set_task_state(task, TASK_RUNNING);
+}
+
+bool
+linux_kthread_should_park(void)
+{
+ struct task_struct *task;
+
+ task = current;
+ return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_PARK_MASK);
+}
+
+void
+linux_kthread_unpark(struct task_struct *task)
+{
+
+ atomic_andnot(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
+ if ((atomic_fetch_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags) &
+ KTHREAD_IS_PARKED_MASK) != 0)
+ wake_up_state(task, TASK_PARKED);
+}
+
+struct task_struct *
+linux_kthread_setup_and_run(struct thread *td, linux_task_fn_t *task_fn, void *arg)
+{
+ struct task_struct *task;
+
+ linux_set_current(td);
+
+ task = td->td_lkpi_task;
+ task->task_fn = task_fn;
+ task->task_data = arg;
+
+ thread_lock(td);
+ /* make sure the scheduler priority is raised */
+ sched_prio(td, PI_SWI(SWI_NET));
+ /* put thread into run-queue */
+ sched_add(td, SRQ_BORING);
+
+ return (task);
+}
+
+void
+linux_kthread_fn(void *arg __unused)
+{
+ struct task_struct *task = current;
+
+ if (linux_kthread_should_stop_task(task) == 0)
+ task->task_ret = task->task_fn(task->task_data);
+
+ if (linux_kthread_should_stop_task(task) != 0) {
+ struct thread *td = curthread;
+
+ /* let kthread_stop() free data */
+ td->td_lkpi_task = NULL;
+
+ /* wakeup kthread_stop() */
+ complete(&task->exited);
+ }
+ kthread_exit();
+}
+
+void
+lkpi_kthread_work_fn(void *context, int pending __unused)
+{
+ struct kthread_work *work = context;
+
+ work->func(work);
+}
+
+void
+lkpi_kthread_worker_init_fn(void *context, int pending __unused)
+{
+ struct kthread_worker *worker = context;
+
+ worker->task = current;
+}
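
A thread-body sketch in the shape the stop/park machinery above expects (not part of this diff); thread creation itself is assumed to go through the LinuxKPI kthread_run()/kthread_create() wrappers, which are not defined in this file, and the example_* names are hypothetical.

	/* Illustrative sketch: a kthread main loop that honors park and stop. */
	static int
	example_kthread_fn(void *arg)
	{

		while (!linux_kthread_should_stop()) {
			if (linux_kthread_should_park())
				linux_kthread_parkme();

			/* ... perform one unit of work, possibly sleeping ... */
		}
		return (0);	/* value returned to linux_kthread_stop() */
	}

	static void
	example_kthread_shutdown(struct task_struct *task)
	{
		/* Sets SHOULD_STOP, unparks, wakes the thread and waits for exit. */
		(void)linux_kthread_stop(task);
	}
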
diff --git a/sys/compat/linuxkpi/common/src/linux_lock.c b/sys/compat/linuxkpi/common/src/linux_lock.c
new file mode 100644
index 000000000000..3cebfc6ae3bb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_lock.c
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <linux/sched.h>
+#include <linux/ww_mutex.h>
+
+struct ww_mutex_thread {
+ TAILQ_ENTRY(ww_mutex_thread) entry;
+ struct thread *thread;
+ struct ww_mutex *lock;
+};
+
+static TAILQ_HEAD(, ww_mutex_thread) ww_mutex_head;
+static struct mtx ww_mutex_global;
+
+static void
+linux_ww_init(void *arg)
+{
+ TAILQ_INIT(&ww_mutex_head);
+ mtx_init(&ww_mutex_global, "lkpi-ww-mtx", NULL, MTX_DEF);
+}
+
+SYSINIT(ww_init, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_init, NULL);
+
+static void
+linux_ww_uninit(void *arg)
+{
+ mtx_destroy(&ww_mutex_global);
+}
+
+SYSUNINIT(ww_uninit, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_uninit, NULL);
+
+static inline void
+linux_ww_lock(void)
+{
+ mtx_lock(&ww_mutex_global);
+}
+
+static inline void
+linux_ww_unlock(void)
+{
+ mtx_unlock(&ww_mutex_global);
+}
+
+/* lock a mutex with deadlock avoidance */
+int
+linux_ww_mutex_lock_sub(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx, int catch_signal)
+{
+ struct task_struct *task;
+ struct ww_mutex_thread entry;
+ struct ww_mutex_thread *other;
+ int retval = 0;
+
+ task = current;
+
+ linux_ww_lock();
+ if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
+ entry.thread = curthread;
+ entry.lock = lock;
+ TAILQ_INSERT_TAIL(&ww_mutex_head, &entry, entry);
+
+ do {
+ struct thread *owner = (struct thread *)
+ SX_OWNER(lock->base.sx.sx_lock);
+
+ /* scan for deadlock */
+ TAILQ_FOREACH(other, &ww_mutex_head, entry) {
+ /* skip own thread */
+ if (other == &entry)
+ continue;
+ /*
+ * If another thread is owning our
+ * lock and is at the same time trying
+ * to acquire a lock this thread owns,
+ * that means deadlock.
+ */
+ if (other->thread == owner &&
+ (struct thread *)SX_OWNER(
+ other->lock->base.sx.sx_lock) == curthread) {
+ retval = -EDEADLK;
+ goto done;
+ }
+ }
+ if (catch_signal) {
+ retval = -cv_wait_sig(&lock->condvar, &ww_mutex_global);
+ if (retval != 0) {
+ linux_schedule_save_interrupt_value(task, retval);
+ retval = -EINTR;
+ goto done;
+ }
+ } else {
+ cv_wait(&lock->condvar, &ww_mutex_global);
+ }
+ } while (sx_try_xlock(&lock->base.sx) == 0);
+done:
+ TAILQ_REMOVE(&ww_mutex_head, &entry, entry);
+
+ /* if the lock is free, wakeup next lock waiter, if any */
+ if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == NULL)
+ cv_signal(&lock->condvar);
+ }
+
+ if (retval == 0)
+ lock->ctx = ctx;
+ linux_ww_unlock();
+ return (retval);
+}
+
+void
+linux_ww_mutex_unlock_sub(struct ww_mutex *lock)
+{
+ /* protect ww_mutex ownership change */
+ linux_ww_lock();
+ lock->ctx = NULL;
+ sx_xunlock(&lock->base.sx);
+ /* wakeup a lock waiter, if any */
+ cv_signal(&lock->condvar);
+ linux_ww_unlock();
+}
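+
+/*
+ * A rough usage sketch of the Linux-facing API backed by the helpers above;
+ * the lock, context, and class names are assumptions for illustration:
+ *
+ *	struct ww_acquire_ctx ctx;
+ *
+ *	ww_acquire_init(&ctx, &my_ww_class);
+ *	if (ww_mutex_lock(&obj->lock, &ctx) == -EDEADLK) {
+ *		// ... drop other held ww_mutexes and retry ...
+ *	}
+ *	// ... critical section ...
+ *	ww_mutex_unlock(&obj->lock);
+ *	ww_acquire_fini(&ctx);
+ */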
+
+int
+linux_mutex_lock_interruptible(mutex_t *m)
+{
+ int error;
+
+ error = -sx_xlock_sig(&m->sx);
+ if (error != 0) {
+ linux_schedule_save_interrupt_value(current, error);
+ error = -EINTR;
+ }
+ return (error);
+}
+
+int
+linux_down_read_killable(struct rw_semaphore *rw)
+{
+ int error;
+
+ error = -sx_slock_sig(&rw->sx);
+ if (error != 0) {
+ linux_schedule_save_interrupt_value(current, error);
+ error = -EINTR;
+ }
+ return (error);
+}
+
+int
+linux_down_write_killable(struct rw_semaphore *rw)
+{
+ int error;
+
+ error = -sx_xlock_sig(&rw->sx);
+ if (error != 0) {
+ linux_schedule_save_interrupt_value(current, error);
+ error = -EINTR;
+ }
+ return (error);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_mhi.c b/sys/compat/linuxkpi/common/src/linux_mhi.c
new file mode 100644
index 000000000000..5d3c391f91ab
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_mhi.c
@@ -0,0 +1,89 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <linux/kernel.h> /* pr_debug */
+#include <linux/mhi.h>
+
+static MALLOC_DEFINE(M_LKPIMHI, "lkpimhi", "LinuxKPI MHI compat");
+
+struct mhi_controller *
+linuxkpi_mhi_alloc_controller(void)
+{
+ struct mhi_controller *mhi_ctrl;
+
+ mhi_ctrl = malloc(sizeof(*mhi_ctrl), M_LKPIMHI, M_NOWAIT | M_ZERO);
+
+ return (mhi_ctrl);
+}
+
+void
+linuxkpi_mhi_free_controller(struct mhi_controller *mhi_ctrl)
+{
+
+	/* What else do we need to check to make sure it is gone? */
+ free(mhi_ctrl, M_LKPIMHI);
+}
+
+int
+linuxkpi_mhi_register_controller(struct mhi_controller *mhi_ctrl,
+ const struct mhi_controller_config *cfg)
+{
+
+ if (mhi_ctrl == NULL || cfg == NULL)
+ return (-EINVAL);
+
+#define CHECK_FIELD(_f) \
+ if (!mhi_ctrl->_f) \
+ return (-ENXIO);
+ CHECK_FIELD(cntrl_dev);
+ CHECK_FIELD(regs);
+ CHECK_FIELD(irq);
+ CHECK_FIELD(reg_len);
+ CHECK_FIELD(nr_irqs);
+
+ CHECK_FIELD(runtime_get);
+ CHECK_FIELD(runtime_put);
+ CHECK_FIELD(status_cb);
+ CHECK_FIELD(read_reg);
+ CHECK_FIELD(write_reg);
+#undef CHECK_FIELD
+
+ printf("%s: XXX-BZ TODO\n", __func__);
+ return (0);
+}
+
+void
+linuxkpi_mhi_unregister_controller(struct mhi_controller *mhi_ctrl)
+{
+
+ pr_debug("%s: TODO\n", __func__);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_netdev.c b/sys/compat/linuxkpi/common/src/linux_netdev.c
new file mode 100644
index 000000000000..ce9153614104
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_netdev.c
@@ -0,0 +1,436 @@
+/*-
+ * Copyright (c) 2021 The FreeBSD Foundation
+ * Copyright (c) 2022 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+MALLOC_DEFINE(M_NETDEV, "lkpindev", "Linux KPI netdevice compat");
+
+#define NAPI_LOCK_INIT(_ndev) \
+ mtx_init(&(_ndev)->napi_mtx, "napi_mtx", NULL, MTX_DEF)
+#define NAPI_LOCK_DESTROY(_ndev) mtx_destroy(&(_ndev)->napi_mtx)
+#define NAPI_LOCK_ASSERT(_ndev) mtx_assert(&(_ndev)->napi_mtx, MA_OWNED)
+#define NAPI_LOCK(_ndev) mtx_lock(&(_ndev)->napi_mtx)
+#define NAPI_UNLOCK(_ndev) mtx_unlock(&(_ndev)->napi_mtx)
+
+/* -------------------------------------------------------------------------- */
+
+#define LKPI_NAPI_FLAGS \
+ "\20\1DISABLE_PENDING\2IS_SCHEDULED\3LOST_RACE_TRY_AGAIN"
+
+/* #define NAPI_DEBUG */
+#ifdef NAPI_DEBUG
+static int debug_napi;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug_napi, CTLFLAG_RWTUN,
+ &debug_napi, 0, "NAPI debug level");
+
+#define DNAPI_TODO 0x01
+#define DNAPI_IMPROVE 0x02
+#define DNAPI_TRACE 0x10
+#define DNAPI_TRACE_TASK 0x20
+#define DNAPI_DIRECT_DISPATCH 0x1000
+
+#define NAPI_TRACE(_n) if (debug_napi & DNAPI_TRACE) \
+ printf("NAPI_TRACE %s:%d %lu %p (%#jx %b)\n", __func__, __LINE__, \
+ jiffies, _n, (uintmax_t)(_n)->state, \
+ (int)(_n)->state, LKPI_NAPI_FLAGS)
+#define NAPI_TRACE2D(_n, _d) if (debug_napi & DNAPI_TRACE) \
+ printf("NAPI_TRACE %s:%d %lu %p (%#jx %b) %d\n", __func__, __LINE__, \
+ jiffies, _n, (uintmax_t)(_n)->state, \
+ (int)(_n)->state, LKPI_NAPI_FLAGS, _d)
+#define NAPI_TRACE_TASK(_n, _p, _c) if (debug_napi & DNAPI_TRACE_TASK) \
+ printf("NAPI_TRACE %s:%d %lu %p (%#jx %b) pending %d count %d " \
+ "rx_count %d\n", __func__, __LINE__, \
+ jiffies, _n, (uintmax_t)(_n)->state, \
+ (int)(_n)->state, LKPI_NAPI_FLAGS, _p, _c, (_n)->rx_count)
+#define NAPI_TODO() if (debug_napi & DNAPI_TODO) \
+ printf("NAPI_TODO %s:%d %lu\n", __func__, __LINE__, jiffies)
+#define NAPI_IMPROVE() if (debug_napi & DNAPI_IMPROVE) \
+ printf("NAPI_IMPROVE %s:%d %lu\n", __func__, __LINE__, jiffies)
+
+#define NAPI_DIRECT_DISPATCH() ((debug_napi & DNAPI_DIRECT_DISPATCH) != 0)
+#else
+#define NAPI_TRACE(_n) do { } while(0)
+#define NAPI_TRACE2D(_n, _d) do { } while(0)
+#define NAPI_TRACE_TASK(_n, _p, _c) do { } while(0)
+#define NAPI_TODO() do { } while(0)
+#define NAPI_IMPROVE() do { } while(0)
+
+#define NAPI_DIRECT_DISPATCH() (0)
+#endif
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Check if a poll is running or can run, and if the latter, mark
+ * us as running. That way we ensure that only one poll can ever
+ * run at the same time. Returns true if no poll was scheduled yet.
+ */
+bool
+linuxkpi_napi_schedule_prep(struct napi_struct *napi)
+{
+ unsigned long old, new;
+
+ NAPI_TRACE(napi);
+
+	/* We can only update/return if all flags agree. */
+ do {
+ old = READ_ONCE(napi->state);
+
+ /* If we are stopping, cannot run again. */
+ if ((old & BIT(LKPI_NAPI_FLAG_DISABLE_PENDING)) != 0) {
+ NAPI_TRACE(napi);
+ return (false);
+ }
+
+ new = old;
+ /* We were already scheduled. Need to try again? */
+ if ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) != 0)
+ new |= BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN);
+ new |= BIT(LKPI_NAPI_FLAG_IS_SCHEDULED);
+
+ } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);
+
+ NAPI_TRACE(napi);
+ return ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) == 0);
+}
+
+static void
+lkpi___napi_schedule_dd(struct napi_struct *napi)
+{
+ unsigned long old, new;
+ int rc;
+
+ rc = 0;
+again:
+ NAPI_TRACE2D(napi, rc);
+ if (napi->poll != NULL)
+ rc = napi->poll(napi, napi->budget);
+ napi->rx_count += rc;
+
+ /* Check if interrupts are still disabled, more work to do. */
+ /* Bandaid for now. */
+ if (rc >= napi->budget)
+ goto again;
+
+ /* Bandaid for now. */
+ if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state))
+ goto again;
+
+ do {
+ new = old = READ_ONCE(napi->state);
+ clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
+ clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
+ } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);
+
+ NAPI_TRACE2D(napi, rc);
+}
+
+void
+linuxkpi___napi_schedule(struct napi_struct *napi)
+{
+ int rc;
+
+ NAPI_TRACE(napi);
+ if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state)) {
+ clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state);
+ clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
+ NAPI_TRACE(napi);
+ return;
+ }
+
+ if (NAPI_DIRECT_DISPATCH()) {
+ lkpi___napi_schedule_dd(napi);
+ } else {
+ rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task);
+ NAPI_TRACE2D(napi, rc);
+ if (rc != 0) {
+ /* Should we assert EPIPE? */
+ return;
+ }
+ }
+}
+
+bool
+linuxkpi_napi_schedule(struct napi_struct *napi)
+{
+
+ NAPI_TRACE(napi);
+
+ /*
+ * iwlwifi calls this sequence instead of napi_schedule()
+ * to be able to test the prep result.
+ */
+ if (napi_schedule_prep(napi)) {
+ __napi_schedule(napi);
+ return (true);
+ }
+
+ return (false);
+}
+
+void
+linuxkpi_napi_reschedule(struct napi_struct *napi)
+{
+
+ NAPI_TRACE(napi);
+
+	/* Not sure what is different from napi_schedule() yet. */
+ if (napi_schedule_prep(napi))
+ __napi_schedule(napi);
+}
+
+bool
+linuxkpi_napi_complete_done(struct napi_struct *napi, int ret)
+{
+ unsigned long old, new;
+
+ NAPI_TRACE(napi);
+ if (NAPI_DIRECT_DISPATCH())
+ return (true);
+
+ do {
+ new = old = READ_ONCE(napi->state);
+
+ /*
+		 * If we lost a race before, we need to re-schedule.
+		 * Leave IS_SCHEDULED set, essentially doing the "_prep" step.
+ */
+ if (!test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old))
+ clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new);
+ clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new);
+ } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0);
+
+ NAPI_TRACE(napi);
+
+ /* Someone tried to schedule while poll was running. Re-sched. */
+ if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) {
+ __napi_schedule(napi);
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+linuxkpi_napi_complete(struct napi_struct *napi)
+{
+
+ NAPI_TRACE(napi);
+ return (napi_complete_done(napi, 0));
+}
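+
+/*
+ * For context, the usual driver-side pattern these functions back, given
+ * only as a hedged sketch (the softc layout and poll callback are
+ * hypothetical):
+ *
+ *	interrupt handler:
+ *		if (napi_schedule_prep(&sc->napi))
+ *			__napi_schedule(&sc->napi);
+ *
+ *	poll callback:
+ *		work = ...process up to budget packets...;
+ *		if (work < budget)
+ *			napi_complete_done(&sc->napi, work);
+ *		return (work);
+ */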
+
+void
+linuxkpi_napi_disable(struct napi_struct *napi)
+{
+ NAPI_TRACE(napi);
+ set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
+ while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
+ pause_sbt("napidslp", SBT_1MS, 0, C_HARDCLOCK);
+ clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state);
+}
+
+void
+linuxkpi_napi_enable(struct napi_struct *napi)
+{
+
+ NAPI_TRACE(napi);
+ KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state),
+ ("%s: enabling napi %p already scheduled\n", __func__, napi));
+ mb();
+ /* Let us be scheduled. */
+ clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state);
+}
+
+void
+linuxkpi_napi_synchronize(struct napi_struct *napi)
+{
+ NAPI_TRACE(napi);
+#if defined(SMP)
+ /* Check & sleep while a napi is scheduled. */
+ while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state))
+ pause_sbt("napisslp", SBT_1MS, 0, C_HARDCLOCK);
+#else
+ mb();
+#endif
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+lkpi_napi_task(void *ctx, int pending)
+{
+ struct napi_struct *napi;
+ int count;
+
+ KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n",
+ __func__, ctx, pending));
+ napi = ctx;
+ KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n",
+ __func__, napi));
+
+ NAPI_TRACE_TASK(napi, pending, napi->budget);
+ count = napi->poll(napi, napi->budget);
+ napi->rx_count += count;
+ NAPI_TRACE_TASK(napi, pending, count);
+
+ /*
+	 * We must not check against count < pending here. There are
+	 * situations in which a driver may "poll" while we have no work to
+	 * do, and that would make us re-schedule ourselves forever.
+ */
+ if (count >= napi->budget) {
+ /*
+ * Have to re-schedule ourselves. napi_complete() was not run
+ * in this case which means we are still SCHEDULED.
+ * In order to queue another task we have to directly call
+ * __napi_schedule() without _prep() in the way.
+ */
+ __napi_schedule(napi);
+ }
+}
+
+/* -------------------------------------------------------------------------- */
+
+void
+linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi,
+ int(*napi_poll)(struct napi_struct *, int))
+{
+
+ napi->dev = ndev;
+ napi->poll = napi_poll;
+ napi->budget = NAPI_POLL_WEIGHT;
+
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+
+ TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi);
+
+ NAPI_LOCK(ndev);
+ TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry);
+ NAPI_UNLOCK(ndev);
+
+ /* Anything else to do on the ndev? */
+ clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
+}
+
+static void
+lkpi_netif_napi_del_locked(struct napi_struct *napi)
+{
+ struct net_device *ndev;
+
+ ndev = napi->dev;
+ NAPI_LOCK_ASSERT(ndev);
+
+ set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state);
+ TAILQ_REMOVE(&ndev->napi_head, napi, entry);
+ while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0)
+ taskqueue_drain(ndev->napi_tq, &napi->napi_task);
+}
+
+void
+linuxkpi_netif_napi_del(struct napi_struct *napi)
+{
+ struct net_device *ndev;
+
+ ndev = napi->dev;
+ NAPI_LOCK(ndev);
+ lkpi_netif_napi_del_locked(napi);
+ NAPI_UNLOCK(ndev);
+}
+
+/* -------------------------------------------------------------------------- */
+
+void
+linuxkpi_init_dummy_netdev(struct net_device *ndev)
+{
+
+ memset(ndev, 0, sizeof(*ndev));
+
+ ndev->reg_state = NETREG_DUMMY;
+ NAPI_LOCK_INIT(ndev);
+ TAILQ_INIT(&ndev->napi_head);
+ /* Anything else? */
+
+ ndev->napi_tq = taskqueue_create("tq_ndev_napi", M_WAITOK,
+ taskqueue_thread_enqueue, &ndev->napi_tq);
+ /* One thread for now. */
+ (void) taskqueue_start_threads(&ndev->napi_tq, 1, PWAIT,
+ "ndev napi taskq");
+}
+
+struct net_device *
+linuxkpi_alloc_netdev(size_t len, const char *name, uint32_t flags,
+ void(*setup_func)(struct net_device *))
+{
+ struct net_device *ndev;
+
+ ndev = malloc(sizeof(*ndev) + len, M_NETDEV, M_NOWAIT);
+ if (ndev == NULL)
+ return (ndev);
+
+ /* Always first as it zeros! */
+ linuxkpi_init_dummy_netdev(ndev);
+
+	strlcpy(ndev->name, name, sizeof(ndev->name));
+
+ /* This needs extending as we support more. */
+
+ if (setup_func != NULL)
+ setup_func(ndev);
+
+ return (ndev);
+}
+
+void
+linuxkpi_free_netdev(struct net_device *ndev)
+{
+ struct napi_struct *napi, *temp;
+
+ NAPI_LOCK(ndev);
+ TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) {
+ lkpi_netif_napi_del_locked(napi);
+ }
+ NAPI_UNLOCK(ndev);
+
+ taskqueue_free(ndev->napi_tq);
+ ndev->napi_tq = NULL;
+ NAPI_LOCK_DESTROY(ndev);
+
+ /* This needs extending as we support more. */
+
+ free(ndev, M_NETDEV);
+}
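+
+/*
+ * Lifecycle sketch tying the functions above together; the names and sizes
+ * are assumptions, and drivers normally reach these functions through their
+ * Linux-named wrappers:
+ *
+ *	ndev = linuxkpi_alloc_netdev(priv_len, "lkpi", 0, my_setup);
+ *	linuxkpi_netif_napi_add(ndev, &my_napi, my_poll);
+ *	napi_enable(&my_napi);
+ *	...
+ *	napi_disable(&my_napi);
+ *	linuxkpi_netif_napi_del(&my_napi);
+ *	linuxkpi_free_netdev(ndev);
+ */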
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
new file mode 100644
index 000000000000..628af17df853
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -0,0 +1,575 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/memrange.h>
+
+#include <machine/bus.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_radix.h>
+#include <vm/vm_reserv.h>
+#include <vm/vm_extern.h>
+
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/preempt.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/io.h>
+#include <linux/io-mapping.h>
+
+#ifdef __i386__
+DEFINE_IDR(mtrr_idr);
+static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
+extern int pat_works;
+#endif
+
+void
+si_meminfo(struct sysinfo *si)
+{
+ si->totalram = physmem;
+ si->freeram = vm_free_count();
+ si->totalhigh = 0;
+ si->freehigh = 0;
+ si->mem_unit = PAGE_SIZE;
+}
+
+void *
+linux_page_address(const struct page *page)
+{
+
+ if (page->object != kernel_object) {
+ return (PMAP_HAS_DMAP ?
+ ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
+ NULL);
+ }
+ return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
+ IDX_TO_OFF(page->pindex)));
+}
+
+struct page *
+linux_alloc_pages(gfp_t flags, unsigned int order)
+{
+ struct page *page;
+
+ if (PMAP_HAS_DMAP) {
+ unsigned long npages = 1UL << order;
+ int req = VM_ALLOC_WIRED;
+
+ if ((flags & M_ZERO) != 0)
+ req |= VM_ALLOC_ZERO;
+
+ if (order == 0 && (flags & GFP_DMA32) == 0) {
+ page = vm_page_alloc_noobj(req);
+ if (page == NULL)
+ return (NULL);
+ } else {
+ vm_paddr_t pmax = (flags & GFP_DMA32) ?
+ BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
+
+ if ((flags & __GFP_NORETRY) != 0)
+ req |= VM_ALLOC_NORECLAIM;
+
+ retry:
+ page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
+ PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+ if (page == NULL) {
+ if ((flags & (M_WAITOK | __GFP_NORETRY)) ==
+ M_WAITOK) {
+ int err = vm_page_reclaim_contig(req,
+ npages, 0, pmax, PAGE_SIZE, 0);
+ if (err == ENOMEM)
+ vm_wait(NULL);
+ else if (err != 0)
+ return (NULL);
+ flags &= ~M_WAITOK;
+ goto retry;
+ }
+ return (NULL);
+ }
+ }
+ } else {
+ vm_offset_t vaddr;
+
+ vaddr = linux_alloc_kmem(flags, order);
+ if (vaddr == 0)
+ return (NULL);
+
+ page = virt_to_page((void *)vaddr);
+
+ KASSERT(vaddr == (vm_offset_t)page_address(page),
+ ("Page address mismatch"));
+ }
+
+ return (page);
+}
+
+static void
+_linux_free_kmem(vm_offset_t addr, unsigned int order)
+{
+ size_t size = ((size_t)PAGE_SIZE) << order;
+
+ kmem_free((void *)addr, size);
+}
+
+void
+linux_free_pages(struct page *page, unsigned int order)
+{
+ if (PMAP_HAS_DMAP) {
+ unsigned long npages = 1UL << order;
+ unsigned long x;
+
+ for (x = 0; x != npages; x++) {
+ vm_page_t pgo = page + x;
+
+ /*
+ * The "free page" function is used in several
+ * contexts.
+ *
+ * Some pages are allocated by `linux_alloc_pages()`
+ * above, but not all of them are. For instance in the
+ * DRM drivers, some pages come from
+ * `shmem_read_mapping_page_gfp()`.
+ *
+ * That's why we need to check if the page is managed
+ * or not here.
+ */
+ if ((pgo->oflags & VPO_UNMANAGED) == 0) {
+ vm_page_unwire(pgo, PQ_ACTIVE);
+ } else {
+ if (vm_page_unwire_noq(pgo))
+ vm_page_free(pgo);
+ }
+ }
+ } else {
+ vm_offset_t vaddr;
+
+ vaddr = (vm_offset_t)page_address(page);
+
+ _linux_free_kmem(vaddr, order);
+ }
+}
+
+void
+linux_release_pages(release_pages_arg arg, int nr)
+{
+ int i;
+
+ CTASSERT(offsetof(struct folio, page) == 0);
+
+ for (i = 0; i < nr; i++)
+ __free_page(arg.pages[i]);
+}
+
+vm_offset_t
+linux_alloc_kmem(gfp_t flags, unsigned int order)
+{
+ size_t size = ((size_t)PAGE_SIZE) << order;
+ void *addr;
+
+ addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
+ ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
+ PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+
+ return ((vm_offset_t)addr);
+}
+
+void
+linux_free_kmem(vm_offset_t addr, unsigned int order)
+{
+ KASSERT((addr & ~PAGE_MASK) == 0,
+ ("%s: addr %p is not page aligned", __func__, (void *)addr));
+
+ if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
+ _linux_free_kmem(addr, order);
+ } else {
+ vm_page_t page;
+
+ page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
+ linux_free_pages(page, order);
+ }
+}
+
+static int
+linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
+ int write, struct page **pages)
+{
+ vm_prot_t prot;
+ size_t len;
+ int count;
+
+ prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
+ len = ptoa((vm_offset_t)nr_pages);
+ count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
+ return (count == -1 ? -EFAULT : nr_pages);
+}
+
+int
+__get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ vm_map_t map;
+ vm_page_t *mp;
+ vm_offset_t va;
+ vm_offset_t end;
+ vm_prot_t prot;
+ int count;
+
+ if (nr_pages == 0 || in_interrupt())
+ return (0);
+
+ MPASS(pages != NULL);
+ map = &curthread->td_proc->p_vmspace->vm_map;
+ end = start + ptoa((vm_offset_t)nr_pages);
+ if (!vm_map_range_valid(map, start, end))
+ return (-EINVAL);
+ prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
+ for (count = 0, mp = pages, va = start; va < end;
+ mp++, va += PAGE_SIZE, count++) {
+ *mp = pmap_extract_and_hold(map->pmap, va, prot);
+ if (*mp == NULL)
+ break;
+
+ if ((prot & VM_PROT_WRITE) != 0 &&
+ (*mp)->dirty != VM_PAGE_BITS_ALL) {
+ /*
+ * Explicitly dirty the physical page. Otherwise, the
+ * caller's changes may go unnoticed because they are
+ * performed through an unmanaged mapping or by a DMA
+ * operation.
+ *
+ * The object lock is not held here.
+ * See vm_page_clear_dirty_mask().
+ */
+ vm_page_dirty(*mp);
+ }
+ }
+ return (count);
+}
+
+long
+get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ vm_map_t map;
+
+ map = &task->task_thread->td_proc->p_vmspace->vm_map;
+ return (linux_get_user_pages_internal(map, start, nr_pages,
+ !!(gup_flags & FOLL_WRITE), pages));
+}
+
+long
+lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages)
+{
+ vm_map_t map;
+
+ map = &curthread->td_proc->p_vmspace->vm_map;
+ return (linux_get_user_pages_internal(map, start, nr_pages,
+ !!(gup_flags & FOLL_WRITE), pages));
+}
+
+int
+is_vmalloc_addr(const void *addr)
+{
+ return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
+}
+
+vm_fault_t
+lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot)
+{
+ struct pctrie_iter pages;
+ vm_object_t vm_obj = vma->vm_obj;
+ vm_object_t tmp_obj;
+ vm_page_t page;
+ vm_pindex_t pindex;
+
+ VM_OBJECT_ASSERT_WLOCKED(vm_obj);
+ vm_page_iter_init(&pages, vm_obj);
+ pindex = OFF_TO_IDX(addr - vma->vm_start);
+ if (vma->vm_pfn_count == 0)
+ vma->vm_pfn_first = pindex;
+ MPASS(pindex <= OFF_TO_IDX(vma->vm_end));
+
+retry:
+ page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
+ if (page == NULL) {
+ page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
+ if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
+ pctrie_iter_reset(&pages);
+ goto retry;
+ }
+ if (page->object != NULL) {
+ tmp_obj = page->object;
+ vm_page_xunbusy(page);
+ VM_OBJECT_WUNLOCK(vm_obj);
+ VM_OBJECT_WLOCK(tmp_obj);
+ if (page->object == tmp_obj &&
+ vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
+ KASSERT(page->object == tmp_obj,
+ ("page has changed identity"));
+ KASSERT((page->oflags & VPO_UNMANAGED) == 0,
+ ("page does not belong to shmem"));
+ vm_pager_page_unswapped(page);
+ if (pmap_page_is_mapped(page)) {
+ vm_page_xunbusy(page);
+ VM_OBJECT_WUNLOCK(tmp_obj);
+ printf("%s: page rename failed: page "
+ "is mapped\n", __func__);
+ VM_OBJECT_WLOCK(vm_obj);
+ return (VM_FAULT_NOPAGE);
+ }
+ vm_page_remove(page);
+ }
+ VM_OBJECT_WUNLOCK(tmp_obj);
+ pctrie_iter_reset(&pages);
+ VM_OBJECT_WLOCK(vm_obj);
+ goto retry;
+ }
+ if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
+ vm_page_xunbusy(page);
+ return (VM_FAULT_OOM);
+ }
+ vm_page_valid(page);
+ }
+ pmap_page_set_memattr(page, pgprot2cachemode(prot));
+ vma->vm_pfn_count++;
+
+ return (VM_FAULT_NOPAGE);
+}
+
+int
+lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
+ unsigned long start_pfn, unsigned long size, pgprot_t prot)
+{
+ vm_object_t vm_obj;
+ unsigned long addr, pfn;
+ int err = 0;
+
+ vm_obj = vma->vm_obj;
+
+ VM_OBJECT_WLOCK(vm_obj);
+ for (addr = start_addr, pfn = start_pfn;
+ addr < start_addr + size;
+ addr += PAGE_SIZE) {
+ vm_fault_t ret;
+retry:
+ ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);
+
+ if ((ret & VM_FAULT_OOM) != 0) {
+ VM_OBJECT_WUNLOCK(vm_obj);
+ vm_wait(NULL);
+ VM_OBJECT_WLOCK(vm_obj);
+ goto retry;
+ }
+
+ if ((ret & VM_FAULT_ERROR) != 0) {
+ err = -EFAULT;
+ break;
+ }
+
+ pfn++;
+ }
+ VM_OBJECT_WUNLOCK(vm_obj);
+
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, start_addr,
+ (pfn - start_pfn) << PAGE_SHIFT);
+ return (err);
+ }
+
+ return (0);
+}
+
+int
+lkpi_io_mapping_map_user(struct io_mapping *iomap,
+ struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size)
+{
+ pgprot_t prot;
+ int ret;
+
+ prot = cachemode2protval(iomap->attr);
+ ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);
+
+ return (ret);
+}
+
+/*
+ * Although the FreeBSD version of unmap_mapping_range has semantics and
+ * parameter types compatible with the Linux version, the values passed in
+ * are different:
+ * @obj should match the vm_private_data field of the vm_area_struct
+ *   returned by the mmap file operation handler; see linux_file_mmap_single().
+ * @holelen should match the size of the area to be unmapped.
+ */
+void
+lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
+ loff_t const holelen __unused, int even_cows __unused)
+{
+ vm_object_t devobj;
+
+ devobj = cdev_pager_lookup(obj);
+ if (devobj != NULL) {
+ cdev_mgtdev_pager_free_pages(devobj);
+ vm_object_deallocate(devobj);
+ }
+}
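+
+/*
+ * Hypothetical call matching the parameter description above ("obj" and
+ * "size" are placeholders):
+ *
+ *	unmap_mapping_range(obj, 0, size, 1);
+ *
+ * where "obj" is the value the mmap handler stored as vm_private_data and
+ * "size" is the length of the mapped area.
+ */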
+
+int
+lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+#ifdef __i386__
+ struct mem_range_desc *mrdesc;
+ int error, id, act;
+
+ /* If PAT is available, do nothing */
+ if (pat_works)
+ return (0);
+
+ mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
+ mrdesc->mr_base = base;
+ mrdesc->mr_len = size;
+ mrdesc->mr_flags = MDF_WRITECOMBINE;
+ strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
+ act = MEMRANGE_SET_UPDATE;
+ error = mem_range_attr_set(mrdesc, &act);
+ if (error == 0) {
+ error = idr_get_new(&mtrr_idr, mrdesc, &id);
+ MPASS(idr_find(&mtrr_idr, id) == mrdesc);
+ if (error != 0) {
+ act = MEMRANGE_SET_REMOVE;
+ mem_range_attr_set(mrdesc, &act);
+ }
+ }
+ if (error != 0) {
+ free(mrdesc, M_LKMTRR);
+ pr_warn(
+ "Failed to add WC MTRR for [%p-%p]: %d; "
+ "performance may suffer\n",
+ (void *)base, (void *)(base + size - 1), error);
+ } else
+ pr_warn("Successfully added WC MTRR for [%p-%p]\n",
+ (void *)base, (void *)(base + size - 1));
+
+ return (error != 0 ? -error : id + __MTRR_ID_BASE);
+#else
+ return (0);
+#endif
+}
+
+void
+lkpi_arch_phys_wc_del(int reg)
+{
+#ifdef __i386__
+ struct mem_range_desc *mrdesc;
+ int act;
+
+ /* Check if arch_phys_wc_add() failed. */
+ if (reg < __MTRR_ID_BASE)
+ return;
+
+ mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
+ MPASS(mrdesc != NULL);
+ idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
+ act = MEMRANGE_SET_REMOVE;
+ mem_range_attr_set(mrdesc, &act);
+ free(mrdesc, M_LKMTRR);
+#endif
+}
+
+/*
+ * This is a highly simplified version of the Linux page_frag_cache.
+ * We only support fragment sizes up to a single page and we will
+ * always return a full page. This may be wasteful for small objects,
+ * but the only known consumer (mt76) asks for either a half-page
+ * or a full page. If this were to become a problem we can implement
+ * a more elaborate version.
+ */
+void *
+linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
+ size_t fragsz, gfp_t gfp)
+{
+ vm_page_t pages;
+
+ if (fragsz == 0)
+ return (NULL);
+
+ KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
+ "supported", __func__, fragsz));
+
+ pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
+ if (pages == NULL)
+ return (NULL);
+ pfc->va = linux_page_address(pages);
+
+ /* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */
+ pfc->pagecnt_bias = 0;
+
+ return (pfc->va);
+}
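+
+/*
+ * Minimal usage sketch matching the simplified semantics described above;
+ * the fragment size and GFP flag are illustrative assumptions:
+ *
+ *	struct page_frag_cache pfc = {};
+ *	void *buf;
+ *
+ *	buf = page_frag_alloc(&pfc, 1024, GFP_ATOMIC);
+ *	if (buf != NULL) {
+ *		// ... use up to PAGE_SIZE bytes at buf ...
+ *		page_frag_free(buf);
+ *	}
+ */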
+
+void
+linuxkpi_page_frag_free(void *addr)
+{
+ vm_page_t page;
+
+ page = virt_to_page(addr);
+ linux_free_pages(page, 0);
+}
+
+void
+linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
+{
+
+ linux_free_pages(page, 0);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
new file mode 100644
index 000000000000..55202da00440
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -0,0 +1,2048 @@
+/*-
+ * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2020-2022 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filio.h>
+#include <sys/pciio.h>
+#include <sys/pctrie.h>
+#include <sys/rman.h>
+#include <sys/rwlock.h>
+#include <sys/stdarg.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+#include <dev/pci/pci_iov.h>
+#include <dev/backlight/backlight.h>
+
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+#include <linux/compat.h>
+
+#include <linux/backlight.h>
+
+#include "backlight_if.h"
+#include "pcib_if.h"
+
+/* Undef the linux function macro defined in linux/pci.h */
+#undef pci_get_class
+
+extern int linuxkpi_debug;
+
+SYSCTL_DECL(_compat_linuxkpi);
+
+static counter_u64_t lkpi_pci_nseg1_fail;
+SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
+ &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");
+
+static device_probe_t linux_pci_probe;
+static device_attach_t linux_pci_attach;
+static device_detach_t linux_pci_detach;
+static device_suspend_t linux_pci_suspend;
+static device_resume_t linux_pci_resume;
+static device_shutdown_t linux_pci_shutdown;
+static pci_iov_init_t linux_pci_iov_init;
+static pci_iov_uninit_t linux_pci_iov_uninit;
+static pci_iov_add_vf_t linux_pci_iov_add_vf;
+static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
+static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
+static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
+static void lkpi_pcim_iomap_table_release(struct device *, void *);
+
+static device_method_t pci_methods[] = {
+ DEVMETHOD(device_probe, linux_pci_probe),
+ DEVMETHOD(device_attach, linux_pci_attach),
+ DEVMETHOD(device_detach, linux_pci_detach),
+ DEVMETHOD(device_suspend, linux_pci_suspend),
+ DEVMETHOD(device_resume, linux_pci_resume),
+ DEVMETHOD(device_shutdown, linux_pci_shutdown),
+ DEVMETHOD(pci_iov_init, linux_pci_iov_init),
+ DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
+
+ /* backlight interface */
+ DEVMETHOD(backlight_update_status, linux_backlight_update_status),
+ DEVMETHOD(backlight_get_status, linux_backlight_get_status),
+ DEVMETHOD(backlight_get_info, linux_backlight_get_info),
+ DEVMETHOD_END
+};
+
+const char *pci_power_names[] = {
+ "UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
+};
+
+/* We need some meta-struct to keep track of these for devres. */
+struct pci_devres {
+ bool enable_io;
+ /* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
+ uint8_t region_mask;
+ struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
+};
+struct pcim_iomap_devres {
+ void *mmio_table[PCIR_MAX_BAR_0 + 1];
+ struct resource *res_table[PCIR_MAX_BAR_0 + 1];
+};
+
+struct linux_dma_priv {
+ uint64_t dma_mask;
+ bus_dma_tag_t dmat;
+ uint64_t dma_coherent_mask;
+ bus_dma_tag_t dmat_coherent;
+ struct mtx lock;
+ struct pctrie ptree;
+};
+#define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
+#define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)
+
+static int
+linux_pdev_dma_uninit(struct pci_dev *pdev)
+{
+ struct linux_dma_priv *priv;
+
+ priv = pdev->dev.dma_priv;
+ if (priv->dmat)
+ bus_dma_tag_destroy(priv->dmat);
+ if (priv->dmat_coherent)
+ bus_dma_tag_destroy(priv->dmat_coherent);
+ mtx_destroy(&priv->lock);
+ pdev->dev.dma_priv = NULL;
+ free(priv, M_DEVBUF);
+ return (0);
+}
+
+static int
+linux_pdev_dma_init(struct pci_dev *pdev)
+{
+ struct linux_dma_priv *priv;
+ int error;
+
+ priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
+ pctrie_init(&priv->ptree);
+
+ pdev->dev.dma_priv = priv;
+
+	/* Create the default DMA tags. */
+ error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
+ if (error != 0)
+ goto err;
+	/* Coherent is limited to the lower 32 bits by default in Linux. */
+ error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error != 0)
+ goto err;
+
+ return (error);
+
+err:
+ linux_pdev_dma_uninit(pdev);
+ return (error);
+}
+
+int
+linux_dma_tag_init(struct device *dev, u64 dma_mask)
+{
+ struct linux_dma_priv *priv;
+ int error;
+
+ priv = dev->dma_priv;
+
+ if (priv->dmat) {
+ if (priv->dma_mask == dma_mask)
+ return (0);
+
+ bus_dma_tag_destroy(priv->dmat);
+ }
+
+ priv->dma_mask = dma_mask;
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
+ 1, 0, /* alignment, boundary */
+ dma_mask, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ BUS_SPACE_MAXSIZE, /* maxsize */
+ 1, /* nsegments */
+ BUS_SPACE_MAXSIZE, /* maxsegsz */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &priv->dmat);
+ return (-error);
+}
+
+int
+linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
+{
+ struct linux_dma_priv *priv;
+ int error;
+
+ priv = dev->dma_priv;
+
+ if (priv->dmat_coherent) {
+ if (priv->dma_coherent_mask == dma_mask)
+ return (0);
+
+ bus_dma_tag_destroy(priv->dmat_coherent);
+ }
+
+ priv->dma_coherent_mask = dma_mask;
+
+ error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
+ 1, 0, /* alignment, boundary */
+ dma_mask, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ BUS_SPACE_MAXSIZE, /* maxsize */
+ 1, /* nsegments */
+ BUS_SPACE_MAXSIZE, /* maxsegsz */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &priv->dmat_coherent);
+ return (-error);
+}
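+
+/*
+ * Drivers usually adjust these defaults through the Linux DMA API, which
+ * re-creates the tags above; a hedged sketch (the 36-bit mask is an
+ * arbitrary example):
+ *
+ *	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ *	if (error != 0)
+ *		// ... fall back to a smaller mask or fail attach ...
+ */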
+
+static struct pci_driver *
+linux_pci_find(device_t dev, const struct pci_device_id **idp)
+{
+ const struct pci_device_id *id;
+ struct pci_driver *pdrv;
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+
+ vendor = pci_get_vendor(dev);
+ device = pci_get_device(dev);
+ subvendor = pci_get_subvendor(dev);
+ subdevice = pci_get_subdevice(dev);
+
+ spin_lock(&pci_lock);
+ list_for_each_entry(pdrv, &pci_drivers, node) {
+ for (id = pdrv->id_table; id->vendor != 0; id++) {
+ if (vendor == id->vendor &&
+ (PCI_ANY_ID == id->device || device == id->device) &&
+ (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
+ (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
+ *idp = id;
+ spin_unlock(&pci_lock);
+ return (pdrv);
+ }
+ }
+ }
+ spin_unlock(&pci_lock);
+ return (NULL);
+}
+
+struct pci_dev *
+lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
+{
+ struct pci_dev *pdev, *found;
+
+ KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));
+
+ found = NULL;
+ spin_lock(&pci_lock);
+ list_for_each_entry(pdev, &pci_devices, links) {
+ if (pdev->vendor == vendor && pdev->device == device) {
+ found = pdev;
+ break;
+ }
+ }
+ pci_dev_get(found);
+ spin_unlock(&pci_lock);
+
+ return (found);
+}
+
+static void
+lkpi_pci_dev_release(struct device *dev)
+{
+
+ lkpi_devres_release_free_list(dev);
+ spin_lock_destroy(&dev->devres_lock);
+}
+
+static int
+lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
+{
+ int error;
+
+ error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,
+ &linux_root_device.kobj, device_get_nameunit(dev));
+ if (error != 0) {
+ printf("%s:%d: kobject_init_and_add returned %d\n",
+ __func__, __LINE__, error);
+ return (error);
+ }
+
+ pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
+ pdev->vendor = pci_get_vendor(dev);
+ pdev->device = pci_get_device(dev);
+ pdev->subsystem_vendor = pci_get_subvendor(dev);
+ pdev->subsystem_device = pci_get_subdevice(dev);
+ pdev->class = pci_get_class(dev);
+ pdev->revision = pci_get_revid(dev);
+ pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
+ pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
+ pci_get_function(dev));
+ pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
+ /*
+ * This should be the upstream bridge; pci_upstream_bridge()
+ * handles that case on demand as otherwise we'll shadow the
+ * entire PCI hierarchy.
+ */
+ pdev->bus->self = pdev;
+ pdev->bus->number = pci_get_bus(dev);
+ pdev->bus->domain = pci_get_domain(dev);
+ pdev->dev.bsddev = dev;
+ pdev->dev.parent = &linux_root_device;
+ pdev->dev.release = lkpi_pci_dev_release;
+ INIT_LIST_HEAD(&pdev->dev.irqents);
+
+ if (pci_msi_count(dev) > 0)
+ pdev->msi_desc = malloc(pci_msi_count(dev) *
+ sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ spin_lock_init(&pdev->dev.devres_lock);
+ INIT_LIST_HEAD(&pdev->dev.devres_head);
+
+ return (0);
+}
+
+static void
+lkpinew_pci_dev_release(struct device *dev)
+{
+ struct pci_dev *pdev;
+ int i;
+
+ pdev = to_pci_dev(dev);
+ if (pdev->root != NULL)
+ pci_dev_put(pdev->root);
+ if (pdev->bus->self != pdev)
+ pci_dev_put(pdev->bus->self);
+ free(pdev->bus, M_DEVBUF);
+ if (pdev->msi_desc != NULL) {
+ for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
+ free(pdev->msi_desc[i], M_DEVBUF);
+ free(pdev->msi_desc, M_DEVBUF);
+ }
+ kfree(pdev->path_name);
+ free(pdev, M_DEVBUF);
+}
+
+struct pci_dev *
+lkpinew_pci_dev(device_t dev)
+{
+ struct pci_dev *pdev;
+ int error;
+
+ pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
+ error = lkpifill_pci_dev(dev, pdev);
+ if (error != 0) {
+ free(pdev, M_DEVBUF);
+ return (NULL);
+ }
+ pdev->dev.release = lkpinew_pci_dev_release;
+
+ return (pdev);
+}
+
+struct pci_dev *
+lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
+{
+ device_t dev;
+ device_t devfrom = NULL;
+ struct pci_dev *pdev;
+
+ if (from != NULL)
+ devfrom = from->dev.bsddev;
+
+ dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
+ if (dev == NULL)
+ return (NULL);
+
+ pdev = lkpinew_pci_dev(dev);
+ return (pdev);
+}
+
+struct pci_dev *
+lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from)
+{
+ device_t dev;
+ device_t devfrom = NULL;
+ struct pci_dev *pdev;
+
+ if (from != NULL)
+ devfrom = from->dev.bsddev;
+
+ dev = pci_find_base_class_from(baseclass, devfrom);
+ if (dev == NULL)
+ return (NULL);
+
+ pdev = lkpinew_pci_dev(dev);
+ return (pdev);
+}
+
+struct pci_dev *
+lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+ unsigned int devfn)
+{
+ device_t dev;
+ struct pci_dev *pdev;
+
+ dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ if (dev == NULL)
+ return (NULL);
+
+ pdev = lkpinew_pci_dev(dev);
+ return (pdev);
+}
+
+static int
+linux_pci_probe(device_t dev)
+{
+ const struct pci_device_id *id;
+ struct pci_driver *pdrv;
+
+ if ((pdrv = linux_pci_find(dev, &id)) == NULL)
+ return (ENXIO);
+ if (device_get_driver(dev) != &pdrv->bsddriver)
+ return (ENXIO);
+ device_set_desc(dev, pdrv->name);
+
+ /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
+ if (pdrv->bsd_probe_return == 0)
+ return (BUS_PROBE_DEFAULT);
+ else
+ return (pdrv->bsd_probe_return);
+}
+
+static int
+linux_pci_attach(device_t dev)
+{
+ const struct pci_device_id *id;
+ struct pci_driver *pdrv;
+ struct pci_dev *pdev;
+
+ pdrv = linux_pci_find(dev, &id);
+ pdev = device_get_softc(dev);
+
+ MPASS(pdrv != NULL);
+ MPASS(pdev != NULL);
+
+ return (linux_pci_attach_device(dev, pdrv, id, pdev));
+}
+
+static struct resource_list_entry *
+linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
+ int type, int rid)
+{
+ device_t dev;
+ struct resource *res;
+
+ KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
+ ("trying to reserve non-BAR type %d", type));
+
+ dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
+ device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
+ res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
+ 1, 1, 0);
+ if (res == NULL)
+ return (NULL);
+ return (resource_list_find(rl, type, rid));
+}
+
+static struct resource_list_entry *
+linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
+{
+ struct pci_devinfo *dinfo;
+ struct resource_list *rl;
+ struct resource_list_entry *rle;
+
+ dinfo = device_get_ivars(pdev->dev.bsddev);
+ rl = &dinfo->resources;
+ rle = resource_list_find(rl, type, rid);
+ /* Reserve resources for this BAR if needed. */
+ if (rle == NULL && reserve_bar)
+ rle = linux_pci_reserve_bar(pdev, rl, type, rid);
+ return (rle);
+}
+
+int
+linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
+ const struct pci_device_id *id, struct pci_dev *pdev)
+{
+ struct resource_list_entry *rle;
+ device_t parent;
+ uintptr_t rid;
+ int error;
+ bool isdrm;
+
+ linux_set_current(curthread);
+
+ parent = device_get_parent(dev);
+ isdrm = pdrv != NULL && pdrv->isdrm;
+
+ if (isdrm) {
+ struct pci_devinfo *dinfo;
+
+ dinfo = device_get_ivars(parent);
+ device_set_ivars(dev, dinfo);
+ }
+
+ error = lkpifill_pci_dev(dev, pdev);
+ if (error != 0)
+ return (error);
+
+ if (isdrm)
+ PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
+ else
+ PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
+ pdev->devfn = rid;
+ pdev->pdrv = pdrv;
+ rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
+ if (rle != NULL)
+ pdev->dev.irq = rle->start;
+ else
+ pdev->dev.irq = LINUX_IRQ_INVALID;
+ pdev->irq = pdev->dev.irq;
+ error = linux_pdev_dma_init(pdev);
+ if (error)
+ goto out_dma_init;
+
+ TAILQ_INIT(&pdev->mmio);
+ spin_lock_init(&pdev->pcie_cap_lock);
+
+ spin_lock(&pci_lock);
+ list_add(&pdev->links, &pci_devices);
+ spin_unlock(&pci_lock);
+
+ if (pdrv != NULL) {
+ error = pdrv->probe(pdev, id);
+ if (error)
+ goto out_probe;
+ }
+ return (0);
+
+out_probe:
+ free(pdev->bus, M_DEVBUF);
+ spin_lock_destroy(&pdev->pcie_cap_lock);
+ linux_pdev_dma_uninit(pdev);
+out_dma_init:
+ spin_lock(&pci_lock);
+ list_del(&pdev->links);
+ spin_unlock(&pci_lock);
+ put_device(&pdev->dev);
+ return (-error);
+}
+
+static int
+linux_pci_detach(device_t dev)
+{
+ struct pci_dev *pdev;
+
+ pdev = device_get_softc(dev);
+
+ MPASS(pdev != NULL);
+
+ device_set_desc(dev, NULL);
+
+ return (linux_pci_detach_device(pdev));
+}
+
+int
+linux_pci_detach_device(struct pci_dev *pdev)
+{
+
+ linux_set_current(curthread);
+
+ if (pdev->pdrv != NULL)
+ pdev->pdrv->remove(pdev);
+
+ if (pdev->root != NULL)
+ pci_dev_put(pdev->root);
+ free(pdev->bus, M_DEVBUF);
+ linux_pdev_dma_uninit(pdev);
+
+ spin_lock(&pci_lock);
+ list_del(&pdev->links);
+ spin_unlock(&pci_lock);
+ spin_lock_destroy(&pdev->pcie_cap_lock);
+ put_device(&pdev->dev);
+
+ return (0);
+}
+
+static int
+lkpi_pci_disable_dev(struct device *dev)
+{
+
+ (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
+ (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
+ return (0);
+}
+
+static struct pci_devres *
+lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+
+ dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
+ if (dr == NULL) {
+ dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
+ GFP_KERNEL | __GFP_ZERO);
+ if (dr != NULL)
+ lkpi_devres_add(&pdev->dev, dr);
+ }
+
+ return (dr);
+}
+
+static struct pci_devres *
+lkpi_pci_devres_find(struct pci_dev *pdev)
+{
+ if (!pdev->managed)
+ return (NULL);
+
+ return (lkpi_pci_devres_get_alloc(pdev));
+}
+
+void
+lkpi_pci_devres_release(struct device *dev, void *p)
+{
+ struct pci_devres *dr;
+ struct pci_dev *pdev;
+ int bar;
+
+ pdev = to_pci_dev(dev);
+ dr = p;
+
+ if (pdev->msix_enabled)
+ lkpi_pci_disable_msix(pdev);
+ if (pdev->msi_enabled)
+ lkpi_pci_disable_msi(pdev);
+
+ if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
+ dr->enable_io = false;
+
+ if (dr->region_mask == 0)
+ return;
+ for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
+
+ if ((dr->region_mask & (1 << bar)) == 0)
+ continue;
+ pci_release_region(pdev, bar);
+ }
+}
+
+int
+linuxkpi_pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int error;
+
+ /* Here we cannot run through the pdev->managed check. */
+ dr = lkpi_pci_devres_get_alloc(pdev);
+ if (dr == NULL)
+ return (-ENOMEM);
+
+	/* If resources were enabled before, do not do it again. */
+ if (dr->enable_io)
+ return (0);
+
+ error = pci_enable_device(pdev);
+ if (error == 0)
+ dr->enable_io = true;
+
+	/* From now on this device is managed. */
+ pdev->managed = true;
+
+ return (error);
+}
+
+static struct pcim_iomap_devres *
+lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr;
+
+ dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
+ NULL, NULL);
+ if (dr == NULL) {
+ dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
+ sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
+ if (dr != NULL)
+ lkpi_devres_add(&pdev->dev, dr);
+ }
+
+ if (dr == NULL)
+ device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);
+
+ return (dr);
+}
+
+void __iomem **
+linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr;
+
+ dr = lkpi_pcim_iomap_devres_find(pdev);
+ if (dr == NULL)
+ return (NULL);
+
+ /*
+ * If the driver has manually set a flag to be able to request the
+ * resource to use bus_read/write_<n>, return the shadow table.
+ */
+ if (pdev->want_iomap_res)
+ return ((void **)dr->res_table);
+
+ /* This is the Linux default. */
+ return (dr->mmio_table);
+}
+
+static struct resource *
+_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
+{
+ struct pci_mmio_region *mmio, *p;
+ int type;
+
+ type = pci_resource_type(pdev, bar);
+ if (type < 0) {
+ device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
+ __func__, bar, type);
+ return (NULL);
+ }
+
+ /*
+ * Check for duplicate mappings.
+ * This can happen if a driver calls pci_request_region() first.
+ */
+ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
+ if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
+ return (mmio->res);
+ }
+ }
+
+ mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
+ mmio->rid = PCIR_BAR(bar);
+ mmio->type = type;
+ mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
+ &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
+ if (mmio->res == NULL) {
+ device_printf(pdev->dev.bsddev, "%s: failed to alloc "
+ "bar %d type %d rid %d\n",
+ __func__, bar, type, PCIR_BAR(bar));
+ free(mmio, M_DEVBUF);
+ return (NULL);
+ }
+ TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
+
+ return (mmio->res);
+}
+
+void *
+linuxkpi_pci_iomap_range(struct pci_dev *pdev, int mmio_bar,
+ unsigned long mmio_off, unsigned long mmio_size)
+{
+ struct resource *res;
+
+ res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
+ if (res == NULL)
+ return (NULL);
+ /* This is a FreeBSD extension so we can use bus_*(). */
+ if (pdev->want_iomap_res)
+ return (res);
+ MPASS(mmio_off < rman_get_size(res));
+ return ((void *)(rman_get_bushandle(res) + mmio_off));
+}
+
+void *
+linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
+{
+ return (linuxkpi_pci_iomap_range(pdev, mmio_bar, 0, mmio_size));
+}
+
+void
+linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
+{
+ struct pci_mmio_region *mmio, *p;
+ bus_space_handle_t bh = (bus_space_handle_t)res;
+
+ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
+ if (pdev->want_iomap_res) {
+ if (res != mmio->res)
+ continue;
+ } else {
+ if (bh < rman_get_bushandle(mmio->res) ||
+ bh >= rman_get_bushandle(mmio->res) +
+ rman_get_size(mmio->res))
+ continue;
+ }
+ bus_release_resource(pdev->dev.bsddev,
+ mmio->type, mmio->rid, mmio->res);
+ TAILQ_REMOVE(&pdev->mmio, mmio, next);
+ free(mmio, M_DEVBUF);
+ return;
+ }
+}
+
+int
+linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
+{
+ struct pcim_iomap_devres *dr;
+ void *res;
+ uint32_t mappings;
+ int bar;
+
+ dr = lkpi_pcim_iomap_devres_find(pdev);
+ if (dr == NULL)
+ return (-ENOMEM);
+
+ /* Now iomap all the requested (by "mask") ones. */
+ for (bar = mappings = 0; mappings != mask; bar++) {
+ if ((mask & (1 << bar)) == 0)
+ continue;
+
+		/* Requesting a BAR twice is not allowed. */
+ if (dr->mmio_table[bar] != NULL) {
+ device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
+ __func__, bar, dr->mmio_table[bar]);
+ goto err;
+ }
+
+ res = _lkpi_pci_iomap(pdev, bar, 0);
+ if (res == NULL)
+ goto err;
+ dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
+ dr->res_table[bar] = res;
+
+ mappings |= (1 << bar);
+ }
+
+ return (0);
+err:
+ for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
+ if ((mappings & (1 << bar)) != 0) {
+ res = dr->mmio_table[bar];
+ if (res == NULL)
+ continue;
+ pci_iounmap(pdev, res);
+ }
+ }
+
+ return (-EINVAL);
+}
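+
+/*
+ * Typical managed-iomap usage, as a sketch only (the BAR number, driver
+ * name, and softc field are assumptions):
+ *
+ *	if (pcim_iomap_regions(pdev, BIT(0), "mydrv") == 0)
+ *		sc->regs = pcim_iomap_table(pdev)[0];
+ */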
+
+static void
+lkpi_pcim_iomap_table_release(struct device *dev, void *p)
+{
+ struct pcim_iomap_devres *dr;
+ struct pci_dev *pdev;
+ int bar;
+
+ dr = p;
+ pdev = to_pci_dev(dev);
+ for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
+
+ if (dr->mmio_table[bar] == NULL)
+ continue;
+
+ pci_iounmap(pdev, dr->mmio_table[bar]);
+ }
+}
+
+static int
+linux_pci_suspend(device_t dev)
+{
+ const struct dev_pm_ops *pmops;
+ struct pm_message pm = { };
+ struct pci_dev *pdev;
+ int error;
+
+ error = 0;
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+ pmops = pdev->pdrv->driver.pm;
+
+ if (pdev->pdrv->suspend != NULL)
+ error = -pdev->pdrv->suspend(pdev, pm);
+ else if (pmops != NULL && pmops->suspend != NULL) {
+ error = -pmops->suspend(&pdev->dev);
+ if (error == 0 && pmops->suspend_late != NULL)
+ error = -pmops->suspend_late(&pdev->dev);
+ if (error == 0 && pmops->suspend_noirq != NULL)
+ error = -pmops->suspend_noirq(&pdev->dev);
+ }
+ return (error);
+}
+
+static int
+linux_pci_resume(device_t dev)
+{
+ const struct dev_pm_ops *pmops;
+ struct pci_dev *pdev;
+ int error;
+
+ error = 0;
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+ pmops = pdev->pdrv->driver.pm;
+
+ if (pdev->pdrv->resume != NULL)
+ error = -pdev->pdrv->resume(pdev);
+ else if (pmops != NULL && pmops->resume != NULL) {
+ if (pmops->resume_early != NULL)
+ error = -pmops->resume_early(&pdev->dev);
+ if (error == 0 && pmops->resume != NULL)
+ error = -pmops->resume(&pdev->dev);
+ }
+ return (error);
+}
+
+static int
+linux_pci_shutdown(device_t dev)
+{
+ struct pci_dev *pdev;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+ if (pdev->pdrv->shutdown != NULL)
+ pdev->pdrv->shutdown(pdev);
+ return (0);
+}
+
+static int
+linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
+{
+ struct pci_dev *pdev;
+ int error;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+ if (pdev->pdrv->bsd_iov_init != NULL)
+ error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
+ else
+ error = EINVAL;
+ return (error);
+}
+
+static void
+linux_pci_iov_uninit(device_t dev)
+{
+ struct pci_dev *pdev;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+ if (pdev->pdrv->bsd_iov_uninit != NULL)
+ pdev->pdrv->bsd_iov_uninit(dev);
+}
+
+static int
+linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
+{
+ struct pci_dev *pdev;
+ int error;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+ if (pdev->pdrv->bsd_iov_add_vf != NULL)
+ error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
+ else
+ error = EINVAL;
+ return (error);
+}
+
+static int
+_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
+{
+ int error;
+
+ linux_set_current(curthread);
+ spin_lock(&pci_lock);
+ list_add(&pdrv->node, &pci_drivers);
+ spin_unlock(&pci_lock);
+ if (pdrv->bsddriver.name == NULL)
+ pdrv->bsddriver.name = pdrv->name;
+ pdrv->bsddriver.methods = pci_methods;
+ pdrv->bsddriver.size = sizeof(struct pci_dev);
+
+ bus_topo_lock();
+ error = devclass_add_driver(dc, &pdrv->bsddriver,
+ BUS_PASS_DEFAULT, &pdrv->bsdclass);
+ bus_topo_unlock();
+ return (-error);
+}
+
+int
+linux_pci_register_driver(struct pci_driver *pdrv)
+{
+ devclass_t dc;
+
+ pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;
+ dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");
+ if (dc == NULL)
+ return (-ENXIO);
+ return (_linux_pci_register_driver(pdrv, dc));
+}
+
+static struct resource_list_entry *
+lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
+{
+ int type;
+
+ type = pci_resource_type(pdev, bar);
+ if (type < 0)
+ return (NULL);
+ bar = PCIR_BAR(bar);
+ return (linux_pci_get_rle(pdev, type, bar, reserve));
+}
+
+struct device *
+lkpi_pci_find_irq_dev(unsigned int irq)
+{
+ struct pci_dev *pdev;
+ struct device *found;
+
+ found = NULL;
+ spin_lock(&pci_lock);
+ list_for_each_entry(pdev, &pci_devices, links) {
+ if (irq == pdev->dev.irq ||
+ (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
+ found = &pdev->dev;
+ break;
+ }
+ }
+ spin_unlock(&pci_lock);
+ return (found);
+}
+
+unsigned long
+pci_resource_start(struct pci_dev *pdev, int bar)
+{
+ struct resource_list_entry *rle;
+ rman_res_t newstart;
+ device_t dev;
+ int error;
+
+ if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
+ return (0);
+ dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
+ device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
+ error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
+ if (error != 0) {
+ device_printf(pdev->dev.bsddev,
+ "translate of %#jx failed: %d\n",
+ (uintmax_t)rle->start, error);
+ return (0);
+ }
+ return (newstart);
+}
+
+unsigned long
+pci_resource_len(struct pci_dev *pdev, int bar)
+{
+ struct resource_list_entry *rle;
+
+ if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
+ return (0);
+ return (rle->count);
+}
+
+int
+pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+{
+ struct resource *res;
+ struct pci_devres *dr;
+ struct pci_mmio_region *mmio;
+ int rid;
+ int type;
+
+ type = pci_resource_type(pdev, bar);
+ if (type < 0)
+ return (-ENODEV);
+ rid = PCIR_BAR(bar);
+ res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
+ RF_ACTIVE|RF_SHAREABLE);
+ if (res == NULL) {
+ device_printf(pdev->dev.bsddev, "%s: failed to alloc "
+ "bar %d type %d rid %d\n",
+ __func__, bar, type, PCIR_BAR(bar));
+ return (-ENODEV);
+ }
+
+ /*
+ * It seems there is an implicit devres tracking on these if the device
+ * is managed; otherwise the resources are not automatically freed on
+ * FreeBSD/LinuxKPI, even though Linux drivers expect them to be.
+ */
+ dr = lkpi_pci_devres_find(pdev);
+ if (dr != NULL) {
+ dr->region_mask |= (1 << bar);
+ dr->region_table[bar] = res;
+ }
+
+ /* Even if the device is not managed we need to track it for iomap. */
+ mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
+ mmio->rid = PCIR_BAR(bar);
+ mmio->type = type;
+ mmio->res = res;
+ TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
+
+ return (0);
+}
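
[Editor's sketch] A hedged sketch of the request/release pairing discussed in the comment above: on a non-managed device the Linux driver is expected to release the region itself. The region name and BAR are illustrative.

	#include <linux/pci.h>

	static int
	example_claim_bar0(struct pci_dev *pdev)
	{
		int error;

		error = pci_request_region(pdev, 0, "example");
		if (error != 0)
			return (error);
		/* ... map and use the BAR, e.g. via pci_iomap() ... */
		pci_release_region(pdev, 0);
		return (0);
	}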
+
+int
+linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
+{
+ int error;
+ int i;
+
+ for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
+ error = pci_request_region(pdev, i, res_name);
+ if (error && error != -ENODEV) {
+ pci_release_regions(pdev);
+ return (error);
+ }
+ }
+ return (0);
+}
+
+void
+linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
+{
+ struct resource_list_entry *rle;
+ struct pci_devres *dr;
+ struct pci_mmio_region *mmio, *p;
+
+ if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
+ return;
+
+ /*
+ * Because we implicitly track requests, we also need to clear them on
+ * release. Clear before releasing the resource.
+ */
+ dr = lkpi_pci_devres_find(pdev);
+ if (dr != NULL) {
+ KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
+ " region_table res %p != rel->res %p\n", __func__, pdev,
+ bar, dr->region_table[bar], rle->res));
+ dr->region_table[bar] = NULL;
+ dr->region_mask &= ~(1 << bar);
+ }
+
+ TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
+ if (rle->res != (void *)rman_get_bushandle(mmio->res))
+ continue;
+ TAILQ_REMOVE(&pdev->mmio, mmio, next);
+ free(mmio, M_DEVBUF);
+ }
+
+ bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
+}
+
+void
+linuxkpi_pci_release_regions(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i <= PCIR_MAX_BAR_0; i++)
+ pci_release_region(pdev, i);
+}
+
+int
+linux_pci_register_drm_driver(struct pci_driver *pdrv)
+{
+ devclass_t dc;
+
+ dc = devclass_create("vgapci");
+ if (dc == NULL)
+ return (-ENXIO);
+ pdrv->isdrm = true;
+ pdrv->name = "drmn";
+ return (_linux_pci_register_driver(pdrv, dc));
+}
+
+void
+linux_pci_unregister_driver(struct pci_driver *pdrv)
+{
+ devclass_t bus;
+
+ bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci");
+
+ spin_lock(&pci_lock);
+ list_del(&pdrv->node);
+ spin_unlock(&pci_lock);
+ bus_topo_lock();
+ if (bus != NULL)
+ devclass_delete_driver(bus, &pdrv->bsddriver);
+ bus_topo_unlock();
+}
+
+void
+linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
+{
+ devclass_t bus;
+
+ bus = devclass_find("vgapci");
+
+ spin_lock(&pci_lock);
+ list_del(&pdrv->node);
+ spin_unlock(&pci_lock);
+ bus_topo_lock();
+ if (bus != NULL)
+ devclass_delete_driver(bus, &pdrv->bsddriver);
+ bus_topo_unlock();
+}
+
+int
+linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
+ int nreq)
+{
+ struct resource_list_entry *rle;
+ int error;
+ int avail;
+ int i;
+
+ avail = pci_msix_count(pdev->dev.bsddev);
+ if (avail < nreq) {
+ if (avail == 0)
+ return -EINVAL;
+ return avail;
+ }
+ avail = nreq;
+ if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
+ return error;
+ /*
+ * Handle the case where pci_alloc_msix() may allocate fewer
+ * interrupts than requested and still return without an error:
+ */
+ if (avail < nreq) {
+ pci_release_msi(pdev->dev.bsddev);
+ return avail;
+ }
+ rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
+ pdev->dev.irq_start = rle->start;
+ pdev->dev.irq_end = rle->start + avail;
+ for (i = 0; i < nreq; i++)
+ entries[i].vector = pdev->dev.irq_start + i;
+ pdev->msix_enabled = true;
+ return (0);
+}
+
+int
+_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
+{
+ struct resource_list_entry *rle;
+ int error;
+ int nvec;
+
+ if (maxvec < minvec)
+ return (-EINVAL);
+
+ nvec = pci_msi_count(pdev->dev.bsddev);
+ if (nvec < 1 || nvec < minvec)
+ return (-ENOSPC);
+
+ nvec = min(nvec, maxvec);
+ if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
+ return error;
+
+ /* Native PCI might only ever ask for 32 vectors. */
+ if (nvec < minvec) {
+ pci_release_msi(pdev->dev.bsddev);
+ return (-ENOSPC);
+ }
+
+ rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
+ pdev->dev.irq_start = rle->start;
+ pdev->dev.irq_end = rle->start + nvec;
+ pdev->irq = rle->start;
+ pdev->msi_enabled = true;
+ return (0);
+}
+
+int
+pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
+ unsigned int flags)
+{
+ int error;
+
+ if (flags & PCI_IRQ_MSIX) {
+ struct msix_entry *entries;
+ int i;
+
+ entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
+ if (entries == NULL) {
+ error = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < maxv; ++i)
+ entries[i].entry = i;
+ error = pci_enable_msix(pdev, entries, maxv);
+out:
+ kfree(entries);
+ if (error == 0 && pdev->msix_enabled)
+ return (pdev->dev.irq_end - pdev->dev.irq_start);
+ }
+ if (flags & PCI_IRQ_MSI) {
+ if (pci_msi_count(pdev->dev.bsddev) < minv)
+ return (-ENOSPC);
+ error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
+ if (error == 0 && pdev->msi_enabled)
+ return (pdev->dev.irq_end - pdev->dev.irq_start);
+ }
+ if (flags & PCI_IRQ_INTX) {
+ if (pdev->irq)
+ return (1);
+ }
+
+ return (-EINVAL);
+}
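
[Editor's sketch] A sketch of the usual caller pattern for the function above, asking for MSI-X first and letting the flag handling fall back to MSI and then legacy INTx; the vector range is an arbitrary example.

	#include <linux/pci.h>

	static int
	example_setup_interrupts(struct pci_dev *pdev)
	{
		int nvec;

		nvec = pci_alloc_irq_vectors(pdev, 1, 8,
		    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
		if (nvec < 0)
			return (nvec);
		/* On success, nvec is the number of vectors actually granted. */
		return (0);
	}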
+
+struct msi_desc *
+lkpi_pci_msi_desc_alloc(int irq)
+{
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct msi_desc *desc;
+ struct pci_devinfo *dinfo;
+ struct pcicfg_msi *msi;
+ int vec;
+
+ dev = lkpi_pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return (NULL);
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->msi_desc == NULL)
+ return (NULL);
+
+ if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
+ return (NULL);
+
+ vec = irq - pdev->dev.irq_start;
+
+ if (pdev->msi_desc[vec] != NULL)
+ return (pdev->msi_desc[vec]);
+
+ dinfo = device_get_ivars(dev->bsddev);
+ msi = &dinfo->cfg.msi;
+
+ desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ desc->pci.msi_attrib.is_64 =
+ (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
+ desc->msg.data = msi->msi_data;
+
+ pdev->msi_desc[vec] = desc;
+
+ return (desc);
+}
+
+bool
+pci_device_is_present(struct pci_dev *pdev)
+{
+ device_t dev;
+
+ dev = pdev->dev.bsddev;
+
+ return (bus_child_present(dev));
+}
+
+CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));
+
+struct linux_dma_obj {
+ void *vaddr;
+ uint64_t dma_addr;
+ bus_dmamap_t dmamap;
+ bus_dma_tag_t dmat;
+};
+
+static uma_zone_t linux_dma_trie_zone;
+static uma_zone_t linux_dma_obj_zone;
+
+static void
+linux_dma_init(void *arg)
+{
+
+ linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
+ pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
+ UMA_ALIGN_PTR, 0);
+ linux_dma_obj_zone = uma_zcreate("linux_dma_object",
+ sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
+}
+SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);
+
+static void
+linux_dma_uninit(void *arg)
+{
+
+ counter_u64_free(lkpi_pci_nseg1_fail);
+ uma_zdestroy(linux_dma_obj_zone);
+ uma_zdestroy(linux_dma_trie_zone);
+}
+SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);
+
+static void *
+linux_dma_trie_alloc(struct pctrie *ptree)
+{
+
+ return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
+}
+
+static void
+linux_dma_trie_free(struct pctrie *ptree, void *node)
+{
+
+ uma_zfree(linux_dma_trie_zone, node);
+}
+
+PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
+ linux_dma_trie_free);
+
+#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
+static dma_addr_t
+linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
+ bus_dma_tag_t dmat)
+{
+ struct linux_dma_priv *priv;
+ struct linux_dma_obj *obj;
+ int error, nseg;
+ bus_dma_segment_t seg;
+
+ priv = dev->dma_priv;
+
+ /*
+ * If the resultant mapping will be entirely 1:1 with the
+ * physical address, short-circuit the remainder of the
+ * bus_dma API. This avoids tracking collisions in the pctrie
+ * with the additional benefit of reducing overhead.
+ */
+ if (bus_dma_id_mapped(dmat, phys, len))
+ return (phys);
+
+ obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
+ if (obj == NULL) {
+ return (0);
+ }
+ obj->dmat = dmat;
+
+ DMA_PRIV_LOCK(priv);
+ if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
+ DMA_PRIV_UNLOCK(priv);
+ uma_zfree(linux_dma_obj_zone, obj);
+ return (0);
+ }
+
+ nseg = -1;
+ error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
+ BUS_DMA_NOWAIT, &seg, &nseg);
+ if (error != 0) {
+ bus_dmamap_destroy(obj->dmat, obj->dmamap);
+ DMA_PRIV_UNLOCK(priv);
+ uma_zfree(linux_dma_obj_zone, obj);
+ counter_u64_add(lkpi_pci_nseg1_fail, 1);
+ if (linuxkpi_debug) {
+ device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys "
+ "error %d, phys %#018jx len %zu\n", __func__,
+ error, (uintmax_t)phys, len);
+ dump_stack();
+ }
+ return (0);
+ }
+
+ KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
+ obj->dma_addr = seg.ds_addr;
+
+ error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
+ if (error != 0) {
+ bus_dmamap_unload(obj->dmat, obj->dmamap);
+ bus_dmamap_destroy(obj->dmat, obj->dmamap);
+ DMA_PRIV_UNLOCK(priv);
+ uma_zfree(linux_dma_obj_zone, obj);
+ return (0);
+ }
+ DMA_PRIV_UNLOCK(priv);
+ return (obj->dma_addr);
+}
+#else
+static dma_addr_t
+linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
+ size_t len __unused, bus_dma_tag_t dmat __unused)
+{
+ return (phys);
+}
+#endif
+
+dma_addr_t
+lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ struct linux_dma_priv *priv;
+ dma_addr_t dma;
+
+ priv = dev->dma_priv;
+ dma = linux_dma_map_phys_common(dev, phys, len, priv->dmat);
+ if (dma_mapping_error(dev, dma))
+ return (dma);
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ dma_sync_single_for_device(dev, dma, len, direction);
+
+ return (dma);
+}
+
+/* For backward compat only so we can MFC this. Remove before 15. */
+dma_addr_t
+linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
+{
+ return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0));
+}
+
+#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
+void
+lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ struct linux_dma_priv *priv;
+ struct linux_dma_obj *obj;
+
+ priv = dev->dma_priv;
+
+ if (pctrie_is_empty(&priv->ptree))
+ return;
+
+ DMA_PRIV_LOCK(priv);
+ obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
+ if (obj == NULL) {
+ DMA_PRIV_UNLOCK(priv);
+ return;
+ }
+ LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ dma_sync_single_for_cpu(dev, dma_addr, len, direction);
+
+ bus_dmamap_unload(obj->dmat, obj->dmamap);
+ bus_dmamap_destroy(obj->dmat, obj->dmamap);
+ DMA_PRIV_UNLOCK(priv);
+
+ uma_zfree(linux_dma_obj_zone, obj);
+}
+#else
+void
+lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+}
+#endif
+
+/* For backward compat only so we can MFC this. Remove before 15. */
+void
+linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
+{
+ lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0);
+}
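
[Editor's sketch] For context, a sketch of the Linux-facing streaming-DMA API that the map/unmap pair above implements (via the dma_map_single()/dma_unmap_single() wrappers in linux/dma-mapping.h); the buffer size and direction are invented.

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int
	example_stream_buffer(struct device *dev)
	{
		dma_addr_t dma;
		void *buf;

		buf = kzalloc(512, GFP_KERNEL);
		if (buf == NULL)
			return (-ENOMEM);
		dma = dma_map_single(dev, buf, 512, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			kfree(buf);
			return (-ENOMEM);
		}
		/* ... hand "dma" to the device and wait for completion ... */
		dma_unmap_single(dev, dma, 512, DMA_TO_DEVICE);
		kfree(buf);
		return (0);
	}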
+
+void *
+linux_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct linux_dma_priv *priv;
+ vm_paddr_t high;
+ size_t align;
+ void *mem;
+
+ if (dev == NULL || dev->dma_priv == NULL) {
+ *dma_handle = 0;
+ return (NULL);
+ }
+ priv = dev->dma_priv;
+ if (priv->dma_coherent_mask)
+ high = priv->dma_coherent_mask;
+ else
+ /* Coherent memory is limited to the lower 32 bits by default in Linux. */
+ high = BUS_SPACE_MAXADDR_32BIT;
+ align = PAGE_SIZE << get_order(size);
+ /* Always zero the allocation. */
+ flag |= M_ZERO;
+ mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
+ align, 0, VM_MEMATTR_DEFAULT);
+ if (mem != NULL) {
+ *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
+ priv->dmat_coherent);
+ if (*dma_handle == 0) {
+ kmem_free(mem, size);
+ mem = NULL;
+ }
+ } else {
+ *dma_handle = 0;
+ }
+ return (mem);
+}
+
+struct lkpi_devres_dmam_coherent {
+ size_t size;
+ dma_addr_t *handle;
+ void *mem;
+};
+
+static void
+lkpi_dmam_free_coherent(struct device *dev, void *p)
+{
+ struct lkpi_devres_dmam_coherent *dr;
+
+ dr = p;
+ dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
+}
+
+void *
+linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ struct lkpi_devres_dmam_coherent *dr;
+
+ dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
+ sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
+
+ if (dr == NULL)
+ return (NULL);
+
+ dr->size = size;
+ dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
+ dr->handle = dma_handle;
+ if (dr->mem == NULL) {
+ lkpi_devres_free(dr);
+ return (NULL);
+ }
+
+ lkpi_devres_add(dev, dr);
+ return (dr->mem);
+}
+
+void
+linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
+ bus_dmasync_op_t op)
+{
+ struct linux_dma_priv *priv;
+ struct linux_dma_obj *obj;
+
+ priv = dev->dma_priv;
+
+ if (pctrie_is_empty(&priv->ptree))
+ return;
+
+ DMA_PRIV_LOCK(priv);
+ obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
+ if (obj == NULL) {
+ DMA_PRIV_UNLOCK(priv);
+ return;
+ }
+
+ bus_dmamap_sync(obj->dmat, obj->dmamap, op);
+ DMA_PRIV_UNLOCK(priv);
+}
+
+int
+linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction, unsigned long attrs)
+{
+ struct linux_dma_priv *priv;
+ struct scatterlist *sg;
+ int i, nseg;
+ bus_dma_segment_t seg;
+
+ priv = dev->dma_priv;
+
+ DMA_PRIV_LOCK(priv);
+
+ /* create common DMA map in the first S/G entry */
+ if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
+ DMA_PRIV_UNLOCK(priv);
+ return (0);
+ }
+
+ /* load all S/G list entries */
+ for_each_sg(sgl, sg, nents, i) {
+ nseg = -1;
+ if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
+ sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
+ &seg, &nseg) != 0) {
+ bus_dmamap_unload(priv->dmat, sgl->dma_map);
+ bus_dmamap_destroy(priv->dmat, sgl->dma_map);
+ DMA_PRIV_UNLOCK(priv);
+ return (0);
+ }
+ KASSERT(nseg == 0,
+ ("More than one segment (nseg=%d)", nseg + 1));
+
+ sg_dma_address(sg) = seg.ds_addr;
+ }
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
+ goto skip_sync;
+
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
+ break;
+ case DMA_TO_DEVICE:
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
+ break;
+ case DMA_FROM_DEVICE:
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
+ break;
+ default:
+ break;
+ }
+skip_sync:
+
+ DMA_PRIV_UNLOCK(priv);
+
+ return (nents);
+}
+
+void
+linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nents __unused, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ struct linux_dma_priv *priv;
+
+ priv = dev->dma_priv;
+
+ DMA_PRIV_LOCK(priv);
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
+ goto skip_sync;
+
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
+ break;
+ case DMA_TO_DEVICE:
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
+ break;
+ case DMA_FROM_DEVICE:
+ bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
+ break;
+ default:
+ break;
+ }
+skip_sync:
+
+ bus_dmamap_unload(priv->dmat, sgl->dma_map);
+ bus_dmamap_destroy(priv->dmat, sgl->dma_map);
+ DMA_PRIV_UNLOCK(priv);
+}
+
+struct dma_pool {
+ struct device *pool_device;
+ uma_zone_t pool_zone;
+ struct mtx pool_lock;
+ bus_dma_tag_t pool_dmat;
+ size_t pool_entry_size;
+ struct pctrie pool_ptree;
+};
+
+#define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
+#define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)
+
+static inline int
+dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
+{
+ struct linux_dma_obj *obj = mem;
+ struct dma_pool *pool = arg;
+ int error, nseg;
+ bus_dma_segment_t seg;
+
+ nseg = -1;
+ DMA_POOL_LOCK(pool);
+ error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
+ vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
+ &seg, &nseg);
+ DMA_POOL_UNLOCK(pool);
+ if (error != 0) {
+ return (error);
+ }
+ KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
+ obj->dma_addr = seg.ds_addr;
+
+ return (0);
+}
+
+static void
+dma_pool_obj_dtor(void *mem, int size, void *arg)
+{
+ struct linux_dma_obj *obj = mem;
+ struct dma_pool *pool = arg;
+
+ DMA_POOL_LOCK(pool);
+ bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
+ DMA_POOL_UNLOCK(pool);
+}
+
+static int
+dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
+ int flags)
+{
+ struct dma_pool *pool = arg;
+ struct linux_dma_obj *obj;
+ int error, i;
+
+ for (i = 0; i < count; i++) {
+ obj = uma_zalloc(linux_dma_obj_zone, flags);
+ if (obj == NULL)
+ break;
+
+ error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
+ BUS_DMA_NOWAIT, &obj->dmamap);
+ if (error != 0) {
+ uma_zfree(linux_dma_obj_zone, obj);
+ break;
+ }
+
+ store[i] = obj;
+ }
+
+ return (i);
+}
+
+static void
+dma_pool_obj_release(void *arg, void **store, int count)
+{
+ struct dma_pool *pool = arg;
+ struct linux_dma_obj *obj;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ obj = store[i];
+ bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
+ uma_zfree(linux_dma_obj_zone, obj);
+ }
+}
+
+struct dma_pool *
+linux_dma_pool_create(char *name, struct device *dev, size_t size,
+ size_t align, size_t boundary)
+{
+ struct linux_dma_priv *priv;
+ struct dma_pool *pool;
+
+ priv = dev->dma_priv;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool->pool_device = dev;
+ pool->pool_entry_size = size;
+
+ if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
+ align, boundary, /* alignment, boundary */
+ priv->dma_mask, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsz */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &pool->pool_dmat)) {
+ kfree(pool);
+ return (NULL);
+ }
+
+ pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
+ dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
+ dma_pool_obj_release, pool, 0);
+
+ mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
+ pctrie_init(&pool->pool_ptree);
+
+ return (pool);
+}
+
+void
+linux_dma_pool_destroy(struct dma_pool *pool)
+{
+
+ uma_zdestroy(pool->pool_zone);
+ bus_dma_tag_destroy(pool->pool_dmat);
+ mtx_destroy(&pool->pool_lock);
+ kfree(pool);
+}
+
+void
+lkpi_dmam_pool_destroy(struct device *dev, void *p)
+{
+ struct dma_pool *pool;
+
+ pool = *(struct dma_pool **)p;
+ LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
+ linux_dma_pool_destroy(pool);
+}
+
+void *
+linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ struct linux_dma_obj *obj;
+
+ obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
+ if (obj == NULL)
+ return (NULL);
+
+ DMA_POOL_LOCK(pool);
+ if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
+ DMA_POOL_UNLOCK(pool);
+ uma_zfree_arg(pool->pool_zone, obj, pool);
+ return (NULL);
+ }
+ DMA_POOL_UNLOCK(pool);
+
+ *handle = obj->dma_addr;
+ return (obj->vaddr);
+}
+
+void
+linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
+{
+ struct linux_dma_obj *obj;
+
+ DMA_POOL_LOCK(pool);
+ obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
+ if (obj == NULL) {
+ DMA_POOL_UNLOCK(pool);
+ return;
+ }
+ LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
+ DMA_POOL_UNLOCK(pool);
+
+ uma_zfree_arg(pool->pool_zone, obj, pool);
+}
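
[Editor's sketch] A consumer-side sketch of the pool API implemented above, using the standard dma_pool_create()/dma_pool_alloc()/dma_pool_free()/dma_pool_destroy() names; the sizes are illustrative.

	#include <linux/dmapool.h>

	static void
	example_pool_roundtrip(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *vaddr;

		/* 256-byte entries, 16-byte aligned, no boundary restriction. */
		pool = dma_pool_create("example-pool", dev, 256, 16, 0);
		if (pool == NULL)
			return;
		vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
		if (vaddr != NULL)
			dma_pool_free(pool, vaddr, dma);
		dma_pool_destroy(pool);
	}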
+
+static int
+linux_backlight_get_status(device_t dev, struct backlight_props *props)
+{
+ struct pci_dev *pdev;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+
+ props->brightness = pdev->dev.bd->props.brightness;
+ props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
+ props->nlevels = 0;
+
+ return (0);
+}
+
+static int
+linux_backlight_get_info(device_t dev, struct backlight_info *info)
+{
+ struct pci_dev *pdev;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+
+ info->type = BACKLIGHT_TYPE_PANEL;
+ strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
+ return (0);
+}
+
+static int
+linux_backlight_update_status(device_t dev, struct backlight_props *props)
+{
+ struct pci_dev *pdev;
+
+ linux_set_current(curthread);
+ pdev = device_get_softc(dev);
+
+ pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
+ props->brightness / 100;
+ pdev->dev.bd->props.power = props->brightness == 0 ?
+ 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
+ return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
+}
+
+struct backlight_device *
+linux_backlight_device_register(const char *name, struct device *dev,
+ void *data, const struct backlight_ops *ops, struct backlight_properties *props)
+{
+
+ dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
+ dev->bd->ops = ops;
+ dev->bd->props.type = props->type;
+ dev->bd->props.max_brightness = props->max_brightness;
+ dev->bd->props.brightness = props->brightness;
+ dev->bd->props.power = props->power;
+ dev->bd->data = data;
+ dev->bd->dev = dev;
+ dev->bd->name = strdup(name, M_DEVBUF);
+
+ dev->backlight_dev = backlight_register(name, dev->bsddev);
+
+ return (dev->bd);
+}
+
+void
+linux_backlight_device_unregister(struct backlight_device *bd)
+{
+
+ backlight_destroy(bd->dev->backlight_dev);
+ free(bd->name, M_DEVBUF);
+ free(bd, M_DEVBUF);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_radix.c b/sys/compat/linuxkpi/common/src/linux_radix.c
new file mode 100644
index 000000000000..af53d8bff366
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_radix.c
@@ -0,0 +1,382 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+
+static MALLOC_DEFINE(M_RADIX, "radix", "Linux radix compat");
+
+static inline unsigned long
+radix_max(struct radix_tree_root *root)
+{
+ return ((1UL << (root->height * RADIX_TREE_MAP_SHIFT)) - 1UL);
+}
+
+static inline int
+radix_pos(long id, int height)
+{
+ return (id >> (RADIX_TREE_MAP_SHIFT * height)) & RADIX_TREE_MAP_MASK;
+}
+
+static void
+radix_tree_clean_root_node(struct radix_tree_root *root)
+{
+ /* Check if the root node should be freed */
+ if (root->rnode->count == 0) {
+ free(root->rnode, M_RADIX);
+ root->rnode = NULL;
+ root->height = 0;
+ }
+}
+
+void *
+radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
+{
+ struct radix_tree_node *node;
+ void *item;
+ int height;
+
+ item = NULL;
+ node = root->rnode;
+ height = root->height - 1;
+ if (index > radix_max(root))
+ goto out;
+ while (height && node)
+ node = node->slots[radix_pos(index, height--)];
+ if (node)
+ item = node->slots[radix_pos(index, 0)];
+
+out:
+ return (item);
+}
+
+bool
+radix_tree_iter_find(struct radix_tree_root *root, struct radix_tree_iter *iter,
+ void ***pppslot)
+{
+ struct radix_tree_node *node;
+ unsigned long index = iter->index;
+ int height;
+
+restart:
+ node = root->rnode;
+ if (node == NULL)
+ return (false);
+ height = root->height - 1;
+ if (height == -1 || index > radix_max(root))
+ return (false);
+ do {
+ unsigned long mask = RADIX_TREE_MAP_MASK << (RADIX_TREE_MAP_SHIFT * height);
+ unsigned long step = 1UL << (RADIX_TREE_MAP_SHIFT * height);
+ int pos = radix_pos(index, height);
+ struct radix_tree_node *next;
+
+ /* track last slot */
+ *pppslot = node->slots + pos;
+
+ next = node->slots[pos];
+ if (next == NULL) {
+ index += step;
+ index &= -step;
+ if ((index & mask) == 0)
+ goto restart;
+ } else {
+ node = next;
+ height--;
+ }
+ } while (height != -1);
+ iter->index = index;
+ return (true);
+}
+
+void *
+radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+ struct radix_tree_node *stack[RADIX_TREE_MAX_HEIGHT];
+ struct radix_tree_node *node;
+ void *item;
+ int height;
+ int idx;
+
+ item = NULL;
+ node = root->rnode;
+ height = root->height - 1;
+ if (index > radix_max(root))
+ goto out;
+ /*
+ * Find the node and record the path in stack.
+ */
+ while (height && node) {
+ stack[height] = node;
+ node = node->slots[radix_pos(index, height--)];
+ }
+ idx = radix_pos(index, 0);
+ if (node)
+ item = node->slots[idx];
+ /*
+ * If we removed something reduce the height of the tree.
+ */
+ if (item)
+ for (;;) {
+ node->slots[idx] = NULL;
+ node->count--;
+ if (node->count > 0)
+ break;
+ free(node, M_RADIX);
+ if (node == root->rnode) {
+ root->rnode = NULL;
+ root->height = 0;
+ break;
+ }
+ height++;
+ node = stack[height];
+ idx = radix_pos(index, height);
+ }
+out:
+ return (item);
+}
+
+void
+radix_tree_iter_delete(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, void **slot)
+{
+ radix_tree_delete(root, iter->index);
+}
+
+int
+radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item)
+{
+ struct radix_tree_node *node;
+ struct radix_tree_node *temp[RADIX_TREE_MAX_HEIGHT - 1];
+ int height;
+ int idx;
+
+ /* bail out upon insertion of a NULL item */
+ if (item == NULL)
+ return (-EINVAL);
+
+ /* get root node, if any */
+ node = root->rnode;
+
+ /* allocate root node, if any */
+ if (node == NULL) {
+ node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO);
+ if (node == NULL)
+ return (-ENOMEM);
+ root->rnode = node;
+ root->height++;
+ }
+
+ /* expand radix tree as needed */
+ while (radix_max(root) < index) {
+ /* check if the radix tree is getting too big */
+ if (root->height == RADIX_TREE_MAX_HEIGHT) {
+ radix_tree_clean_root_node(root);
+ return (-E2BIG);
+ }
+
+ /*
+ * If the root radix level is not empty, we need to
+ * allocate a new radix level:
+ */
+ if (node->count != 0) {
+ node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO);
+ if (node == NULL) {
+ /*
+ * Freeing the already allocated radix
+ * levels, if any, will be handled by
+ * the radix_tree_delete() function.
+ * This code path can only happen when
+ * the tree is not empty.
+ */
+ return (-ENOMEM);
+ }
+ node->slots[0] = root->rnode;
+ node->count++;
+ root->rnode = node;
+ }
+ root->height++;
+ }
+
+ /* get radix tree height index */
+ height = root->height - 1;
+
+ /* walk down the tree until the first missing node, if any */
+ for ( ; height != 0; height--) {
+ idx = radix_pos(index, height);
+ if (node->slots[idx] == NULL)
+ break;
+ node = node->slots[idx];
+ }
+
+ /* allocate the missing radix levels, if any */
+ for (idx = 0; idx != height; idx++) {
+ temp[idx] = malloc(sizeof(*node), M_RADIX,
+ root->gfp_mask | M_ZERO);
+ if (temp[idx] == NULL) {
+ while (idx--)
+ free(temp[idx], M_RADIX);
+ radix_tree_clean_root_node(root);
+ return (-ENOMEM);
+ }
+ }
+
+ /* setup new radix levels, if any */
+ for ( ; height != 0; height--) {
+ idx = radix_pos(index, height);
+ node->slots[idx] = temp[height - 1];
+ node->count++;
+ node = node->slots[idx];
+ }
+
+ /*
+ * Insert and adjust count if the item does not already exist.
+ */
+ idx = radix_pos(index, 0);
+ if (node->slots[idx])
+ return (-EEXIST);
+ node->slots[idx] = item;
+ node->count++;
+
+ return (0);
+}
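
[Editor's sketch] A small sketch of the insert/lookup/delete trio from a caller's perspective; it assumes the INIT_RADIX_TREE() initializer from linux/radix-tree.h is available and uses a made-up key and payload.

	#include <linux/radix-tree.h>

	static void
	example_radix_roundtrip(void *item)
	{
		struct radix_tree_root tree;

		INIT_RADIX_TREE(&tree, GFP_KERNEL);
		if (radix_tree_insert(&tree, 42, item) != 0)
			return;
		/* The lookup returns the stored pointer, or NULL if absent. */
		if (radix_tree_lookup(&tree, 42) == item)
			radix_tree_delete(&tree, 42);
	}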
+
+int
+radix_tree_store(struct radix_tree_root *root, unsigned long index, void **ppitem)
+{
+ struct radix_tree_node *node;
+ struct radix_tree_node *temp[RADIX_TREE_MAX_HEIGHT - 1];
+ void *pitem;
+ int height;
+ int idx;
+
+ /*
+ * Inserting a NULL item means delete it. The old pointer is
+ * stored at the location pointed to by "ppitem".
+ */
+ if (*ppitem == NULL) {
+ *ppitem = radix_tree_delete(root, index);
+ return (0);
+ }
+
+ /* get root node, if any */
+ node = root->rnode;
+
+ /* allocate root node, if any */
+ if (node == NULL) {
+ node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO);
+ if (node == NULL)
+ return (-ENOMEM);
+ root->rnode = node;
+ root->height++;
+ }
+
+ /* expand radix tree as needed */
+ while (radix_max(root) < index) {
+ /* check if the radix tree is getting too big */
+ if (root->height == RADIX_TREE_MAX_HEIGHT) {
+ radix_tree_clean_root_node(root);
+ return (-E2BIG);
+ }
+
+ /*
+ * If the root radix level is not empty, we need to
+ * allocate a new radix level:
+ */
+ if (node->count != 0) {
+ node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO);
+ if (node == NULL) {
+ /*
+ * Freeing the already allocated radix
+ * levels, if any, will be handled by
+ * the radix_tree_delete() function.
+ * This code path can only happen when
+ * the tree is not empty.
+ */
+ return (-ENOMEM);
+ }
+ node->slots[0] = root->rnode;
+ node->count++;
+ root->rnode = node;
+ }
+ root->height++;
+ }
+
+ /* get radix tree height index */
+ height = root->height - 1;
+
+ /* walk down the tree until the first missing node, if any */
+ for ( ; height != 0; height--) {
+ idx = radix_pos(index, height);
+ if (node->slots[idx] == NULL)
+ break;
+ node = node->slots[idx];
+ }
+
+ /* allocate the missing radix levels, if any */
+ for (idx = 0; idx != height; idx++) {
+ temp[idx] = malloc(sizeof(*node), M_RADIX,
+ root->gfp_mask | M_ZERO);
+ if (temp[idx] == NULL) {
+ while (idx--)
+ free(temp[idx], M_RADIX);
+ radix_tree_clean_root_node(root);
+ return (-ENOMEM);
+ }
+ }
+
+ /* setup new radix levels, if any */
+ for ( ; height != 0; height--) {
+ idx = radix_pos(index, height);
+ node->slots[idx] = temp[height - 1];
+ node->count++;
+ node = node->slots[idx];
+ }
+
+ /*
+ * Insert and adjust count if the item does not already exist.
+ */
+ idx = radix_pos(index, 0);
+ /* swap */
+ pitem = node->slots[idx];
+ node->slots[idx] = *ppitem;
+ *ppitem = pitem;
+
+ if (pitem == NULL)
+ node->count++;
+ return (0);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_rcu.c b/sys/compat/linuxkpi/common/src/linux_rcu.c
new file mode 100644
index 000000000000..c0b864d269b3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_rcu.c
@@ -0,0 +1,461 @@
+/*-
+ * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
+ * Copyright (c) 2017-2021 Hans Petter Selasky (hselasky@freebsd.org)
+ * All rights reserved.
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/kdb.h>
+
+#include <ck_epoch.h>
+
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/srcu.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/llist.h>
+#include <linux/irq_work.h>
+
+/*
+ * By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
+ * not be skipped during panic().
+ */
+#ifdef CONFIG_NO_RCU_SKIP
+#define RCU_SKIP(void) 0
+#else
+#define RCU_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
+#endif
+
+struct callback_head {
+ union {
+ STAILQ_ENTRY(callback_head) entry;
+ struct llist_node node;
+ };
+ rcu_callback_t func;
+};
+
+struct linux_epoch_head {
+ struct llist_head cb_head;
+ struct task task;
+} __aligned(CACHE_LINE_SIZE);
+
+struct linux_epoch_record {
+ ck_epoch_record_t epoch_record;
+ TAILQ_HEAD(, task_struct) ts_head;
+ int cpuid;
+ int type;
+} __aligned(CACHE_LINE_SIZE);
+
+/*
+ * Verify that "struct rcu_head" is big enough to hold "struct
+ * callback_head". This has been done to avoid having to add special
+ * compile flags for including ck_epoch.h to all clients of the
+ * LinuxKPI.
+ */
+CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));
+
+/*
+ * Verify that "rcu_section[0]" has the same size as
+ * "ck_epoch_section_t". This has been done to avoid having to add
+ * special compile flags for including ck_epoch.h to all clients of
+ * the LinuxKPI.
+ */
+CTASSERT(sizeof(((struct task_struct *)0)->rcu_section[0]) ==
+ sizeof(ck_epoch_section_t));
+
+/*
+ * Verify that "epoch_record" is at beginning of "struct
+ * linux_epoch_record":
+ */
+CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);
+
+CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);
+
+static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
+static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
+DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);
+
+static void linux_rcu_cleaner_func(void *, int);
+
+static void
+linux_rcu_runtime_init(void *arg __unused)
+{
+ struct linux_epoch_head *head;
+ int i;
+ int j;
+
+ for (j = 0; j != RCU_TYPE_MAX; j++) {
+ ck_epoch_init(&linux_epoch[j]);
+
+ head = &linux_epoch_head[j];
+
+ TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
+ init_llist_head(&head->cb_head);
+
+ CPU_FOREACH(i) {
+ struct linux_epoch_record *record;
+
+ record = &DPCPU_ID_GET(i, linux_epoch_record[j]);
+
+ record->cpuid = i;
+ record->type = j;
+ ck_epoch_register(&linux_epoch[j],
+ &record->epoch_record, NULL);
+ TAILQ_INIT(&record->ts_head);
+ }
+ }
+}
+SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);
+
+static void
+linux_rcu_cleaner_func(void *context, int pending __unused)
+{
+ struct linux_epoch_head *head = context;
+ struct callback_head *rcu;
+ STAILQ_HEAD(, callback_head) tmp_head;
+ struct llist_node *node, *next;
+ uintptr_t offset;
+
+ /* move current callbacks into own queue */
+ STAILQ_INIT(&tmp_head);
+ llist_for_each_safe(node, next, llist_del_all(&head->cb_head)) {
+ rcu = container_of(node, struct callback_head, node);
+ /* re-reverse list to restore chronological order */
+ STAILQ_INSERT_HEAD(&tmp_head, rcu, entry);
+ }
+
+ /* synchronize */
+ linux_synchronize_rcu(head - linux_epoch_head);
+
+ /* dispatch all callbacks, if any */
+ while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
+ STAILQ_REMOVE_HEAD(&tmp_head, entry);
+
+ offset = (uintptr_t)rcu->func;
+
+ if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
+ kfree((char *)rcu - offset);
+ else
+ rcu->func((struct rcu_head *)rcu);
+ }
+}
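
[Editor's sketch] The offset comparison in the cleaner above is what lets kfree_rcu() work without a real callback: for that case the stored "function pointer" is just the offset of the rcu_head within the enclosing object. A hedged sketch of how a caller relies on this (the structure is invented):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_node {
		int payload;
		struct rcu_head rcu;	/* offset stays below LINUX_KFREE_RCU_OFFSET_MAX */
	};

	static void
	example_defer_free(struct example_node *n)
	{
		/*
		 * Conceptually this queues call_rcu() with the offset of "rcu"
		 * encoded as the callback; the cleaner detects the small value
		 * and kfree()s the containing object after a grace period.
		 */
		kfree_rcu(n, rcu);
	}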
+
+void
+linux_rcu_read_lock(unsigned type)
+{
+ struct linux_epoch_record *record;
+ struct task_struct *ts;
+
+ MPASS(type < RCU_TYPE_MAX);
+
+ if (RCU_SKIP())
+ return;
+
+ ts = current;
+
+ /* assert valid refcount */
+ MPASS(ts->rcu_recurse[type] != INT_MAX);
+
+ if (++(ts->rcu_recurse[type]) != 1)
+ return;
+
+ /*
+ * Pin thread to current CPU so that the unlock code gets the
+ * same per-CPU epoch record:
+ */
+ sched_pin();
+
+ record = &DPCPU_GET(linux_epoch_record[type]);
+
+ /*
+ * Use a critical section to prevent recursion inside
+ * ck_epoch_begin(). Apart from that, this function supports recursion.
+ */
+ critical_enter();
+ ck_epoch_begin(&record->epoch_record,
+ (ck_epoch_section_t *)&ts->rcu_section[type]);
+ TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
+ critical_exit();
+}
+
+void
+linux_rcu_read_unlock(unsigned type)
+{
+ struct linux_epoch_record *record;
+ struct task_struct *ts;
+
+ MPASS(type < RCU_TYPE_MAX);
+
+ if (RCU_SKIP())
+ return;
+
+ ts = current;
+
+ /* assert valid refcount */
+ MPASS(ts->rcu_recurse[type] > 0);
+
+ if (--(ts->rcu_recurse[type]) != 0)
+ return;
+
+ record = &DPCPU_GET(linux_epoch_record[type]);
+
+ /*
+ * Use a critical section to prevent recursion inside
+ * ck_epoch_end(). Apart from that, this function supports recursion.
+ */
+ critical_enter();
+ ck_epoch_end(&record->epoch_record,
+ (ck_epoch_section_t *)&ts->rcu_section[type]);
+ TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
+ critical_exit();
+
+ sched_unpin();
+}
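
[Editor's sketch] To put the lock/unlock pair in context, a reader/updater sketch using the Linux-facing wrappers (rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), synchronize_rcu()); the shared configuration pointer is hypothetical.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_cfg {
		int value;
	};

	static struct example_cfg __rcu *example_cfg_p;

	static int
	example_reader(void)
	{
		struct example_cfg *cfg;
		int v = 0;

		rcu_read_lock();
		cfg = rcu_dereference(example_cfg_p);
		if (cfg != NULL)
			v = cfg->value;
		rcu_read_unlock();
		return (v);
	}

	static void
	example_update(struct example_cfg *newcfg, struct example_cfg *oldcfg)
	{
		rcu_assign_pointer(example_cfg_p, newcfg);
		synchronize_rcu();	/* wait until no reader can still see oldcfg */
		kfree(oldcfg);
	}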
+
+bool
+linux_rcu_read_lock_held(unsigned type)
+{
+#ifdef INVARIANTS
+ struct linux_epoch_record *record __diagused;
+ struct task_struct *ts;
+
+ MPASS(type < RCU_TYPE_MAX);
+
+ if (RCU_SKIP())
+ return (false);
+
+ if (__current_unallocated(curthread))
+ return (false);
+
+ ts = current;
+ if (ts->rcu_recurse[type] == 0)
+ return (false);
+
+ MPASS(curthread->td_pinned != 0);
+ MPASS((record = &DPCPU_GET(linux_epoch_record[type])) &&
+ record->epoch_record.active != 0);
+#endif
+
+ return (true);
+}
+
+static void
+linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
+{
+ struct linux_epoch_record *record =
+ container_of(epoch_record, struct linux_epoch_record, epoch_record);
+ struct thread *td = curthread;
+ struct task_struct *ts;
+
+ /* check if blocked on the current CPU */
+ if (record->cpuid == PCPU_GET(cpuid)) {
+ bool is_sleeping = false;
+ u_char prio = 0;
+
+ /*
+ * Find the lowest priority or sleeping thread which
+ * is blocking synchronization on this CPU core. All
+ * the threads in the queue are CPU-pinned and cannot
+ * go anywhere while the current thread is locked.
+ */
+ TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
+ if (ts->task_thread->td_priority > prio)
+ prio = ts->task_thread->td_priority;
+ is_sleeping |= (ts->task_thread->td_inhibitors != 0);
+ }
+
+ if (is_sleeping) {
+ thread_unlock(td);
+ pause("W", 1);
+ thread_lock(td);
+ } else {
+ /* set new thread priority */
+ sched_prio(td, prio);
+ /* task switch */
+ mi_switch(SW_VOL | SWT_RELINQUISH);
+ /*
+ * It is important that the thread lock is dropped
+ * while yielding, to allow other threads to
+ * acquire the lock pointed to by
+ * TDQ_LOCKPTR(td). Currently mi_switch() will
+ * unlock the thread lock before
+ * returning. Otherwise a deadlock-like
+ * situation might arise.
+ */
+ thread_lock(td);
+ }
+ } else {
+ /*
+ * To avoid spinning move execution to the other CPU
+ * which is blocking synchronization. Set highest
+ * thread priority so that code gets run. The thread
+ * priority will be restored later.
+ */
+ sched_prio(td, 0);
+ sched_bind(td, record->cpuid);
+ }
+}
+
+void
+linux_synchronize_rcu(unsigned type)
+{
+ struct thread *td;
+ int was_bound;
+ int old_cpu;
+ int old_pinned;
+ u_char old_prio;
+
+ MPASS(type < RCU_TYPE_MAX);
+
+ if (RCU_SKIP())
+ return;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "linux_synchronize_rcu() can sleep");
+
+ td = curthread;
+ DROP_GIANT();
+
+ /*
+ * Synchronizing RCU might change the CPU core this function
+ * is running on. Save current values:
+ */
+ thread_lock(td);
+
+ old_cpu = PCPU_GET(cpuid);
+ old_pinned = td->td_pinned;
+ old_prio = td->td_priority;
+ was_bound = sched_is_bound(td);
+ sched_unbind(td);
+ td->td_pinned = 0;
+ sched_bind(td, old_cpu);
+
+ ck_epoch_synchronize_wait(&linux_epoch[type],
+ &linux_synchronize_rcu_cb, NULL);
+
+ /* restore CPU binding, if any */
+ if (was_bound != 0) {
+ sched_bind(td, old_cpu);
+ } else {
+ /* get thread back to initial CPU, if any */
+ if (old_pinned != 0)
+ sched_bind(td, old_cpu);
+ sched_unbind(td);
+ }
+ /* restore pinned after bind */
+ td->td_pinned = old_pinned;
+
+ /* restore thread priority */
+ sched_prio(td, old_prio);
+ thread_unlock(td);
+
+ PICKUP_GIANT();
+}
+
+void
+linux_rcu_barrier(unsigned type)
+{
+ struct linux_epoch_head *head;
+
+ MPASS(type < RCU_TYPE_MAX);
+
+ /*
+ * This function is not obligated to wait for a grace period.
+ * It only waits for RCU callbacks that have already been posted.
+ * If there are no RCU callbacks posted, rcu_barrier() can return
+ * immediately.
+ */
+ head = &linux_epoch_head[type];
+
+ /* wait for callbacks to complete */
+ taskqueue_drain(linux_irq_work_tq, &head->task);
+}
+
+void
+linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
+{
+ struct callback_head *rcu;
+ struct linux_epoch_head *head;
+
+ MPASS(type < RCU_TYPE_MAX);
+
+ rcu = (struct callback_head *)context;
+ head = &linux_epoch_head[type];
+
+ rcu->func = func;
+ llist_add(&rcu->node, &head->cb_head);
+ taskqueue_enqueue(linux_irq_work_tq, &head->task);
+}
+
+int
+init_srcu_struct(struct srcu_struct *srcu)
+{
+ return (0);
+}
+
+void
+cleanup_srcu_struct(struct srcu_struct *srcu)
+{
+}
+
+int
+srcu_read_lock(struct srcu_struct *srcu)
+{
+ linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
+ return (0);
+}
+
+void
+srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
+{
+ linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
+}
+
+void
+synchronize_srcu(struct srcu_struct *srcu)
+{
+ linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
+}
+
+void
+srcu_barrier(struct srcu_struct *srcu)
+{
+ linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_schedule.c b/sys/compat/linuxkpi/common/src/linux_schedule.c
new file mode 100644
index 000000000000..c6b7a2ebbd66
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_schedule.c
@@ -0,0 +1,475 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+/*
+ * Convert a relative time in jiffies to a tick count, suitable for use with
+ * native FreeBSD interfaces (callouts, sleepqueues, etc.).
+ */
+static int
+linux_jiffies_timeout_to_ticks(long timeout)
+{
+ if (timeout < 1)
+ return (1);
+ else if (timeout == MAX_SCHEDULE_TIMEOUT)
+ return (0);
+ else if (timeout > INT_MAX)
+ return (INT_MAX);
+ else
+ return (timeout);
+}
+
+static int
+linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
+ const char *wmesg, long timeout, int state)
+{
+ int flags, ret, stimeout;
+
+ MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0);
+
+ flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
+ SLEEPQ_INTERRUPTIBLE : 0);
+ stimeout = linux_jiffies_timeout_to_ticks(timeout);
+
+ sleepq_add(wchan, NULL, wmesg, flags, 0);
+ if (stimeout != 0)
+ sleepq_set_timeout(wchan, stimeout);
+
+ DROP_GIANT();
+ if ((state & TASK_INTERRUPTIBLE) != 0) {
+ if (stimeout == 0)
+ ret = -sleepq_wait_sig(wchan, 0);
+ else
+ ret = -sleepq_timedwait_sig(wchan, 0);
+ } else {
+ if (stimeout == 0) {
+ sleepq_wait(wchan, 0);
+ ret = 0;
+ } else
+ ret = -sleepq_timedwait(wchan, 0);
+ }
+ PICKUP_GIANT();
+
+ /* filter return value */
+ if (ret != 0 && ret != -EWOULDBLOCK) {
+ linux_schedule_save_interrupt_value(task, ret);
+ ret = -ERESTARTSYS;
+ }
+ return (ret);
+}
+
+unsigned int
+linux_msleep_interruptible(unsigned int ms)
+{
+ int ret;
+
+ /* guard against invalid values */
+ if (ms == 0)
+ ms = 1;
+ ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);
+
+ switch (ret) {
+ case -EWOULDBLOCK:
+ return (0);
+ default:
+ linux_schedule_save_interrupt_value(current, ret);
+ return (ms);
+ }
+}
+
+static int
+wake_up_task(struct task_struct *task, unsigned int state)
+{
+ int ret;
+
+ ret = 0;
+ sleepq_lock(task);
+ if ((atomic_read(&task->state) & state) != 0) {
+ set_task_state(task, TASK_WAKING);
+ sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
+ ret = 1;
+ }
+ sleepq_release(task);
+ return (ret);
+}
+
+bool
+linux_signal_pending(struct task_struct *task)
+{
+ struct thread *td;
+ sigset_t pending;
+
+ td = task->task_thread;
+ PROC_LOCK(td->td_proc);
+ pending = td->td_siglist;
+ SIGSETOR(pending, td->td_proc->p_siglist);
+ SIGSETNAND(pending, td->td_sigmask);
+ PROC_UNLOCK(td->td_proc);
+ return (!SIGISEMPTY(pending));
+}
+
+bool
+linux_fatal_signal_pending(struct task_struct *task)
+{
+ struct thread *td;
+ bool ret;
+
+ td = task->task_thread;
+ PROC_LOCK(td->td_proc);
+ ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
+ SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
+ PROC_UNLOCK(td->td_proc);
+ return (ret);
+}
+
+bool
+linux_signal_pending_state(long state, struct task_struct *task)
+{
+
+ MPASS((state & ~TASK_NORMAL) == 0);
+
+ if ((state & TASK_INTERRUPTIBLE) == 0)
+ return (false);
+ return (linux_signal_pending(task));
+}
+
+void
+linux_send_sig(int signo, struct task_struct *task)
+{
+ struct thread *td;
+
+ td = task->task_thread;
+ PROC_LOCK(td->td_proc);
+ tdsignal(td, signo);
+ PROC_UNLOCK(td->td_proc);
+}
+
+int
+autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
+ void *key __unused)
+{
+ struct task_struct *task;
+ int ret;
+
+ task = wq->private;
+ if ((ret = wake_up_task(task, state)) != 0)
+ list_del_init(&wq->task_list);
+ return (ret);
+}
+
+int
+default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
+ void *key __unused)
+{
+ return (wake_up_task(wq->private, state));
+}
+
+long
+linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout)
+{
+ void *wchan;
+ struct task_struct *task;
+ int ret;
+ int remainder;
+
+ task = current;
+ wchan = wq->private;
+
+ remainder = jiffies + timeout;
+
+ set_task_state(task, state);
+
+ sleepq_lock(wchan);
+ if (!(wq->flags & WQ_FLAG_WOKEN)) {
+ ret = linux_add_to_sleepqueue(wchan, task, "woken",
+ timeout, state);
+ } else {
+ sleepq_release(wchan);
+ ret = 0;
+ }
+
+ set_task_state(task, TASK_RUNNING);
+ wq->flags &= ~WQ_FLAG_WOKEN;
+
+ if (timeout == MAX_SCHEDULE_TIMEOUT)
+ return (MAX_SCHEDULE_TIMEOUT);
+
+ /* compute the remaining number of jiffies */
+ remainder -= jiffies;
+
+ /* range check return value */
+ if (ret == -ERESTARTSYS && remainder < 1)
+ remainder = 1;
+ else if (remainder < 0)
+ remainder = 0;
+ else if (remainder > timeout)
+ remainder = timeout;
+ return (remainder);
+}
+
+int
+woken_wake_function(wait_queue_t *wq, unsigned int state,
+ int flags __unused, void *key __unused)
+{
+ void *wchan;
+
+ wchan = wq->private;
+
+ sleepq_lock(wchan);
+ wq->flags |= WQ_FLAG_WOKEN;
+ sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
+ sleepq_release(wchan);
+
+ return (1);
+}
+
+void
+linux_init_wait_entry(wait_queue_t *wq, int flags)
+{
+
+ memset(wq, 0, sizeof(*wq));
+ wq->flags = flags;
+ wq->private = current;
+ wq->func = autoremove_wake_function;
+ INIT_LIST_HEAD(&wq->task_list);
+}
+
+void
+linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
+{
+ wait_queue_t *pos, *next;
+
+ if (!locked)
+ spin_lock(&wqh->lock);
+ list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
+ if (pos->func == NULL) {
+ if (wake_up_task(pos->private, state) != 0 && --nr == 0)
+ break;
+ } else {
+ if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
+ break;
+ }
+ }
+ if (!locked)
+ spin_unlock(&wqh->lock);
+}
+
+void
+linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
+{
+
+ spin_lock(&wqh->lock);
+ if (list_empty(&wq->task_list))
+ __add_wait_queue(wqh, wq);
+ set_task_state(current, state);
+ spin_unlock(&wqh->lock);
+}
+
+void
+linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ set_task_state(current, TASK_RUNNING);
+ if (!list_empty(&wq->task_list)) {
+ __remove_wait_queue(wqh, wq);
+ INIT_LIST_HEAD(&wq->task_list);
+ }
+ spin_unlock(&wqh->lock);
+}
+
+bool
+linux_waitqueue_active(wait_queue_head_t *wqh)
+{
+ bool ret;
+
+ spin_lock(&wqh->lock);
+ ret = !list_empty(&wqh->task_list);
+ spin_unlock(&wqh->lock);
+ return (ret);
+}
+
+int
+linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, long timeout,
+ unsigned int state, spinlock_t *lock)
+{
+ struct task_struct *task;
+ int ret;
+
+ if (lock != NULL)
+ spin_unlock_irq(lock);
+
+ task = current;
+
+ sleepq_lock(task);
+ if (atomic_read(&task->state) != TASK_WAKING) {
+ ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
+ state);
+ } else {
+ sleepq_release(task);
+ ret = 0;
+ }
+
+ if (lock != NULL)
+ spin_lock_irq(lock);
+ return (ret);
+}
+
+long
+linux_schedule_timeout(long timeout)
+{
+ struct task_struct *task;
+ long remainder;
+ int ret, state;
+
+ task = current;
+
+ remainder = jiffies + timeout;
+
+ sleepq_lock(task);
+ state = atomic_read(&task->state);
+ if (state != TASK_WAKING) {
+ ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
+ state);
+ } else {
+ sleepq_release(task);
+ ret = 0;
+ }
+ set_task_state(task, TASK_RUNNING);
+
+ if (timeout == MAX_SCHEDULE_TIMEOUT)
+ return (MAX_SCHEDULE_TIMEOUT);
+
+	/* compute remaining jiffies */
+ remainder -= jiffies;
+
+ /* range check return value */
+ if (ret == -ERESTARTSYS && remainder < 1)
+ remainder = 1;
+ else if (remainder < 0)
+ remainder = 0;
+ else if (remainder > timeout)
+ remainder = timeout;
+ return (remainder);
+}
+
+static void
+wake_up_sleepers(void *wchan)
+{
+ sleepq_lock(wchan);
+ sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
+ sleepq_release(wchan);
+}
+
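+/*
+ * Derive a unique wait channel from a (word, bit) pair: shifting the word
+ * address left by six bits leaves room to encode bit numbers 0..63 without
+ * colliding with neighbouring words.
+ */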
+#define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit)))
+
+void
+linux_wake_up_bit(void *word, int bit)
+{
+
+ wake_up_sleepers(bit_to_wchan(word, bit));
+}
+
+int
+linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
+ long timeout)
+{
+ struct task_struct *task;
+ void *wchan;
+ int ret;
+
+ task = current;
+ wchan = bit_to_wchan(word, bit);
+ for (;;) {
+ sleepq_lock(wchan);
+ if ((*word & (1 << bit)) == 0) {
+ sleepq_release(wchan);
+ ret = 0;
+ break;
+ }
+ set_task_state(task, state);
+ ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
+ state);
+ if (ret != 0)
+ break;
+ }
+ set_task_state(task, TASK_RUNNING);
+
+ return (ret);
+}
+
+void
+linux_wake_up_atomic_t(atomic_t *a)
+{
+
+ wake_up_sleepers(a);
+}
+
+int
+linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
+{
+ struct task_struct *task;
+ void *wchan;
+ int ret;
+
+ task = current;
+ wchan = a;
+ for (;;) {
+ sleepq_lock(wchan);
+ if (atomic_read(a) == 0) {
+ sleepq_release(wchan);
+ ret = 0;
+ break;
+ }
+ set_task_state(task, state);
+ ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state);
+ if (ret != 0)
+ break;
+ }
+ set_task_state(task, TASK_RUNNING);
+
+ return (ret);
+}
+
+bool
+linux_wake_up_state(struct task_struct *task, unsigned int state)
+{
+
+ return (wake_up_task(task, state) != 0);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_seq_file.c b/sys/compat/linuxkpi/common/src/linux_seq_file.c
new file mode 100644
index 000000000000..8b426825cc78
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_seq_file.c
@@ -0,0 +1,301 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016-2018, Matthew Macy <mmacy@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/sbuf.h>
+#include <sys/syslog.h>
+#include <sys/vnode.h>
+
+#include <linux/seq_file.h>
+#include <linux/file.h>
+
+#undef file
+MALLOC_DEFINE(M_LSEQ, "seq_file", "seq_file");
+
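+/*
+ * Regenerate the whole sequence into the sbuf via the start/show callbacks,
+ * then copy the requested window starting at *ppos into the caller's buffer
+ * as a NUL-terminated string.
+ */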
+ssize_t
+seq_read(struct linux_file *f, char *ubuf, size_t size, off_t *ppos)
+{
+ struct seq_file *m;
+ struct sbuf *sbuf;
+ void *p;
+ ssize_t rc;
+
+ m = f->private_data;
+ sbuf = m->buf;
+
+ p = m->op->start(m, ppos);
+ rc = m->op->show(m, p);
+ if (rc)
+ return (rc);
+
+ rc = sbuf_finish(sbuf);
+ if (rc)
+ return (rc);
+
+ rc = sbuf_len(sbuf);
+ if (*ppos >= rc || size < 1)
+ return (-EINVAL);
+
+ size = min(rc - *ppos, size);
+ rc = strscpy(ubuf, sbuf_data(sbuf) + *ppos, size + 1);
+
+ /* add 1 for null terminator */
+ if (rc > 0)
+ rc += 1;
+
+ return (rc);
+}
+
+int
+seq_write(struct seq_file *seq, const void *data, size_t len)
+{
+ int ret;
+
+ ret = sbuf_bcpy(seq->buf, data, len);
+ if (ret == 0)
+ seq->size = sbuf_len(seq->buf);
+
+ return (ret);
+}
+
+void
+seq_putc(struct seq_file *seq, char c)
+{
+ int ret;
+
+ ret = sbuf_putc(seq->buf, c);
+ if (ret == 0)
+ seq->size = sbuf_len(seq->buf);
+}
+
+void
+seq_puts(struct seq_file *seq, const char *str)
+{
+ int ret;
+
+ ret = sbuf_printf(seq->buf, "%s", str);
+ if (ret == 0)
+ seq->size = sbuf_len(seq->buf);
+}
+
+/*
+ * This only needs to be a valid address for lkpi
+ * drivers; it should never actually be called.
+ */
+off_t
+seq_lseek(struct linux_file *file, off_t offset, int whence)
+{
+
+ panic("%s not supported\n", __FUNCTION__);
+ return (0);
+}
+
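+/*
+ * The single_*() iterator callbacks describe a one-record sequence:
+ * single_start() returns a non-NULL cookie only at position zero, so the
+ * show() callback runs exactly once per read.
+ */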
+static void *
+single_start(struct seq_file *p, off_t *pos)
+{
+
+ return ((void *)(uintptr_t)(*pos == 0));
+}
+
+static void *
+single_next(struct seq_file *p, void *v, off_t *pos)
+{
+
+ ++*pos;
+ return (NULL);
+}
+
+static void
+single_stop(struct seq_file *p, void *v)
+{
+}
+
+static int
+_seq_open_without_sbuf(struct linux_file *f, const struct seq_operations *op)
+{
+ struct seq_file *p;
+
+ if ((p = malloc(sizeof(*p), M_LSEQ, M_NOWAIT|M_ZERO)) == NULL)
+ return (-ENOMEM);
+
+ p->file = f;
+ p->op = op;
+ f->private_data = (void *) p;
+ return (0);
+}
+
+int
+seq_open(struct linux_file *f, const struct seq_operations *op)
+{
+ int ret;
+
+ ret = _seq_open_without_sbuf(f, op);
+ if (ret == 0)
+ ((struct seq_file *)f->private_data)->buf = sbuf_new_auto();
+
+ return (ret);
+}
+
+void *
+__seq_open_private(struct linux_file *f, const struct seq_operations *op, int size)
+{
+ struct seq_file *seq_file;
+ void *private;
+ int error;
+
+ private = malloc(size, M_LSEQ, M_NOWAIT|M_ZERO);
+ if (private == NULL)
+ return (NULL);
+
+ error = seq_open(f, op);
+ if (error < 0) {
+ free(private, M_LSEQ);
+ return (NULL);
+ }
+
+ seq_file = (struct seq_file *)f->private_data;
+ seq_file->private = private;
+
+ return (private);
+}
+
+static int
+_single_open_without_sbuf(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d)
+{
+ struct seq_operations *op;
+ int rc = -ENOMEM;
+
+ op = malloc(sizeof(*op), M_LSEQ, M_NOWAIT);
+ if (op) {
+ op->start = single_start;
+ op->next = single_next;
+ op->stop = single_stop;
+ op->show = show;
+ rc = _seq_open_without_sbuf(f, op);
+ if (rc)
+ free(op, M_LSEQ);
+ else
+ ((struct seq_file *)f->private_data)->private = d;
+ }
+ return (rc);
+}
+
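+/*
+ * Minimal usage sketch (the callback and data names are hypothetical):
+ *
+ *	static int
+ *	my_show(struct seq_file *m, void *v)
+ *	{
+ *		seq_puts(m, "hello\n");
+ *		return (0);
+ *	}
+ *
+ *	single_open(filp, my_show, my_data);
+ */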
+int
+single_open(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d)
+{
+ int ret;
+
+ ret = _single_open_without_sbuf(f, show, d);
+ if (ret == 0)
+ ((struct seq_file *)f->private_data)->buf = sbuf_new_auto();
+
+ return (ret);
+}
+
+int
+single_open_size(struct linux_file *f, int (*show)(struct seq_file *, void *), void *d, size_t size)
+{
+ int ret;
+
+ ret = _single_open_without_sbuf(f, show, d);
+ if (ret == 0)
+ ((struct seq_file *)f->private_data)->buf = sbuf_new(
+ NULL, NULL, size, SBUF_AUTOEXTEND);
+
+ return (ret);
+}
+
+int
+seq_release(struct inode *inode __unused, struct linux_file *file)
+{
+ struct seq_file *m;
+ struct sbuf *s;
+
+ m = file->private_data;
+ s = m->buf;
+
+ sbuf_delete(s);
+ free(m, M_LSEQ);
+
+ return (0);
+}
+
+int
+seq_release_private(struct inode *inode __unused, struct linux_file *f)
+{
+ struct seq_file *seq;
+
+ seq = (struct seq_file *)f->private_data;
+ free(seq->private, M_LSEQ);
+ return (seq_release(inode, f));
+}
+
+int
+single_release(struct vnode *v, struct linux_file *f)
+{
+ const struct seq_operations *op;
+ struct seq_file *m;
+ int rc;
+
+ /* be NULL safe */
+ if ((m = f->private_data) == NULL)
+ return (0);
+
+ op = m->op;
+ rc = seq_release(v, f);
+ free(__DECONST(void *, op), M_LSEQ);
+ return (rc);
+}
+
+void
+lkpi_seq_vprintf(struct seq_file *m, const char *fmt, va_list args)
+{
+ int ret;
+
+ ret = sbuf_vprintf(m->buf, fmt, args);
+ if (ret == 0)
+ m->size = sbuf_len(m->buf);
+}
+
+void
+lkpi_seq_printf(struct seq_file *m, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ lkpi_seq_vprintf(m, fmt, args);
+ va_end(args);
+}
+
+bool
+seq_has_overflowed(struct seq_file *m)
+{
+ return (sbuf_len(m->buf) == -1);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_shmemfs.c b/sys/compat/linuxkpi/common/src/linux_shmemfs.c
new file mode 100644
index 000000000000..1fb17bc5c0cb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_shmemfs.c
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/rwlock.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/shmem_fs.h>
+
+struct page *
+linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
+{
+ struct page *page;
+ int rv;
+
+ if ((gfp & GFP_NOWAIT) != 0)
+ panic("GFP_NOWAIT is unimplemented");
+
+ VM_OBJECT_WLOCK(obj);
+ rv = vm_page_grab_valid(&page, obj, pindex, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
+ VM_OBJECT_WUNLOCK(obj);
+ if (rv != VM_PAGER_OK)
+ return (ERR_PTR(-EINVAL));
+ return (page);
+}
+
+struct linux_file *
+linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ struct fileobj {
+ struct linux_file file __aligned(sizeof(void *));
+ struct vnode vnode __aligned(sizeof(void *));
+ };
+ struct fileobj *fileobj;
+ struct linux_file *filp;
+ struct vnode *vp;
+ int error;
+
+ fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
+ if (fileobj == NULL) {
+ error = -ENOMEM;
+ goto err_0;
+ }
+ filp = &fileobj->file;
+ vp = &fileobj->vnode;
+
+ filp->f_count = 1;
+ filp->f_vnode = vp;
+ filp->f_shmem = vm_pager_allocate(OBJT_SWAP, NULL, size,
+ VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
+ if (filp->f_shmem == NULL) {
+ error = -ENOMEM;
+ goto err_1;
+ }
+ return (filp);
+err_1:
+ kfree(filp);
+err_0:
+ return (ERR_PTR(error));
+}
+
+static vm_ooffset_t
+linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
+ vm_pindex_t end, int flags)
+{
+ int start_count, end_count;
+
+ VM_OBJECT_WLOCK(obj);
+ start_count = obj->resident_page_count;
+ vm_object_page_remove(obj, start, end, flags);
+ end_count = obj->resident_page_count;
+ VM_OBJECT_WUNLOCK(obj);
+ return (start_count - end_count);
+}
+
+unsigned long
+linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
+{
+
+ return (linux_invalidate_mapping_pages_sub(obj, start, end, OBJPR_CLEANONLY));
+}
+
+void
+linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
+{
+ vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
+ vm_pindex_t end = OFF_TO_IDX(lend + 1);
+
+ (void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_shrinker.c b/sys/compat/linuxkpi/common/src/linux_shrinker.c
new file mode 100644
index 000000000000..e06490b92ed1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_shrinker.c
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (c) 2020 Emmanuel Vadot <manu@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/eventhandler.h>
+#include <sys/sx.h>
+
+#include <linux/compat.h>
+#include <linux/shrinker.h>
+#include <linux/slab.h>
+
+TAILQ_HEAD(, shrinker) lkpi_shrinkers = TAILQ_HEAD_INITIALIZER(lkpi_shrinkers);
+static struct sx sx_shrinker;
+
+struct shrinker *
+linuxkpi_shrinker_alloc(unsigned int flags, const char *fmt, ...)
+{
+ struct shrinker *shrinker;
+
+ shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
+ if (shrinker == NULL)
+ return (NULL);
+
+ shrinker->flags = flags | SHRINKER_ALLOCATED;
+ shrinker->seeks = DEFAULT_SEEKS;
+
+ return (shrinker);
+}
+
+int
+linuxkpi_register_shrinker(struct shrinker *s)
+{
+
+	KASSERT(s != NULL, ("NULL shrinker"));
+	KASSERT(s->count_objects != NULL, ("shrinker has no count_objects"));
+	KASSERT(s->scan_objects != NULL, ("shrinker has no scan_objects"));
+ sx_xlock(&sx_shrinker);
+ s->flags |= SHRINKER_REGISTERED;
+ TAILQ_INSERT_TAIL(&lkpi_shrinkers, s, next);
+ sx_xunlock(&sx_shrinker);
+ return (0);
+}
+
+void
+linuxkpi_unregister_shrinker(struct shrinker *s)
+{
+
+ sx_xlock(&sx_shrinker);
+ TAILQ_REMOVE(&lkpi_shrinkers, s, next);
+ s->flags &= ~SHRINKER_REGISTERED;
+ sx_xunlock(&sx_shrinker);
+}
+
+void
+linuxkpi_shrinker_free(struct shrinker *shrinker)
+{
+
+ if (shrinker->flags & SHRINKER_REGISTERED)
+ unregister_shrinker(shrinker);
+
+ kfree(shrinker);
+}
+
+void
+linuxkpi_synchronize_shrinkers(void)
+{
+
+ sx_xlock(&sx_shrinker);
+ sx_xunlock(&sx_shrinker);
+}
+
+#define SHRINKER_BATCH 512
+
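+/*
+ * Ask the shrinker how many objects it could free, then feed scan_objects()
+ * in batches (s->batch, or SHRINKER_BATCH by default) until that many
+ * objects have been scanned or the shrinker returns SHRINK_STOP.
+ */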
+static void
+shrinker_shrink(struct shrinker *s)
+{
+ struct shrink_control sc;
+ unsigned long can_free;
+ unsigned long batch;
+ unsigned long scanned = 0;
+ unsigned long ret;
+
+ can_free = s->count_objects(s, &sc);
+ if (can_free <= 0)
+ return;
+
+ batch = s->batch ? s->batch : SHRINKER_BATCH;
+ while (scanned <= can_free) {
+ sc.nr_to_scan = batch;
+ ret = s->scan_objects(s, &sc);
+ if (ret == SHRINK_STOP)
+ break;
+ scanned += batch;
+ }
+}
+
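+/* Run every registered shrinker when the VM reports memory pressure. */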
+static void
+linuxkpi_vm_lowmem(void *arg __unused, int flags __unused)
+{
+ struct shrinker *s;
+
+ sx_xlock(&sx_shrinker);
+ TAILQ_FOREACH(s, &lkpi_shrinkers, next) {
+ shrinker_shrink(s);
+ }
+ sx_xunlock(&sx_shrinker);
+}
+
+static eventhandler_tag lowmem_tag;
+
+static void
+linuxkpi_sysinit_shrinker(void *arg __unused)
+{
+
+ sx_init(&sx_shrinker, "lkpi-shrinker");
+ lowmem_tag = EVENTHANDLER_REGISTER(vm_lowmem, linuxkpi_vm_lowmem,
+ NULL, EVENTHANDLER_PRI_FIRST);
+}
+
+static void
+linuxkpi_sysuninit_shrinker(void *arg __unused)
+{
+
+ sx_destroy(&sx_shrinker);
+ EVENTHANDLER_DEREGISTER(vm_lowmem, lowmem_tag);
+}
+
+SYSINIT(linuxkpi_shrinker, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ linuxkpi_sysinit_shrinker, NULL);
+SYSUNINIT(linuxkpi_shrinker, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ linuxkpi_sysuninit_shrinker, NULL);
diff --git a/sys/compat/linuxkpi/common/src/linux_simple_attr.c b/sys/compat/linuxkpi/common/src/linux_simple_attr.c
new file mode 100644
index 000000000000..8cc9ec7ecbc9
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_simple_attr.c
@@ -0,0 +1,207 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022, Jake Freeland <jfree@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <linux/fs.h>
+
+MALLOC_DEFINE(M_LSATTR, "simple_attr", "Linux Simple Attribute File");
+
+struct simple_attr {
+ int (*get)(void *, uint64_t *);
+ int (*set)(void *, uint64_t);
+ void *data;
+ const char *fmt;
+ struct mutex mutex;
+};
+
+/*
+ * simple_attr_open: open and populate simple attribute data
+ *
+ * @inode: file inode
+ * @filp: file pointer
+ * @get: ->get() for reading file data
+ * @set: ->set() for writing file data
+ * @fmt: format specifier for data returned by @get
+ *
+ * Allocate a simple_attr and initialize its members appropriately.
+ * The simple_attr must be stored in filp->private_data.
+ * Simple attr files do not support seeking. Open the file as nonseekable.
+ *
+ * Return value: 0 on success, or a negative errno on failure
+ */
+int
+simple_attr_open(struct inode *inode, struct file *filp,
+ int (*get)(void *, uint64_t *), int (*set)(void *, uint64_t),
+ const char *fmt)
+{
+ struct simple_attr *sattr;
+ sattr = malloc(sizeof(*sattr), M_LSATTR, M_ZERO | M_NOWAIT);
+ if (sattr == NULL)
+ return (-ENOMEM);
+
+ sattr->get = get;
+ sattr->set = set;
+ sattr->data = inode->i_private;
+ sattr->fmt = fmt;
+ mutex_init(&sattr->mutex);
+
+ filp->private_data = (void *) sattr;
+
+ return (nonseekable_open(inode, filp));
+}
+
+int
+simple_attr_release(struct inode *inode, struct file *filp)
+{
+ free(filp->private_data, M_LSATTR);
+ return (0);
+}
+
+/*
+ * simple_attr_read: read simple attr data and transfer into buffer
+ *
+ * @filp: file pointer
+ * @buf: kernel space buffer
+ * @read_size: number of bytes to be transferred
+ * @ppos: starting pointer position for transfer
+ *
+ * The simple_attr structure is stored in filp->private_data.
+ * ->get() retrieves raw file data.
+ * The ->fmt specifier can format this data to be human readable.
+ * This output is then transferred into the @buf buffer.
+ *
+ * Return value:
+ * On success, number of bytes transferred
+ * On failure, negative signed ERRNO
+ */
+ssize_t
+simple_attr_read(struct file *filp, char *buf, size_t read_size, loff_t *ppos)
+{
+ struct simple_attr *sattr;
+ uint64_t data;
+ ssize_t ret;
+ char prebuf[24];
+
+ sattr = filp->private_data;
+
+ if (sattr->get == NULL)
+ return (-EFAULT);
+
+ mutex_lock(&sattr->mutex);
+
+ ret = sattr->get(sattr->data, &data);
+ if (ret)
+ goto unlock;
+
+ scnprintf(prebuf, sizeof(prebuf), sattr->fmt, data);
+
+ ret = strlen(prebuf) + 1;
+ if (*ppos >= ret || read_size < 1) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ read_size = min(ret - *ppos, read_size);
+ ret = strscpy(buf, prebuf + *ppos, read_size);
+
+ /* add 1 for null terminator */
+ if (ret > 0)
+ ret += 1;
+
+unlock:
+ mutex_unlock(&sattr->mutex);
+ return (ret);
+}
+
+/*
+ * simple_attr_write_common: write contents of buffer into simple attribute file
+ *
+ * @filp: file pointer
+ * @buf: kernel space buffer
+ * @write_size: number of bytes to be transferred
+ * @ppos: starting pointer position for transfer
+ * @is_signed: signedness of data in @buf
+ *
+ * The simple_attr structure is stored in filp->private_data.
+ * Convert the @buf string to unsigned long long.
+ * ->set() writes unsigned long long data into the simple attr file.
+ *
+ * Return value:
+ * On success, number of bytes written to simple attr
+ * On failure, negative signed ERRNO
+ */
+static ssize_t
+simple_attr_write_common(struct file *filp, const char *buf, size_t write_size,
+ loff_t *ppos, bool is_signed)
+{
+ struct simple_attr *sattr;
+ unsigned long long data;
+ size_t bufsize;
+ ssize_t ret;
+
+ sattr = filp->private_data;
+ bufsize = strlen(buf) + 1;
+
+ if (sattr->set == NULL)
+ return (-EFAULT);
+
+ if (*ppos >= bufsize || write_size < 1)
+ return (-EINVAL);
+
+ mutex_lock(&sattr->mutex);
+
+ if (is_signed)
+ ret = kstrtoll(buf + *ppos, 0, &data);
+ else
+ ret = kstrtoull(buf + *ppos, 0, &data);
+ if (ret)
+ goto unlock;
+
+ ret = sattr->set(sattr->data, data);
+ if (ret)
+ goto unlock;
+
+ ret = bufsize - *ppos;
+
+unlock:
+ mutex_unlock(&sattr->mutex);
+ return (ret);
+}
+
+ssize_t
+simple_attr_write(struct file *filp, const char *buf, size_t write_size,
+ loff_t *ppos)
+{
+ return (simple_attr_write_common(filp, buf, write_size, ppos, false));
+}
+
+ssize_t
+simple_attr_write_signed(struct file *filp, const char *buf, size_t write_size,
+ loff_t *ppos)
+{
+ return (simple_attr_write_common(filp, buf, write_size, ppos, true));
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_skbuff.c b/sys/compat/linuxkpi/common/src/linux_skbuff.c
new file mode 100644
index 000000000000..abfb642ba708
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_skbuff.c
@@ -0,0 +1,361 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ * Copyright (c) 2021-2022 Bjoern A. Zeeb
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
+ * Do not rely on the internals of this implementation. They are highly
+ * likely to change as we will improve the integration to FreeBSD mbufs.
+ */
+
+#include <sys/cdefs.h>
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+
+#include <vm/uma.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#ifdef __LP64__
+#include <linux/log2.h>
+#endif
+
+SYSCTL_DECL(_compat_linuxkpi);
+SYSCTL_NODE(_compat_linuxkpi, OID_AUTO, skb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "LinuxKPI skbuff");
+
+#ifdef SKB_DEBUG
+int linuxkpi_debug_skb;
+SYSCTL_INT(_compat_linuxkpi_skb, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &linuxkpi_debug_skb, 0, "SKB debug level");
+#endif
+
+static uma_zone_t skbzone;
+
+#define SKB_DMA32_MALLOC
+#ifdef SKB_DMA32_MALLOC
+/*
+ * Realtek wireless drivers (e.g., rtw88) require 32-bit DMA in a single
+ * segment.  busdma(9) currently has a hard time providing this for 3-ish
+ * pages at large quantities (see lkpi_pci_nseg1_fail in linux_pci.c).
+ * Work around this for now by allowing a tunable to enforce physical
+ * address allocation limits using "old-school" contigmalloc(9) to avoid
+ * bouncing.
+ * Note: with the malloc/contigmalloc + kmalloc changes also providing
+ * physically contiguous memory, and the nseg=1 limit for bouncing, we
+ * should in theory be fine now and no longer need any of this; however,
+ * busdma still has trouble bouncing three contiguous pages, so for now
+ * this stays.
+ */
+static int linuxkpi_skb_memlimit;
+SYSCTL_INT(_compat_linuxkpi_skb, OID_AUTO, mem_limit, CTLFLAG_RDTUN,
+ &linuxkpi_skb_memlimit, 0, "SKB memory limit: 0=no limit, "
+ "1=32bit, 2=36bit, other=undef (currently 32bit)");
+
+static MALLOC_DEFINE(M_LKPISKB, "lkpiskb", "Linux KPI skbuff compat");
+#endif
+
+struct sk_buff *
+linuxkpi_alloc_skb(size_t size, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ void *p;
+ size_t len;
+
+ skb = uma_zalloc(skbzone, linux_check_m_flags(gfp) | M_ZERO);
+ if (skb == NULL)
+ return (NULL);
+
+ skb->prev = skb->next = skb;
+ skb->truesize = size;
+ skb->shinfo = (struct skb_shared_info *)(skb + 1);
+
+ if (size == 0)
+ return (skb);
+
+ len = size;
+#ifdef SKB_DMA32_MALLOC
+ /*
+	 * Use our own malloc type here, not the one backing kmalloc.
+ * We assume no one calls kfree directly on the skb.
+ */
+ if (__predict_false(linuxkpi_skb_memlimit != 0)) {
+ vm_paddr_t high;
+
+ switch (linuxkpi_skb_memlimit) {
+#ifdef __LP64__
+ case 2:
+			high = (0xfffffffff);	/* 36-bit limit, (1ULL << 36) - 1. */
+ break;
+#endif
+ case 1:
+ default:
+			high = (0xffffffff);	/* 32-bit limit, (1ULL << 32) - 1. */
+ break;
+ }
+ len = roundup_pow_of_two(len);
+ p = contigmalloc(len, M_LKPISKB,
+ linux_check_m_flags(gfp) | M_ZERO, 0, high, PAGE_SIZE, 0);
+ } else
+#endif
+ p = __kmalloc(len, linux_check_m_flags(gfp) | M_ZERO);
+ if (p == NULL) {
+ uma_zfree(skbzone, skb);
+ return (NULL);
+ }
+
+ skb->head = skb->data = (uint8_t *)p;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->head + size;
+
+ SKB_TRACE_FMT(skb, "data %p size %zu", (skb) ? skb->data : NULL, size);
+ return (skb);
+}
+
+struct sk_buff *
+linuxkpi_dev_alloc_skb(size_t size, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ size_t len;
+
+ len = size + NET_SKB_PAD;
+ skb = linuxkpi_alloc_skb(len, gfp);
+
+ if (skb != NULL)
+ skb_reserve(skb, NET_SKB_PAD);
+
+ SKB_TRACE_FMT(skb, "data %p size %zu len %zu",
+ (skb) ? skb->data : NULL, size, len);
+ return (skb);
+}
+
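+/*
+ * Wrap caller-provided fragment memory in an skb without copying.  The
+ * _SKB_FLAGS_SKBEXTFRAG flag records that the data area was not allocated
+ * here, so linuxkpi_kfree_skb() releases it via skb_free_frag() instead of
+ * kfree().
+ */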
+struct sk_buff *
+linuxkpi_build_skb(void *data, size_t fragsz)
+{
+ struct sk_buff *skb;
+
+ if (data == NULL || fragsz == 0)
+ return (NULL);
+
+ /* Just allocate a skb without data area. */
+ skb = linuxkpi_alloc_skb(0, GFP_KERNEL);
+ if (skb == NULL)
+ return (NULL);
+
+ skb->_flags |= _SKB_FLAGS_SKBEXTFRAG;
+ skb->truesize = fragsz;
+ skb->head = skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->head + fragsz;
+
+ return (skb);
+}
+
+struct sk_buff *
+linuxkpi_skb_copy(const struct sk_buff *skb, gfp_t gfp)
+{
+ struct sk_buff *new;
+ struct skb_shared_info *shinfo;
+ size_t len;
+ unsigned int headroom;
+
+ /* Full buffer size + any fragments. */
+ len = skb->end - skb->head + skb->data_len;
+
+ new = linuxkpi_alloc_skb(len, gfp);
+ if (new == NULL)
+ return (NULL);
+
+ headroom = skb_headroom(skb);
+ /* Fixup head and end. */
+ skb_reserve(new, headroom); /* data and tail move headroom forward. */
+ skb_put(new, skb->len); /* tail and len get adjusted */
+
+ /* Copy data. */
+ memcpy(new->head, skb->data - headroom, headroom + skb->len);
+
+ /* Deal with fragments. */
+ shinfo = skb->shinfo;
+ if (shinfo->nr_frags > 0) {
+ printf("%s:%d: NOT YET SUPPORTED; missing %d frags\n",
+ __func__, __LINE__, shinfo->nr_frags);
+ SKB_TODO();
+ }
+
+ /* Deal with header fields. */
+ memcpy(new->cb, skb->cb, sizeof(skb->cb));
+ SKB_IMPROVE("more header fields to copy?");
+
+ return (new);
+}
+
+void
+linuxkpi_kfree_skb(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+ uint16_t fragno, count;
+
+ SKB_TRACE(skb);
+ if (skb == NULL)
+ return;
+
+ /*
+ * XXX TODO this will go away once we have skb backed by mbuf.
+ * currently we allow the mbuf to stay around and use a private
+ * free function to allow secondary resources to be freed along.
+ */
+ if (skb->m != NULL) {
+ void *m;
+
+ m = skb->m;
+ skb->m = NULL;
+
+ KASSERT(skb->m_free_func != NULL, ("%s: skb %p has m %p but no "
+ "m_free_func %p\n", __func__, skb, m, skb->m_free_func));
+ skb->m_free_func(m);
+ }
+ KASSERT(skb->m == NULL,
+ ("%s: skb %p m %p != NULL\n", __func__, skb, skb->m));
+
+ shinfo = skb->shinfo;
+ for (count = fragno = 0;
+ count < shinfo->nr_frags && fragno < nitems(shinfo->frags);
+ fragno++) {
+
+ if (shinfo->frags[fragno].page != NULL) {
+ struct page *p;
+
+ p = shinfo->frags[fragno].page;
+ shinfo->frags[fragno].size = 0;
+ shinfo->frags[fragno].offset = 0;
+ shinfo->frags[fragno].page = NULL;
+ __free_page(p);
+ count++;
+ }
+ }
+
+ if ((skb->_flags & _SKB_FLAGS_SKBEXTFRAG) != 0) {
+ void *p;
+
+ p = skb->head;
+ skb_free_frag(p);
+ skb->head = NULL;
+ }
+
+#ifdef SKB_DMA32_MALLOC
+ if (__predict_false(linuxkpi_skb_memlimit != 0))
+ free(skb->head, M_LKPISKB);
+ else
+#endif
+ kfree(skb->head);
+ uma_zfree(skbzone, skb);
+}
+
+static void
+lkpi_skbuff_init(void *arg __unused)
+{
+ skbzone = uma_zcreate("skbuff",
+ sizeof(struct sk_buff) + sizeof(struct skb_shared_info),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ /* Do we need to apply limits? */
+}
+SYSINIT(linuxkpi_skbuff, SI_SUB_DRIVERS, SI_ORDER_FIRST, lkpi_skbuff_init, NULL);
+
+static void
+lkpi_skbuff_destroy(void *arg __unused)
+{
+ uma_zdestroy(skbzone);
+}
+SYSUNINIT(linuxkpi_skbuff, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_skbuff_destroy, NULL);
+
+#ifdef DDB
+DB_SHOW_COMMAND(skb, db_show_skb)
+{
+ struct sk_buff *skb;
+ int i;
+
+ if (!have_addr) {
+ db_printf("usage: show skb <addr>\n");
+ return;
+ }
+
+ skb = (struct sk_buff *)addr;
+
+ db_printf("skb %p\n", skb);
+ db_printf("\tnext %p prev %p\n", skb->next, skb->prev);
+ db_printf("\tlist %p\n", &skb->list);
+ db_printf("\tlen %u data_len %u truesize %u mac_len %u\n",
+ skb->len, skb->data_len, skb->truesize, skb->mac_len);
+ db_printf("\tcsum %#06x l3hdroff %u l4hdroff %u priority %u qmap %u\n",
+ skb->csum, skb->l3hdroff, skb->l4hdroff, skb->priority, skb->qmap);
+ db_printf("\tpkt_type %d dev %p sk %p\n",
+ skb->pkt_type, skb->dev, skb->sk);
+ db_printf("\tcsum_offset %d csum_start %d ip_summed %d protocol %d\n",
+ skb->csum_offset, skb->csum_start, skb->ip_summed, skb->protocol);
+ db_printf("\t_flags %#06x\n", skb->_flags); /* XXX-BZ print names? */
+ db_printf("\thead %p data %p tail %p end %p\n",
+ skb->head, skb->data, skb->tail, skb->end);
+ db_printf("\tshinfo %p m %p m_free_func %p\n",
+ skb->shinfo, skb->m, skb->m_free_func);
+
+ if (skb->shinfo != NULL) {
+ struct skb_shared_info *shinfo;
+
+ shinfo = skb->shinfo;
+ db_printf("\t\tgso_type %d gso_size %u nr_frags %u\n",
+ shinfo->gso_type, shinfo->gso_size, shinfo->nr_frags);
+ for (i = 0; i < nitems(shinfo->frags); i++) {
+ struct skb_frag *frag;
+
+ frag = &shinfo->frags[i];
+ if (frag == NULL || frag->page == NULL)
+ continue;
+ db_printf("\t\t\tfrag %p fragno %d page %p %p "
+ "offset %ju size %zu\n",
+ frag, i, frag->page, linux_page_address(frag->page),
+ (uintmax_t)frag->offset, frag->size);
+ }
+ }
+ db_printf("\tcb[] %p {", skb->cb);
+ for (i = 0; i < nitems(skb->cb); i++) {
+ db_printf("%#04x%s",
+ skb->cb[i], (i < (nitems(skb->cb)-1)) ? ", " : "");
+ }
+ db_printf("}\n");
+
+ db_printf("\t__scratch[0] %p\n", skb->__scratch);
+};
+#endif
diff --git a/sys/compat/linuxkpi/common/src/linux_slab.c b/sys/compat/linuxkpi/common/src/linux_slab.c
new file mode 100644
index 000000000000..3d75ca480661
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_slab.c
@@ -0,0 +1,330 @@
+/*-
+ * Copyright (c) 2017 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Björn Zeeb
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/kernel.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+
+#include <sys/param.h>
+#include <sys/taskqueue.h>
+#include <vm/uma.h>
+
+struct linux_kmem_rcu {
+ struct rcu_head rcu_head;
+ struct linux_kmem_cache *cache;
+};
+
+struct linux_kmem_cache {
+ uma_zone_t cache_zone;
+ linux_kmem_ctor_t *cache_ctor;
+ unsigned cache_flags;
+ unsigned cache_size;
+ struct llist_head cache_items;
+ struct task cache_task;
+};
+
+#define LINUX_KMEM_TO_RCU(c, m) \
+ ((struct linux_kmem_rcu *)((char *)(m) + \
+ (c)->cache_size - sizeof(struct linux_kmem_rcu)))
+
+#define LINUX_RCU_TO_KMEM(r) \
+ ((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
+ (r)->cache->cache_size))
+
+static LLIST_HEAD(linux_kfree_async_list);
+
+static void lkpi_kmem_cache_free_async_fn(void *, int);
+
+void *
+lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
+{
+ return (uma_zalloc_arg(c->cache_zone, c,
+ linux_check_m_flags(flags)));
+}
+
+void *
+lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
+{
+ return (uma_zalloc_arg(c->cache_zone, c,
+ linux_check_m_flags(flags | M_ZERO)));
+}
+
+static int
+linux_kmem_ctor(void *mem, int size, void *arg, int flags)
+{
+ struct linux_kmem_cache *c = arg;
+
+ if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
+ struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);
+
+ /* duplicate cache pointer */
+ rcu->cache = c;
+ }
+
+ /* check for constructor */
+ if (likely(c->cache_ctor != NULL))
+ c->cache_ctor(mem);
+
+ return (0);
+}
+
+static void
+linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
+{
+ struct linux_kmem_rcu *rcu =
+ container_of(head, struct linux_kmem_rcu, rcu_head);
+
+ uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
+}
+
+struct linux_kmem_cache *
+linux_kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned flags, linux_kmem_ctor_t *ctor)
+{
+ struct linux_kmem_cache *c;
+
+ c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);
+
+ if (flags & SLAB_HWCACHE_ALIGN)
+ align = UMA_ALIGN_CACHE;
+ else if (align != 0)
+ align--;
+
+ if (flags & SLAB_TYPESAFE_BY_RCU) {
+ /* make room for RCU structure */
+ size = ALIGN(size, sizeof(void *));
+ size += sizeof(struct linux_kmem_rcu);
+
+ /* create cache_zone */
+ c->cache_zone = uma_zcreate(name, size,
+ linux_kmem_ctor, NULL, NULL, NULL,
+ align, UMA_ZONE_ZINIT);
+ } else {
+ /* make room for async task list items */
+ size = MAX(size, sizeof(struct llist_node));
+
+ /* create cache_zone */
+ c->cache_zone = uma_zcreate(name, size,
+ ctor ? linux_kmem_ctor : NULL, NULL,
+ NULL, NULL, align, 0);
+ }
+
+ c->cache_flags = flags;
+ c->cache_ctor = ctor;
+ c->cache_size = size;
+ init_llist_head(&c->cache_items);
+ TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
+ return (c);
+}
+
+static inline void
+lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
+{
+ struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);
+
+ call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
+}
+
+static inline void
+lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
+{
+ uma_zfree(c->cache_zone, m);
+}
+
+static void
+lkpi_kmem_cache_free_async_fn(void *context, int pending)
+{
+ struct linux_kmem_cache *c = context;
+ struct llist_node *freed, *next;
+
+ llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
+ lkpi_kmem_cache_free_sync(c, freed);
+}
+
+static inline void
+lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
+{
+ if (m == NULL)
+ return;
+
+ llist_add(m, &c->cache_items);
+ taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
+}
+
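+/*
+ * Free an object through one of three paths: RCU-typesafe caches defer the
+ * free to an RCU callback, frees issued from a critical section are handed
+ * off to the irq-work taskqueue, and everything else is freed directly.
+ */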
+void
+lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
+{
+ if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
+ lkpi_kmem_cache_free_rcu(c, m);
+ else if (unlikely(curthread->td_critnest != 0))
+ lkpi_kmem_cache_free_async(c, m);
+ else
+ lkpi_kmem_cache_free_sync(c, m);
+}
+
+void
+linux_kmem_cache_destroy(struct linux_kmem_cache *c)
+{
+ if (c == NULL)
+ return;
+
+ if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
+ /* make sure all free callbacks have been called */
+ rcu_barrier();
+ }
+
+ if (!llist_empty(&c->cache_items))
+ taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
+ taskqueue_drain(linux_irq_work_tq, &c->cache_task);
+ uma_zdestroy(c->cache_zone);
+ free(c, M_KMALLOC);
+}
+
+void *
+lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (size <= PAGE_SIZE)
+ return (malloc_domainset(size, M_KMALLOC,
+ linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+ else
+ return (contigmalloc_domainset(size, M_KMALLOC,
+ linux_get_vm_domain_set(node), linux_check_m_flags(flags),
+ 0, -1UL, PAGE_SIZE, 0));
+}
+
+void *
+lkpi___kmalloc(size_t size, gfp_t flags)
+{
+ size_t _s;
+
+ /* sizeof(struct llist_node) is used for kfree_async(). */
+ _s = MAX(size, sizeof(struct llist_node));
+
+ if (_s <= PAGE_SIZE)
+ return (malloc(_s, M_KMALLOC, linux_check_m_flags(flags)));
+ else
+ return (contigmalloc(_s, M_KMALLOC, linux_check_m_flags(flags),
+ 0, -1UL, PAGE_SIZE, 0));
+}
+
+void *
+lkpi_krealloc(void *ptr, size_t size, gfp_t flags)
+{
+ void *nptr;
+ size_t osize;
+
+ /*
+ * First handle invariants based on function arguments.
+ */
+ if (ptr == NULL)
+ return (kmalloc(size, flags));
+
+ osize = ksize(ptr);
+ if (size <= osize)
+ return (ptr);
+
+ /*
+ * We know the new size > original size. realloc(9) does not (and cannot)
+ * know about our requirements for physically contiguous memory, so we can
+ * only call it for sizes up to and including PAGE_SIZE, and otherwise have
+ * to replicate its functionality using kmalloc to get the contigmalloc(9)
+ * backing.
+ */
+ if (size <= PAGE_SIZE)
+ return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));
+
+ nptr = kmalloc(size, flags);
+ if (nptr == NULL)
+ return (NULL);
+
+ memcpy(nptr, ptr, osize);
+ kfree(ptr);
+ return (nptr);
+}
+
+struct lkpi_kmalloc_ctx {
+ size_t size;
+ gfp_t flags;
+ void *addr;
+};
+
+static void
+lkpi_kmalloc_cb(void *ctx)
+{
+ struct lkpi_kmalloc_ctx *lmc = ctx;
+
+ lmc->addr = __kmalloc(lmc->size, lmc->flags);
+}
+
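+/*
+ * Route the actual allocation through lkpi_fpu_safe_exec() so that a
+ * kmalloc() issued from inside a kernel FPU section is executed in a
+ * context where it is safe to do so.
+ */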
+void *
+lkpi_kmalloc(size_t size, gfp_t flags)
+{
+ struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };
+
+ lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
+	return (lmc.addr);
+}
+
+static void
+linux_kfree_async_fn(void *context, int pending)
+{
+ struct llist_node *freed;
+
+	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
+ kfree(freed);
+}
+static struct task linux_kfree_async_task =
+ TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);
+
+static void
+linux_kfree_async(void *addr)
+{
+ if (addr == NULL)
+ return;
+ llist_add(addr, &linux_kfree_async_list);
+ taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
+}
+
+void
+lkpi_kfree(const void *ptr)
+{
+ if (ZERO_OR_NULL_PTR(ptr))
+ return;
+
+ if (curthread->td_critnest != 0)
+ linux_kfree_async(__DECONST(void *, ptr));
+ else
+ free(__DECONST(void *, ptr), M_KMALLOC);
+}
+
diff --git a/sys/compat/linuxkpi/common/src/linux_tasklet.c b/sys/compat/linuxkpi/common/src/linux_tasklet.c
new file mode 100644
index 000000000000..e443ab3958b4
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_tasklet.c
@@ -0,0 +1,277 @@
+/*-
+ * Copyright (c) 2017 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/gtaskqueue.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+
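+/*
+ * Tasklet state machine: IDLE -> BUSY when a tasklet is scheduled,
+ * BUSY -> EXEC while the handler runs the callback, and EXEC -> LOOP when
+ * the tasklet is rescheduled during execution, which makes the handler run
+ * the callback again.
+ */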
+#define TASKLET_ST_IDLE 0
+#define TASKLET_ST_BUSY 1
+#define TASKLET_ST_EXEC 2
+#define TASKLET_ST_LOOP 3
+
+#define TASKLET_ST_CMPSET(ts, old, new) \
+ atomic_cmpset_int((volatile u_int *)&(ts)->tasklet_state, old, new)
+
+#define TASKLET_ST_SET(ts, new) \
+ WRITE_ONCE(*(volatile u_int *)&(ts)->tasklet_state, new)
+
+#define TASKLET_ST_GET(ts) \
+ READ_ONCE(*(volatile u_int *)&(ts)->tasklet_state)
+
+struct tasklet_worker {
+ struct mtx mtx;
+ TAILQ_HEAD(tasklet_list, tasklet_struct) head;
+ struct grouptask gtask;
+} __aligned(CACHE_LINE_SIZE);
+
+#define TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
+#define TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)
+
+DPCPU_DEFINE_STATIC(struct tasklet_worker, tasklet_worker);
+
+static void
+tasklet_handler(void *arg)
+{
+ struct tasklet_worker *tw = (struct tasklet_worker *)arg;
+ struct tasklet_struct *ts;
+ struct tasklet_struct *last;
+
+ linux_set_current(curthread);
+
+ TASKLET_WORKER_LOCK(tw);
+ last = TAILQ_LAST(&tw->head, tasklet_list);
+ while (1) {
+ ts = TAILQ_FIRST(&tw->head);
+ if (ts == NULL)
+ break;
+ TAILQ_REMOVE(&tw->head, ts, entry);
+
+ if (!atomic_read(&ts->count)) {
+ TASKLET_WORKER_UNLOCK(tw);
+ do {
+ /* reset executing state */
+ TASKLET_ST_SET(ts, TASKLET_ST_EXEC);
+
+ if (ts->use_callback)
+ ts->callback(ts);
+ else
+ ts->func(ts->data);
+
+ } while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC,
+ TASKLET_ST_IDLE) == 0);
+ TASKLET_WORKER_LOCK(tw);
+ } else {
+ TAILQ_INSERT_TAIL(&tw->head, ts, entry);
+ }
+ if (ts == last)
+ break;
+ }
+ TASKLET_WORKER_UNLOCK(tw);
+}
+
+static void
+tasklet_subsystem_init(void *arg __unused)
+{
+ struct tasklet_worker *tw;
+ char buf[32];
+ int i;
+
+ CPU_FOREACH(i) {
+ if (CPU_ABSENT(i))
+ continue;
+
+ tw = DPCPU_ID_PTR(i, tasklet_worker);
+
+ mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
+ TAILQ_INIT(&tw->head);
+ GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
+ snprintf(buf, sizeof(buf), "softirq%d", i);
+ taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
+ "tasklet", i, NULL, NULL, buf);
+ }
+}
+SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);
+
+static void
+tasklet_subsystem_uninit(void *arg __unused)
+{
+ struct tasklet_worker *tw;
+ int i;
+
+ taskqgroup_drain_all(qgroup_softirq);
+
+ CPU_FOREACH(i) {
+ if (CPU_ABSENT(i))
+ continue;
+
+ tw = DPCPU_ID_PTR(i, tasklet_worker);
+
+ taskqgroup_detach(qgroup_softirq, &tw->gtask);
+ mtx_destroy(&tw->mtx);
+ }
+}
+SYSUNINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);
+
+void
+tasklet_init(struct tasklet_struct *ts,
+ tasklet_func_t *func, unsigned long data)
+{
+ ts->entry.tqe_prev = NULL;
+ ts->entry.tqe_next = NULL;
+ ts->func = func;
+ ts->callback = NULL;
+ ts->data = data;
+ atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
+ atomic_set(&ts->count, 0);
+ ts->use_callback = false;
+}
+
+void
+tasklet_setup(struct tasklet_struct *ts, tasklet_callback_t *c)
+{
+ ts->entry.tqe_prev = NULL;
+ ts->entry.tqe_next = NULL;
+ ts->func = NULL;
+ ts->callback = c;
+ ts->data = 0;
+ atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
+ atomic_set(&ts->count, 0);
+ ts->use_callback = true;
+}
+
+void
+local_bh_enable(void)
+{
+ sched_unpin();
+}
+
+void
+local_bh_disable(void)
+{
+ sched_pin();
+}
+
+void
+tasklet_schedule(struct tasklet_struct *ts)
+{
+
+ /* tasklet is paused */
+ if (atomic_read(&ts->count))
+ return;
+
+ if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
+ /* tasklet_handler() will loop */
+ } else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
+ struct tasklet_worker *tw;
+
+ tw = &DPCPU_GET(tasklet_worker);
+
+ /* tasklet_handler() was not queued */
+ TASKLET_WORKER_LOCK(tw);
+ /* enqueue tasklet */
+ TAILQ_INSERT_TAIL(&tw->head, ts, entry);
+ /* schedule worker */
+ GROUPTASK_ENQUEUE(&tw->gtask);
+ TASKLET_WORKER_UNLOCK(tw);
+ } else {
+ /*
+ * tasklet_handler() is already executing
+ *
+ * If the state is neither EXEC nor IDLE, it is either
+ * LOOP or BUSY. If the state changed between the two
+		 * CMPSETs above, the only possible transitions by
+		 * elimination are LOOP->EXEC and BUSY->EXEC. If an
+		 * EXEC->LOOP transition was missed, that is not a
+ * problem because the callback function is then
+ * already about to be called again.
+ */
+ }
+}
+
+void
+tasklet_kill(struct tasklet_struct *ts)
+{
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
+
+ /* wait until tasklet is no longer busy */
+ while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
+ pause("W", 1);
+}
+
+void
+tasklet_enable(struct tasklet_struct *ts)
+{
+
+ atomic_dec(&ts->count);
+}
+
+void
+tasklet_disable(struct tasklet_struct *ts)
+{
+
+ atomic_inc(&ts->count);
+ tasklet_unlock_wait(ts);
+}
+
+void
+tasklet_disable_nosync(struct tasklet_struct *ts)
+{
+ atomic_inc(&ts->count);
+ barrier();
+}
+
+int
+tasklet_trylock(struct tasklet_struct *ts)
+{
+
+ return (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY));
+}
+
+void
+tasklet_unlock(struct tasklet_struct *ts)
+{
+
+ TASKLET_ST_SET(ts, TASKLET_ST_IDLE);
+}
+
+void
+tasklet_unlock_wait(struct tasklet_struct *ts)
+{
+
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_unlock_wait() can sleep");
+
+ /* wait until tasklet is no longer busy */
+ while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
+ pause("W", 1);
+}
diff --git a/sys/compat/linuxkpi/common/src/linux_usb.c b/sys/compat/linuxkpi/common/src/linux_usb.c
new file mode 100644
index 000000000000..cdd3d9a01f35
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_usb.c
@@ -0,0 +1,1720 @@
+/*-
+ * Copyright (c) 2007 Luigi Rizzo - Universita` di Pisa. All rights reserved.
+ * Copyright (c) 2007 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef USB_GLOBAL_INCLUDE_FILE
+#include USB_GLOBAL_INCLUDE_FILE
+#else
+#include <sys/stdint.h>
+#include <sys/stddef.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/sysctl.h>
+#include <sys/sx.h>
+#include <sys/unistd.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/priv.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <dev/usb/usb_core.h>
+#include <linux/usb.h>
+#include <dev/usb/usb_process.h>
+#include <dev/usb/usb_device.h>
+#include <dev/usb/usb_util.h>
+#include <dev/usb/usb_busdma.h>
+#include <dev/usb/usb_transfer.h>
+#include <dev/usb/usb_hub.h>
+#include <dev/usb/usb_request.h>
+#include <dev/usb/usb_debug.h>
+#include <dev/usb/usb_dynamic.h>
+#endif /* USB_GLOBAL_INCLUDE_FILE */
+
+struct usb_linux_softc {
+ LIST_ENTRY(usb_linux_softc) sc_attached_list;
+
+ device_t sc_fbsd_dev;
+ struct usb_device *sc_fbsd_udev;
+ struct usb_interface *sc_ui;
+ struct usb_driver *sc_udrv;
+};
+
+/* prototypes */
+static device_probe_t usb_linux_probe;
+static device_attach_t usb_linux_attach;
+static device_detach_t usb_linux_detach;
+static device_suspend_t usb_linux_suspend;
+static device_resume_t usb_linux_resume;
+
+static usb_callback_t usb_linux_isoc_callback;
+static usb_callback_t usb_linux_non_isoc_callback;
+
+static usb_complete_t usb_linux_wait_complete;
+
+static uint16_t usb_max_isoc_frames(struct usb_device *);
+static int usb_start_wait_urb(struct urb *, usb_timeout_t, uint16_t *);
+static const struct usb_device_id *usb_linux_lookup_id(
+ const struct usb_device_id *, struct usb_attach_arg *);
+static struct usb_driver *usb_linux_get_usb_driver(struct usb_linux_softc *);
+static int usb_linux_create_usb_device(struct usb_device *, device_t);
+static void usb_linux_cleanup_interface(struct usb_device *,
+ struct usb_interface *);
+static void usb_linux_complete(struct usb_xfer *);
+static int usb_unlink_urb_sub(struct urb *, uint8_t);
+
+/*------------------------------------------------------------------------*
+ * FreeBSD USB interface
+ *------------------------------------------------------------------------*/
+
+static LIST_HEAD(, usb_linux_softc) usb_linux_attached_list;
+static LIST_HEAD(, usb_driver) usb_linux_driver_list;
+
+static device_method_t usb_linux_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, usb_linux_probe),
+ DEVMETHOD(device_attach, usb_linux_attach),
+ DEVMETHOD(device_detach, usb_linux_detach),
+ DEVMETHOD(device_suspend, usb_linux_suspend),
+ DEVMETHOD(device_resume, usb_linux_resume),
+
+ DEVMETHOD_END
+};
+
+static driver_t usb_linux_driver = {
+ .name = "usb_linux",
+ .methods = usb_linux_methods,
+ .size = sizeof(struct usb_linux_softc),
+};
+
+DRIVER_MODULE(usb_linux, uhub, usb_linux_driver, NULL, NULL);
+MODULE_VERSION(usb_linux, 1);
+
+/*------------------------------------------------------------------------*
+ * usb_linux_lookup_id
+ *
+ * This function takes an array of "struct usb_device_id" and tries
+ * to match the entries with the information in "struct usb_attach_arg".
+ * If it finds a match, the matching entry will be returned.
+ * Otherwise, NULL will be returned.
+ *------------------------------------------------------------------------*/
+static const struct usb_device_id *
+usb_linux_lookup_id(const struct usb_device_id *id, struct usb_attach_arg *uaa)
+{
+ if (id == NULL) {
+ goto done;
+ }
+ /*
+ * Keep on matching array entries until we find one with
+ * "match_flags" equal to zero, which indicates the end of the
+ * array:
+ */
+ for (; id->match_flags; id++) {
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) &&
+ (id->idVendor != uaa->info.idVendor)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) &&
+ (id->idProduct != uaa->info.idProduct)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) &&
+ (id->bcdDevice_lo > uaa->info.bcdDevice)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) &&
+ (id->bcdDevice_hi < uaa->info.bcdDevice)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) &&
+ (id->bDeviceClass != uaa->info.bDeviceClass)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) &&
+ (id->bDeviceSubClass != uaa->info.bDeviceSubClass)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) &&
+ (id->bDeviceProtocol != uaa->info.bDeviceProtocol)) {
+ continue;
+ }
+ if ((uaa->info.bDeviceClass == 0xFF) &&
+ !(id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) &&
+ (id->match_flags & (USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL))) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) &&
+ (id->bInterfaceClass != uaa->info.bInterfaceClass)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_SUBCLASS) &&
+ (id->bInterfaceSubClass != uaa->info.bInterfaceSubClass)) {
+ continue;
+ }
+ if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_PROTOCOL) &&
+ (id->bInterfaceProtocol != uaa->info.bInterfaceProtocol)) {
+ continue;
+ }
+ /* we found a match! */
+ return (id);
+ }
+
+done:
+ return (NULL);
+}
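+
+/*
+ * Example (hypothetical driver name and id values): a Linux-style
+ * driver passes an id table terminated by an all-zero entry, which is
+ * what the zero "match_flags" check above relies on:
+ *
+ *	static const struct usb_device_id foo_id_table[] = {
+ *		{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ *		      USB_DEVICE_ID_MATCH_PRODUCT,
+ *		  .idVendor = 0x1234, .idProduct = 0x5678 },
+ *		{ }	(zero "match_flags" terminates the array)
+ *	};
+ *
+ * usb_linux_lookup_id(foo_id_table, uaa) walks the array until it
+ * either finds a match or hits the terminating entry.
+ */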
+
+/*------------------------------------------------------------------------*
+ * usb_linux_probe
+ *
+ * This function is the FreeBSD probe callback. It is called from the
+ * FreeBSD USB stack through the "device_probe_and_attach()" function.
+ *------------------------------------------------------------------------*/
+static int
+usb_linux_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct usb_driver *udrv;
+ int err = ENXIO;
+
+ if (uaa->usb_mode != USB_MODE_HOST) {
+ return (ENXIO);
+ }
+ mtx_lock(&Giant);
+ LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) {
+ if (usb_linux_lookup_id(udrv->id_table, uaa)) {
+ err = BUS_PROBE_DEFAULT;
+ break;
+ }
+ }
+ mtx_unlock(&Giant);
+
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_get_usb_driver
+ *
+ * This function returns the pointer to the "struct usb_driver" where
+ * the Linux USB device driver "struct usb_device_id" match was found.
+ * We apply a lock before reading out the pointer to avoid races.
+ *------------------------------------------------------------------------*/
+static struct usb_driver *
+usb_linux_get_usb_driver(struct usb_linux_softc *sc)
+{
+ struct usb_driver *udrv;
+
+ mtx_lock(&Giant);
+ udrv = sc->sc_udrv;
+ mtx_unlock(&Giant);
+ return (udrv);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_attach
+ *
+ * This function is the FreeBSD attach callback. It is called from the
+ * FreeBSD USB stack through the "device_probe_and_attach()" function.
+ * This function is called when "usb_linux_probe()" returns zero.
+ *------------------------------------------------------------------------*/
+static int
+usb_linux_attach(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct usb_linux_softc *sc = device_get_softc(dev);
+ struct usb_driver *udrv;
+ const struct usb_device_id *id = NULL;
+
+ mtx_lock(&Giant);
+ LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) {
+ id = usb_linux_lookup_id(udrv->id_table, uaa);
+ if (id)
+ break;
+ }
+ mtx_unlock(&Giant);
+
+ if (id == NULL) {
+ return (ENXIO);
+ }
+ if (usb_linux_create_usb_device(uaa->device, dev) != 0)
+ return (ENOMEM);
+ device_set_usb_desc(dev);
+
+ sc->sc_fbsd_udev = uaa->device;
+ sc->sc_fbsd_dev = dev;
+ sc->sc_udrv = udrv;
+ sc->sc_ui = usb_ifnum_to_if(uaa->device, uaa->info.bIfaceNum);
+ if (sc->sc_ui == NULL) {
+ return (EINVAL);
+ }
+ if (udrv->probe) {
+ if ((udrv->probe) (sc->sc_ui, id)) {
+ return (ENXIO);
+ }
+ }
+ mtx_lock(&Giant);
+ LIST_INSERT_HEAD(&usb_linux_attached_list, sc, sc_attached_list);
+ mtx_unlock(&Giant);
+
+ /* success */
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_detach
+ *
+ * This function is the FreeBSD detach callback. It is called from the
+ * FreeBSD USB stack through the "device_detach()" function.
+ *------------------------------------------------------------------------*/
+static int
+usb_linux_detach(device_t dev)
+{
+ struct usb_linux_softc *sc = device_get_softc(dev);
+ struct usb_driver *udrv = NULL;
+
+ mtx_lock(&Giant);
+ if (sc->sc_attached_list.le_prev) {
+ LIST_REMOVE(sc, sc_attached_list);
+ sc->sc_attached_list.le_prev = NULL;
+ udrv = sc->sc_udrv;
+ sc->sc_udrv = NULL;
+ }
+ mtx_unlock(&Giant);
+
+ if (udrv && udrv->disconnect) {
+ (udrv->disconnect) (sc->sc_ui);
+ }
+ /*
+ * Make sure that we free all FreeBSD USB transfers belonging to
+ * this Linux "usb_interface", hence they will most likely not be
+ * needed any more.
+ */
+ usb_linux_cleanup_interface(sc->sc_fbsd_udev, sc->sc_ui);
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_suspend
+ *
+ * This function is the FreeBSD suspend callback. Usually it does nothing.
+ *------------------------------------------------------------------------*/
+static int
+usb_linux_suspend(device_t dev)
+{
+ struct usb_linux_softc *sc = device_get_softc(dev);
+ struct usb_driver *udrv = usb_linux_get_usb_driver(sc);
+ pm_message_t pm_msg;
+ int err;
+
+ err = 0;
+ if (udrv && udrv->suspend) {
+ pm_msg.event = 0; /* XXX */
+ err = (udrv->suspend) (sc->sc_ui, pm_msg);
+ }
+ return (-err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_resume
+ *
+ * This function is the FreeBSD resume callback. Usually it does nothing.
+ *------------------------------------------------------------------------*/
+static int
+usb_linux_resume(device_t dev)
+{
+ struct usb_linux_softc *sc = device_get_softc(dev);
+ struct usb_driver *udrv = usb_linux_get_usb_driver(sc);
+ int err;
+
+ err = 0;
+ if (udrv && udrv->resume)
+ err = (udrv->resume) (sc->sc_ui);
+ return (-err);
+}
+
+/*------------------------------------------------------------------------*
+ * Linux emulation layer
+ *------------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------------*
+ * usb_max_isoc_frames
+ *
+ * The following function returns the maximum number of isochronous
+ * frames that we support per URB. It is not part of the Linux USB API.
+ *------------------------------------------------------------------------*/
+static uint16_t
+usb_max_isoc_frames(struct usb_device *dev)
+{
+ ; /* indent fix */
+ switch (usbd_get_speed(dev)) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ return (USB_MAX_FULL_SPEED_ISOC_FRAMES);
+ default:
+ return (USB_MAX_HIGH_SPEED_ISOC_FRAMES);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_submit_urb
+ *
+ * This function is used to queue an URB after it has been
+ * initialized. If it returns non-zero, it means that the URB was not
+ * queued.
+ *------------------------------------------------------------------------*/
+int
+usb_submit_urb(struct urb *urb, uint16_t mem_flags)
+{
+ struct usb_host_endpoint *uhe;
+ uint8_t do_unlock;
+ int err;
+
+ if (urb == NULL)
+ return (-EINVAL);
+
+ do_unlock = mtx_owned(&Giant) ? 0 : 1;
+ if (do_unlock)
+ mtx_lock(&Giant);
+
+ if (urb->endpoint == NULL) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Check to see if the urb is in the process of being killed
+ * and stop a urb that is in the process of being killed from
+ * being re-submitted (e.g. from its completion callback
+ * function).
+ */
+ if (urb->kill_count != 0) {
+ err = -EPERM;
+ goto done;
+ }
+
+ uhe = urb->endpoint;
+
+ /*
+ * Check that we have got a FreeBSD USB transfer that will dequeue
+ * the URB structure and do the real transfer. If there are no USB
+ * transfers, then we return an error.
+ */
+ if (uhe->bsd_xfer[0] ||
+ uhe->bsd_xfer[1]) {
+ /* we are ready! */
+
+ TAILQ_INSERT_TAIL(&uhe->bsd_urb_list, urb, bsd_urb_list);
+
+ urb->status = -EINPROGRESS;
+
+ usbd_transfer_start(uhe->bsd_xfer[0]);
+ usbd_transfer_start(uhe->bsd_xfer[1]);
+ err = 0;
+ } else {
+ /* no pipes have been setup yet! */
+ urb->status = -EINVAL;
+ err = -EINVAL;
+ }
+done:
+ if (do_unlock)
+ mtx_unlock(&Giant);
+ return (err);
+}
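+
+/*
+ * Example of typical asynchronous usage (hypothetical "foo" driver
+ * names): allocate, fill and submit an URB and let the "complete"
+ * callback do the post-processing:
+ *
+ *	urb = usb_alloc_urb(0, 0);
+ *	usb_fill_bulk_urb(urb, dev, uhe, buf, len, &foo_complete, sc);
+ *	error = usb_submit_urb(urb, 0);
+ *	if (error != 0)
+ *		usb_free_urb(urb);
+ *
+ * Note that "uhe" must have been setup by "usb_setup_endpoint()"
+ * first, else this function returns -EINVAL.
+ */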
+
+/*------------------------------------------------------------------------*
+ * usb_unlink_urb
+ *
+ * This function is used to stop an URB after it has been
+ * submitted, but before the "complete" callback has been called.
+ *------------------------------------------------------------------------*/
+int
+usb_unlink_urb(struct urb *urb)
+{
+ return (usb_unlink_urb_sub(urb, 0));
+}
+
+static void
+usb_unlink_bsd(struct usb_xfer *xfer,
+ struct urb *urb, uint8_t drain)
+{
+ if (xfer == NULL)
+ return;
+ if (!usbd_transfer_pending(xfer))
+ return;
+ if (xfer->priv_fifo == (void *)urb) {
+ if (drain) {
+ mtx_unlock(&Giant);
+ usbd_transfer_drain(xfer);
+ mtx_lock(&Giant);
+ } else {
+ usbd_transfer_stop(xfer);
+ }
+ usbd_transfer_start(xfer);
+ }
+}
+
+static int
+usb_unlink_urb_sub(struct urb *urb, uint8_t drain)
+{
+ struct usb_host_endpoint *uhe;
+ uint16_t x;
+ uint8_t do_unlock;
+ int err;
+
+ if (urb == NULL)
+ return (-EINVAL);
+
+ do_unlock = mtx_owned(&Giant) ? 0 : 1;
+ if (do_unlock)
+ mtx_lock(&Giant);
+ if (drain)
+ urb->kill_count++;
+
+ if (urb->endpoint == NULL) {
+ err = -EINVAL;
+ goto done;
+ }
+ uhe = urb->endpoint;
+
+ if (urb->bsd_urb_list.tqe_prev) {
+ /* not started yet, just remove it from the queue */
+ TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list);
+ urb->bsd_urb_list.tqe_prev = NULL;
+ urb->status = -ECONNRESET;
+ urb->actual_length = 0;
+
+ for (x = 0; x < urb->number_of_packets; x++) {
+ urb->iso_frame_desc[x].actual_length = 0;
+ }
+
+ if (urb->complete) {
+ (urb->complete) (urb);
+ }
+ } else {
+ /*
+ * If the URB is not on the URB list, then check if one of
+ * the FreeBSD USB transfer are processing the current URB.
+ * If so, re-start that transfer, which will lead to the
+ * termination of that URB:
+ */
+ usb_unlink_bsd(uhe->bsd_xfer[0], urb, drain);
+ usb_unlink_bsd(uhe->bsd_xfer[1], urb, drain);
+ }
+ err = 0;
+done:
+ if (drain)
+ urb->kill_count--;
+ if (do_unlock)
+ mtx_unlock(&Giant);
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_clear_halt
+ *
+ * This function must always be used to clear a stall. A stall occurs
+ * when an USB endpoint returns a stall message to the USB host
+ * controller. Until the stall is cleared, no data can be transferred.
+ *------------------------------------------------------------------------*/
+int
+usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe)
+{
+ struct usb_config cfg[1];
+ struct usb_endpoint *ep;
+ uint8_t type;
+ uint8_t addr;
+
+ if (uhe == NULL)
+ return (-EINVAL);
+
+ type = uhe->desc.bmAttributes & UE_XFERTYPE;
+ addr = uhe->desc.bEndpointAddress;
+
+ memset(cfg, 0, sizeof(cfg));
+
+ cfg[0].type = type;
+ cfg[0].endpoint = addr & UE_ADDR;
+ cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN);
+
+ ep = usbd_get_endpoint(dev, uhe->bsd_iface_index, cfg);
+ if (ep == NULL)
+ return (-EINVAL);
+
+ usbd_clear_data_toggle(dev, ep);
+
+ return (usb_control_msg(dev, &dev->ep0,
+ UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT,
+ UF_ENDPOINT_HALT, addr, NULL, 0, 1000));
+}
+
+/*------------------------------------------------------------------------*
+ * usb_start_wait_urb
+ *
+ * This is an internal function that is used to perform synchronous
+ * Linux USB transfers.
+ *------------------------------------------------------------------------*/
+static int
+usb_start_wait_urb(struct urb *urb, usb_timeout_t timeout, uint16_t *p_actlen)
+{
+ int err;
+ uint8_t do_unlock;
+
+ /* you must have a timeout! */
+ if (timeout == 0) {
+ timeout = 1;
+ }
+ urb->complete = &usb_linux_wait_complete;
+ urb->timeout = timeout;
+ urb->transfer_flags |= URB_WAIT_WAKEUP;
+ urb->transfer_flags &= ~URB_IS_SLEEPING;
+
+ do_unlock = mtx_owned(&Giant) ? 0 : 1;
+ if (do_unlock)
+ mtx_lock(&Giant);
+ err = usb_submit_urb(urb, 0);
+ if (err)
+ goto done;
+
+ /*
+ * the URB might have completed before we get here, so check that by
+ * using some flags!
+ */
+ while (urb->transfer_flags & URB_WAIT_WAKEUP) {
+ urb->transfer_flags |= URB_IS_SLEEPING;
+ cv_wait(&urb->cv_wait, &Giant);
+ urb->transfer_flags &= ~URB_IS_SLEEPING;
+ }
+
+ err = urb->status;
+
+done:
+ if (do_unlock)
+ mtx_unlock(&Giant);
+ if (p_actlen != NULL) {
+ if (err)
+ *p_actlen = 0;
+ else
+ *p_actlen = urb->actual_length;
+ }
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_control_msg
+ *
+ * The following function performs a control transfer sequence on any
+ * control, bulk or interrupt endpoint, specified by "uhe". A control
+ * transfer means that you transfer an 8-byte header first followed by
+ * a data-phase as indicated by the 8-byte header. The "timeout" is
+ * given in milliseconds.
+ *
+ * Return values:
+ * 0: Success
+ * < 0: Failure
+ * > 0: Actual length
+ *------------------------------------------------------------------------*/
+int
+usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe,
+ uint8_t request, uint8_t requesttype,
+ uint16_t value, uint16_t index, void *data,
+ uint16_t size, usb_timeout_t timeout)
+{
+ struct usb_device_request req;
+ struct urb *urb;
+ int err;
+ uint16_t actlen;
+ uint8_t type;
+ uint8_t addr;
+
+ req.bmRequestType = requesttype;
+ req.bRequest = request;
+ USETW(req.wValue, value);
+ USETW(req.wIndex, index);
+ USETW(req.wLength, size);
+
+ if (uhe == NULL) {
+ return (-EINVAL);
+ }
+ type = (uhe->desc.bmAttributes & UE_XFERTYPE);
+ addr = (uhe->desc.bEndpointAddress & UE_ADDR);
+
+ if (type != UE_CONTROL) {
+ return (-EINVAL);
+ }
+ if (addr == 0) {
+ /*
+ * The FreeBSD USB stack supports standard control
+ * transfers on control endpoint zero:
+ */
+ err = usbd_do_request_flags(dev,
+ NULL, &req, data, USB_SHORT_XFER_OK,
+ &actlen, timeout);
+ if (err) {
+ err = -EPIPE;
+ } else {
+ err = actlen;
+ }
+ return (err);
+ }
+ if (dev->flags.usb_mode != USB_MODE_HOST) {
+ /* not supported */
+ return (-EINVAL);
+ }
+ err = usb_setup_endpoint(dev, uhe, 1 /* dummy */ );
+
+ /*
+ * NOTE: we need to allocate real memory here so that we don't
+ * transfer data to/from the stack!
+ *
+ * 0xFFFF is a FreeBSD-specific magic value.
+ */
+ urb = usb_alloc_urb(0xFFFF, size);
+
+ urb->dev = dev;
+ urb->endpoint = uhe;
+
+ memcpy(urb->setup_packet, &req, sizeof(req));
+
+ if (size && (!(req.bmRequestType & UT_READ))) {
+ /* move the data to a real buffer */
+ memcpy(USB_ADD_BYTES(urb->setup_packet, sizeof(req)),
+ data, size);
+ }
+ err = usb_start_wait_urb(urb, timeout, &actlen);
+
+ if (req.bmRequestType & UT_READ) {
+ if (actlen) {
+ bcopy(USB_ADD_BYTES(urb->setup_packet,
+ sizeof(req)), data, actlen);
+ }
+ }
+ usb_free_urb(urb);
+
+ if (err == 0) {
+ err = actlen;
+ }
+ return (err);
+}
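+
+/*
+ * Example (hypothetical vendor request and values): reading four bytes
+ * from a vendor specific register over the default control endpoint:
+ *
+ *	uint8_t buf[4];
+ *	int len;
+ *
+ *	len = usb_control_msg(dev, &dev->ep0,
+ *	    0x01, UT_READ_VENDOR_DEVICE,
+ *	    0, 0, buf, sizeof(buf), 1000);
+ *
+ * A negative return value indicates failure. A non-negative return
+ * value is the actual number of bytes transferred.
+ */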
+
+/*------------------------------------------------------------------------*
+ * usb_set_interface
+ *
+ * The following function will select which alternate setting of an
+ * USB interface you plan to use. By default the alternate setting with
+ * index zero is selected. Note that "iface_no" is not the interface
+ * index, but rather the value of "bInterfaceNumber".
+ *------------------------------------------------------------------------*/
+int
+usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index)
+{
+ struct usb_interface *p_ui = usb_ifnum_to_if(dev, iface_no);
+ int err;
+
+ if (p_ui == NULL)
+ return (-EINVAL);
+ if (alt_index >= p_ui->num_altsetting)
+ return (-EINVAL);
+ usb_linux_cleanup_interface(dev, p_ui);
+ err = -usbd_set_alt_interface_index(dev,
+ p_ui->bsd_iface_index, alt_index);
+ if (err == 0) {
+ p_ui->cur_altsetting = p_ui->altsetting + alt_index;
+ }
+ return (err);
+}
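+
+/*
+ * Example (hypothetical values): selecting alternate setting index one
+ * of the interface with "bInterfaceNumber" zero:
+ *
+ *	error = usb_set_interface(dev, 0, 1);
+ *
+ * A non-zero return value means the alternate setting was not changed.
+ */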
+
+/*------------------------------------------------------------------------*
+ * usb_setup_endpoint
+ *
+ * The following function is an extension to the Linux USB API that
+ * allows you to set a maximum buffer size for a given USB endpoint.
+ * The maximum buffer size is per URB. If you don't call this function
+ * to set a maximum buffer size, the endpoint will not be functional.
+ * Note that for isochronous endpoints the maximum buffer size must be
+ * a non-zero dummy, hence this function will base the maximum buffer
+ * size on "wMaxPacketSize".
+ *------------------------------------------------------------------------*/
+int
+usb_setup_endpoint(struct usb_device *dev,
+ struct usb_host_endpoint *uhe, usb_size_t bufsize)
+{
+ struct usb_config cfg[2];
+ uint8_t type = uhe->desc.bmAttributes & UE_XFERTYPE;
+ uint8_t addr = uhe->desc.bEndpointAddress;
+
+ if (uhe->fbsd_buf_size == bufsize) {
+ /* optimize */
+ return (0);
+ }
+ usbd_transfer_unsetup(uhe->bsd_xfer, 2);
+
+ uhe->fbsd_buf_size = bufsize;
+
+ if (bufsize == 0) {
+ return (0);
+ }
+ memset(cfg, 0, sizeof(cfg));
+
+ if (type == UE_ISOCHRONOUS) {
+ /*
+ * Isochronous transfers are special in that they don't fit
+ * into the BULK/INTR/CONTROL transfer model.
+ */
+
+ cfg[0].type = type;
+ cfg[0].endpoint = addr & UE_ADDR;
+ cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN);
+ cfg[0].callback = &usb_linux_isoc_callback;
+ cfg[0].bufsize = 0; /* use wMaxPacketSize */
+ cfg[0].frames = usb_max_isoc_frames(dev);
+ cfg[0].flags.proxy_buffer = 1;
+#if 0
+ /*
+ * The Linux USB API allows non back-to-back
+ * isochronous frames which we do not support. If the
+ * isochronous frames are not back-to-back we need to
+ * do a copy, and then we need a buffer for
+ * that. Enable this at your own risk.
+ */
+ cfg[0].flags.ext_buffer = 1;
+#endif
+ cfg[0].flags.short_xfer_ok = 1;
+
+ bcopy(cfg, cfg + 1, sizeof(*cfg));
+
+ /* Allocate and setup two generic FreeBSD USB transfers */
+
+ if (usbd_transfer_setup(dev, &uhe->bsd_iface_index,
+ uhe->bsd_xfer, cfg, 2, uhe, &Giant)) {
+ return (-EINVAL);
+ }
+ } else {
+ if (bufsize > (1 << 22)) {
+ /* limit buffer size */
+ bufsize = (1 << 22);
+ }
+ /* Allocate and setup one generic FreeBSD USB transfer */
+
+ cfg[0].type = type;
+ cfg[0].endpoint = addr & UE_ADDR;
+ cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN);
+ cfg[0].callback = &usb_linux_non_isoc_callback;
+ cfg[0].bufsize = bufsize;
+ cfg[0].flags.ext_buffer = 1; /* enable zero-copy */
+ cfg[0].flags.proxy_buffer = 1;
+ cfg[0].flags.short_xfer_ok = 1;
+
+ if (usbd_transfer_setup(dev, &uhe->bsd_iface_index,
+ uhe->bsd_xfer, cfg, 1, uhe, &Giant)) {
+ return (-EINVAL);
+ }
+ }
+ return (0);
+}
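+
+/*
+ * Example: a driver typically sets a per-URB buffer size before using
+ * an endpoint, and passes zero to tear the transfers down again
+ * (4096 is an arbitrary example size):
+ *
+ *	usb_setup_endpoint(dev, uhe, 4096);
+ *	...
+ *	usb_setup_endpoint(dev, uhe, 0);
+ */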
+
+/*------------------------------------------------------------------------*
+ * usb_linux_create_usb_device
+ *
+ * The following function is used to build up a per USB device
+ * structure tree that mimics the Linux one. The resulting structures
+ * are linked into the given "usb_device". Zero is returned on success.
+ *------------------------------------------------------------------------*/
+static int
+usb_linux_create_usb_device(struct usb_device *udev, device_t dev)
+{
+ struct usb_config_descriptor *cd = usbd_get_config_descriptor(udev);
+ struct usb_descriptor *desc;
+ struct usb_interface_descriptor *id;
+ struct usb_endpoint_descriptor *ed;
+ struct usb_interface *p_ui = NULL;
+ struct usb_host_interface *p_uhi = NULL;
+ struct usb_host_endpoint *p_uhe = NULL;
+ usb_size_t size;
+ uint16_t niface_total;
+ uint16_t nedesc;
+ uint16_t iface_no_curr;
+ uint16_t iface_index;
+ uint8_t pass;
+ uint8_t iface_no;
+
+ /*
+ * We do two passes. One pass for computing necessary memory size
+ * and one pass to initialize all the allocated memory structures.
+ */
+ for (pass = 0; pass < 2; pass++) {
+ iface_no_curr = 0xFFFF;
+ niface_total = 0;
+ iface_index = 0;
+ nedesc = 0;
+ desc = NULL;
+
+ /*
+ * Iterate over all the USB descriptors. Use the USB config
+ * descriptor pointer provided by the FreeBSD USB stack.
+ */
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ /*
+ * Build up a tree according to the descriptors we
+ * find:
+ */
+ switch (desc->bDescriptorType) {
+ case UDESC_DEVICE:
+ break;
+
+ case UDESC_ENDPOINT:
+ ed = (void *)desc;
+ if ((ed->bLength < sizeof(*ed)) ||
+ (iface_index == 0))
+ break;
+ if (p_uhe) {
+ bcopy(ed, &p_uhe->desc, sizeof(p_uhe->desc));
+ p_uhe->bsd_iface_index = iface_index - 1;
+ TAILQ_INIT(&p_uhe->bsd_urb_list);
+ p_uhe++;
+ }
+ if (p_uhi) {
+ (p_uhi - 1)->desc.bNumEndpoints++;
+ }
+ nedesc++;
+ break;
+
+ case UDESC_INTERFACE:
+ id = (void *)desc;
+ if (id->bLength < sizeof(*id))
+ break;
+ if (p_uhi) {
+ bcopy(id, &p_uhi->desc, sizeof(p_uhi->desc));
+ p_uhi->desc.bNumEndpoints = 0;
+ p_uhi->endpoint = p_uhe;
+ p_uhi->string = "";
+ p_uhi->bsd_iface_index = iface_index;
+ p_uhi++;
+ }
+ iface_no = id->bInterfaceNumber;
+ niface_total++;
+ if (iface_no_curr != iface_no) {
+ if (p_ui) {
+ p_ui->altsetting = p_uhi - 1;
+ p_ui->cur_altsetting = p_uhi - 1;
+ p_ui->bsd_iface_index = iface_index;
+ p_ui->linux_udev = udev;
+ p_ui++;
+ }
+ iface_no_curr = iface_no;
+ iface_index++;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (pass == 0) {
+ size = (sizeof(*p_uhe) * nedesc) +
+ (sizeof(*p_ui) * iface_index) +
+ (sizeof(*p_uhi) * niface_total);
+
+ p_uhe = malloc(size, M_USBDEV, M_WAITOK | M_ZERO);
+ p_ui = (void *)(p_uhe + nedesc);
+ p_uhi = (void *)(p_ui + iface_index);
+
+ udev->linux_iface_start = p_ui;
+ udev->linux_iface_end = p_ui + iface_index;
+ udev->linux_endpoint_start = p_uhe;
+ udev->linux_endpoint_end = p_uhe + nedesc;
+ udev->devnum = device_get_unit(dev);
+ bcopy(&udev->ddesc, &udev->descriptor,
+ sizeof(udev->descriptor));
+ bcopy(udev->ctrl_ep.edesc, &udev->ep0.desc,
+ sizeof(udev->ep0.desc));
+ }
+ }
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_alloc_urb
+ *
+ * This function should always be used when you allocate an URB for
+ * use with the USB Linux stack. In case of an isochronous transfer
+ * you must specify the maximum number of "iso_packets" which you
+ * plan to transfer per URB. This function is always blocking, and
+ * "mem_flags" are not regarded like on Linux.
+ *------------------------------------------------------------------------*/
+struct urb *
+usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags)
+{
+ struct urb *urb;
+ usb_size_t size;
+
+ if (iso_packets == 0xFFFF) {
+ /*
+ * FreeBSD specific magic value to ask for control transfer
+ * memory allocation:
+ */
+ size = sizeof(*urb) + sizeof(struct usb_device_request) + mem_flags;
+ } else {
+ size = sizeof(*urb) + (iso_packets * sizeof(urb->iso_frame_desc[0]));
+ }
+
+ urb = malloc(size, M_USBDEV, M_WAITOK | M_ZERO);
+
+ cv_init(&urb->cv_wait, "URBWAIT");
+ if (iso_packets == 0xFFFF) {
+ urb->setup_packet = (void *)(urb + 1);
+ urb->transfer_buffer = (void *)(urb->setup_packet +
+ sizeof(struct usb_device_request));
+ } else {
+ urb->number_of_packets = iso_packets;
+ }
+ return (urb);
+}
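+
+/*
+ * Example: for an isochronous endpoint the caller states how many
+ * "iso_frame_desc" entries are needed (eight is an arbitrary example):
+ *
+ *	urb = usb_alloc_urb(8, 0);
+ *
+ * After this call "urb->number_of_packets" is eight and the frame
+ * descriptors directly follow the URB structure.
+ */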
+
+/*------------------------------------------------------------------------*
+ * usb_find_host_endpoint
+ *
+ * The following function will return the Linux USB host endpoint
+ * structure that matches the given endpoint type and endpoint
+ * value. If no match is found, NULL is returned. This function is not
+ * part of the Linux USB API and is only used internally.
+ *------------------------------------------------------------------------*/
+struct usb_host_endpoint *
+usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep)
+{
+ struct usb_host_endpoint *uhe;
+ struct usb_host_endpoint *uhe_end;
+ struct usb_host_interface *uhi;
+ struct usb_interface *ui;
+ uint8_t ea;
+ uint8_t at;
+ uint8_t mask;
+
+ if (dev == NULL) {
+ return (NULL);
+ }
+ if (type == UE_CONTROL) {
+ mask = UE_ADDR;
+ } else {
+ mask = (UE_DIR_IN | UE_DIR_OUT | UE_ADDR);
+ }
+
+ ep &= mask;
+
+ /*
+ * Iterate over all the interfaces searching the selected alternate
+ * setting only, and all belonging endpoints.
+ */
+ for (ui = dev->linux_iface_start;
+ ui != dev->linux_iface_end;
+ ui++) {
+ uhi = ui->cur_altsetting;
+ if (uhi) {
+ uhe_end = uhi->endpoint + uhi->desc.bNumEndpoints;
+ for (uhe = uhi->endpoint;
+ uhe != uhe_end;
+ uhe++) {
+ ea = uhe->desc.bEndpointAddress;
+ at = uhe->desc.bmAttributes;
+
+ if (((ea & mask) == ep) &&
+ ((at & UE_XFERTYPE) == type)) {
+ return (uhe);
+ }
+ }
+ }
+ }
+
+ if ((type == UE_CONTROL) && ((ep & UE_ADDR) == 0)) {
+ return (&dev->ep0);
+ }
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_altnum_to_altsetting
+ *
+ * The following function returns a pointer to an alternate setting by
+ * index given a "usb_interface" pointer. If the alternate setting by
+ * index does not exist, NULL is returned. An alternate setting is a
+ * variant of an interface, but usually with slightly different
+ * characteristics.
+ *------------------------------------------------------------------------*/
+struct usb_host_interface *
+usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index)
+{
+ if (alt_index >= intf->num_altsetting) {
+ return (NULL);
+ }
+ return (intf->altsetting + alt_index);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_ifnum_to_if
+ *
+ * The following function looks up an USB interface by
+ * "bInterfaceNumber". If no match is found, NULL is returned.
+ *------------------------------------------------------------------------*/
+struct usb_interface *
+usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no)
+{
+ struct usb_interface *p_ui;
+
+ for (p_ui = dev->linux_iface_start;
+ p_ui != dev->linux_iface_end;
+ p_ui++) {
+ if ((p_ui->num_altsetting > 0) &&
+ (p_ui->altsetting->desc.bInterfaceNumber == iface_no)) {
+ return (p_ui);
+ }
+ }
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_buffer_alloc
+ *------------------------------------------------------------------------*/
+void *
+usb_buffer_alloc(struct usb_device *dev, usb_size_t size, uint16_t mem_flags, uint8_t *dma_addr)
+{
+ return (malloc(size, M_USBDEV, M_WAITOK | M_ZERO));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_get_intfdata
+ *------------------------------------------------------------------------*/
+void *
+usbd_get_intfdata(struct usb_interface *intf)
+{
+ return (intf->bsd_priv_sc);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_register
+ *
+ * The following function is used by the "USB_DRIVER_EXPORT()" macro,
+ * and is used to register a Linux USB driver, so that its
+ * "usb_device_id" structures gets searched a probe time. This
+ * function is not part of the Linux USB API, and is for internal use
+ * only.
+ *------------------------------------------------------------------------*/
+void
+usb_linux_register(void *arg)
+{
+ struct usb_driver *drv = arg;
+
+ mtx_lock(&Giant);
+ LIST_INSERT_HEAD(&usb_linux_driver_list, drv, linux_driver_list);
+ mtx_unlock(&Giant);
+
+ usb_needs_explore_all();
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_deregister
+ *
+ * The following function is used by the "USB_DRIVER_EXPORT()" macro,
+ * and is used to deregister a Linux USB driver. This function will
+ * ensure that all driver instances belonging to the Linux USB device
+ * driver in question get detached before the driver is
+ * unloaded. This function is not part of the Linux USB API, and is
+ * for internal use only.
+ *------------------------------------------------------------------------*/
+void
+usb_linux_deregister(void *arg)
+{
+ struct usb_driver *drv = arg;
+ struct usb_linux_softc *sc;
+
+repeat:
+ mtx_lock(&Giant);
+ LIST_FOREACH(sc, &usb_linux_attached_list, sc_attached_list) {
+ if (sc->sc_udrv == drv) {
+ mtx_unlock(&Giant);
+ bus_topo_lock();
+ device_detach(sc->sc_fbsd_dev);
+ bus_topo_unlock();
+ goto repeat;
+ }
+ }
+ LIST_REMOVE(drv, linux_driver_list);
+ mtx_unlock(&Giant);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_free_device
+ *
+ * The following function is only used by the FreeBSD USB stack, to
+ * clean up and free memory for a Linux USB device that was attached.
+ *------------------------------------------------------------------------*/
+void
+usb_linux_free_device(struct usb_device *dev)
+{
+ struct usb_host_endpoint *uhe;
+ struct usb_host_endpoint *uhe_end;
+
+ uhe = dev->linux_endpoint_start;
+ uhe_end = dev->linux_endpoint_end;
+ while (uhe != uhe_end) {
+ usb_setup_endpoint(dev, uhe, 0);
+ uhe++;
+ }
+ usb_setup_endpoint(dev, &dev->ep0, 0);
+ free(dev->linux_endpoint_start, M_USBDEV);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_buffer_free
+ *------------------------------------------------------------------------*/
+void
+usb_buffer_free(struct usb_device *dev, usb_size_t size,
+ void *addr, uint8_t dma_addr)
+{
+ free(addr, M_USBDEV);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_free_urb
+ *------------------------------------------------------------------------*/
+void
+usb_free_urb(struct urb *urb)
+{
+ if (urb == NULL) {
+ return;
+ }
+ /* make sure that the current URB is not active */
+ usb_kill_urb(urb);
+
+ /* destroy condition variable */
+ cv_destroy(&urb->cv_wait);
+
+ /* just free it */
+ free(urb, M_USBDEV);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_init_urb
+ *
+ * The following function can be used to initialize a custom URB. It
+ * is not recommended to use this function. Use "usb_alloc_urb()"
+ * instead.
+ *------------------------------------------------------------------------*/
+void
+usb_init_urb(struct urb *urb)
+{
+ if (urb == NULL) {
+ return;
+ }
+ memset(urb, 0, sizeof(*urb));
+}
+
+/*------------------------------------------------------------------------*
+ * usb_kill_urb
+ *------------------------------------------------------------------------*/
+void
+usb_kill_urb(struct urb *urb)
+{
+ usb_unlink_urb_sub(urb, 1);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_set_intfdata
+ *
+ * The following function sets the per Linux USB interface private
+ * data pointer. It is used by most Linux USB device drivers.
+ *------------------------------------------------------------------------*/
+void
+usb_set_intfdata(struct usb_interface *intf, void *data)
+{
+ intf->bsd_priv_sc = data;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_cleanup_interface
+ *
+ * The following function will release all FreeBSD USB transfers
+ * associated with a Linux USB interface. It is for internal use only.
+ *------------------------------------------------------------------------*/
+static void
+usb_linux_cleanup_interface(struct usb_device *dev, struct usb_interface *iface)
+{
+ struct usb_host_interface *uhi;
+ struct usb_host_interface *uhi_end;
+ struct usb_host_endpoint *uhe;
+ struct usb_host_endpoint *uhe_end;
+
+ uhi = iface->altsetting;
+ uhi_end = iface->altsetting + iface->num_altsetting;
+ while (uhi != uhi_end) {
+ uhe = uhi->endpoint;
+ uhe_end = uhi->endpoint + uhi->desc.bNumEndpoints;
+ while (uhe != uhe_end) {
+ usb_setup_endpoint(dev, uhe, 0);
+ uhe++;
+ }
+ uhi++;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_wait_complete
+ *
+ * The following function is used by "usb_start_wait_urb()" to wake it
+ * up when an USB transfer has finished.
+ *------------------------------------------------------------------------*/
+static void
+usb_linux_wait_complete(struct urb *urb)
+{
+ if (urb->transfer_flags & URB_IS_SLEEPING) {
+ cv_signal(&urb->cv_wait);
+ }
+ urb->transfer_flags &= ~URB_WAIT_WAKEUP;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_complete
+ *------------------------------------------------------------------------*/
+static void
+usb_linux_complete(struct usb_xfer *xfer)
+{
+ struct urb *urb;
+
+ urb = usbd_xfer_get_priv(xfer);
+ usbd_xfer_set_priv(xfer, NULL);
+ if (urb->complete) {
+ (urb->complete) (urb);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_isoc_callback
+ *
+ * The following is the FreeBSD isochronous USB callback. Isochronous
+ * frames are USB packets transferred 1000 or 8000 times per second,
+ * depending on whether a full- or high-speed USB transfer is
+ * used.
+ *------------------------------------------------------------------------*/
+static void
+usb_linux_isoc_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ usb_frlength_t max_frame = xfer->max_frame_size;
+ usb_frlength_t offset;
+ usb_frcount_t x;
+ struct urb *urb = usbd_xfer_get_priv(xfer);
+ struct usb_host_endpoint *uhe = usbd_xfer_softc(xfer);
+ struct usb_iso_packet_descriptor *uipd;
+
+ DPRINTF("\n");
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ if (urb->bsd_isread) {
+ /* copy in data with regard to the URB */
+
+ offset = 0;
+
+ for (x = 0; x < urb->number_of_packets; x++) {
+ uipd = urb->iso_frame_desc + x;
+ if (uipd->length > xfer->frlengths[x]) {
+ if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+ /* XXX should be EREMOTEIO */
+ uipd->status = -EPIPE;
+ } else {
+ uipd->status = 0;
+ }
+ } else {
+ uipd->status = 0;
+ }
+ uipd->actual_length = xfer->frlengths[x];
+ if (!xfer->flags.ext_buffer) {
+ usbd_copy_out(xfer->frbuffers, offset,
+ USB_ADD_BYTES(urb->transfer_buffer,
+ uipd->offset), uipd->actual_length);
+ }
+ offset += max_frame;
+ }
+ } else {
+ for (x = 0; x < urb->number_of_packets; x++) {
+ uipd = urb->iso_frame_desc + x;
+ uipd->actual_length = xfer->frlengths[x];
+ uipd->status = 0;
+ }
+ }
+
+ urb->actual_length = xfer->actlen;
+
+ /* check for short transfer */
+ if (xfer->actlen < xfer->sumlen) {
+ /* short transfer */
+ if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+ /* XXX should be EREMOTEIO */
+ urb->status = -EPIPE;
+ } else {
+ urb->status = 0;
+ }
+ } else {
+ /* success */
+ urb->status = 0;
+ }
+
+ /* call callback */
+ usb_linux_complete(xfer);
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+
+ if (xfer->priv_fifo == NULL) {
+ /* get next transfer */
+ urb = TAILQ_FIRST(&uhe->bsd_urb_list);
+ if (urb == NULL) {
+ /* nothing to do */
+ return;
+ }
+ TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list);
+ urb->bsd_urb_list.tqe_prev = NULL;
+
+ x = xfer->max_frame_count;
+ if (urb->number_of_packets > x) {
+ /* XXX simply truncate the transfer */
+ urb->number_of_packets = x;
+ }
+ } else {
+ DPRINTF("Already got a transfer\n");
+
+ /* already got a transfer (should not happen) */
+ urb = usbd_xfer_get_priv(xfer);
+ }
+
+ urb->bsd_isread = (uhe->desc.bEndpointAddress & UE_DIR_IN) ? 1 : 0;
+
+ if (xfer->flags.ext_buffer) {
+ /* set virtual address to load */
+ usbd_xfer_set_frame_data(xfer, 0, urb->transfer_buffer, 0);
+ }
+ if (!(urb->bsd_isread)) {
+ /* copy out data with regard to the URB */
+
+ offset = 0;
+
+ for (x = 0; x < urb->number_of_packets; x++) {
+ uipd = urb->iso_frame_desc + x;
+ usbd_xfer_set_frame_len(xfer, x, uipd->length);
+ if (!xfer->flags.ext_buffer) {
+ usbd_copy_in(xfer->frbuffers, offset,
+ USB_ADD_BYTES(urb->transfer_buffer,
+ uipd->offset), uipd->length);
+ }
+ offset += uipd->length;
+ }
+ } else {
+ /*
+ * compute the transfer length into the "offset"
+ * variable
+ */
+
+ offset = urb->number_of_packets * max_frame;
+
+ /* setup "frlengths" array */
+
+ for (x = 0; x < urb->number_of_packets; x++) {
+ uipd = urb->iso_frame_desc + x;
+ usbd_xfer_set_frame_len(xfer, x, max_frame);
+ }
+ }
+ usbd_xfer_set_priv(xfer, urb);
+ xfer->flags.force_short_xfer = 0;
+ xfer->timeout = urb->timeout;
+ xfer->nframes = urb->number_of_packets;
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (xfer->error == USB_ERR_CANCELLED) {
+ urb->status = -ECONNRESET;
+ } else {
+ urb->status = -EPIPE; /* stalled */
+ }
+
+ /* Set zero for "actual_length" */
+ urb->actual_length = 0;
+
+ /* Set zero for "actual_length" */
+ for (x = 0; x < urb->number_of_packets; x++) {
+ urb->iso_frame_desc[x].actual_length = 0;
+ urb->iso_frame_desc[x].status = urb->status;
+ }
+
+ /* call callback */
+ usb_linux_complete(xfer);
+
+ if (xfer->error == USB_ERR_CANCELLED) {
+ /* we need to return in this case */
+ return;
+ }
+ goto tr_setup;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_linux_non_isoc_callback
+ *
+ * The following is the FreeBSD BULK/INTERRUPT and CONTROL USB
+ * callback. It dequeues Linux USB stack compatible URB's, transforms
+ * the URB fields into a FreeBSD USB transfer, and defragments the USB
+ * transfer as required. When the transfer is complete the "complete"
+ * callback is called.
+ *------------------------------------------------------------------------*/
+static void
+usb_linux_non_isoc_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ enum {
+ REQ_SIZE = sizeof(struct usb_device_request)
+ };
+ struct urb *urb = usbd_xfer_get_priv(xfer);
+ struct usb_host_endpoint *uhe = usbd_xfer_softc(xfer);
+ uint8_t *ptr;
+ usb_frlength_t max_bulk = usbd_xfer_max_len(xfer);
+ uint8_t data_frame = xfer->flags_int.control_xfr ? 1 : 0;
+
+ DPRINTF("\n");
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ if (xfer->flags_int.control_xfr) {
+ /* don't transfer the setup packet again: */
+
+ usbd_xfer_set_frame_len(xfer, 0, 0);
+ }
+ if (urb->bsd_isread && (!xfer->flags.ext_buffer)) {
+ /* copy in data with regard to the URB */
+ usbd_copy_out(xfer->frbuffers + data_frame, 0,
+ urb->bsd_data_ptr, xfer->frlengths[data_frame]);
+ }
+ urb->bsd_length_rem -= xfer->frlengths[data_frame];
+ urb->bsd_data_ptr += xfer->frlengths[data_frame];
+ urb->actual_length += xfer->frlengths[data_frame];
+
+ /* check for short transfer */
+ if (xfer->actlen < xfer->sumlen) {
+ urb->bsd_length_rem = 0;
+
+ /* short transfer */
+ if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+ urb->status = -EPIPE;
+ } else {
+ urb->status = 0;
+ }
+ } else {
+ /* check remainder */
+ if (urb->bsd_length_rem > 0) {
+ goto setup_bulk;
+ }
+ /* success */
+ urb->status = 0;
+ }
+
+ /* call callback */
+ usb_linux_complete(xfer);
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ /* get next transfer */
+ urb = TAILQ_FIRST(&uhe->bsd_urb_list);
+ if (urb == NULL) {
+ /* nothing to do */
+ return;
+ }
+ TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list);
+ urb->bsd_urb_list.tqe_prev = NULL;
+
+ usbd_xfer_set_priv(xfer, urb);
+ xfer->flags.force_short_xfer = 0;
+ xfer->timeout = urb->timeout;
+
+ if (xfer->flags_int.control_xfr) {
+ /*
+ * USB control transfers need special handling.
+ * First copy in the header, then copy in data!
+ */
+ if (!xfer->flags.ext_buffer) {
+ usbd_copy_in(xfer->frbuffers, 0,
+ urb->setup_packet, REQ_SIZE);
+ usbd_xfer_set_frame_len(xfer, 0, REQ_SIZE);
+ } else {
+ /* set virtual address to load */
+ usbd_xfer_set_frame_data(xfer, 0,
+ urb->setup_packet, REQ_SIZE);
+ }
+
+ ptr = urb->setup_packet;
+
+ /* setup data transfer direction and length */
+ urb->bsd_isread = (ptr[0] & UT_READ) ? 1 : 0;
+ urb->bsd_length_rem = ptr[6] | (ptr[7] << 8);
+
+ } else {
+ /* setup data transfer direction */
+
+ urb->bsd_length_rem = urb->transfer_buffer_length;
+ urb->bsd_isread = (uhe->desc.bEndpointAddress &
+ UE_DIR_IN) ? 1 : 0;
+ }
+
+ urb->bsd_data_ptr = urb->transfer_buffer;
+ urb->actual_length = 0;
+
+setup_bulk:
+ if (max_bulk > urb->bsd_length_rem) {
+ max_bulk = urb->bsd_length_rem;
+ }
+ /* check if we need to force a short transfer */
+
+ if ((max_bulk == urb->bsd_length_rem) &&
+ (urb->transfer_flags & URB_ZERO_PACKET) &&
+ (!xfer->flags_int.control_xfr)) {
+ xfer->flags.force_short_xfer = 1;
+ }
+ /* check if we need to copy in data */
+
+ if (xfer->flags.ext_buffer) {
+ /* set virtual address to load */
+ usbd_xfer_set_frame_data(xfer, data_frame,
+ urb->bsd_data_ptr, max_bulk);
+ } else if (!urb->bsd_isread) {
+ /* copy out data with regard to the URB */
+ usbd_copy_in(xfer->frbuffers + data_frame, 0,
+ urb->bsd_data_ptr, max_bulk);
+ usbd_xfer_set_frame_len(xfer, data_frame, max_bulk);
+ }
+ if (xfer->flags_int.control_xfr) {
+ if (max_bulk > 0) {
+ xfer->nframes = 2;
+ } else {
+ xfer->nframes = 1;
+ }
+ } else {
+ xfer->nframes = 1;
+ }
+ usbd_transfer_submit(xfer);
+ return;
+
+ default:
+ if (xfer->error == USB_ERR_CANCELLED) {
+ urb->status = -ECONNRESET;
+ } else {
+ urb->status = -EPIPE;
+ }
+
+ /* Set zero for "actual_length" */
+ urb->actual_length = 0;
+
+ /* call callback */
+ usb_linux_complete(xfer);
+
+ if (xfer->error == USB_ERR_CANCELLED) {
+ /* we need to return in this case */
+ return;
+ }
+ goto tr_setup;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fill_bulk_urb
+ *------------------------------------------------------------------------*/
+void
+usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev,
+ struct usb_host_endpoint *uhe, void *buf,
+ int length, usb_complete_t callback, void *arg)
+{
+ urb->dev = udev;
+ urb->endpoint = uhe;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = length;
+ urb->complete = callback;
+ urb->context = arg;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bulk_msg
+ *
+ * NOTE: This function can also be used for interrupt endpoints!
+ *
+ * Return values:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe,
+ void *data, int len, uint16_t *pactlen, usb_timeout_t timeout)
+{
+ struct urb *urb;
+ int err;
+
+ if (uhe == NULL)
+ return (-EINVAL);
+ if (len < 0)
+ return (-EINVAL);
+
+ err = usb_setup_endpoint(udev, uhe, 4096 /* bytes */);
+ if (err)
+ return (err);
+
+ urb = usb_alloc_urb(0, 0);
+
+ usb_fill_bulk_urb(urb, udev, uhe, data, len,
+ usb_linux_wait_complete, NULL);
+
+ err = usb_start_wait_urb(urb, timeout, pactlen);
+
+ usb_free_urb(urb);
+
+ return (err);
+}
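+
+/*
+ * Example (hypothetical endpoint and buffer): a synchronous bulk read
+ * with a one second timeout:
+ *
+ *	uint16_t actlen;
+ *	int error;
+ *
+ *	error = usb_bulk_msg(udev, uhe_bulk_in, buf, sizeof(buf),
+ *	    &actlen, 1000);
+ *
+ * On success zero is returned and "actlen" holds the number of bytes
+ * actually transferred.
+ */
+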
+MODULE_DEPEND(linuxkpi, usb, 1, 1, 1);
+
+static void
+usb_linux_init(void *arg)
+{
+ /* register our function */
+ usb_linux_free_device_p = &usb_linux_free_device;
+}
+SYSINIT(usb_linux_init, SI_SUB_LOCK, SI_ORDER_FIRST, usb_linux_init, NULL);
+SYSUNINIT(usb_linux_unload, SI_SUB_LOCK, SI_ORDER_ANY, usb_linux_unload, NULL);
diff --git a/sys/compat/linuxkpi/common/src/linux_work.c b/sys/compat/linuxkpi/common/src/linux_work.c
new file mode 100644
index 000000000000..b1975d16025e
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_work.c
@@ -0,0 +1,789 @@
+/*-
+ * Copyright (c) 2017-2019 Hans Petter Selasky
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/compat.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/irq_work.h>
+
+#include <sys/kernel.h>
+
+/*
+ * Define all work struct states
+ */
+enum {
+ WORK_ST_IDLE, /* idle - not started */
+ WORK_ST_TIMER, /* timer is being started */
+ WORK_ST_TASK, /* taskqueue is being queued */
+ WORK_ST_EXEC, /* callback is being called */
+ WORK_ST_CANCEL, /* cancel is being requested */
+ WORK_ST_MAX,
+};
+
+/*
+ * Define global workqueues
+ */
+static struct workqueue_struct *linux_system_short_wq;
+static struct workqueue_struct *linux_system_long_wq;
+
+struct workqueue_struct *system_wq;
+struct workqueue_struct *system_long_wq;
+struct workqueue_struct *system_unbound_wq;
+struct workqueue_struct *system_highpri_wq;
+struct workqueue_struct *system_power_efficient_wq;
+
+struct taskqueue *linux_irq_work_tq;
+
+static int linux_default_wq_cpus = 4;
+
+static void linux_delayed_work_timer_fn(void *);
+
+/*
+ * This function atomically updates the work state and returns the
+ * previous state at the time of update.
+ */
+static uint8_t
+linux_update_state(atomic_t *v, const uint8_t *pstate)
+{
+ int c, old;
+
+ c = v->counter;
+
+ while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
+ c = old;
+
+ return (c);
+}
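+
+/*
+ * Example: with the table used by linux_queue_work_on() a work
+ * structure in WORK_ST_IDLE is switched to WORK_ST_TASK, and the old
+ * state, WORK_ST_IDLE, is returned, telling the caller that the task
+ * still needs to be enqueued:
+ *
+ *	switch (linux_update_state(&work->state, states)) {
+ *	case WORK_ST_IDLE:
+ *		taskqueue_enqueue(wq->taskqueue, &work->work_task);
+ *		...
+ *	}
+ */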
+
+/*
+ * A LinuxKPI task is allowed to free itself inside the callback function
+ * and cannot safely be referenced after the callback function has
+ * completed. This function gives the linux_work_fn() function a hint,
+ * that the task is not going away and can have its state checked
+ * again. Without this extra hint LinuxKPI tasks cannot be serialized
+ * across multiple worker threads.
+ */
+static bool
+linux_work_exec_unblock(struct work_struct *work)
+{
+ struct workqueue_struct *wq;
+ struct work_exec *exec;
+ bool retval = false;
+
+ wq = work->work_queue;
+ if (unlikely(wq == NULL))
+ goto done;
+
+ WQ_EXEC_LOCK(wq);
+ TAILQ_FOREACH(exec, &wq->exec_head, entry) {
+ if (exec->target == work) {
+ exec->target = NULL;
+ retval = true;
+ break;
+ }
+ }
+ WQ_EXEC_UNLOCK(wq);
+done:
+ return (retval);
+}
+
+static void
+linux_delayed_work_enqueue(struct delayed_work *dwork)
+{
+ struct taskqueue *tq;
+
+ tq = dwork->work.work_queue->taskqueue;
+ taskqueue_enqueue(tq, &dwork->work.work_task);
+}
+
+/*
+ * This function queues the given work structure on the given
+ * workqueue. It returns non-zero if the work was successfully
+ * [re-]queued. Else the work is already pending for completion.
+ */
+bool
+linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_TASK, /* start queuing task */
+ [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */
+ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */
+ [WORK_ST_EXEC] = WORK_ST_TASK, /* queue task another time */
+ [WORK_ST_CANCEL] = WORK_ST_TASK, /* start queuing task again */
+ };
+
+ if (atomic_read(&wq->draining) != 0)
+ return (!work_pending(work));
+
+ switch (linux_update_state(&work->state, states)) {
+ case WORK_ST_EXEC:
+ case WORK_ST_CANCEL:
+ if (linux_work_exec_unblock(work) != 0)
+ return (true);
+ /* FALLTHROUGH */
+ case WORK_ST_IDLE:
+ work->work_queue = wq;
+ taskqueue_enqueue(wq->taskqueue, &work->work_task);
+ return (true);
+ default:
+ return (false); /* already on a queue */
+ }
+}
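+
+/*
+ * Example of typical driver usage ("foo" names are hypothetical;
+ * INIT_WORK() and queue_work() are the Linux-side wrappers from
+ * <linux/workqueue.h> which end up calling this function):
+ *
+ *	INIT_WORK(&sc->reset_work, foo_reset_fn);
+ *	...
+ *	queue_work(system_wq, &sc->reset_work);
+ */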
+
+/*
+ * Callback func for linux_queue_rcu_work
+ */
+static void
+rcu_work_func(struct rcu_head *rcu)
+{
+ struct rcu_work *rwork;
+
+ rwork = container_of(rcu, struct rcu_work, rcu);
+ linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+}
+
+/*
+ * This function queues a work item after an RCU grace period.
+ * If the work was already pending it returns false,
+ * else it calls call_rcu() and returns true.
+ */
+bool
+linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
+{
+
+ if (!linux_work_pending(&rwork->work)) {
+ rwork->wq = wq;
+ linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
+ return (true);
+ }
+ return (false);
+}
+
+/*
+ * This function waits for the last execution of a work item and then
+ * flushes the work.
+ * It returns true if the work was pending and we waited for it;
+ * false otherwise.
+ */
+bool
+linux_flush_rcu_work(struct rcu_work *rwork)
+{
+
+ if (linux_work_pending(&rwork->work)) {
+ linux_rcu_barrier(RCU_TYPE_REGULAR);
+ linux_flush_work(&rwork->work);
+ return (true);
+ }
+ return (linux_flush_work(&rwork->work));
+}
+
+/*
+ * This function queues the given work structure on the given
+ * workqueue after a given delay in ticks. It returns true if the
+ * work was successfully [re-]queued. Else the work is already pending
+ * for completion.
+ */
+bool
+linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_TIMER, /* start timeout */
+ [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */
+ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */
+ [WORK_ST_EXEC] = WORK_ST_TIMER, /* start timeout */
+ [WORK_ST_CANCEL] = WORK_ST_TIMER, /* start timeout */
+ };
+ bool res;
+
+ if (atomic_read(&wq->draining) != 0)
+ return (!work_pending(&dwork->work));
+
+ /*
+ * Clamp the delay to a valid ticks value; some consumers pass
+ * MAX_SCHEDULE_TIMEOUT.
+ */
+ if (delay > INT_MAX)
+ delay = INT_MAX;
+
+ mtx_lock(&dwork->timer.mtx);
+ switch (linux_update_state(&dwork->work.state, states)) {
+ case WORK_ST_EXEC:
+ case WORK_ST_CANCEL:
+ if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
+ dwork->timer.expires = jiffies;
+ res = true;
+ goto out;
+ }
+ /* FALLTHROUGH */
+ case WORK_ST_IDLE:
+ dwork->work.work_queue = wq;
+ dwork->timer.expires = jiffies + delay;
+
+ if (delay == 0) {
+ linux_delayed_work_enqueue(dwork);
+ } else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
+ callout_reset_on(&dwork->timer.callout, delay,
+ &linux_delayed_work_timer_fn, dwork, cpu);
+ } else {
+ callout_reset(&dwork->timer.callout, delay,
+ &linux_delayed_work_timer_fn, dwork);
+ }
+ res = true;
+ break;
+ default:
+ res = false;
+ break;
+ }
+out:
+ mtx_unlock(&dwork->timer.mtx);
+ return (res);
+}
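+
+/*
+ * Example of typical driver usage ("foo" names are hypothetical;
+ * INIT_DELAYED_WORK() and queue_delayed_work() are the Linux-side
+ * wrappers which end up calling this function):
+ *
+ *	INIT_DELAYED_WORK(&sc->poll_work, foo_poll_fn);
+ *	queue_delayed_work(system_wq, &sc->poll_work,
+ *	    msecs_to_jiffies(500));
+ *
+ * The delay is given in ticks and is clamped to INT_MAX.
+ */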
+
+void
+linux_work_fn(void *context, int pending)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_EXEC, /* delayed work w/o timeout */
+ [WORK_ST_TASK] = WORK_ST_EXEC, /* call callback */
+ [WORK_ST_EXEC] = WORK_ST_IDLE, /* complete callback */
+ [WORK_ST_CANCEL] = WORK_ST_EXEC, /* failed to cancel */
+ };
+ struct work_struct *work;
+ struct workqueue_struct *wq;
+ struct work_exec exec;
+ struct task_struct *task;
+
+ task = current;
+
+ /* setup local variables */
+ work = context;
+ wq = work->work_queue;
+
+ /* store target pointer */
+ exec.target = work;
+
+ /* insert executor into list */
+ WQ_EXEC_LOCK(wq);
+ TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
+ while (1) {
+ switch (linux_update_state(&work->state, states)) {
+ case WORK_ST_TIMER:
+ case WORK_ST_TASK:
+ case WORK_ST_CANCEL:
+ WQ_EXEC_UNLOCK(wq);
+
+ /* set current work structure */
+ task->work = work;
+
+ /* call work function */
+ work->func(work);
+
+ /* set current work structure */
+ task->work = NULL;
+
+ WQ_EXEC_LOCK(wq);
+ /* check if unblocked */
+ if (exec.target != work) {
+ /* reapply block */
+ exec.target = work;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ goto done;
+ }
+ }
+done:
+ /* remove executor from list */
+ TAILQ_REMOVE(&wq->exec_head, &exec, entry);
+ WQ_EXEC_UNLOCK(wq);
+}
+
+void
+linux_delayed_work_fn(void *context, int pending)
+{
+ struct delayed_work *dwork = context;
+
+ /*
+ * Make sure the timer belonging to the delayed work gets
+ * drained before invoking the work function. Else the timer
+ * mutex may still be in use which can lead to use-after-free
+ * situations, because the work function might free the work
+ * structure before returning.
+ */
+ callout_drain(&dwork->timer.callout);
+
+ linux_work_fn(&dwork->work, pending);
+}
+
+static void
+linux_delayed_work_timer_fn(void *arg)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_TASK, /* start queueing task */
+ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */
+ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */
+ [WORK_ST_CANCEL] = WORK_ST_TASK, /* failed to cancel */
+ };
+ struct delayed_work *dwork = arg;
+
+ switch (linux_update_state(&dwork->work.state, states)) {
+ case WORK_ST_TIMER:
+ case WORK_ST_CANCEL:
+ linux_delayed_work_enqueue(dwork);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * This function cancels the given work structure in a
+ * non-blocking fashion. It returns non-zero if the work was
+ * successfully cancelled. Else the work may still be busy or already
+ * cancelled.
+ */
+bool
+linux_cancel_work(struct work_struct *work)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */
+ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel */
+ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */
+ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* can't happen */
+ };
+ struct taskqueue *tq;
+
+ MPASS(atomic_read(&work->state) != WORK_ST_TIMER);
+ MPASS(atomic_read(&work->state) != WORK_ST_CANCEL);
+
+ switch (linux_update_state(&work->state, states)) {
+ case WORK_ST_TASK:
+ tq = work->work_queue->taskqueue;
+ if (taskqueue_cancel(tq, &work->work_task, NULL) == 0)
+ return (true);
+ /* FALLTHROUGH */
+ default:
+ return (false);
+ }
+}
+
+/*
+ * This function cancels the given work structure in a synchronous
+ * fashion. It returns non-zero if the work was successfully
+ * cancelled. Else the work was already cancelled.
+ */
+bool
+linux_cancel_work_sync(struct work_struct *work)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */
+ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel and drain */
+ [WORK_ST_EXEC] = WORK_ST_IDLE, /* too late, drain */
+ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */
+ };
+ struct taskqueue *tq;
+ bool retval = false;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "linux_cancel_work_sync() might sleep");
+retry:
+ switch (linux_update_state(&work->state, states)) {
+ case WORK_ST_IDLE:
+ case WORK_ST_TIMER:
+ return (retval);
+ case WORK_ST_EXEC:
+ tq = work->work_queue->taskqueue;
+ if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
+ taskqueue_drain(tq, &work->work_task);
+ goto retry; /* work may have restarted itself */
+ default:
+ tq = work->work_queue->taskqueue;
+ if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
+ taskqueue_drain(tq, &work->work_task);
+ retval = true;
+ goto retry;
+ }
+}
+
+/*
+ * This function atomically stops the timer and callback. The timer
+ * callback will not be called after this function returns. This
+ * function returns true when the timeout was cancelled. Else the
+ * timeout was not started or has already been called.
+ */
+static inline bool
+linux_cancel_timer(struct delayed_work *dwork, bool drain)
+{
+ bool cancelled;
+
+ mtx_lock(&dwork->timer.mtx);
+ cancelled = (callout_stop(&dwork->timer.callout) == 1);
+ mtx_unlock(&dwork->timer.mtx);
+
+ /* check if we should drain */
+ if (drain)
+ callout_drain(&dwork->timer.callout);
+ return (cancelled);
+}
+
+/*
+ * This function cancels the given delayed work structure in a
+ * non-blocking fashion. It returns non-zero if the work was
+ * successfully cancelled. Else the work may still be busy or already
+ * cancelled.
+ */
+bool
+linux_cancel_delayed_work(struct delayed_work *dwork)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_CANCEL, /* try to cancel */
+ [WORK_ST_TASK] = WORK_ST_CANCEL, /* try to cancel */
+ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */
+ [WORK_ST_CANCEL] = WORK_ST_CANCEL, /* NOP */
+ };
+ struct taskqueue *tq;
+ bool cancelled;
+
+ mtx_lock(&dwork->timer.mtx);
+ switch (linux_update_state(&dwork->work.state, states)) {
+ case WORK_ST_TIMER:
+ case WORK_ST_CANCEL:
+ cancelled = (callout_stop(&dwork->timer.callout) == 1);
+ if (cancelled) {
+ atomic_cmpxchg(&dwork->work.state,
+ WORK_ST_CANCEL, WORK_ST_IDLE);
+ mtx_unlock(&dwork->timer.mtx);
+ return (true);
+ }
+ /* FALLTHROUGH */
+ case WORK_ST_TASK:
+ tq = dwork->work.work_queue->taskqueue;
+ if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
+ atomic_cmpxchg(&dwork->work.state,
+ WORK_ST_CANCEL, WORK_ST_IDLE);
+ mtx_unlock(&dwork->timer.mtx);
+ return (true);
+ }
+ /* FALLTHROUGH */
+ default:
+ mtx_unlock(&dwork->timer.mtx);
+ return (false);
+ }
+}
+
+/*
+ * This function cancels the given work structure in a synchronous
+ * fashion. It returns true if the work was successfully
+ * cancelled. Else the work was already cancelled.
+ */
+static bool
+linux_cancel_delayed_work_sync_int(struct delayed_work *dwork)
+{
+ static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_IDLE, /* cancel and drain */
+ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel and drain */
+ [WORK_ST_EXEC] = WORK_ST_IDLE, /* too late, drain */
+ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */
+ };
+ struct taskqueue *tq;
+ int ret, state;
+ bool cancelled;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "linux_cancel_delayed_work_sync() might sleep");
+ mtx_lock(&dwork->timer.mtx);
+
+ state = linux_update_state(&dwork->work.state, states);
+ switch (state) {
+ case WORK_ST_IDLE:
+ mtx_unlock(&dwork->timer.mtx);
+ return (false);
+ case WORK_ST_TIMER:
+ case WORK_ST_CANCEL:
+ cancelled = (callout_stop(&dwork->timer.callout) == 1);
+
+ tq = dwork->work.work_queue->taskqueue;
+ ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
+ mtx_unlock(&dwork->timer.mtx);
+
+ callout_drain(&dwork->timer.callout);
+ taskqueue_drain(tq, &dwork->work.work_task);
+ return (cancelled || (ret != 0));
+ default:
+ tq = dwork->work.work_queue->taskqueue;
+ ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
+ mtx_unlock(&dwork->timer.mtx);
+ if (ret != 0)
+ taskqueue_drain(tq, &dwork->work.work_task);
+ return (ret != 0);
+ }
+}
+
+bool
+linux_cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+ bool res;
+
+ res = false;
+ while (linux_cancel_delayed_work_sync_int(dwork))
+ res = true;
+ return (res);
+}
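+
+/*
+ * Illustrative sketch, not part of the implementation: a typical
+ * LinuxKPI consumer cancels delayed work synchronously during teardown
+ * roughly as follows.  The "foo" names are hypothetical, and the usual
+ * cancel_delayed_work_sync() wrapper is assumed to resolve to the
+ * function above, as is customary for the Linux workqueue API:
+ *
+ *	static void
+ *	foo_detach(struct foo_softc *sc)
+ *	{
+ *		// may sleep; the callback cannot run once this returns
+ *		if (cancel_delayed_work_sync(&sc->foo_dwork))
+ *			device_printf(sc->dev, "pending work cancelled\n");
+ *	}
+ */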
+
+/*
+ * This function waits until the given work structure is completed.
+ * It returns true if the work was found busy and was waited for.
+ * Otherwise there was nothing to wait for.
+ */
+bool
+linux_flush_work(struct work_struct *work)
+{
+ struct taskqueue *tq;
+ bool retval;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "linux_flush_work() might sleep");
+
+ switch (atomic_read(&work->state)) {
+ case WORK_ST_IDLE:
+ return (false);
+ default:
+ tq = work->work_queue->taskqueue;
+ retval = taskqueue_poll_is_busy(tq, &work->work_task);
+ taskqueue_drain(tq, &work->work_task);
+ return (retval);
+ }
+}
+
+/*
+ * This function waits until the given delayed work structure is
+ * completed. It returns true if the work was found busy and was
+ * waited for. Otherwise there was nothing to wait for.
+ */
+bool
+linux_flush_delayed_work(struct delayed_work *dwork)
+{
+ struct taskqueue *tq;
+ bool retval;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "linux_flush_delayed_work() might sleep");
+
+ switch (atomic_read(&dwork->work.state)) {
+ case WORK_ST_IDLE:
+ return (false);
+ case WORK_ST_TIMER:
+ if (linux_cancel_timer(dwork, 1))
+ linux_delayed_work_enqueue(dwork);
+ /* FALLTHROUGH */
+ default:
+ tq = dwork->work.work_queue->taskqueue;
+ retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
+ taskqueue_drain(tq, &dwork->work.work_task);
+ return (retval);
+ }
+}
+
+/*
+ * This function returns true if the given work is pending, and not
+ * yet executing:
+ */
+bool
+linux_work_pending(struct work_struct *work)
+{
+ switch (atomic_read(&work->state)) {
+ case WORK_ST_TIMER:
+ case WORK_ST_TASK:
+ case WORK_ST_CANCEL:
+ return (true);
+ default:
+ return (false);
+ }
+}
+
+/*
+ * This function returns true if the given work is busy.
+ */
+bool
+linux_work_busy(struct work_struct *work)
+{
+ struct taskqueue *tq;
+
+ switch (atomic_read(&work->state)) {
+ case WORK_ST_IDLE:
+ return (false);
+ case WORK_ST_EXEC:
+ tq = work->work_queue->taskqueue;
+ return (taskqueue_poll_is_busy(tq, &work->work_task));
+ default:
+ return (true);
+ }
+}
+
+struct workqueue_struct *
+linux_create_workqueue_common(const char *name, int cpus)
+{
+ struct workqueue_struct *wq;
+
+ /*
+ * If zero CPUs are specified use the default number of CPUs:
+ */
+ if (cpus == 0)
+ cpus = linux_default_wq_cpus;
+
+ wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
+ wq->taskqueue = taskqueue_create(name, M_WAITOK,
+ taskqueue_thread_enqueue, &wq->taskqueue);
+ atomic_set(&wq->draining, 0);
+ taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
+ TAILQ_INIT(&wq->exec_head);
+ mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);
+
+ return (wq);
+}
+
+void
+linux_destroy_workqueue(struct workqueue_struct *wq)
+{
+ atomic_inc(&wq->draining);
+ drain_workqueue(wq);
+ taskqueue_free(wq->taskqueue);
+ mtx_destroy(&wq->exec_mtx);
+ kfree(wq);
+}
+
+void
+linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
+{
+ memset(dwork, 0, sizeof(*dwork));
+ dwork->work.func = func;
+ TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
+ mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
+ MTX_DEF | MTX_NOWITNESS);
+ callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
+}
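+
+/*
+ * Illustrative sketch, not part of the implementation: setting up and
+ * arming a delayed work item from a driver.  The "foo" names are
+ * hypothetical; INIT_DELAYED_WORK() and schedule_delayed_work() are
+ * assumed to be the usual LinuxKPI wrappers around the helpers in this
+ * file:
+ *
+ *	static void
+ *	foo_timeout_fn(struct work_struct *work)
+ *	{
+ *		struct foo_softc *sc =
+ *		    container_of(work, struct foo_softc, foo_dwork.work);
+ *
+ *		// runs in taskqueue (process) context
+ *		foo_refresh_stats(sc);
+ *	}
+ *
+ *	static void
+ *	foo_attach(struct foo_softc *sc)
+ *	{
+ *		INIT_DELAYED_WORK(&sc->foo_dwork, foo_timeout_fn);
+ *		schedule_delayed_work(&sc->foo_dwork, hz);	// delay in jiffies
+ *	}
+ */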
+
+struct work_struct *
+linux_current_work(void)
+{
+ return (current->work);
+}
+
+static void
+linux_work_init(void *arg)
+{
+ int max_wq_cpus = mp_ncpus + 1;
+
+ /* avoid deadlock when there are too few threads */
+ if (max_wq_cpus < 4)
+ max_wq_cpus = 4;
+
+ /* set default number of CPUs */
+ linux_default_wq_cpus = max_wq_cpus;
+
+ linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
+ linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);
+
+ /* populate the workqueue pointers */
+ system_long_wq = linux_system_long_wq;
+ system_wq = linux_system_short_wq;
+ system_power_efficient_wq = linux_system_short_wq;
+ system_unbound_wq = linux_system_short_wq;
+ system_highpri_wq = linux_system_short_wq;
+}
+SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL);
+
+static void
+linux_work_uninit(void *arg)
+{
+ destroy_workqueue(linux_system_short_wq);
+ destroy_workqueue(linux_system_long_wq);
+
+ /* clear workqueue pointers */
+ system_long_wq = NULL;
+ system_wq = NULL;
+ system_power_efficient_wq = NULL;
+ system_unbound_wq = NULL;
+ system_highpri_wq = NULL;
+}
+SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);
+
+void
+linux_irq_work_fn(void *context, int pending)
+{
+ struct irq_work *irqw = context;
+
+ irqw->func(irqw);
+}
+
+static void
+linux_irq_work_init_fn(void *context, int pending)
+{
+ /*
+ * LinuxKPI performs lazy allocation of memory structures required by
+ * current on the first access to it. As some irq_work clients read
+ * it with a spinlock taken, we have to preallocate td_lkpi_task before
+ * the first call to irq_work_queue(). As irq_work uses a single
+ * thread, it is enough to read current once at the SYSINIT stage.
+ */
+ if (current == NULL)
+ panic("irq_work taskqueue is not initialized");
+}
+static struct task linux_irq_work_init_task =
+ TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);
+
+static void
+linux_irq_work_init(void *arg)
+{
+ linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
+ M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
+ taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
+ "linuxkpi_irq_wq");
+ taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
+}
+SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
+ linux_irq_work_init, NULL);
+
+static void
+linux_irq_work_uninit(void *arg)
+{
+ taskqueue_drain_all(linux_irq_work_tq);
+ taskqueue_free(linux_irq_work_tq);
+}
+SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
+ linux_irq_work_uninit, NULL);
diff --git a/sys/compat/linuxkpi/common/src/linux_xarray.c b/sys/compat/linuxkpi/common/src/linux_xarray.c
new file mode 100644
index 000000000000..3f07f6d7c59f
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_xarray.c
@@ -0,0 +1,451 @@
+/*-
+ * Copyright (c) 2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <linux/xarray.h>
+
+#include <vm/vm_pageout.h>
+
+/*
+ * Linux' XArray allows storing a NULL pointer as a value. xa_load() would
+ * return NULL for both an unused index and an index set to NULL. This also
+ * impacts xa_alloc(), which needs to find the next available index.
+ *
+ * However, our implementation relies on a radix tree (see `linux_radix.c`)
+ * which does not accept NULL pointers as values. I'm not sure whether this
+ * is a limitation or a feature, so to work around it, a NULL value is
+ * replaced by `NULL_VALUE`, an unlikely address, when we pass it to
+ * linux_radix.
+ */
+#define NULL_VALUE (void *)0x1
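+
+/*
+ * Illustrative sketch, not part of the implementation: with the
+ * substitution above, storing a NULL value keeps the index occupied in
+ * the backing radix tree while xa_load() still reports NULL to the
+ * caller, matching Linux semantics.  The variable names below are
+ * hypothetical:
+ *
+ *	struct xarray xa;
+ *
+ *	xa_init_flags(&xa, 0);
+ *	xa_store(&xa, 5, NULL, GFP_KERNEL);	// slot 5 now holds NULL_VALUE
+ *	(void)xa_load(&xa, 5);			// returns NULL to the caller
+ *	(void)xa_erase(&xa, 5);			// frees the slot, returns NULL
+ */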
+
+/*
+ * This function removes the element at the given index and returns
+ * the pointer to the removed element, if any.
+ */
+void *
+__xa_erase(struct xarray *xa, uint32_t index)
+{
+ void *retval;
+
+ XA_ASSERT_LOCKED(xa);
+
+ retval = radix_tree_delete(&xa->xa_head, index);
+ if (retval == NULL_VALUE)
+ retval = NULL;
+
+ return (retval);
+}
+
+void *
+xa_erase(struct xarray *xa, uint32_t index)
+{
+ void *retval;
+
+ xa_lock(xa);
+ retval = __xa_erase(xa, index);
+ xa_unlock(xa);
+
+ return (retval);
+}
+
+/*
+ * This function returns the element pointer at the given index. A
+ * value of NULL is returned if the element does not exist.
+ */
+void *
+xa_load(struct xarray *xa, uint32_t index)
+{
+ void *retval;
+
+ xa_lock(xa);
+ retval = radix_tree_lookup(&xa->xa_head, index);
+ xa_unlock(xa);
+
+ if (retval == NULL_VALUE)
+ retval = NULL;
+
+ return (retval);
+}
+
+/*
+ * This is an internal function used to sleep until more memory
+ * becomes available.
+ */
+static void
+xa_vm_wait_locked(struct xarray *xa)
+{
+ xa_unlock(xa);
+ vm_wait(NULL);
+ xa_lock(xa);
+}
+
+/*
+ * This function iterates the xarray until it finds a free slot where
+ * it can insert the element pointed to by "ptr". It starts at the
+ * index pointed to by "pindex" and updates this value upon return. The
+ * "mask" argument defines the maximum index allowed, inclusive, and
+ * must be a power of two minus one. The "gfp" argument tells whether
+ * we can wait for more memory to become available or not. This
+ * function returns zero upon success or a negative error code on
+ * failure. A typical error code is -ENOMEM, which means either
+ * the xarray is full, or there was not enough internal memory
+ * available to complete the radix tree insertion.
+ */
+int
+__xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
+{
+ int retval;
+
+ XA_ASSERT_LOCKED(xa);
+
+ /* mask should allow to allocate at least one item */
+ MPASS(mask > ((xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));
+
+ /* mask can be any power of two value minus one */
+ MPASS((mask & (mask + 1)) == 0);
+
+ *pindex = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
+ if (ptr == NULL)
+ ptr = NULL_VALUE;
+retry:
+ retval = radix_tree_insert(&xa->xa_head, *pindex, ptr);
+
+ switch (retval) {
+ case -EEXIST:
+ if (likely(*pindex != mask)) {
+ (*pindex)++;
+ goto retry;
+ }
+ retval = -ENOMEM;
+ break;
+ case -ENOMEM:
+ if (likely(gfp & M_WAITOK)) {
+ xa_vm_wait_locked(xa);
+ goto retry;
+ }
+ break;
+ default:
+ break;
+ }
+ return (retval);
+}
+
+int
+xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
+{
+ int retval;
+
+ if (ptr == NULL)
+ ptr = NULL_VALUE;
+
+ xa_lock(xa);
+ retval = __xa_alloc(xa, pindex, ptr, mask, gfp);
+ xa_unlock(xa);
+
+ return (retval);
+}
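+
+/*
+ * Illustrative sketch, not part of the implementation: allocating an
+ * index for a new object, as described above.  The "xa", "obj" and
+ * error-handling names are hypothetical; "mask" must be a power of two
+ * minus one, and the lowest index may be 1 when XA_FLAGS_ALLOC1 is set:
+ *
+ *	uint32_t id;
+ *	int error;
+ *
+ *	error = xa_alloc(&xa, &id, obj, 0xFFFF, GFP_KERNEL);
+ *	if (error != 0)
+ *		return (error);		// -ENOMEM: full or out of memory
+ *	// "id" now refers to "obj" within the allowed range
+ */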
+
+/*
+ * This function works like the "xa_alloc" function, except that when
+ * there are no free entries left towards the end of the xarray, it
+ * wraps the next index value to zero and continues searching for a
+ * free slot from the beginning of the array. If the xarray is full,
+ * -ENOMEM is returned.
+ */
+int
+__xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
+ uint32_t *pnext_index, gfp_t gfp)
+{
+ int retval;
+ int timeout = 1;
+
+ XA_ASSERT_LOCKED(xa);
+
+ /* mask should allow to allocate at least one item */
+ MPASS(mask > ((xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));
+
+ /* mask can be any power of two value minus one */
+ MPASS((mask & (mask + 1)) == 0);
+
+ *pnext_index = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
+ if (ptr == NULL)
+ ptr = NULL_VALUE;
+retry:
+ retval = radix_tree_insert(&xa->xa_head, *pnext_index, ptr);
+
+ switch (retval) {
+ case -EEXIST:
+ if (unlikely(*pnext_index == mask) && !timeout--) {
+ retval = -ENOMEM;
+ break;
+ }
+ (*pnext_index)++;
+ (*pnext_index) &= mask;
+ if (*pnext_index == 0 && (xa->xa_flags & XA_FLAGS_ALLOC1) != 0)
+ (*pnext_index)++;
+ goto retry;
+ case -ENOMEM:
+ if (likely(gfp & M_WAITOK)) {
+ xa_vm_wait_locked(xa);
+ goto retry;
+ }
+ break;
+ default:
+ break;
+ }
+ *pindex = *pnext_index;
+
+ return (retval);
+}
+
+int
+xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
+ uint32_t *pnext_index, gfp_t gfp)
+{
+ int retval;
+
+ xa_lock(xa);
+ retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp);
+ xa_unlock(xa);
+
+ return (retval);
+}
+
+int
+xa_alloc_cyclic_irq(struct xarray *xa, uint32_t *pindex, void *ptr,
+ uint32_t mask, uint32_t *pnext_index, gfp_t gfp)
+{
+ int retval;
+
+ xa_lock_irq(xa);
+ retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp);
+ xa_unlock_irq(xa);
+
+ return (retval);
+}
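+
+/*
+ * Illustrative sketch, not part of the implementation: the cyclic
+ * variant wraps the search back to the start of the range instead of
+ * failing when it reaches the end.  The names are hypothetical:
+ *
+ *	uint32_t id, next;
+ *	int error;
+ *
+ *	error = xa_alloc_cyclic(&xa, &id, obj, 0xFFFF, &next, GFP_KERNEL);
+ *	if (error != 0)
+ *		return (error);		// -ENOMEM when the range is full
+ *	// both "id" and "next" now hold the index assigned to "obj"
+ */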
+
+/*
+ * This function tries to insert an element at the given index. The
+ * "gfp" argument basically decides of this function can sleep or not
+ * trying to allocate internal memory for its radix tree. The
+ * function returns an error code upon failure. Typical error codes
+ * are element exists (-EEXIST) or out of memory (-ENOMEM).
+ */
+int
+__xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
+{
+ int retval;
+
+ XA_ASSERT_LOCKED(xa);
+ if (ptr == NULL)
+ ptr = NULL_VALUE;
+retry:
+ retval = radix_tree_insert(&xa->xa_head, index, ptr);
+
+ switch (retval) {
+ case -ENOMEM:
+ if (likely(gfp & M_WAITOK)) {
+ xa_vm_wait_locked(xa);
+ goto retry;
+ }
+ break;
+ default:
+ break;
+ }
+ return (retval);
+}
+
+int
+xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
+{
+ int retval;
+
+ xa_lock(xa);
+ retval = __xa_insert(xa, index, ptr, gfp);
+ xa_unlock(xa);
+
+ return (retval);
+}
+
+/*
+ * This function updates the element at the given index and returns a
+ * pointer to the old element. The "gfp" argument decides whether this
+ * function may sleep when allocating internal memory for its radix
+ * tree. The function returns an XA_ERROR() pointer code upon failure.
+ * Code using this function must always check whether the
+ * return value is an XA_ERROR() code before using the returned value.
+ */
+void *
+__xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
+{
+ int retval;
+
+ XA_ASSERT_LOCKED(xa);
+ if (ptr == NULL)
+ ptr = NULL_VALUE;
+retry:
+ retval = radix_tree_store(&xa->xa_head, index, &ptr);
+
+ switch (retval) {
+ case 0:
+ if (ptr == NULL_VALUE)
+ ptr = NULL;
+ break;
+ case -ENOMEM:
+ if (likely(gfp & M_WAITOK)) {
+ xa_vm_wait_locked(xa);
+ goto retry;
+ }
+ ptr = XA_ERROR(retval);
+ break;
+ default:
+ ptr = XA_ERROR(retval);
+ break;
+ }
+ return (ptr);
+}
+
+void *
+xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
+{
+ void *retval;
+
+ xa_lock(xa);
+ retval = __xa_store(xa, index, ptr, gfp);
+ xa_unlock(xa);
+
+ return (retval);
+}
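+
+/*
+ * Illustrative sketch, not part of the implementation: as noted above,
+ * callers must distinguish an error cookie from a regular old value.
+ * The names are hypothetical; xa_is_err() and xa_err() are assumed to
+ * be the usual accessors for XA_ERROR() values:
+ *
+ *	void *old;
+ *
+ *	old = xa_store(&xa, index, obj, GFP_KERNEL);
+ *	if (xa_is_err(old))
+ *		return (xa_err(old));	// typically -ENOMEM
+ *	// "old" is the previous value at "index", possibly NULL
+ */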
+
+/*
+ * This function initializes an xarray structure.
+ */
+void
+xa_init_flags(struct xarray *xa, uint32_t flags)
+{
+ memset(xa, 0, sizeof(*xa));
+
+ mtx_init(&xa->xa_lock, "lkpi-xarray", NULL, MTX_DEF | MTX_RECURSE);
+ xa->xa_head.gfp_mask = GFP_NOWAIT;
+ xa->xa_flags = flags;
+}
+
+/*
+ * This function destroys an xarray structure and all its internal
+ * memory and locks.
+ */
+void
+xa_destroy(struct xarray *xa)
+{
+ struct radix_tree_iter iter;
+ void **ppslot;
+
+ xa_lock(xa);
+ radix_tree_for_each_slot(ppslot, &xa->xa_head, &iter, 0)
+ radix_tree_iter_delete(&xa->xa_head, &iter, ppslot);
+ xa_unlock(xa);
+
+ /*
+ * The mutex initialized in `xa_init_flags()` is not destroyed here on
+ * purpose. The reason is that on Linux, the xarray remains usable
+ * after a call to `xa_destroy()`. For instance the i915 DRM driver
+ * relies on that during the initialization of its GuC. Basically,
+ * `xa_destroy()` "resets" the structure to zero but doesn't really
+ * destroy it.
+ */
+}
+
+/*
+ * This function checks if an xarray is empty or not.
+ * It returns true if empty, else false.
+ */
+bool
+__xa_empty(struct xarray *xa)
+{
+ struct radix_tree_iter iter = {};
+ void **temp;
+
+ XA_ASSERT_LOCKED(xa);
+
+ return (!radix_tree_iter_find(&xa->xa_head, &iter, &temp));
+}
+
+bool
+xa_empty(struct xarray *xa)
+{
+ bool retval;
+
+ xa_lock(xa);
+ retval = __xa_empty(xa);
+ xa_unlock(xa);
+
+ return (retval);
+}
+
+/*
+ * This function returns the next valid xarray entry based on the
+ * index given by "pindex". The value pointed to by "pindex" is
+ * updated before return.
+ */
+void *
+__xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
+{
+ struct radix_tree_iter iter = { .index = *pindex };
+ void **ppslot;
+ void *retval;
+ bool found;
+
+ XA_ASSERT_LOCKED(xa);
+
+ if (not_first) {
+ /* advance to next index, if any */
+ iter.index++;
+ if (iter.index == 0)
+ return (NULL);
+ }
+
+ found = radix_tree_iter_find(&xa->xa_head, &iter, &ppslot);
+ if (likely(found)) {
+ retval = *ppslot;
+ if (retval == NULL_VALUE)
+ retval = NULL;
+ *pindex = iter.index;
+ } else {
+ retval = NULL;
+ }
+ return (retval);
+}
+
+void *
+xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
+{
+ void *retval;
+
+ xa_lock(xa);
+ retval = __xa_next(xa, pindex, not_first);
+ xa_unlock(xa);
+
+ return (retval);
+}
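+
+/*
+ * Illustrative sketch, not part of the implementation: walking all
+ * entries with xa_next(), which is the kind of loop an xa_for_each()
+ * style iterator is typically built on.  The names are hypothetical;
+ * note that the loop stops at the first NULL returned:
+ *
+ *	unsigned long index = 0;
+ *	void *entry;
+ *	bool not_first = false;
+ *
+ *	while ((entry = xa_next(&xa, &index, not_first)) != NULL) {
+ *		foo_handle_entry(index, entry);
+ *		not_first = true;
+ *	}
+ */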
diff --git a/sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c b/sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c
new file mode 100644
index 000000000000..b0d4c013a6f3
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linuxkpi_hdmikmod.c
@@ -0,0 +1,7 @@
+/* Public domain. */
+
+#include <sys/param.h>
+#include <sys/module.h>
+
+MODULE_VERSION(linuxkpi_hdmi, 1);
+MODULE_DEPEND(linuxkpi_hdmi, linuxkpi, 1, 1, 1);
diff --git a/sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c b/sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c
new file mode 100644
index 000000000000..8881adc0d657
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linuxkpi_videokmod.c
@@ -0,0 +1,7 @@
+/* Public domain. */
+
+#include <sys/param.h>
+#include <sys/module.h>
+
+MODULE_VERSION(linuxkpi_video, 1);
+MODULE_DEPEND(linuxkpi_video, linuxkpi, 1, 1, 1);
diff --git a/sys/compat/linuxkpi/common/src/lkpi_iic_if.m b/sys/compat/linuxkpi/common/src/lkpi_iic_if.m
new file mode 100644
index 000000000000..64db427864db
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/lkpi_iic_if.m
@@ -0,0 +1,41 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2021 Beckhoff Automation GmbH & Co. KG
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+INTERFACE lkpi_iic;
+
+HEADER {
+ struct i2c_adapter;
+}
+
+METHOD int add_adapter {
+ device_t dev;
+ struct i2c_adapter *adapter;
+};
+
+METHOD struct i2c_adapter * get_adapter {
+ device_t dev;
+};