aboutsummaryrefslogtreecommitdiff
path: root/sys/arm64/include
diff options
context:
space:
mode:
Diffstat (limited to 'sys/arm64/include')
-rw-r--r--sys/arm64/include/_align.h47
-rw-r--r--sys/arm64/include/_bus.h28
-rw-r--r--sys/arm64/include/_inttypes.h218
-rw-r--r--sys/arm64/include/_limits.h88
-rw-r--r--sys/arm64/include/_stdint.h162
-rw-r--r--sys/arm64/include/_types.h71
-rw-r--r--sys/arm64/include/acle-compat.h5
-rw-r--r--sys/arm64/include/acpica_machdep.h61
-rw-r--r--sys/arm64/include/armreg.h2870
-rw-r--r--sys/arm64/include/asan.h68
-rw-r--r--sys/arm64/include/asm.h237
-rw-r--r--sys/arm64/include/atomic.h679
-rw-r--r--sys/arm64/include/bus.h528
-rw-r--r--sys/arm64/include/bus_dma.h164
-rw-r--r--sys/arm64/include/bus_dma_impl.h91
-rw-r--r--sys/arm64/include/clock.h0
-rw-r--r--sys/arm64/include/cmn600_reg.h807
-rw-r--r--sys/arm64/include/counter.h91
-rw-r--r--sys/arm64/include/cpu.h306
-rw-r--r--sys/arm64/include/cpu_feat.h88
-rw-r--r--sys/arm64/include/cpufunc.h217
-rw-r--r--sys/arm64/include/cpuinfo.h5
-rw-r--r--sys/arm64/include/csan.h104
-rw-r--r--sys/arm64/include/db_machdep.h129
-rw-r--r--sys/arm64/include/debug_monitor.h65
-rw-r--r--sys/arm64/include/disassem.h40
-rw-r--r--sys/arm64/include/dump.h75
-rw-r--r--sys/arm64/include/efi.h66
-rw-r--r--sys/arm64/include/elf.h233
-rw-r--r--sys/arm64/include/endian.h36
-rw-r--r--sys/arm64/include/exec.h6
-rw-r--r--sys/arm64/include/float.h97
-rw-r--r--sys/arm64/include/floatingpoint.h2
-rw-r--r--sys/arm64/include/fpu.h4
-rw-r--r--sys/arm64/include/frame.h83
-rw-r--r--sys/arm64/include/gdb_machdep.h83
-rw-r--r--sys/arm64/include/hypervisor.h342
-rw-r--r--sys/arm64/include/ieeefp.h48
-rw-r--r--sys/arm64/include/ifunc.h48
-rw-r--r--sys/arm64/include/in_cksum.h48
-rw-r--r--sys/arm64/include/intr.h55
-rw-r--r--sys/arm64/include/iodev.h62
-rw-r--r--sys/arm64/include/iommu.h10
-rw-r--r--sys/arm64/include/kdb.h57
-rw-r--r--sys/arm64/include/machdep.h69
-rw-r--r--sys/arm64/include/md_var.h76
-rw-r--r--sys/arm64/include/memdev.h38
-rw-r--r--sys/arm64/include/metadata.h57
-rw-r--r--sys/arm64/include/minidump.h53
-rw-r--r--sys/arm64/include/msan.h91
-rw-r--r--sys/arm64/include/ofw_machdep.h41
-rw-r--r--sys/arm64/include/param.h125
-rw-r--r--sys/arm64/include/pcb.h95
-rw-r--r--sys/arm64/include/pci_cfgreg.h33
-rw-r--r--sys/arm64/include/pcpu.h93
-rw-r--r--sys/arm64/include/pcpu_aux.h56
-rw-r--r--sys/arm64/include/pmap.h200
-rw-r--r--sys/arm64/include/pmc_mdep.h91
-rw-r--r--sys/arm64/include/proc.h84
-rw-r--r--sys/arm64/include/procctl.h3
-rw-r--r--sys/arm64/include/profile.h98
-rw-r--r--sys/arm64/include/psl.h0
-rw-r--r--sys/arm64/include/pte.h262
-rw-r--r--sys/arm64/include/ptrace.h10
-rw-r--r--sys/arm64/include/reg.h110
-rw-r--r--sys/arm64/include/reloc.h6
-rw-r--r--sys/arm64/include/resource.h51
-rw-r--r--sys/arm64/include/sdt_machdep.h12
-rw-r--r--sys/arm64/include/setjmp.h77
-rw-r--r--sys/arm64/include/sf_buf.h55
-rw-r--r--sys/arm64/include/sigframe.h1
-rw-r--r--sys/arm64/include/signal.h53
-rw-r--r--sys/arm64/include/smp.h50
-rw-r--r--sys/arm64/include/stack.h59
-rw-r--r--sys/arm64/include/stdarg.h37
-rw-r--r--sys/arm64/include/sysarch.h64
-rw-r--r--sys/arm64/include/sysreg.h5
-rw-r--r--sys/arm64/include/tls.h61
-rw-r--r--sys/arm64/include/trap.h0
-rw-r--r--sys/arm64/include/ucontext.h114
-rw-r--r--sys/arm64/include/undefined.h52
-rw-r--r--sys/arm64/include/vdso.h43
-rw-r--r--sys/arm64/include/vfp.h127
-rw-r--r--sys/arm64/include/vm.h47
-rw-r--r--sys/arm64/include/vmm.h346
-rw-r--r--sys/arm64/include/vmm_dev.h274
-rw-r--r--sys/arm64/include/vmm_instruction_emul.h83
-rw-r--r--sys/arm64/include/vmm_snapshot.h1
-rw-r--r--sys/arm64/include/vmparam.h335
89 files changed, 12162 insertions, 0 deletions
diff --git a/sys/arm64/include/_align.h b/sys/arm64/include/_align.h
new file mode 100644
index 000000000000..b88200e21636
--- /dev/null
+++ b/sys/arm64/include/_align.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/_align.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE__ALIGN_H_
+#define _MACHINE__ALIGN_H_
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value
+ * for all data types (int, long, ...). The result is unsigned int
+ * and must be cast to any desired pointer type.
+ */
+#define _ALIGNBYTES (sizeof(long long) - 1)
+#define _ALIGN(p) (((u_long)(p) + _ALIGNBYTES) & ~_ALIGNBYTES)
+
+#endif /* !_MACHINE__ALIGN_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/_bus.h b/sys/arm64/include/_bus.h
new file mode 100644
index 000000000000..8bee1634c4f2
--- /dev/null
+++ b/sys/arm64/include/_bus.h
@@ -0,0 +1,28 @@
+/*-
+ * Copyright (c) 2005 The FreeBSD Foundation.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Derived in part from NetBSD's bus.h files by (alphabetically):
+ * Christopher G. Demetriou
+ * Charles M. Hannum
+ * Jason Thorpe
+ * The NetBSD Foundation.
+ */
+
+#ifndef _MACHINE__BUS_H_
+#define _MACHINE__BUS_H_
+
+/*
+ * Addresses (in bus space).
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus space.
+ */
+typedef u_long bus_space_handle_t;
+typedef struct bus_space *bus_space_tag_t;
+
+#endif /* !_MACHINE__BUS_H_ */
diff --git a/sys/arm64/include/_inttypes.h b/sys/arm64/include/_inttypes.h
new file mode 100644
index 000000000000..67ae82fbf031
--- /dev/null
+++ b/sys/arm64/include/_inttypes.h
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: $NetBSD: int_fmtio.h,v 1.4 2008/04/28 20:23:36 martin Exp $
+ */
+
+#ifdef __arm__
+#include <arm/_inttypes.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE__INTTYPES_H_
+#define _MACHINE__INTTYPES_H_
+
+/*
+ * Macros for format specifiers.
+ */
+
+/* fprintf(3) macros for signed integers. */
+
+#define PRId8 "d" /* int8_t */
+#define PRId16 "d" /* int16_t */
+#define PRId32 "d" /* int32_t */
+#define PRId64 "ld" /* int64_t */
+#define PRIdLEAST8 "d" /* int_least8_t */
+#define PRIdLEAST16 "d" /* int_least16_t */
+#define PRIdLEAST32 "d" /* int_least32_t */
+#define PRIdLEAST64 "ld" /* int_least64_t */
+#define PRIdFAST8 "d" /* int_fast8_t */
+#define PRIdFAST16 "d" /* int_fast16_t */
+#define PRIdFAST32 "d" /* int_fast32_t */
+#define PRIdFAST64 "ld" /* int_fast64_t */
+#define PRIdMAX "jd" /* intmax_t */
+#define PRIdPTR "ld" /* intptr_t */
+
+#define PRIi8 "i" /* int8_t */
+#define PRIi16 "i" /* int16_t */
+#define PRIi32 "i" /* int32_t */
+#define PRIi64 "li" /* int64_t */
+#define PRIiLEAST8 "i" /* int_least8_t */
+#define PRIiLEAST16 "i" /* int_least16_t */
+#define PRIiLEAST32 "i" /* int_least32_t */
+#define PRIiLEAST64 "li" /* int_least64_t */
+#define PRIiFAST8 "i" /* int_fast8_t */
+#define PRIiFAST16 "i" /* int_fast16_t */
+#define PRIiFAST32 "i" /* int_fast32_t */
+#define PRIiFAST64 "li" /* int_fast64_t */
+#define PRIiMAX "ji" /* intmax_t */
+#define PRIiPTR "li" /* intptr_t */
+
+/* fprintf(3) macros for unsigned integers. */
+
+#define PRIo8 "o" /* uint8_t */
+#define PRIo16 "o" /* uint16_t */
+#define PRIo32 "o" /* uint32_t */
+#define PRIo64 "lo" /* uint64_t */
+#define PRIoLEAST8 "o" /* uint_least8_t */
+#define PRIoLEAST16 "o" /* uint_least16_t */
+#define PRIoLEAST32 "o" /* uint_least32_t */
+#define PRIoLEAST64 "lo" /* uint_least64_t */
+#define PRIoFAST8 "o" /* uint_fast8_t */
+#define PRIoFAST16 "o" /* uint_fast16_t */
+#define PRIoFAST32 "o" /* uint_fast32_t */
+#define PRIoFAST64 "lo" /* uint_fast64_t */
+#define PRIoMAX "jo" /* uintmax_t */
+#define PRIoPTR "lo" /* uintptr_t */
+
+#define PRIu8 "u" /* uint8_t */
+#define PRIu16 "u" /* uint16_t */
+#define PRIu32 "u" /* uint32_t */
+#define PRIu64 "lu" /* uint64_t */
+#define PRIuLEAST8 "u" /* uint_least8_t */
+#define PRIuLEAST16 "u" /* uint_least16_t */
+#define PRIuLEAST32 "u" /* uint_least32_t */
+#define PRIuLEAST64 "lu" /* uint_least64_t */
+#define PRIuFAST8 "u" /* uint_fast8_t */
+#define PRIuFAST16 "u" /* uint_fast16_t */
+#define PRIuFAST32 "u" /* uint_fast32_t */
+#define PRIuFAST64 "lu" /* uint_fast64_t */
+#define PRIuMAX "ju" /* uintmax_t */
+#define PRIuPTR "lu" /* uintptr_t */
+
+#define PRIx8 "x" /* uint8_t */
+#define PRIx16 "x" /* uint16_t */
+#define PRIx32 "x" /* uint32_t */
+#define PRIx64 "lx" /* uint64_t */
+#define PRIxLEAST8 "x" /* uint_least8_t */
+#define PRIxLEAST16 "x" /* uint_least16_t */
+#define PRIxLEAST32 "x" /* uint_least32_t */
+#define PRIxLEAST64 "lx" /* uint_least64_t */
+#define PRIxFAST8 "x" /* uint_fast8_t */
+#define PRIxFAST16 "x" /* uint_fast16_t */
+#define PRIxFAST32 "x" /* uint_fast32_t */
+#define PRIxFAST64 "lx" /* uint_fast64_t */
+#define PRIxMAX "jx" /* uintmax_t */
+#define PRIxPTR "lx" /* uintptr_t */
+
+#define PRIX8 "X" /* uint8_t */
+#define PRIX16 "X" /* uint16_t */
+#define PRIX32 "X" /* uint32_t */
+#define PRIX64 "lX" /* uint64_t */
+#define PRIXLEAST8 "X" /* uint_least8_t */
+#define PRIXLEAST16 "X" /* uint_least16_t */
+#define PRIXLEAST32 "X" /* uint_least32_t */
+#define PRIXLEAST64 "lX" /* uint_least64_t */
+#define PRIXFAST8 "X" /* uint_fast8_t */
+#define PRIXFAST16 "X" /* uint_fast16_t */
+#define PRIXFAST32 "X" /* uint_fast32_t */
+#define PRIXFAST64 "lX" /* uint_fast64_t */
+#define PRIXMAX "jX" /* uintmax_t */
+#define PRIXPTR "lX" /* uintptr_t */
+
+/* fscanf(3) macros for signed integers. */
+
+#define SCNd8 "hhd" /* int8_t */
+#define SCNd16 "hd" /* int16_t */
+#define SCNd32 "d" /* int32_t */
+#define SCNd64 "ld" /* int64_t */
+#define SCNdLEAST8 "hhd" /* int_least8_t */
+#define SCNdLEAST16 "hd" /* int_least16_t */
+#define SCNdLEAST32 "d" /* int_least32_t */
+#define SCNdLEAST64 "ld" /* int_least64_t */
+#define SCNdFAST8 "d" /* int_fast8_t */
+#define SCNdFAST16 "d" /* int_fast16_t */
+#define SCNdFAST32 "d" /* int_fast32_t */
+#define SCNdFAST64 "ld" /* int_fast64_t */
+#define SCNdMAX "jd" /* intmax_t */
+#define SCNdPTR "ld" /* intptr_t */
+
+#define SCNi8 "hhi" /* int8_t */
+#define SCNi16 "hi" /* int16_t */
+#define SCNi32 "i" /* int32_t */
+#define SCNi64 "li" /* int64_t */
+#define SCNiLEAST8 "hhi" /* int_least8_t */
+#define SCNiLEAST16 "hi" /* int_least16_t */
+#define SCNiLEAST32 "i" /* int_least32_t */
+#define SCNiLEAST64 "li" /* int_least64_t */
+#define SCNiFAST8 "i" /* int_fast8_t */
+#define SCNiFAST16 "i" /* int_fast16_t */
+#define SCNiFAST32 "i" /* int_fast32_t */
+#define SCNiFAST64 "li" /* int_fast64_t */
+#define SCNiMAX "ji" /* intmax_t */
+#define SCNiPTR "li" /* intptr_t */
+
+/* fscanf(3) macros for unsigned integers. */
+
+#define SCNo8 "hho" /* uint8_t */
+#define SCNo16 "ho" /* uint16_t */
+#define SCNo32 "o" /* uint32_t */
+#define SCNo64 "lo" /* uint64_t */
+#define SCNoLEAST8 "hho" /* uint_least8_t */
+#define SCNoLEAST16 "ho" /* uint_least16_t */
+#define SCNoLEAST32 "o" /* uint_least32_t */
+#define SCNoLEAST64 "lo" /* uint_least64_t */
+#define SCNoFAST8 "o" /* uint_fast8_t */
+#define SCNoFAST16 "o" /* uint_fast16_t */
+#define SCNoFAST32 "o" /* uint_fast32_t */
+#define SCNoFAST64 "lo" /* uint_fast64_t */
+#define SCNoMAX "jo" /* uintmax_t */
+#define SCNoPTR "lo" /* uintptr_t */
+
+#define SCNu8 "hhu" /* uint8_t */
+#define SCNu16 "hu" /* uint16_t */
+#define SCNu32 "u" /* uint32_t */
+#define SCNu64 "lu" /* uint64_t */
+#define SCNuLEAST8 "hhu" /* uint_least8_t */
+#define SCNuLEAST16 "hu" /* uint_least16_t */
+#define SCNuLEAST32 "u" /* uint_least32_t */
+#define SCNuLEAST64 "lu" /* uint_least64_t */
+#define SCNuFAST8 "u" /* uint_fast8_t */
+#define SCNuFAST16 "u" /* uint_fast16_t */
+#define SCNuFAST32 "u" /* uint_fast32_t */
+#define SCNuFAST64 "lu" /* uint_fast64_t */
+#define SCNuMAX "ju" /* uintmax_t */
+#define SCNuPTR "lu" /* uintptr_t */
+
+#define SCNx8 "hhx" /* uint8_t */
+#define SCNx16 "hx" /* uint16_t */
+#define SCNx32 "x" /* uint32_t */
+#define SCNx64 "lx" /* uint64_t */
+#define SCNxLEAST8 "hhx" /* uint_least8_t */
+#define SCNxLEAST16 "hx" /* uint_least16_t */
+#define SCNxLEAST32 "x" /* uint_least32_t */
+#define SCNxLEAST64 "lx" /* uint_least64_t */
+#define SCNxFAST8 "x" /* uint_fast8_t */
+#define SCNxFAST16 "x" /* uint_fast16_t */
+#define SCNxFAST32 "x" /* uint_fast32_t */
+#define SCNxFAST64 "lx" /* uint_fast64_t */
+#define SCNxMAX "jx" /* uintmax_t */
+#define SCNxPTR "lx" /* uintptr_t */
+
+#endif /* !_MACHINE__INTTYPES_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/_limits.h b/sys/arm64/include/_limits.h
new file mode 100644
index 000000000000..08649b5c779d
--- /dev/null
+++ b/sys/arm64/include/_limits.h
@@ -0,0 +1,88 @@
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/_limits.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define _MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ */
+
+#define __CHAR_BIT 8 /* number of bits in a char */
+
+#define __SCHAR_MAX 0x7f /* max value for a signed char */
+#define __SCHAR_MIN (-0x7f - 1) /* min value for a signed char */
+
+#define __UCHAR_MAX 0xff /* max value for an unsigned char */
+
+#define __USHRT_MAX 0xffff /* max value for an unsigned short */
+#define __SHRT_MAX 0x7fff /* max value for a short */
+#define __SHRT_MIN (-0x7fff - 1) /* min value for a short */
+
+#define __UINT_MAX 0xffffffff /* max value for an unsigned int */
+#define __INT_MAX 0x7fffffff /* max value for an int */
+#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
+
+#define __ULONG_MAX 0xffffffffffffffffUL /* max for an unsigned long */
+#define __LONG_MAX 0x7fffffffffffffffL /* max for a long */
+#define __LONG_MIN (-0x7fffffffffffffffL - 1) /* min for a long */
+
+/* Long longs have the same size but not the same type as longs. */
+ /* max for an unsigned long long */
+#define __ULLONG_MAX 0xffffffffffffffffULL
+#define __LLONG_MAX 0x7fffffffffffffffLL /* max for a long long */
+#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
+
+#define __SSIZE_MAX __LONG_MAX /* max value for a ssize_t */
+
+#define __SIZE_T_MAX __ULONG_MAX /* max value for a size_t */
+
+#define __OFF_MAX __LONG_MAX /* max value for an off_t */
+#define __OFF_MIN __LONG_MIN /* min value for an off_t */
+
+/* Quads and longs are the same size. Ensure they stay in sync. */
+#define __UQUAD_MAX (__ULONG_MAX) /* max value for a uquad_t */
+#define __QUAD_MAX (__LONG_MAX) /* max value for a quad_t */
+#define __QUAD_MIN (__LONG_MIN) /* min value for a quad_t */
+
+#define __LONG_BIT 64
+#define __WORD_BIT 32
+
+/* Minimum signal stack size. */
+#define __MINSIGSTKSZ (1024 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/_stdint.h b/sys/arm64/include/_stdint.h
new file mode 100644
index 000000000000..0c183bd60dca
--- /dev/null
+++ b/sys/arm64/include/_stdint.h
@@ -0,0 +1,162 @@
+/*-
+ * Copyright (c) 2001, 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Klaus Klein.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/_stdint.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE__STDINT_H_
+#define _MACHINE__STDINT_H_
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS)
+
+#define INT8_C(c) (c)
+#define INT16_C(c) (c)
+#define INT32_C(c) (c)
+#define INT64_C(c) (c ## L)
+
+#define UINT8_C(c) (c)
+#define UINT16_C(c) (c)
+#define UINT32_C(c) (c ## U)
+#define UINT64_C(c) (c ## UL)
+
+#define INTMAX_C(c) INT64_C(c)
+#define UINTMAX_C(c) UINT64_C(c)
+
+#endif /* !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) */
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS)
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.1 Limits of exact-width integer types
+ */
+/* Minimum values of exact-width signed integer types. */
+#define INT8_MIN (-0x7f-1)
+#define INT16_MIN (-0x7fff-1)
+#define INT32_MIN (-0x7fffffff-1)
+#define INT64_MIN (-0x7fffffffffffffffL-1)
+
+/* Maximum values of exact-width signed integer types. */
+#define INT8_MAX 0x7f
+#define INT16_MAX 0x7fff
+#define INT32_MAX 0x7fffffff
+#define INT64_MAX 0x7fffffffffffffffL
+
+/* Maximum values of exact-width unsigned integer types. */
+#define UINT8_MAX 0xff
+#define UINT16_MAX 0xffff
+#define UINT32_MAX 0xffffffffU
+#define UINT64_MAX 0xffffffffffffffffUL
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.2 Limits of minimum-width integer types
+ */
+/* Minimum values of minimum-width signed integer types. */
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST64_MIN INT64_MIN
+
+/* Maximum values of minimum-width signed integer types. */
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MAX INT64_MAX
+
+/* Maximum values of minimum-width unsigned integer types. */
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.3 Limits of fastest minimum-width integer types
+ */
+/* Minimum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MIN INT32_MIN
+#define INT_FAST16_MIN INT32_MIN
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST64_MIN INT64_MIN
+
+/* Maximum values of fastest minimum-width signed integer types. */
+#define INT_FAST8_MAX INT32_MAX
+#define INT_FAST16_MAX INT32_MAX
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MAX INT64_MAX
+
+/* Maximum values of fastest minimum-width unsigned integer types. */
+#define UINT_FAST8_MAX UINT32_MAX
+#define UINT_FAST16_MAX UINT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.4 Limits of integer types capable of holding object pointers
+ */
+#define INTPTR_MIN INT64_MIN
+#define INTPTR_MAX INT64_MAX
+#define UINTPTR_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.2.5 Limits of greatest-width integer types
+ */
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+/*
+ * ISO/IEC 9899:1999
+ * 7.18.3 Limits of other integer types
+ */
+/* Limits of ptrdiff_t. */
+#define PTRDIFF_MIN INT64_MIN
+#define PTRDIFF_MAX INT64_MAX
+
+/* Limits of sig_atomic_t. */
+#define SIG_ATOMIC_MIN INT64_MIN
+#define SIG_ATOMIC_MAX INT64_MAX
+
+/* Limit of size_t. */
+#define SIZE_MAX UINT64_MAX
+
+/* Limits of wint_t. */
+#define WINT_MIN INT32_MIN
+#define WINT_MAX INT32_MAX
+
+#endif /* !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) */
+
+#endif /* !_MACHINE__STDINT_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/_types.h b/sys/arm64/include/_types.h
new file mode 100644
index 000000000000..98f7dea67520
--- /dev/null
+++ b/sys/arm64/include/_types.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/_types.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE__TYPES_H_
+#define _MACHINE__TYPES_H_
+
+#ifndef _SYS__TYPES_H_
+#error do not include this header, use sys/_types.h
+#endif
+
+/*
+ * Standard type definitions.
+ */
+typedef __int32_t __clock_t; /* clock()... */
+typedef __int64_t __critical_t;
+#ifndef _STANDALONE
+typedef double __double_t;
+typedef float __float_t;
+#endif
+typedef __int32_t __int_fast8_t;
+typedef __int32_t __int_fast16_t;
+typedef __int32_t __int_fast32_t;
+typedef __int64_t __int_fast64_t;
+typedef __int64_t __register_t;
+typedef __int64_t __segsz_t; /* segment size (in pages) */
+typedef __int64_t __time_t; /* time()... */
+typedef __uint32_t __uint_fast8_t;
+typedef __uint32_t __uint_fast16_t;
+typedef __uint32_t __uint_fast32_t;
+typedef __uint64_t __uint_fast64_t;
+typedef __uint64_t __u_register_t;
+typedef __uint64_t __vm_paddr_t;
+typedef unsigned int ___wchar_t;
+
+#define __WCHAR_MIN 0 /* min value for a wchar_t */
+#define __WCHAR_MAX __UINT_MAX /* max value for a wchar_t */
+
+#endif /* !_MACHINE__TYPES_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/acle-compat.h b/sys/arm64/include/acle-compat.h
new file mode 100644
index 000000000000..9954f27fd424
--- /dev/null
+++ b/sys/arm64/include/acle-compat.h
@@ -0,0 +1,5 @@
+#ifdef __arm__
+#include <arm/acle-compat.h>
+#else /* !__arm__ */
+#error Do not include this header, used only for 32-bit compatibility
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/acpica_machdep.h b/sys/arm64/include/acpica_machdep.h
new file mode 100644
index 000000000000..973ebe1b81bc
--- /dev/null
+++ b/sys/arm64/include/acpica_machdep.h
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2002 Mitsuru IWASAKI
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/******************************************************************************
+ *
+ * Name: acpica_machdep.h - arch-specific defines, etc.
+ * $Revision$
+ *
+ *****************************************************************************/
+
+#ifndef __ACPICA_MACHDEP_H__
+#define __ACPICA_MACHDEP_H__
+
+#ifdef _KERNEL
+
+#include <machine/_bus.h>
+
+/* Only use the reduced hardware model */
+#define ACPI_REDUCED_HARDWARE 1
+
+/* Section 5.2.10.1: global lock acquire/release functions */
+int acpi_acquire_global_lock(volatile uint32_t *);
+int acpi_release_global_lock(volatile uint32_t *);
+
+void *acpi_map_table(vm_paddr_t pa, const char *sig);
+void acpi_unmap_table(void *table);
+vm_paddr_t acpi_find_table(const char *sig);
+
+struct acpi_generic_address;
+
+int acpi_map_addr(struct acpi_generic_address *, bus_space_tag_t *,
+ bus_space_handle_t *, bus_size_t);
+
+extern int (*apei_nmi)(void);
+
+#endif /* _KERNEL */
+
+#endif /* __ACPICA_MACHDEP_H__ */
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
new file mode 100644
index 000000000000..cd770386f852
--- /dev/null
+++ b/sys/arm64/include/armreg.h
@@ -0,0 +1,2870 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2015,2021 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/armreg.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_ARMREG_H_
+#define _MACHINE_ARMREG_H_
+
+#define INSN_SIZE 4
+
+#define __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
+ S##op0##_##op1##_C##crn##_C##crm##_##op2
+#define _MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
+ __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2)
+#define MRS_REG_ALT_NAME(reg) \
+ _MRS_REG_ALT_NAME(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
+
+
+#define READ_SPECIALREG(reg) \
+({ uint64_t _val; \
+ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (_val)); \
+ _val; \
+})
+#define WRITE_SPECIALREG(reg, _val) \
+ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)_val))
+
+#define UL(x) UINT64_C(x)
+
+/* AFSR0_EL1 - Auxiliary Fault Status Register 0 */
+#define AFSR0_EL1_REG MRS_REG_ALT_NAME(AFSR0_EL1)
+#define AFSR0_EL1_op0 3
+#define AFSR0_EL1_op1 0
+#define AFSR0_EL1_CRn 5
+#define AFSR0_EL1_CRm 1
+#define AFSR0_EL1_op2 0
+
+/* AFSR0_EL12 */
+#define AFSR0_EL12_REG MRS_REG_ALT_NAME(AFSR0_EL12)
+#define AFSR0_EL12_op0 3
+#define AFSR0_EL12_op1 5
+#define AFSR0_EL12_CRn 5
+#define AFSR0_EL12_CRm 1
+#define AFSR0_EL12_op2 0
+
+/* AFSR1_EL1 - Auxiliary Fault Status Register 1 */
+#define AFSR1_EL1_REG MRS_REG_ALT_NAME(AFSR1_EL1)
+#define AFSR1_EL1_op0 3
+#define AFSR1_EL1_op1 0
+#define AFSR1_EL1_CRn 5
+#define AFSR1_EL1_CRm 1
+#define AFSR1_EL1_op2 1
+
+/* AFSR1_EL12 */
+#define AFSR1_EL12_REG MRS_REG_ALT_NAME(AFSR1_EL12)
+#define AFSR1_EL12_op0 3
+#define AFSR1_EL12_op1 5
+#define AFSR1_EL12_CRn 5
+#define AFSR1_EL12_CRm 1
+#define AFSR1_EL12_op2 1
+
+/* AMAIR_EL1 - Auxiliary Memory Attribute Indirection Register */
+#define AMAIR_EL1_REG MRS_REG_ALT_NAME(AMAIR_EL1)
+#define AMAIR_EL1_op0 3
+#define AMAIR_EL1_op1 0
+#define AMAIR_EL1_CRn 10
+#define AMAIR_EL1_CRm 3
+#define AMAIR_EL1_op2 0
+
+/* AMAIR_EL12 */
+#define AMAIR_EL12_REG MRS_REG_ALT_NAME(AMAIR_EL12)
+#define AMAIR_EL12_op0 3
+#define AMAIR_EL12_op1 5
+#define AMAIR_EL12_CRn 10
+#define AMAIR_EL12_CRm 3
+#define AMAIR_EL12_op2 0
+
+/* APDAKeyHi_EL1 */
+#define APDAKeyHi_EL1_REG MRS_REG_ALT_NAME(APDAKeyHi_EL1)
+#define APDAKeyHi_EL1_op0 3
+#define APDAKeyHi_EL1_op1 0
+#define APDAKeyHi_EL1_CRn 2
+#define APDAKeyHi_EL1_CRm 2
+#define APDAKeyHi_EL1_op2 1
+
+/* APDAKeyLo_EL1 */
+#define APDAKeyLo_EL1_REG MRS_REG_ALT_NAME(APDAKeyLo_EL1)
+#define APDAKeyLo_EL1_op0 3
+#define APDAKeyLo_EL1_op1 0
+#define APDAKeyLo_EL1_CRn 2
+#define APDAKeyLo_EL1_CRm 2
+#define APDAKeyLo_EL1_op2 0
+
+/* APDBKeyHi_EL1 */
+#define APDBKeyHi_EL1_REG MRS_REG_ALT_NAME(APDBKeyHi_EL1)
+#define APDBKeyHi_EL1_op0 3
+#define APDBKeyHi_EL1_op1 0
+#define APDBKeyHi_EL1_CRn 2
+#define APDBKeyHi_EL1_CRm 2
+#define APDBKeyHi_EL1_op2 3
+
+/* APDBKeyLo_EL1 */
+#define APDBKeyLo_EL1_REG MRS_REG_ALT_NAME(APDBKeyLo_EL1)
+#define APDBKeyLo_EL1_op0 3
+#define APDBKeyLo_EL1_op1 0
+#define APDBKeyLo_EL1_CRn 2
+#define APDBKeyLo_EL1_CRm 2
+#define APDBKeyLo_EL1_op2 2
+
+/* APGAKeyHi_EL1 */
+#define APGAKeyHi_EL1_REG MRS_REG_ALT_NAME(APGAKeyHi_EL1)
+#define APGAKeyHi_EL1_op0 3
+#define APGAKeyHi_EL1_op1 0
+#define APGAKeyHi_EL1_CRn 2
+#define APGAKeyHi_EL1_CRm 3
+#define APGAKeyHi_EL1_op2 1
+
+/* APGAKeyLo_EL1 */
+#define APGAKeyLo_EL1_REG MRS_REG_ALT_NAME(APGAKeyLo_EL1)
+#define APGAKeyLo_EL1_op0 3
+#define APGAKeyLo_EL1_op1 0
+#define APGAKeyLo_EL1_CRn 2
+#define APGAKeyLo_EL1_CRm 3
+#define APGAKeyLo_EL1_op2 0
+
+/* APIAKeyHi_EL1 */
+#define APIAKeyHi_EL1_REG MRS_REG_ALT_NAME(APIAKeyHi_EL1)
+#define APIAKeyHi_EL1_op0 3
+#define APIAKeyHi_EL1_op1 0
+#define APIAKeyHi_EL1_CRn 2
+#define APIAKeyHi_EL1_CRm 1
+#define APIAKeyHi_EL1_op2 1
+
+/* APIAKeyLo_EL1 */
+#define APIAKeyLo_EL1_REG MRS_REG_ALT_NAME(APIAKeyLo_EL1)
+#define APIAKeyLo_EL1_op0 3
+#define APIAKeyLo_EL1_op1 0
+#define APIAKeyLo_EL1_CRn 2
+#define APIAKeyLo_EL1_CRm 1
+#define APIAKeyLo_EL1_op2 0
+
+/* APIBKeyHi_EL1 */
+#define APIBKeyHi_EL1_REG MRS_REG_ALT_NAME(APIBKeyHi_EL1)
+#define APIBKeyHi_EL1_op0 3
+#define APIBKeyHi_EL1_op1 0
+#define APIBKeyHi_EL1_CRn 2
+#define APIBKeyHi_EL1_CRm 1
+#define APIBKeyHi_EL1_op2 3
+
+/* APIBKeyLo_EL1 */
+#define APIBKeyLo_EL1_REG MRS_REG_ALT_NAME(APIBKeyLo_EL1)
+#define APIBKeyLo_EL1_op0 3
+#define APIBKeyLo_EL1_op1 0
+#define APIBKeyLo_EL1_CRn 2
+#define APIBKeyLo_EL1_CRm 1
+#define APIBKeyLo_EL1_op2 2
+
+/* CCSIDR_EL1 - Cache Size ID Register */
+#define CCSIDR_NumSets_MASK 0x0FFFE000
+#define CCSIDR_NumSets64_MASK 0x00FFFFFF00000000
+#define CCSIDR_NumSets_SHIFT 13
+#define CCSIDR_NumSets64_SHIFT 32
+#define CCSIDR_Assoc_MASK 0x00001FF8
+#define CCSIDR_Assoc64_MASK 0x0000000000FFFFF8
+#define CCSIDR_Assoc_SHIFT 3
+#define CCSIDR_Assoc64_SHIFT 3
+#define CCSIDR_LineSize_MASK 0x7
+#define CCSIDR_NSETS(idr) \
+ (((idr) & CCSIDR_NumSets_MASK) >> CCSIDR_NumSets_SHIFT)
+#define CCSIDR_ASSOC(idr) \
+ (((idr) & CCSIDR_Assoc_MASK) >> CCSIDR_Assoc_SHIFT)
+#define CCSIDR_NSETS_64(idr) \
+ (((idr) & CCSIDR_NumSets64_MASK) >> CCSIDR_NumSets64_SHIFT)
+#define CCSIDR_ASSOC_64(idr) \
+ (((idr) & CCSIDR_Assoc64_MASK) >> CCSIDR_Assoc64_SHIFT)
+
+/* CLIDR_EL1 - Cache level ID register */
+#define CLIDR_CTYPE_MASK 0x7 /* Cache type mask bits */
+#define CLIDR_CTYPE_IO 0x1 /* Instruction only */
+#define CLIDR_CTYPE_DO 0x2 /* Data only */
+#define CLIDR_CTYPE_ID 0x3 /* Split instruction and data */
+#define CLIDR_CTYPE_UNIFIED 0x4 /* Unified */
+
+/* CNTKCTL_EL1 - Counter-timer Kernel Control Register */
+#define CNTKCTL_EL1_op0 3
+#define CNTKCTL_EL1_op1 0
+#define CNTKCTL_EL1_CRn 14
+#define CNTKCTL_EL1_CRm 1
+#define CNTKCTL_EL1_op2 0
+
+/* CNTKCTL_EL12 - Counter-timer Kernel Control Register */
+#define CNTKCTL_EL12_op0 3
+#define CNTKCTL_EL12_op1 5
+#define CNTKCTL_EL12_CRn 14
+#define CNTKCTL_EL12_CRm 1
+#define CNTKCTL_EL12_op2 0
+
+/* CNTP_CTL_EL0 - Counter-timer Physical Timer Control register */
+#define CNTP_CTL_EL0_op0 3
+#define CNTP_CTL_EL0_op1 3
+#define CNTP_CTL_EL0_CRn 14
+#define CNTP_CTL_EL0_CRm 2
+#define CNTP_CTL_EL0_op2 1
+#define CNTP_CTL_ENABLE (1 << 0)
+#define CNTP_CTL_IMASK (1 << 1)
+#define CNTP_CTL_ISTATUS (1 << 2)
+
+/* CNTP_CVAL_EL0 - Counter-timer Physical Timer CompareValue register */
+#define CNTP_CVAL_EL0_op0 3
+#define CNTP_CVAL_EL0_op1 3
+#define CNTP_CVAL_EL0_CRn 14
+#define CNTP_CVAL_EL0_CRm 2
+#define CNTP_CVAL_EL0_op2 2
+
+/* CNTP_TVAL_EL0 - Counter-timer Physical Timer TimerValue register */
+#define CNTP_TVAL_EL0_op0 3
+#define CNTP_TVAL_EL0_op1 3
+#define CNTP_TVAL_EL0_CRn 14
+#define CNTP_TVAL_EL0_CRm 2
+#define CNTP_TVAL_EL0_op2 0
+
+/* CNTPCT_EL0 - Counter-timer Physical Count register */
+#define CNTPCT_EL0_ISS ISS_MSR_REG(CNTPCT_EL0)
+#define CNTPCT_EL0_op0 3
+#define CNTPCT_EL0_op1 3
+#define CNTPCT_EL0_CRn 14
+#define CNTPCT_EL0_CRm 0
+#define CNTPCT_EL0_op2 1
+
+/* CNTV_CTL_EL0 - Counter-timer Virtual Timer Control register */
+#define CNTV_CTL_EL0_op0 3
+#define CNTV_CTL_EL0_op1 3
+#define CNTV_CTL_EL0_CRn 14
+#define CNTV_CTL_EL0_CRm 3
+#define CNTV_CTL_EL0_op2 1
+
+/* CNTV_CTL_EL02 - Counter-timer Virtual Timer Control register */
+#define CNTV_CTL_EL02_op0 3
+#define CNTV_CTL_EL02_op1 5
+#define CNTV_CTL_EL02_CRn 14
+#define CNTV_CTL_EL02_CRm 3
+#define CNTV_CTL_EL02_op2 1
+
+/* CNTV_CVAL_EL0 - Counter-timer Virtual Timer CompareValue register */
+#define CNTV_CVAL_EL0_op0 3
+#define CNTV_CVAL_EL0_op1 3
+#define CNTV_CVAL_EL0_CRn 14
+#define CNTV_CVAL_EL0_CRm 3
+#define CNTV_CVAL_EL0_op2 2
+
+/* CNTV_CVAL_EL02 - Counter-timer Virtual Timer CompareValue register */
+#define CNTV_CVAL_EL02_op0 3
+#define CNTV_CVAL_EL02_op1 5
+#define CNTV_CVAL_EL02_CRn 14
+#define CNTV_CVAL_EL02_CRm 3
+#define CNTV_CVAL_EL02_op2 2
+
+/* CONTEXTIDR_EL1 - Context ID register */
+#define CONTEXTIDR_EL1_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL1)
+#define CONTEXTIDR_EL1_op0 3
+#define CONTEXTIDR_EL1_op1 0
+#define CONTEXTIDR_EL1_CRn 13
+#define CONTEXTIDR_EL1_CRm 0
+#define CONTEXTIDR_EL1_op2 1
+
+/* CONTEXTIDR_EL12 */
+#define CONTEXTIDR_EL12_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL12)
+#define CONTEXTIDR_EL12_op0 3
+#define CONTEXTIDR_EL12_op1 5
+#define CONTEXTIDR_EL12_CRn 13
+#define CONTEXTIDR_EL12_CRm 0
+#define CONTEXTIDR_EL12_op2 1
+
+/* CPACR_EL1 */
+#define CPACR_EL1_REG MRS_REG_ALT_NAME(CPACR_EL1)
+#define CPACR_EL1_op0 3
+#define CPACR_EL1_op1 0
+#define CPACR_EL1_CRn 1
+#define CPACR_EL1_CRm 0
+#define CPACR_EL1_op2 2
+#define CPACR_ZEN_MASK (0x3 << 16)
+#define CPACR_ZEN_TRAP_ALL1 (0x0 << 16) /* Traps from EL0 and EL1 */
+#define CPACR_ZEN_TRAP_EL0 (0x1 << 16) /* Traps from EL0 */
+#define CPACR_ZEN_TRAP_ALL2 (0x2 << 16) /* Traps from EL0 and EL1 */
+#define CPACR_ZEN_TRAP_NONE (0x3 << 16) /* No traps */
+#define CPACR_FPEN_MASK (0x3 << 20)
+#define CPACR_FPEN_TRAP_ALL1 (0x0 << 20) /* Traps from EL0 and EL1 */
+#define CPACR_FPEN_TRAP_EL0 (0x1 << 20) /* Traps from EL0 */
+#define CPACR_FPEN_TRAP_ALL2 (0x2 << 20) /* Traps from EL0 and EL1 */
+#define CPACR_FPEN_TRAP_NONE (0x3 << 20) /* No traps */
+#define CPACR_TTA (0x1 << 28)
+
+/* CPACR_EL12 */
+#define CPACR_EL12_REG MRS_REG_ALT_NAME(CPACR_EL12)
+#define CPACR_EL12_op0 3
+#define CPACR_EL12_op1 5
+#define CPACR_EL12_CRn 1
+#define CPACR_EL12_CRm 0
+#define CPACR_EL12_op2 2
+
+/* CSSELR_EL1 - Cache size selection register */
+#define CSSELR_Level(i) (i << 1)
+#define CSSELR_InD 0x00000001
+
+/* CTR_EL0 - Cache Type Register */
+#define CTR_EL0_REG MRS_REG_ALT_NAME(CTR_EL0)
+#define CTR_EL0_ISS ISS_MSR_REG(CTR_EL0)
+#define CTR_EL0_op0 3
+#define CTR_EL0_op1 3
+#define CTR_EL0_CRn 0
+#define CTR_EL0_CRm 0
+#define CTR_EL0_op2 1
+#define CTR_RES1 (1 << 31)
+#define CTR_TminLine_SHIFT 32
+#define CTR_TminLine_MASK (UL(0x3f) << CTR_TminLine_SHIFT)
+#define CTR_TminLine_VAL(reg) ((reg) & CTR_TminLine_MASK)
+#define CTR_DIC_SHIFT 29
+#define CTR_DIC_WIDTH 1
+#define CTR_DIC_MASK (0x1 << CTR_DIC_SHIFT)
+#define CTR_DIC_VAL(reg) ((reg) & CTR_DIC_MASK)
+#define CTR_DIC_NONE (0x0 << CTR_DIC_SHIFT)
+#define CTR_DIC_IMPL (0x1 << CTR_DIC_SHIFT)
+#define CTR_IDC_SHIFT 28
+#define CTR_IDC_WIDTH 1
+#define CTR_IDC_MASK (0x1 << CTR_IDC_SHIFT)
+#define CTR_IDC_VAL(reg) ((reg) & CTR_IDC_MASK)
+#define CTR_IDC_NONE (0x0 << CTR_IDC_SHIFT)
+#define CTR_IDC_IMPL (0x1 << CTR_IDC_SHIFT)
+#define CTR_CWG_SHIFT 24
+#define CTR_CWG_WIDTH 4
+#define CTR_CWG_MASK (0xf << CTR_CWG_SHIFT)
+#define CTR_CWG_VAL(reg) ((reg) & CTR_CWG_MASK)
+#define CTR_CWG_SIZE(reg) (4 << (CTR_CWG_VAL(reg) >> CTR_CWG_SHIFT))
+#define CTR_ERG_SHIFT 20
+#define CTR_ERG_WIDTH 4
+#define CTR_ERG_MASK (0xf << CTR_ERG_SHIFT)
+#define CTR_ERG_VAL(reg) ((reg) & CTR_ERG_MASK)
+#define CTR_ERG_SIZE(reg) (4 << (CTR_ERG_VAL(reg) >> CTR_ERG_SHIFT))
+#define CTR_DLINE_SHIFT 16
+#define CTR_DLINE_WIDTH 4
+#define CTR_DLINE_MASK (0xf << CTR_DLINE_SHIFT)
+#define CTR_DLINE_VAL(reg) ((reg) & CTR_DLINE_MASK)
+#define CTR_DLINE_SIZE(reg) (4 << (CTR_DLINE_VAL(reg) >> CTR_DLINE_SHIFT))
+#define CTR_L1IP_SHIFT 14
+#define CTR_L1IP_WIDTH 2
+#define CTR_L1IP_MASK (0x3 << CTR_L1IP_SHIFT)
+#define CTR_L1IP_VAL(reg) ((reg) & CTR_L1IP_MASK)
+#define CTR_L1IP_VIPT (2 << CTR_L1IP_SHIFT)
+#define CTR_L1IP_PIPT (3 << CTR_L1IP_SHIFT)
+#define CTR_ILINE_SHIFT 0
+#define CTR_ILINE_WIDTH 4
+#define CTR_ILINE_MASK (0xf << CTR_ILINE_SHIFT)
+#define CTR_ILINE_VAL(reg) ((reg) & CTR_ILINE_MASK)
+#define CTR_ILINE_SIZE(reg) (4 << (CTR_ILINE_VAL(reg) >> CTR_ILINE_SHIFT))
+
+/* CurrentEL - Current Exception Level */
+#define CURRENTEL_EL_SHIFT 2
+#define CURRENTEL_EL_MASK (0x3 << CURRENTEL_EL_SHIFT)
+#define CURRENTEL_EL_EL0 (0x0 << CURRENTEL_EL_SHIFT)
+#define CURRENTEL_EL_EL1 (0x1 << CURRENTEL_EL_SHIFT)
+#define CURRENTEL_EL_EL2 (0x2 << CURRENTEL_EL_SHIFT)
+#define CURRENTEL_EL_EL3 (0x3 << CURRENTEL_EL_SHIFT)
+
+/* DAIFSet/DAIFClear */
+#define DAIF_D (1 << 3)
+#define DAIF_A (1 << 2)
+#define DAIF_I (1 << 1)
+#define DAIF_F (1 << 0)
+#define DAIF_ALL (DAIF_D | DAIF_A | DAIF_I | DAIF_F)
+#define DAIF_INTR (DAIF_I | DAIF_F) /* All exceptions that pass */
+ /* through the intr framework */
+
+/* DBGBCR<n>_EL1 - Debug Breakpoint Control Registers */
+#define DBGBCR_EL1_op0 2
+#define DBGBCR_EL1_op1 0
+#define DBGBCR_EL1_CRn 0
+/* DBGBCR_EL1_CRm indicates which breakpoint this register is for */
+#define DBGBCR_EL1_op2 5
+#define DBGBCR_EN 0x1
+#define DBGBCR_PMC_SHIFT 1
+#define DBGBCR_PMC (0x3 << DBGBCR_PMC_SHIFT)
+#define DBGBCR_PMC_EL1 (0x1 << DBGBCR_PMC_SHIFT)
+#define DBGBCR_PMC_EL0 (0x2 << DBGBCR_PMC_SHIFT)
+#define DBGBCR_BAS_SHIFT 5
+#define DBGBCR_BAS (0xf << DBGBCR_BAS_SHIFT)
+#define DBGBCR_HMC_SHIFT 13
+#define DBGBCR_HMC (0x1 << DBGBCR_HMC_SHIFT)
+#define DBGBCR_SSC_SHIFT 14
+#define DBGBCR_SSC (0x3 << DBGBCR_SSC_SHIFT)
+#define DBGBCR_LBN_SHIFT 16
+#define DBGBCR_LBN (0xf << DBGBCR_LBN_SHIFT)
+#define DBGBCR_BT_SHIFT 20
+#define DBGBCR_BT (0xf << DBGBCR_BT_SHIFT)
+
+/* DBGBVR<n>_EL1 - Debug Breakpoint Value Registers */
+#define DBGBVR_EL1_op0 2
+#define DBGBVR_EL1_op1 0
+#define DBGBVR_EL1_CRn 0
+/* DBGBVR_EL1_CRm indicates which breakpoint this register is for */
+#define DBGBVR_EL1_op2 4
+
+/* DBGWCR<n>_EL1 - Debug Watchpoint Control Registers */
+#define DBGWCR_EL1_op0 2
+#define DBGWCR_EL1_op1 0
+#define DBGWCR_EL1_CRn 0
+/* DBGWCR_EL1_CRm indicates which watchpoint this register is for */
+#define DBGWCR_EL1_op2 7
+#define DBGWCR_EN 0x1
+#define DBGWCR_PAC_SHIFT 1
+#define DBGWCR_PAC (0x3 << DBGWCR_PAC_SHIFT)
+#define DBGWCR_PAC_EL1 (0x1 << DBGWCR_PAC_SHIFT)
+#define DBGWCR_PAC_EL0 (0x2 << DBGWCR_PAC_SHIFT)
+#define DBGWCR_LSC_SHIFT 3
+#define DBGWCR_LSC (0x3 << DBGWCR_LSC_SHIFT)
+#define DBGWCR_BAS_SHIFT 5
+#define DBGWCR_BAS (0xff << DBGWCR_BAS_SHIFT)
+#define DBGWCR_HMC_SHIFT 13
+#define DBGWCR_HMC (0x1 << DBGWCR_HMC_SHIFT)
+#define DBGWCR_SSC_SHIFT 14
+#define DBGWCR_SSC (0x3 << DBGWCR_SSC_SHIFT)
+#define DBGWCR_LBN_SHIFT 16
+#define DBGWCR_LBN (0xf << DBGWCR_LBN_SHIFT)
+#define DBGWCR_WT_SHIFT 20
+#define DBGWCR_WT (0x1 << DBGWCR_WT_SHIFT)
+#define DBGWCR_MASK_SHIFT 24
+#define DBGWCR_MASK (0x1f << DBGWCR_MASK_SHIFT)
+
+/* DBGWVR<n>_EL1 - Debug Watchpoint Value Registers */
+#define DBGWVR_EL1_op0 2
+#define DBGWVR_EL1_op1 0
+#define DBGWVR_EL1_CRn 0
+/* DBGWVR_EL1_CRm indicates which watchpoint this register is for */
+#define DBGWVR_EL1_op2 6
+
+/* DCZID_EL0 - Data Cache Zero ID register */
+#define DCZID_DZP (1 << 4) /* DC ZVA prohibited if non-0 */
+#define DCZID_BS_SHIFT 0
+#define DCZID_BS_MASK (0xf << DCZID_BS_SHIFT)
+#define DCZID_BS_SIZE(reg) (((reg) & DCZID_BS_MASK) >> DCZID_BS_SHIFT)
+
+/* DBGAUTHSTATUS_EL1 */
+#define DBGAUTHSTATUS_EL1_op0 2
+#define DBGAUTHSTATUS_EL1_op1 0
+#define DBGAUTHSTATUS_EL1_CRn 7
+#define DBGAUTHSTATUS_EL1_CRm 14
+#define DBGAUTHSTATUS_EL1_op2 6
+
+/* DBGCLAIMCLR_EL1 */
+#define DBGCLAIMCLR_EL1_op0 2
+#define DBGCLAIMCLR_EL1_op1 0
+#define DBGCLAIMCLR_EL1_CRn 7
+#define DBGCLAIMCLR_EL1_CRm 9
+#define DBGCLAIMCLR_EL1_op2 6
+
+/* DBGCLAIMSET_EL1 */
+#define DBGCLAIMSET_EL1_op0 2
+#define DBGCLAIMSET_EL1_op1 0
+#define DBGCLAIMSET_EL1_CRn 7
+#define DBGCLAIMSET_EL1_CRm 8
+#define DBGCLAIMSET_EL1_op2 6
+
+/* DBGPRCR_EL1 */
+#define DBGPRCR_EL1_op0 2
+#define DBGPRCR_EL1_op1 0
+#define DBGPRCR_EL1_CRn 1
+#define DBGPRCR_EL1_CRm 4
+#define DBGPRCR_EL1_op2 4
+
+/* ELR_EL1 */
+#define ELR_EL1_REG MRS_REG_ALT_NAME(ELR_EL1)
+#define ELR_EL1_op0 3
+#define ELR_EL1_op1 0
+#define ELR_EL1_CRn 4
+#define ELR_EL1_CRm 0
+#define ELR_EL1_op2 1
+
+/* ELR_EL12 */
+#define ELR_EL12_REG MRS_REG_ALT_NAME(ELR_EL12)
+#define ELR_EL12_op0 3
+#define ELR_EL12_op1 5
+#define ELR_EL12_CRn 4
+#define ELR_EL12_CRm 0
+#define ELR_EL12_op2 1
+
+/* ESR_ELx */
+#define ESR_ELx_ISS_MASK 0x01ffffff
+#define ISS_FP_TFV_SHIFT 23
+#define ISS_FP_TFV (0x01 << ISS_FP_TFV_SHIFT)
+#define ISS_FP_IOF 0x01
+#define ISS_FP_DZF 0x02
+#define ISS_FP_OFF 0x04
+#define ISS_FP_UFF 0x08
+#define ISS_FP_IXF 0x10
+#define ISS_FP_IDF 0x80
+#define ISS_INSN_FnV (0x01 << 10)
+#define ISS_INSN_EA (0x01 << 9)
+#define ISS_INSN_S1PTW (0x01 << 7)
+#define ISS_INSN_IFSC_MASK (0x1f << 0)
+
+#define ISS_WFx_TI_SHIFT 0
+#define ISS_WFx_TI_MASK (0x03 << ISS_WFx_TI_SHIFT)
+#define ISS_WFx_TI_WFI (0x00 << ISS_WFx_TI_SHIFT)
+#define ISS_WFx_TI_WFE (0x01 << ISS_WFx_TI_SHIFT)
+#define ISS_WFx_TI_WFIT (0x02 << ISS_WFx_TI_SHIFT)
+#define ISS_WFx_TI_WFET (0x03 << ISS_WFx_TI_SHIFT)
+#define ISS_WFx_RV_SHIFT 2
+#define ISS_WFx_RV_MASK (0x01 << ISS_WFx_RV_SHIFT)
+#define ISS_WFx_RV_INVALID (0x00 << ISS_WFx_RV_SHIFT)
+#define ISS_WFx_RV_VALID (0x01 << ISS_WFx_RV_SHIFT)
+#define ISS_WFx_RN_SHIFT 5
+#define ISS_WFx_RN_MASK (0x1f << ISS_WFx_RN_SHIFT)
+#define ISS_WFx_RN(x) (((x) & ISS_WFx_RN_MASK) >> ISS_WFx_RN_SHIFT)
+#define ISS_WFx_COND_SHIFT 20
+#define ISS_WFx_COND_MASK (0x0f << ISS_WFx_COND_SHIFT)
+#define ISS_WFx_CV_SHIFT 24
+#define ISS_WFx_CV_MASK (0x01 << ISS_WFx_CV_SHIFT)
+#define ISS_WFx_CV_INVALID (0x00 << ISS_WFx_CV_SHIFT)
+#define ISS_WFx_CV_VALID (0x01 << ISS_WFx_CV_SHIFT)
+
+#define ISS_MSR_DIR_SHIFT 0
+#define ISS_MSR_DIR (0x01 << ISS_MSR_DIR_SHIFT)
+#define ISS_MSR_Rt_SHIFT 5
+#define ISS_MSR_Rt_MASK (0x1f << ISS_MSR_Rt_SHIFT)
+#define ISS_MSR_Rt(x) (((x) & ISS_MSR_Rt_MASK) >> ISS_MSR_Rt_SHIFT)
+#define ISS_MSR_CRm_SHIFT 1
+#define ISS_MSR_CRm_MASK (0xf << ISS_MSR_CRm_SHIFT)
+#define ISS_MSR_CRm(x) (((x) & ISS_MSR_CRm_MASK) >> ISS_MSR_CRm_SHIFT)
+#define ISS_MSR_CRn_SHIFT 10
+#define ISS_MSR_CRn_MASK (0xf << ISS_MSR_CRn_SHIFT)
+#define ISS_MSR_CRn(x) (((x) & ISS_MSR_CRn_MASK) >> ISS_MSR_CRn_SHIFT)
+#define ISS_MSR_OP1_SHIFT 14
+#define ISS_MSR_OP1_MASK (0x7 << ISS_MSR_OP1_SHIFT)
+#define ISS_MSR_OP1(x) (((x) & ISS_MSR_OP1_MASK) >> ISS_MSR_OP1_SHIFT)
+#define ISS_MSR_OP2_SHIFT 17
+#define ISS_MSR_OP2_MASK (0x7 << ISS_MSR_OP2_SHIFT)
+#define ISS_MSR_OP2(x) (((x) & ISS_MSR_OP2_MASK) >> ISS_MSR_OP2_SHIFT)
+#define ISS_MSR_OP0_SHIFT 20
+#define ISS_MSR_OP0_MASK (0x3 << ISS_MSR_OP0_SHIFT)
+#define ISS_MSR_OP0(x) (((x) & ISS_MSR_OP0_MASK) >> ISS_MSR_OP0_SHIFT)
+#define ISS_MSR_REG_MASK \
+ (ISS_MSR_OP0_MASK | ISS_MSR_OP2_MASK | ISS_MSR_OP1_MASK | \
+ ISS_MSR_CRn_MASK | ISS_MSR_CRm_MASK)
+#define __ISS_MSR_REG(op0, op1, crn, crm, op2) \
+ (((op0) << ISS_MSR_OP0_SHIFT) | \
+ ((op1) << ISS_MSR_OP1_SHIFT) | \
+ ((crn) << ISS_MSR_CRn_SHIFT) | \
+ ((crm) << ISS_MSR_CRm_SHIFT) | \
+ ((op2) << ISS_MSR_OP2_SHIFT))
+#define ISS_MSR_REG(reg) \
+ __ISS_MSR_REG(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
+
+#define ISS_DATA_ISV_SHIFT 24
+#define ISS_DATA_ISV (0x01 << ISS_DATA_ISV_SHIFT)
+#define ISS_DATA_SAS_SHIFT 22
+#define ISS_DATA_SAS_MASK (0x03 << ISS_DATA_SAS_SHIFT)
+#define ISS_DATA_SSE_SHIFT 21
+#define ISS_DATA_SSE (0x01 << ISS_DATA_SSE_SHIFT)
+#define ISS_DATA_SRT_SHIFT 16
+#define ISS_DATA_SRT_MASK (0x1f << ISS_DATA_SRT_SHIFT)
+#define ISS_DATA_SF (0x01 << 15)
+#define ISS_DATA_AR (0x01 << 14)
+#define ISS_DATA_FnV (0x01 << 10)
+#define ISS_DATA_EA (0x01 << 9)
+#define ISS_DATA_CM (0x01 << 8)
+#define ISS_DATA_S1PTW (0x01 << 7)
+#define ISS_DATA_WnR_SHIFT 6
+#define ISS_DATA_WnR (0x01 << ISS_DATA_WnR_SHIFT)
+#define ISS_DATA_DFSC_MASK (0x3f << 0)
+#define ISS_DATA_DFSC_ASF_L0 (0x00 << 0)
+#define ISS_DATA_DFSC_ASF_L1 (0x01 << 0)
+#define ISS_DATA_DFSC_ASF_L2 (0x02 << 0)
+#define ISS_DATA_DFSC_ASF_L3 (0x03 << 0)
+#define ISS_DATA_DFSC_TF_L0 (0x04 << 0)
+#define ISS_DATA_DFSC_TF_L1 (0x05 << 0)
+#define ISS_DATA_DFSC_TF_L2 (0x06 << 0)
+#define ISS_DATA_DFSC_TF_L3 (0x07 << 0)
+#define ISS_DATA_DFSC_AFF_L1 (0x09 << 0)
+#define ISS_DATA_DFSC_AFF_L2 (0x0a << 0)
+#define ISS_DATA_DFSC_AFF_L3 (0x0b << 0)
+#define ISS_DATA_DFSC_PF_L1 (0x0d << 0)
+#define ISS_DATA_DFSC_PF_L2 (0x0e << 0)
+#define ISS_DATA_DFSC_PF_L3 (0x0f << 0)
+#define ISS_DATA_DFSC_EXT (0x10 << 0)
+#define ISS_DATA_DFSC_EXT_L0 (0x14 << 0)
+#define ISS_DATA_DFSC_EXT_L1 (0x15 << 0)
+#define ISS_DATA_DFSC_EXT_L2 (0x16 << 0)
+#define ISS_DATA_DFSC_EXT_L3 (0x17 << 0)
+#define ISS_DATA_DFSC_ECC (0x18 << 0)
+#define ISS_DATA_DFSC_ECC_L0 (0x1c << 0)
+#define ISS_DATA_DFSC_ECC_L1 (0x1d << 0)
+#define ISS_DATA_DFSC_ECC_L2 (0x1e << 0)
+#define ISS_DATA_DFSC_ECC_L3 (0x1f << 0)
+#define ISS_DATA_DFSC_ALIGN (0x21 << 0)
+#define ISS_DATA_DFSC_TLB_CONFLICT (0x30 << 0)
+#define ESR_ELx_IL (0x01 << 25)
+#define ESR_ELx_EC_SHIFT 26
+#define ESR_ELx_EC_MASK (0x3f << 26)
+#define ESR_ELx_EXCEPTION(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
+#define EXCP_UNKNOWN 0x00 /* Unknown exception */
+#define EXCP_TRAP_WFI_WFE 0x01 /* Trapped WFI or WFE */
+#define EXCP_FP_SIMD 0x07 /* VFP/SIMD trap */
+#define EXCP_BTI 0x0d /* Branch Target Exception */
+#define EXCP_ILL_STATE 0x0e /* Illegal execution state */
+#define EXCP_SVC32 0x11 /* SVC trap for AArch32 */
+#define EXCP_SVC64 0x15 /* SVC trap for AArch64 */
+#define EXCP_HVC 0x16 /* HVC trap */
+#define EXCP_MSR 0x18 /* MSR/MRS trap */
+#define EXCP_SVE 0x19 /* SVE trap */
+#define EXCP_FPAC 0x1c /* Faulting PAC trap */
+#define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */
+#define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */
+#define EXCP_PC_ALIGN 0x22 /* PC alignment fault */
+#define EXCP_DATA_ABORT_L 0x24 /* Data abort, from lower EL */
+#define EXCP_DATA_ABORT 0x25 /* Data abort, from same EL */
+#define EXCP_SP_ALIGN 0x26 /* SP alignment fault */
+#define EXCP_TRAP_FP 0x2c /* Trapped FP exception */
+#define EXCP_SERROR 0x2f /* SError interrupt */
+#define EXCP_BRKPT_EL0 0x30 /* Hardware breakpoint, from lower EL */
+#define EXCP_BRKPT_EL1 0x31 /* Hardware breakpoint, from same EL */
+#define EXCP_SOFTSTP_EL0 0x32 /* Software Step, from lower EL */
+#define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */
+#define EXCP_WATCHPT_EL0 0x34 /* Watchpoint, from lower EL */
+#define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */
+#define EXCP_BRKPT_32 0x38 /* 32bits breakpoint */
+#define EXCP_BRK 0x3c /* Breakpoint */
+
+/* ESR_EL1 */
+#define ESR_EL1_REG MRS_REG_ALT_NAME(ESR_EL1)
+#define ESR_EL1_op0 3
+#define ESR_EL1_op1 0
+#define ESR_EL1_CRn 5
+#define ESR_EL1_CRm 2
+#define ESR_EL1_op2 0
+
+/* ESR_EL12 */
+#define ESR_EL12_REG MRS_REG_ALT_NAME(ESR_EL12)
+#define ESR_EL12_op0 3
+#define ESR_EL12_op1 5
+#define ESR_EL12_CRn 5
+#define ESR_EL12_CRm 2
+#define ESR_EL12_op2 0
+
+/* FAR_EL1 */
+#define FAR_EL1_REG MRS_REG_ALT_NAME(FAR_EL1)
+#define FAR_EL1_op0 3
+#define FAR_EL1_op1 0
+#define FAR_EL1_CRn 6
+#define FAR_EL1_CRm 0
+#define FAR_EL1_op2 0
+
+/* FAR_EL12 */
+#define FAR_EL12_REG MRS_REG_ALT_NAME(FAR_EL12)
+#define FAR_EL12_op0 3
+#define FAR_EL12_op1 5
+#define FAR_EL12_CRn 6
+#define FAR_EL12_CRm 0
+#define FAR_EL12_op2 0
+
+/* ICC_CTLR_EL1 */
+#define ICC_CTLR_EL1_EOIMODE (1U << 1)
+
+/* ICC_IAR1_EL1 */
+#define ICC_IAR1_EL1_SPUR (0x03ff)
+
+/* ICC_IGRPEN0_EL1 */
+#define ICC_IGRPEN0_EL1_EN (1U << 0)
+
+/* ICC_PMR_EL1 */
+#define ICC_PMR_EL1_PRIO_MASK (0xFFUL)
+
+/* ICC_SGI1R_EL1 */
+#define ICC_SGI1R_EL1_op0 3
+#define ICC_SGI1R_EL1_op1 0
+#define ICC_SGI1R_EL1_CRn 12
+#define ICC_SGI1R_EL1_CRm 11
+#define ICC_SGI1R_EL1_op2 5
+#define ICC_SGI1R_EL1_TL_SHIFT 0
+#define ICC_SGI1R_EL1_TL_MASK (0xffffUL << ICC_SGI1R_EL1_TL_SHIFT)
+#define ICC_SGI1R_EL1_TL_VAL(x) ((x) & ICC_SGI1R_EL1_TL_MASK)
+#define ICC_SGI1R_EL1_AFF1_SHIFT 16
+#define ICC_SGI1R_EL1_AFF1_MASK (0xfful << ICC_SGI1R_EL1_AFF1_SHIFT)
+#define ICC_SGI1R_EL1_AFF1_VAL(x) ((x) & ICC_SGI1R_EL1_AFF1_MASK)
+#define ICC_SGI1R_EL1_SGIID_SHIFT 24
+#define ICC_SGI1R_EL1_SGIID_MASK (0xfUL << ICC_SGI1R_EL1_SGIID_SHIFT)
+#define ICC_SGI1R_EL1_SGIID_VAL(x) ((x) & ICC_SGI1R_EL1_SGIID_MASK)
+#define ICC_SGI1R_EL1_AFF2_SHIFT 32
+#define ICC_SGI1R_EL1_AFF2_MASK (0xfful << ICC_SGI1R_EL1_AFF2_SHIFT)
+#define ICC_SGI1R_EL1_AFF2_VAL(x) ((x) & ICC_SGI1R_EL1_AFF2_MASK)
+#define ICC_SGI1R_EL1_RS_SHIFT 44
+#define ICC_SGI1R_EL1_RS_MASK (0xful << ICC_SGI1R_EL1_RS_SHIFT)
+#define ICC_SGI1R_EL1_RS_VAL(x) ((x) & ICC_SGI1R_EL1_RS_MASK)
+#define ICC_SGI1R_EL1_AFF3_SHIFT 48
+#define ICC_SGI1R_EL1_AFF3_MASK (0xfful << ICC_SGI1R_EL1_AFF3_SHIFT)
+#define ICC_SGI1R_EL1_AFF3_VAL(x) ((x) & ICC_SGI1R_EL1_AFF3_MASK)
+#define ICC_SGI1R_EL1_IRM (0x1UL << 40)
+
+/* ICC_SRE_EL1 */
+#define ICC_SRE_EL1_SRE (1U << 0)
+
+/* ID_AA64AFR0_EL1 */
+#define ID_AA64AFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64AFR0_EL1)
+#define ID_AA64AFR0_EL1_ISS ISS_MSR_REG(ID_AA64AFR0_EL1)
+#define ID_AA64AFR0_EL1_op0 3
+#define ID_AA64AFR0_EL1_op1 0
+#define ID_AA64AFR0_EL1_CRn 0
+#define ID_AA64AFR0_EL1_CRm 5
+#define ID_AA64AFR0_EL1_op2 4
+
+/* ID_AA64AFR1_EL1 */
+#define ID_AA64AFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64AFR1_EL1)
+#define ID_AA64AFR1_EL1_ISS ISS_MSR_REG(ID_AA64AFR1_EL1)
+#define ID_AA64AFR1_EL1_op0 3
+#define ID_AA64AFR1_EL1_op1 0
+#define ID_AA64AFR1_EL1_CRn 0
+#define ID_AA64AFR1_EL1_CRm 5
+#define ID_AA64AFR1_EL1_op2 5
+
+/* ID_AA64DFR0_EL1 */
+#define ID_AA64DFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64DFR0_EL1)
+#define ID_AA64DFR0_EL1_ISS ISS_MSR_REG(ID_AA64DFR0_EL1)
+#define ID_AA64DFR0_EL1_op0 3
+#define ID_AA64DFR0_EL1_op1 0
+#define ID_AA64DFR0_EL1_CRn 0
+#define ID_AA64DFR0_EL1_CRm 5
+#define ID_AA64DFR0_EL1_op2 0
+#define ID_AA64DFR0_DebugVer_SHIFT 0
+#define ID_AA64DFR0_DebugVer_WIDTH 4
+#define ID_AA64DFR0_DebugVer_MASK (UL(0xf) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_VAL(x) ((x) & ID_AA64DFR0_DebugVer_MASK)
+#define ID_AA64DFR0_DebugVer_8 (UL(0x6) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_VHE (UL(0x7) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_2 (UL(0x8) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_4 (UL(0x9) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_8 (UL(0xa) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_9 (UL(0xb) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_TraceVer_SHIFT 4
+#define ID_AA64DFR0_TraceVer_WIDTH 4
+#define ID_AA64DFR0_TraceVer_MASK (UL(0xf) << ID_AA64DFR0_TraceVer_SHIFT)
+#define ID_AA64DFR0_TraceVer_VAL(x) ((x) & ID_AA64DFR0_TraceVer_MASK)
+#define ID_AA64DFR0_TraceVer_NONE (UL(0x0) << ID_AA64DFR0_TraceVer_SHIFT)
+#define ID_AA64DFR0_TraceVer_IMPL (UL(0x1) << ID_AA64DFR0_TraceVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_SHIFT 8
+#define ID_AA64DFR0_PMUVer_WIDTH 4
+#define ID_AA64DFR0_PMUVer_MASK (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_VAL(x) ((x) & ID_AA64DFR0_PMUVer_MASK)
+#define ID_AA64DFR0_PMUVer_NONE (UL(0x0) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3 (UL(0x1) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_1 (UL(0x4) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_4 (UL(0x5) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_5 (UL(0x6) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_7 (UL(0x7) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_8 (UL(0x8) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_9 (UL(0x9) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_IMPL (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_BRPs_SHIFT 12
+#define ID_AA64DFR0_BRPs_WIDTH 4
+#define ID_AA64DFR0_BRPs_MASK (UL(0xf) << ID_AA64DFR0_BRPs_SHIFT)
+#define ID_AA64DFR0_BRPs_VAL(x) \
+ ((((x) >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) + 1)
+#define ID_AA64DFR0_PMSS_SHIFT 16
+#define ID_AA64DFR0_PMSS_WIDTH 4
+#define ID_AA64DFR0_PMSS_MASK (UL(0xf) << ID_AA64DFR0_PMSS_SHIFT)
+#define ID_AA64DFR0_PMSS_VAL(x) ((x) & ID_AA64DFR0_PMSS_MASK)
+#define ID_AA64DFR0_PMSS_NONE (UL(0x0) << ID_AA64DFR0_PMSS_SHIFT)
+#define ID_AA64DFR0_PMSS_IMPL (UL(0x1) << ID_AA64DFR0_PMSS_SHIFT)
+#define ID_AA64DFR0_WRPs_SHIFT 20
+#define ID_AA64DFR0_WRPs_WIDTH 4
+#define ID_AA64DFR0_WRPs_MASK (UL(0xf) << ID_AA64DFR0_WRPs_SHIFT)
+#define ID_AA64DFR0_WRPs_VAL(x) \
+ ((((x) >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) + 1)
+#define ID_AA64DFR0_CTX_CMPs_SHIFT 28
+#define ID_AA64DFR0_CTX_CMPs_WIDTH 4
+#define ID_AA64DFR0_CTX_CMPs_MASK (UL(0xf) << ID_AA64DFR0_CTX_CMPs_SHIFT)
+#define ID_AA64DFR0_CTX_CMPs_VAL(x) \
+ ((((x) >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) + 1)
+#define ID_AA64DFR0_PMSVer_SHIFT 32
+#define ID_AA64DFR0_PMSVer_WIDTH 4
+#define ID_AA64DFR0_PMSVer_MASK (UL(0xf) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_VAL(x) ((x) & ID_AA64DFR0_PMSVer_MASK)
+#define ID_AA64DFR0_PMSVer_NONE (UL(0x0) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_SPE (UL(0x1) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_SPE_1_1 (UL(0x2) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_SPE_1_2 (UL(0x3) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_SPE_1_3 (UL(0x4) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_SPE_1_4 (UL(0x5) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_DoubleLock_SHIFT 36
+#define ID_AA64DFR0_DoubleLock_WIDTH 4
+#define ID_AA64DFR0_DoubleLock_MASK (UL(0xf) << ID_AA64DFR0_DoubleLock_SHIFT)
+#define ID_AA64DFR0_DoubleLock_VAL(x) ((x) & ID_AA64DFR0_DoubleLock_MASK)
+#define ID_AA64DFR0_DoubleLock_IMPL (UL(0x0) << ID_AA64DFR0_DoubleLock_SHIFT)
+#define ID_AA64DFR0_DoubleLock_NONE (UL(0xf) << ID_AA64DFR0_DoubleLock_SHIFT)
/*
 * ID_AA64DFR0_EL1 fields (continued).  Convention used throughout this
 * file: each field gets _SHIFT/_WIDTH/_MASK, a _VAL(x) extractor that
 * masks the field in place (not shifted down), and named constants for
 * the architecturally defined encodings.
 */
/* TraceFilt: self-hosted trace filtering (FEAT_TRF; 0x1 = Armv8.4 form) */
#define ID_AA64DFR0_TraceFilt_SHIFT 40
#define ID_AA64DFR0_TraceFilt_WIDTH 4
#define ID_AA64DFR0_TraceFilt_MASK (UL(0xf) << ID_AA64DFR0_TraceFilt_SHIFT)
#define ID_AA64DFR0_TraceFilt_VAL(x) ((x) & ID_AA64DFR0_TraceFilt_MASK)
#define ID_AA64DFR0_TraceFilt_NONE (UL(0x0) << ID_AA64DFR0_TraceFilt_SHIFT)
#define ID_AA64DFR0_TraceFilt_8_4 (UL(0x1) << ID_AA64DFR0_TraceFilt_SHIFT)
/* TraceBuffer: Trace Buffer Extension (FEAT_TRBE) */
#define ID_AA64DFR0_TraceBuffer_SHIFT 44
#define ID_AA64DFR0_TraceBuffer_WIDTH 4
#define ID_AA64DFR0_TraceBuffer_MASK (UL(0xf) << ID_AA64DFR0_TraceBuffer_SHIFT)
#define ID_AA64DFR0_TraceBuffer_VAL(x) ((x) & ID_AA64DFR0_TraceBuffer_MASK)
#define ID_AA64DFR0_TraceBuffer_NONE (UL(0x0) << ID_AA64DFR0_TraceBuffer_SHIFT)
#define ID_AA64DFR0_TraceBuffer_IMPL (UL(0x1) << ID_AA64DFR0_TraceBuffer_SHIFT)
/* MTPMU: multi-threaded PMU extension (FEAT_MTPMU) */
#define ID_AA64DFR0_MTPMU_SHIFT 48
#define ID_AA64DFR0_MTPMU_WIDTH 4
#define ID_AA64DFR0_MTPMU_MASK (UL(0xf) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_MTPMU_VAL(x) ((x) & ID_AA64DFR0_MTPMU_MASK)
#define ID_AA64DFR0_MTPMU_NONE (UL(0x0) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_MTPMU_IMPL (UL(0x1) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_MTPMU_NONE_MT_RES0 (UL(0xf) << ID_AA64DFR0_MTPMU_SHIFT)
/* BRBE: Branch Record Buffer Extension (FEAT_BRBE) */
#define ID_AA64DFR0_BRBE_SHIFT 52
#define ID_AA64DFR0_BRBE_WIDTH 4
#define ID_AA64DFR0_BRBE_MASK (UL(0xf) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_BRBE_VAL(x) ((x) & ID_AA64DFR0_BRBE_MASK)
#define ID_AA64DFR0_BRBE_NONE (UL(0x0) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_BRBE_IMPL (UL(0x1) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_BRBE_EL3 (UL(0x2) << ID_AA64DFR0_BRBE_SHIFT)
/* HPMN0: whether MDCR_EL2.HPMN == 0 is defined behavior (FEAT_HPMN0) */
#define ID_AA64DFR0_HPMN0_SHIFT 60
#define ID_AA64DFR0_HPMN0_WIDTH 4
#define ID_AA64DFR0_HPMN0_MASK (UL(0xf) << ID_AA64DFR0_HPMN0_SHIFT)
#define ID_AA64DFR0_HPMN0_VAL(x) ((x) & ID_AA64DFR0_HPMN0_MASK)
#define ID_AA64DFR0_HPMN0_CONSTR (UL(0x0) << ID_AA64DFR0_HPMN0_SHIFT)
#define ID_AA64DFR0_HPMN0_DEFINED (UL(0x1) << ID_AA64DFR0_HPMN0_SHIFT)
+
/* ID_AA64DFR1_EL1 - AArch64 Debug Feature Register 1 */
#define ID_AA64DFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64DFR1_EL1)
#define ID_AA64DFR1_EL1_ISS ISS_MSR_REG(ID_AA64DFR1_EL1)
#define ID_AA64DFR1_EL1_op0 3
#define ID_AA64DFR1_EL1_op1 0
#define ID_AA64DFR1_EL1_CRn 0
#define ID_AA64DFR1_EL1_CRm 5
#define ID_AA64DFR1_EL1_op2 1
/* SPMU: System Performance Monitors extension (FEAT_SPMU) */
#define ID_AA64DFR1_SPMU_SHIFT 32
#define ID_AA64DFR1_SPMU_WIDTH 4
#define ID_AA64DFR1_SPMU_MASK (UL(0xf) << ID_AA64DFR1_SPMU_SHIFT)
#define ID_AA64DFR1_SPMU_VAL(x) ((x) & ID_AA64DFR1_SPMU_MASK)
#define ID_AA64DFR1_SPMU_NONE (UL(0x0) << ID_AA64DFR1_SPMU_SHIFT)
#define ID_AA64DFR1_SPMU_IMPL (UL(0x1) << ID_AA64DFR1_SPMU_SHIFT)
/* PMICNTR: PMU dedicated instruction counter (FEAT_PMUv3_ICNTR) */
#define ID_AA64DFR1_PMICNTR_SHIFT 36
#define ID_AA64DFR1_PMICNTR_WIDTH 4
#define ID_AA64DFR1_PMICNTR_MASK (UL(0xf) << ID_AA64DFR1_PMICNTR_SHIFT)
#define ID_AA64DFR1_PMICNTR_VAL(x) ((x) & ID_AA64DFR1_PMICNTR_MASK)
#define ID_AA64DFR1_PMICNTR_NONE (UL(0x0) << ID_AA64DFR1_PMICNTR_SHIFT)
#define ID_AA64DFR1_PMICNTR_IMPL (UL(0x1) << ID_AA64DFR1_PMICNTR_SHIFT)
/* DPFZS: cycle-counter freeze behavior on SPE freeze — see Arm ARM for detail */
#define ID_AA64DFR1_DPFZS_SHIFT 52
#define ID_AA64DFR1_DPFZS_WIDTH 4
#define ID_AA64DFR1_DPFZS_MASK (UL(0xf) << ID_AA64DFR1_DPFZS_SHIFT)
#define ID_AA64DFR1_DPFZS_VAL(x) ((x) & ID_AA64DFR1_DPFZS_MASK)
#define ID_AA64DFR1_DPFZS_NONE (UL(0x0) << ID_AA64DFR1_DPFZS_SHIFT)
#define ID_AA64DFR1_DPFZS_IMPL (UL(0x1) << ID_AA64DFR1_DPFZS_SHIFT)
+
/*
 * ID_AA64ISAR0_EL1 - AArch64 Instruction Set Attribute Register 0.
 *
 * Crypto, atomics, CRC and related instruction-set feature fields.
 * Per file convention each field has _SHIFT/_WIDTH/_MASK, a _VAL(x)
 * extractor (masks in place, does not shift down), and named constants
 * for the architecturally defined encodings.
 */
#define ID_AA64ISAR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64ISAR0_EL1)
#define ID_AA64ISAR0_EL1_ISS ISS_MSR_REG(ID_AA64ISAR0_EL1)
#define ID_AA64ISAR0_EL1_op0 3
#define ID_AA64ISAR0_EL1_op1 0
#define ID_AA64ISAR0_EL1_CRn 0
#define ID_AA64ISAR0_EL1_CRm 6
#define ID_AA64ISAR0_EL1_op2 0
/* AES: AESE/AESD instructions; PMULL adds the 64x64 polynomial multiply */
#define ID_AA64ISAR0_AES_SHIFT 4
#define ID_AA64ISAR0_AES_WIDTH 4
#define ID_AA64ISAR0_AES_MASK (UL(0xf) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_AES_VAL(x) ((x) & ID_AA64ISAR0_AES_MASK)
#define ID_AA64ISAR0_AES_NONE (UL(0x0) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_AES_BASE (UL(0x1) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_AES_PMULL (UL(0x2) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_SHA1_SHIFT 8
#define ID_AA64ISAR0_SHA1_WIDTH 4
#define ID_AA64ISAR0_SHA1_MASK (UL(0xf) << ID_AA64ISAR0_SHA1_SHIFT)
#define ID_AA64ISAR0_SHA1_VAL(x) ((x) & ID_AA64ISAR0_SHA1_MASK)
#define ID_AA64ISAR0_SHA1_NONE (UL(0x0) << ID_AA64ISAR0_SHA1_SHIFT)
#define ID_AA64ISAR0_SHA1_BASE (UL(0x1) << ID_AA64ISAR0_SHA1_SHIFT)
/* SHA2: SHA-256 instructions; _512 adds SHA-512 (FEAT_SHA512) */
#define ID_AA64ISAR0_SHA2_SHIFT 12
#define ID_AA64ISAR0_SHA2_WIDTH 4
#define ID_AA64ISAR0_SHA2_MASK (UL(0xf) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_SHA2_VAL(x) ((x) & ID_AA64ISAR0_SHA2_MASK)
#define ID_AA64ISAR0_SHA2_NONE (UL(0x0) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_SHA2_BASE (UL(0x1) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_SHA2_512 (UL(0x2) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_CRC32_SHIFT 16
#define ID_AA64ISAR0_CRC32_WIDTH 4
#define ID_AA64ISAR0_CRC32_MASK (UL(0xf) << ID_AA64ISAR0_CRC32_SHIFT)
#define ID_AA64ISAR0_CRC32_VAL(x) ((x) & ID_AA64ISAR0_CRC32_MASK)
#define ID_AA64ISAR0_CRC32_NONE (UL(0x0) << ID_AA64ISAR0_CRC32_SHIFT)
#define ID_AA64ISAR0_CRC32_BASE (UL(0x1) << ID_AA64ISAR0_CRC32_SHIFT)
/* Atomic: LSE atomic instructions (FEAT_LSE); 0x2 is the defined "present" value */
#define ID_AA64ISAR0_Atomic_SHIFT 20
#define ID_AA64ISAR0_Atomic_WIDTH 4
#define ID_AA64ISAR0_Atomic_MASK (UL(0xf) << ID_AA64ISAR0_Atomic_SHIFT)
#define ID_AA64ISAR0_Atomic_VAL(x) ((x) & ID_AA64ISAR0_Atomic_MASK)
#define ID_AA64ISAR0_Atomic_NONE (UL(0x0) << ID_AA64ISAR0_Atomic_SHIFT)
#define ID_AA64ISAR0_Atomic_IMPL (UL(0x2) << ID_AA64ISAR0_Atomic_SHIFT)
/* TME: Transactional Memory Extension (FEAT_TME) */
#define ID_AA64ISAR0_TME_SHIFT 24
#define ID_AA64ISAR0_TME_WIDTH 4
#define ID_AA64ISAR0_TME_MASK (UL(0xf) << ID_AA64ISAR0_TME_SHIFT)
/* _VAL(x) extractor added: it was missing here, unlike every other field. */
#define ID_AA64ISAR0_TME_VAL(x) ((x) & ID_AA64ISAR0_TME_MASK)
#define ID_AA64ISAR0_TME_NONE (UL(0x0) << ID_AA64ISAR0_TME_SHIFT)
#define ID_AA64ISAR0_TME_IMPL (UL(0x1) << ID_AA64ISAR0_TME_SHIFT)
#define ID_AA64ISAR0_RDM_SHIFT 28
#define ID_AA64ISAR0_RDM_WIDTH 4
#define ID_AA64ISAR0_RDM_MASK (UL(0xf) << ID_AA64ISAR0_RDM_SHIFT)
#define ID_AA64ISAR0_RDM_VAL(x) ((x) & ID_AA64ISAR0_RDM_MASK)
#define ID_AA64ISAR0_RDM_NONE (UL(0x0) << ID_AA64ISAR0_RDM_SHIFT)
#define ID_AA64ISAR0_RDM_IMPL (UL(0x1) << ID_AA64ISAR0_RDM_SHIFT)
#define ID_AA64ISAR0_SHA3_SHIFT 32
#define ID_AA64ISAR0_SHA3_WIDTH 4
#define ID_AA64ISAR0_SHA3_MASK (UL(0xf) << ID_AA64ISAR0_SHA3_SHIFT)
#define ID_AA64ISAR0_SHA3_VAL(x) ((x) & ID_AA64ISAR0_SHA3_MASK)
#define ID_AA64ISAR0_SHA3_NONE (UL(0x0) << ID_AA64ISAR0_SHA3_SHIFT)
#define ID_AA64ISAR0_SHA3_IMPL (UL(0x1) << ID_AA64ISAR0_SHA3_SHIFT)
#define ID_AA64ISAR0_SM3_SHIFT 36
#define ID_AA64ISAR0_SM3_WIDTH 4
#define ID_AA64ISAR0_SM3_MASK (UL(0xf) << ID_AA64ISAR0_SM3_SHIFT)
#define ID_AA64ISAR0_SM3_VAL(x) ((x) & ID_AA64ISAR0_SM3_MASK)
#define ID_AA64ISAR0_SM3_NONE (UL(0x0) << ID_AA64ISAR0_SM3_SHIFT)
#define ID_AA64ISAR0_SM3_IMPL (UL(0x1) << ID_AA64ISAR0_SM3_SHIFT)
#define ID_AA64ISAR0_SM4_SHIFT 40
#define ID_AA64ISAR0_SM4_WIDTH 4
#define ID_AA64ISAR0_SM4_MASK (UL(0xf) << ID_AA64ISAR0_SM4_SHIFT)
#define ID_AA64ISAR0_SM4_VAL(x) ((x) & ID_AA64ISAR0_SM4_MASK)
#define ID_AA64ISAR0_SM4_NONE (UL(0x0) << ID_AA64ISAR0_SM4_SHIFT)
#define ID_AA64ISAR0_SM4_IMPL (UL(0x1) << ID_AA64ISAR0_SM4_SHIFT)
/* DP: UDOT/SDOT dot product instructions (FEAT_DotProd) */
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_DP_WIDTH 4
#define ID_AA64ISAR0_DP_MASK (UL(0xf) << ID_AA64ISAR0_DP_SHIFT)
#define ID_AA64ISAR0_DP_VAL(x) ((x) & ID_AA64ISAR0_DP_MASK)
#define ID_AA64ISAR0_DP_NONE (UL(0x0) << ID_AA64ISAR0_DP_SHIFT)
#define ID_AA64ISAR0_DP_IMPL (UL(0x1) << ID_AA64ISAR0_DP_SHIFT)
#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_FHM_WIDTH 4
#define ID_AA64ISAR0_FHM_MASK (UL(0xf) << ID_AA64ISAR0_FHM_SHIFT)
#define ID_AA64ISAR0_FHM_VAL(x) ((x) & ID_AA64ISAR0_FHM_MASK)
#define ID_AA64ISAR0_FHM_NONE (UL(0x0) << ID_AA64ISAR0_FHM_SHIFT)
#define ID_AA64ISAR0_FHM_IMPL (UL(0x1) << ID_AA64ISAR0_FHM_SHIFT)
/* TS: condition-flag manipulation (CFINV/RMIF..., then AXFLAG/XAFLAG) */
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_TS_WIDTH 4
#define ID_AA64ISAR0_TS_MASK (UL(0xf) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TS_VAL(x) ((x) & ID_AA64ISAR0_TS_MASK)
#define ID_AA64ISAR0_TS_NONE (UL(0x0) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TS_CondM_8_4 (UL(0x1) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TS_CondM_8_5 (UL(0x2) << ID_AA64ISAR0_TS_SHIFT)
/* TLB: outer-shareable (and range, _TLBIOSR) TLB maintenance instructions */
#define ID_AA64ISAR0_TLB_SHIFT 56
#define ID_AA64ISAR0_TLB_WIDTH 4
#define ID_AA64ISAR0_TLB_MASK (UL(0xf) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_TLB_VAL(x) ((x) & ID_AA64ISAR0_TLB_MASK)
#define ID_AA64ISAR0_TLB_NONE (UL(0x0) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_TLB_TLBIOS (UL(0x1) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_TLB_TLBIOSR (UL(0x2) << ID_AA64ISAR0_TLB_SHIFT)
/* RNDR: RNDR/RNDRRS random-number registers (FEAT_RNG) */
#define ID_AA64ISAR0_RNDR_SHIFT 60
#define ID_AA64ISAR0_RNDR_WIDTH 4
#define ID_AA64ISAR0_RNDR_MASK (UL(0xf) << ID_AA64ISAR0_RNDR_SHIFT)
#define ID_AA64ISAR0_RNDR_VAL(x) ((x) & ID_AA64ISAR0_RNDR_MASK)
#define ID_AA64ISAR0_RNDR_NONE (UL(0x0) << ID_AA64ISAR0_RNDR_SHIFT)
#define ID_AA64ISAR0_RNDR_IMPL (UL(0x1) << ID_AA64ISAR0_RNDR_SHIFT)
+
/* ID_AA64ISAR1_EL1 - AArch64 Instruction Set Attribute Register 1 */
#define ID_AA64ISAR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64ISAR1_EL1)
#define ID_AA64ISAR1_EL1_ISS ISS_MSR_REG(ID_AA64ISAR1_EL1)
#define ID_AA64ISAR1_EL1_op0 3
#define ID_AA64ISAR1_EL1_op1 0
#define ID_AA64ISAR1_EL1_CRn 0
#define ID_AA64ISAR1_EL1_CRm 6
#define ID_AA64ISAR1_EL1_op2 1
/* DPB: DC CVAP clean-to-persistence; DCCVADP adds DC CVADP (FEAT_DPB/DPB2) */
#define ID_AA64ISAR1_DPB_SHIFT 0
#define ID_AA64ISAR1_DPB_WIDTH 4
#define ID_AA64ISAR1_DPB_MASK (UL(0xf) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_DPB_VAL(x) ((x) & ID_AA64ISAR1_DPB_MASK)
#define ID_AA64ISAR1_DPB_NONE (UL(0x0) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_DPB_DCCVAP (UL(0x1) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_DPB_DCCVADP (UL(0x2) << ID_AA64ISAR1_DPB_SHIFT)
/* APA: address pointer authentication, architected (QARMA) algorithm */
#define ID_AA64ISAR1_APA_SHIFT 4
#define ID_AA64ISAR1_APA_WIDTH 4
#define ID_AA64ISAR1_APA_MASK (UL(0xf) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_VAL(x) ((x) & ID_AA64ISAR1_APA_MASK)
#define ID_AA64ISAR1_APA_NONE (UL(0x0) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_PAC (UL(0x1) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_EPAC (UL(0x2) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_EPAC2 (UL(0x3) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_FPAC (UL(0x4) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_FPAC_COMBINED (UL(0x5) << ID_AA64ISAR1_APA_SHIFT)
/* API: address pointer authentication, IMPLEMENTATION DEFINED algorithm */
#define ID_AA64ISAR1_API_SHIFT 8
#define ID_AA64ISAR1_API_WIDTH 4
#define ID_AA64ISAR1_API_MASK (UL(0xf) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_VAL(x) ((x) & ID_AA64ISAR1_API_MASK)
#define ID_AA64ISAR1_API_NONE (UL(0x0) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_PAC (UL(0x1) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_EPAC (UL(0x2) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_EPAC2 (UL(0x3) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_FPAC (UL(0x4) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_FPAC_COMBINED (UL(0x5) << ID_AA64ISAR1_API_SHIFT)
/* JSCVT: FJCVTZS JavaScript conversion instruction (FEAT_JSCVT) */
#define ID_AA64ISAR1_JSCVT_SHIFT 12
#define ID_AA64ISAR1_JSCVT_WIDTH 4
#define ID_AA64ISAR1_JSCVT_MASK (UL(0xf) << ID_AA64ISAR1_JSCVT_SHIFT)
#define ID_AA64ISAR1_JSCVT_VAL(x) ((x) & ID_AA64ISAR1_JSCVT_MASK)
#define ID_AA64ISAR1_JSCVT_NONE (UL(0x0) << ID_AA64ISAR1_JSCVT_SHIFT)
#define ID_AA64ISAR1_JSCVT_IMPL (UL(0x1) << ID_AA64ISAR1_JSCVT_SHIFT)
/* FCMA: complex-number FCMLA/FCADD instructions (FEAT_FCMA) */
#define ID_AA64ISAR1_FCMA_SHIFT 16
#define ID_AA64ISAR1_FCMA_WIDTH 4
#define ID_AA64ISAR1_FCMA_MASK (UL(0xf) << ID_AA64ISAR1_FCMA_SHIFT)
#define ID_AA64ISAR1_FCMA_VAL(x) ((x) & ID_AA64ISAR1_FCMA_MASK)
#define ID_AA64ISAR1_FCMA_NONE (UL(0x0) << ID_AA64ISAR1_FCMA_SHIFT)
#define ID_AA64ISAR1_FCMA_IMPL (UL(0x1) << ID_AA64ISAR1_FCMA_SHIFT)
/* LRCPC: load-acquire RCpc instructions (FEAT_LRCPC / LRCPC2) */
#define ID_AA64ISAR1_LRCPC_SHIFT 20
#define ID_AA64ISAR1_LRCPC_WIDTH 4
#define ID_AA64ISAR1_LRCPC_MASK (UL(0xf) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_LRCPC_VAL(x) ((x) & ID_AA64ISAR1_LRCPC_MASK)
#define ID_AA64ISAR1_LRCPC_NONE (UL(0x0) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_LRCPC_RCPC_8_3 (UL(0x1) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_LRCPC_RCPC_8_4 (UL(0x2) << ID_AA64ISAR1_LRCPC_SHIFT)
/* GPA: generic (PACGA) authentication, architected algorithm */
#define ID_AA64ISAR1_GPA_SHIFT 24
#define ID_AA64ISAR1_GPA_WIDTH 4
#define ID_AA64ISAR1_GPA_MASK (UL(0xf) << ID_AA64ISAR1_GPA_SHIFT)
#define ID_AA64ISAR1_GPA_VAL(x) ((x) & ID_AA64ISAR1_GPA_MASK)
#define ID_AA64ISAR1_GPA_NONE (UL(0x0) << ID_AA64ISAR1_GPA_SHIFT)
#define ID_AA64ISAR1_GPA_IMPL (UL(0x1) << ID_AA64ISAR1_GPA_SHIFT)
/* GPI: generic (PACGA) authentication, IMPLEMENTATION DEFINED algorithm */
#define ID_AA64ISAR1_GPI_SHIFT 28
#define ID_AA64ISAR1_GPI_WIDTH 4
#define ID_AA64ISAR1_GPI_MASK (UL(0xf) << ID_AA64ISAR1_GPI_SHIFT)
#define ID_AA64ISAR1_GPI_VAL(x) ((x) & ID_AA64ISAR1_GPI_MASK)
#define ID_AA64ISAR1_GPI_NONE (UL(0x0) << ID_AA64ISAR1_GPI_SHIFT)
#define ID_AA64ISAR1_GPI_IMPL (UL(0x1) << ID_AA64ISAR1_GPI_SHIFT)
/* FRINTTS: FRINT32x/FRINT64x instructions (FEAT_FRINTTS) */
#define ID_AA64ISAR1_FRINTTS_SHIFT 32
#define ID_AA64ISAR1_FRINTTS_WIDTH 4
#define ID_AA64ISAR1_FRINTTS_MASK (UL(0xf) << ID_AA64ISAR1_FRINTTS_SHIFT)
#define ID_AA64ISAR1_FRINTTS_VAL(x) ((x) & ID_AA64ISAR1_FRINTTS_MASK)
#define ID_AA64ISAR1_FRINTTS_NONE (UL(0x0) << ID_AA64ISAR1_FRINTTS_SHIFT)
#define ID_AA64ISAR1_FRINTTS_IMPL (UL(0x1) << ID_AA64ISAR1_FRINTTS_SHIFT)
/* SB: speculation barrier instruction (FEAT_SB) */
#define ID_AA64ISAR1_SB_SHIFT 36
#define ID_AA64ISAR1_SB_WIDTH 4
#define ID_AA64ISAR1_SB_MASK (UL(0xf) << ID_AA64ISAR1_SB_SHIFT)
#define ID_AA64ISAR1_SB_VAL(x) ((x) & ID_AA64ISAR1_SB_MASK)
#define ID_AA64ISAR1_SB_NONE (UL(0x0) << ID_AA64ISAR1_SB_SHIFT)
#define ID_AA64ISAR1_SB_IMPL (UL(0x1) << ID_AA64ISAR1_SB_SHIFT)
/* SPECRES: prediction invalidation (CFP/DVP/CPP, later COSP) instructions */
#define ID_AA64ISAR1_SPECRES_SHIFT 40
#define ID_AA64ISAR1_SPECRES_WIDTH 4
#define ID_AA64ISAR1_SPECRES_MASK (UL(0xf) << ID_AA64ISAR1_SPECRES_SHIFT)
#define ID_AA64ISAR1_SPECRES_VAL(x) ((x) & ID_AA64ISAR1_SPECRES_MASK)
#define ID_AA64ISAR1_SPECRES_NONE (UL(0x0) << ID_AA64ISAR1_SPECRES_SHIFT)
#define ID_AA64ISAR1_SPECRES_8_5 (UL(0x1) << ID_AA64ISAR1_SPECRES_SHIFT)
#define ID_AA64ISAR1_SPECRES_8_9 (UL(0x2) << ID_AA64ISAR1_SPECRES_SHIFT)
/* BF16: BFloat16 instructions (FEAT_BF16, extended by FEAT_EBF16) */
#define ID_AA64ISAR1_BF16_SHIFT 44
#define ID_AA64ISAR1_BF16_WIDTH 4
#define ID_AA64ISAR1_BF16_MASK (UL(0xf) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_BF16_VAL(x) ((x) & ID_AA64ISAR1_BF16_MASK)
#define ID_AA64ISAR1_BF16_NONE (UL(0x0) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_BF16_IMPL (UL(0x1) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_BF16_EBF (UL(0x2) << ID_AA64ISAR1_BF16_SHIFT)
/* DGH: Data Gathering Hint instruction (FEAT_DGH) */
#define ID_AA64ISAR1_DGH_SHIFT 48
#define ID_AA64ISAR1_DGH_WIDTH 4
#define ID_AA64ISAR1_DGH_MASK (UL(0xf) << ID_AA64ISAR1_DGH_SHIFT)
#define ID_AA64ISAR1_DGH_VAL(x) ((x) & ID_AA64ISAR1_DGH_MASK)
#define ID_AA64ISAR1_DGH_NONE (UL(0x0) << ID_AA64ISAR1_DGH_SHIFT)
#define ID_AA64ISAR1_DGH_IMPL (UL(0x1) << ID_AA64ISAR1_DGH_SHIFT)
/* I8MM: int8 matrix multiply instructions (FEAT_I8MM) */
#define ID_AA64ISAR1_I8MM_SHIFT 52
#define ID_AA64ISAR1_I8MM_WIDTH 4
#define ID_AA64ISAR1_I8MM_MASK (UL(0xf) << ID_AA64ISAR1_I8MM_SHIFT)
#define ID_AA64ISAR1_I8MM_VAL(x) ((x) & ID_AA64ISAR1_I8MM_MASK)
#define ID_AA64ISAR1_I8MM_NONE (UL(0x0) << ID_AA64ISAR1_I8MM_SHIFT)
#define ID_AA64ISAR1_I8MM_IMPL (UL(0x1) << ID_AA64ISAR1_I8MM_SHIFT)
/* XS: XS memory attribute / TLBI ...nXS variants (FEAT_XS) */
#define ID_AA64ISAR1_XS_SHIFT 56
#define ID_AA64ISAR1_XS_WIDTH 4
#define ID_AA64ISAR1_XS_MASK (UL(0xf) << ID_AA64ISAR1_XS_SHIFT)
#define ID_AA64ISAR1_XS_VAL(x) ((x) & ID_AA64ISAR1_XS_MASK)
#define ID_AA64ISAR1_XS_NONE (UL(0x0) << ID_AA64ISAR1_XS_SHIFT)
#define ID_AA64ISAR1_XS_IMPL (UL(0x1) << ID_AA64ISAR1_XS_SHIFT)
/* LS64: single-copy-atomic 64-byte load/store (plus _V, _ACCDATA variants) */
#define ID_AA64ISAR1_LS64_SHIFT 60
#define ID_AA64ISAR1_LS64_WIDTH 4
#define ID_AA64ISAR1_LS64_MASK (UL(0xf) << ID_AA64ISAR1_LS64_SHIFT)
#define ID_AA64ISAR1_LS64_VAL(x) ((x) & ID_AA64ISAR1_LS64_MASK)
#define ID_AA64ISAR1_LS64_NONE (UL(0x0) << ID_AA64ISAR1_LS64_SHIFT)
#define ID_AA64ISAR1_LS64_IMPL (UL(0x1) << ID_AA64ISAR1_LS64_SHIFT)
#define ID_AA64ISAR1_LS64_V (UL(0x2) << ID_AA64ISAR1_LS64_SHIFT)
#define ID_AA64ISAR1_LS64_ACCDATA (UL(0x3) << ID_AA64ISAR1_LS64_SHIFT)
/*
 * ID_AA64ISAR2_EL1 - AArch64 Instruction Set Attribute Register 2.
 * Bit ranges not defined below (e.g. 32-39, 44-47, 56-59) are reserved.
 */
#define ID_AA64ISAR2_EL1_REG MRS_REG_ALT_NAME(ID_AA64ISAR2_EL1)
#define ID_AA64ISAR2_EL1_ISS ISS_MSR_REG(ID_AA64ISAR2_EL1)
#define ID_AA64ISAR2_EL1_op0 3
#define ID_AA64ISAR2_EL1_op1 0
#define ID_AA64ISAR2_EL1_CRn 0
#define ID_AA64ISAR2_EL1_CRm 6
#define ID_AA64ISAR2_EL1_op2 2
/* WFxT: WFET/WFIT wait-with-timeout instructions (FEAT_WFxT) */
#define ID_AA64ISAR2_WFxT_SHIFT 0
#define ID_AA64ISAR2_WFxT_WIDTH 4
#define ID_AA64ISAR2_WFxT_MASK (UL(0xf) << ID_AA64ISAR2_WFxT_SHIFT)
#define ID_AA64ISAR2_WFxT_VAL(x) ((x) & ID_AA64ISAR2_WFxT_MASK)
#define ID_AA64ISAR2_WFxT_NONE (UL(0x0) << ID_AA64ISAR2_WFxT_SHIFT)
#define ID_AA64ISAR2_WFxT_IMPL (UL(0x2) << ID_AA64ISAR2_WFxT_SHIFT)
/* RPRES: increased-precision reciprocal estimate (FEAT_RPRES) */
#define ID_AA64ISAR2_RPRES_SHIFT 4
#define ID_AA64ISAR2_RPRES_WIDTH 4
#define ID_AA64ISAR2_RPRES_MASK (UL(0xf) << ID_AA64ISAR2_RPRES_SHIFT)
#define ID_AA64ISAR2_RPRES_VAL(x) ((x) & ID_AA64ISAR2_RPRES_MASK)
#define ID_AA64ISAR2_RPRES_NONE (UL(0x0) << ID_AA64ISAR2_RPRES_SHIFT)
#define ID_AA64ISAR2_RPRES_IMPL (UL(0x1) << ID_AA64ISAR2_RPRES_SHIFT)
/* GPA3: generic authentication using the QARMA3 algorithm (FEAT_PACQARMA3) */
#define ID_AA64ISAR2_GPA3_SHIFT 8
#define ID_AA64ISAR2_GPA3_WIDTH 4
#define ID_AA64ISAR2_GPA3_MASK (UL(0xf) << ID_AA64ISAR2_GPA3_SHIFT)
#define ID_AA64ISAR2_GPA3_VAL(x) ((x) & ID_AA64ISAR2_GPA3_MASK)
#define ID_AA64ISAR2_GPA3_NONE (UL(0x0) << ID_AA64ISAR2_GPA3_SHIFT)
#define ID_AA64ISAR2_GPA3_IMPL (UL(0x1) << ID_AA64ISAR2_GPA3_SHIFT)
/* APA3: address authentication using QARMA3; values parallel ISAR1 APA/API */
#define ID_AA64ISAR2_APA3_SHIFT 12
#define ID_AA64ISAR2_APA3_WIDTH 4
#define ID_AA64ISAR2_APA3_MASK (UL(0xf) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_VAL(x) ((x) & ID_AA64ISAR2_APA3_MASK)
#define ID_AA64ISAR2_APA3_NONE (UL(0x0) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_PAC (UL(0x1) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_EPAC (UL(0x2) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_EPAC2 (UL(0x3) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_FPAC (UL(0x4) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_FPAC_COMBINED (UL(0x5) << ID_AA64ISAR2_APA3_SHIFT)
/* MOPS: memory copy/set (CPYx/SETx) instructions (FEAT_MOPS) */
#define ID_AA64ISAR2_MOPS_SHIFT 16
#define ID_AA64ISAR2_MOPS_WIDTH 4
#define ID_AA64ISAR2_MOPS_MASK (UL(0xf) << ID_AA64ISAR2_MOPS_SHIFT)
#define ID_AA64ISAR2_MOPS_VAL(x) ((x) & ID_AA64ISAR2_MOPS_MASK)
#define ID_AA64ISAR2_MOPS_NONE (UL(0x0) << ID_AA64ISAR2_MOPS_SHIFT)
#define ID_AA64ISAR2_MOPS_IMPL (UL(0x1) << ID_AA64ISAR2_MOPS_SHIFT)
/* BC: BC.cond branch-consistent instruction (FEAT_HBC) */
#define ID_AA64ISAR2_BC_SHIFT 20
#define ID_AA64ISAR2_BC_WIDTH 4
#define ID_AA64ISAR2_BC_MASK (UL(0xf) << ID_AA64ISAR2_BC_SHIFT)
#define ID_AA64ISAR2_BC_VAL(x) ((x) & ID_AA64ISAR2_BC_MASK)
#define ID_AA64ISAR2_BC_NONE (UL(0x0) << ID_AA64ISAR2_BC_SHIFT)
#define ID_AA64ISAR2_BC_IMPL (UL(0x1) << ID_AA64ISAR2_BC_SHIFT)
/* PAC_frac: ConstPACField() behavior (FEAT_CONSTPACFIELD) */
#define ID_AA64ISAR2_PAC_frac_SHIFT 24
#define ID_AA64ISAR2_PAC_frac_WIDTH 4
#define ID_AA64ISAR2_PAC_frac_MASK (UL(0xf) << ID_AA64ISAR2_PAC_frac_SHIFT)
#define ID_AA64ISAR2_PAC_frac_VAL(x) ((x) & ID_AA64ISAR2_PAC_frac_MASK)
#define ID_AA64ISAR2_PAC_frac_NONE (UL(0x0) << ID_AA64ISAR2_PAC_frac_SHIFT)
#define ID_AA64ISAR2_PAC_frac_IMPL (UL(0x1) << ID_AA64ISAR2_PAC_frac_SHIFT)
/* CLRBHB: CLRBHB branch-history clearing instruction (FEAT_CLRBHB) */
#define ID_AA64ISAR2_CLRBHB_SHIFT 28
#define ID_AA64ISAR2_CLRBHB_WIDTH 4
#define ID_AA64ISAR2_CLRBHB_MASK (UL(0xf) << ID_AA64ISAR2_CLRBHB_SHIFT)
#define ID_AA64ISAR2_CLRBHB_VAL(x) ((x) & ID_AA64ISAR2_CLRBHB_MASK)
#define ID_AA64ISAR2_CLRBHB_NONE (UL(0x0) << ID_AA64ISAR2_CLRBHB_SHIFT)
#define ID_AA64ISAR2_CLRBHB_IMPL (UL(0x1) << ID_AA64ISAR2_CLRBHB_SHIFT)
/* PRFMSLC: PRFM can target the system-level cache — see Arm ARM */
#define ID_AA64ISAR2_PRFMSLC_SHIFT 40
#define ID_AA64ISAR2_PRFMSLC_WIDTH 4
#define ID_AA64ISAR2_PRFMSLC_MASK (UL(0xf) << ID_AA64ISAR2_PRFMSLC_SHIFT)
#define ID_AA64ISAR2_PRFMSLC_VAL(x) ((x) & ID_AA64ISAR2_PRFMSLC_MASK)
#define ID_AA64ISAR2_PRFMSLC_NONE (UL(0x0) << ID_AA64ISAR2_PRFMSLC_SHIFT)
#define ID_AA64ISAR2_PRFMSLC_IMPL (UL(0x1) << ID_AA64ISAR2_PRFMSLC_SHIFT)
/* RPRFM: RPRFM range prefetch hint (FEAT_RPRFM) */
#define ID_AA64ISAR2_RPRFM_SHIFT 48
#define ID_AA64ISAR2_RPRFM_WIDTH 4
#define ID_AA64ISAR2_RPRFM_MASK (UL(0xf) << ID_AA64ISAR2_RPRFM_SHIFT)
#define ID_AA64ISAR2_RPRFM_VAL(x) ((x) & ID_AA64ISAR2_RPRFM_MASK)
#define ID_AA64ISAR2_RPRFM_NONE (UL(0x0) << ID_AA64ISAR2_RPRFM_SHIFT)
#define ID_AA64ISAR2_RPRFM_IMPL (UL(0x1) << ID_AA64ISAR2_RPRFM_SHIFT)
/* CSSC: common short sequence compression instructions (FEAT_CSSC) */
#define ID_AA64ISAR2_CSSC_SHIFT 52
#define ID_AA64ISAR2_CSSC_WIDTH 4
#define ID_AA64ISAR2_CSSC_MASK (UL(0xf) << ID_AA64ISAR2_CSSC_SHIFT)
#define ID_AA64ISAR2_CSSC_VAL(x) ((x) & ID_AA64ISAR2_CSSC_MASK)
#define ID_AA64ISAR2_CSSC_NONE (UL(0x0) << ID_AA64ISAR2_CSSC_SHIFT)
#define ID_AA64ISAR2_CSSC_IMPL (UL(0x1) << ID_AA64ISAR2_CSSC_SHIFT)
/* ATS1A: AT S1 address-translation without permission check (FEAT_ATS1A) */
#define ID_AA64ISAR2_ATS1A_SHIFT 60
#define ID_AA64ISAR2_ATS1A_WIDTH 4
#define ID_AA64ISAR2_ATS1A_MASK (UL(0xf) << ID_AA64ISAR2_ATS1A_SHIFT)
#define ID_AA64ISAR2_ATS1A_VAL(x) ((x) & ID_AA64ISAR2_ATS1A_MASK)
#define ID_AA64ISAR2_ATS1A_NONE (UL(0x0) << ID_AA64ISAR2_ATS1A_SHIFT)
#define ID_AA64ISAR2_ATS1A_IMPL (UL(0x1) << ID_AA64ISAR2_ATS1A_SHIFT)
+
/* ID_AA64MMFR0_EL1 - AArch64 Memory Model Feature Register 0 */
#define ID_AA64MMFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR0_EL1)
#define ID_AA64MMFR0_EL1_ISS ISS_MSR_REG(ID_AA64MMFR0_EL1)
#define ID_AA64MMFR0_EL1_op0 3
#define ID_AA64MMFR0_EL1_op1 0
#define ID_AA64MMFR0_EL1_CRn 0
#define ID_AA64MMFR0_EL1_CRm 7
#define ID_AA64MMFR0_EL1_op2 0
/* PARange: supported physical address range (4GB .. 4PB) */
#define ID_AA64MMFR0_PARange_SHIFT 0
#define ID_AA64MMFR0_PARange_WIDTH 4
#define ID_AA64MMFR0_PARange_MASK (UL(0xf) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_VAL(x) ((x) & ID_AA64MMFR0_PARange_MASK)
#define ID_AA64MMFR0_PARange_4G (UL(0x0) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_64G (UL(0x1) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_1T (UL(0x2) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_4T (UL(0x3) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_16T (UL(0x4) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_256T (UL(0x5) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_4P (UL(0x6) << ID_AA64MMFR0_PARange_SHIFT)
/* ASIDBits: number of ASID bits (8 or 16) */
#define ID_AA64MMFR0_ASIDBits_SHIFT 4
#define ID_AA64MMFR0_ASIDBits_WIDTH 4
#define ID_AA64MMFR0_ASIDBits_MASK (UL(0xf) << ID_AA64MMFR0_ASIDBits_SHIFT)
#define ID_AA64MMFR0_ASIDBits_VAL(x) ((x) & ID_AA64MMFR0_ASIDBits_MASK)
#define ID_AA64MMFR0_ASIDBits_8 (UL(0x0) << ID_AA64MMFR0_ASIDBits_SHIFT)
#define ID_AA64MMFR0_ASIDBits_16 (UL(0x2) << ID_AA64MMFR0_ASIDBits_SHIFT)
/* BigEnd: mixed-endian support (SCTLR_ELx.EE writable) */
#define ID_AA64MMFR0_BigEnd_SHIFT 8
#define ID_AA64MMFR0_BigEnd_WIDTH 4
#define ID_AA64MMFR0_BigEnd_MASK (UL(0xf) << ID_AA64MMFR0_BigEnd_SHIFT)
#define ID_AA64MMFR0_BigEnd_VAL(x) ((x) & ID_AA64MMFR0_BigEnd_MASK)
#define ID_AA64MMFR0_BigEnd_FIXED (UL(0x0) << ID_AA64MMFR0_BigEnd_SHIFT)
#define ID_AA64MMFR0_BigEnd_MIXED (UL(0x1) << ID_AA64MMFR0_BigEnd_SHIFT)
/* SNSMem: distinction between Secure and Non-secure memory */
#define ID_AA64MMFR0_SNSMem_SHIFT 12
#define ID_AA64MMFR0_SNSMem_WIDTH 4
#define ID_AA64MMFR0_SNSMem_MASK (UL(0xf) << ID_AA64MMFR0_SNSMem_SHIFT)
#define ID_AA64MMFR0_SNSMem_VAL(x) ((x) & ID_AA64MMFR0_SNSMem_MASK)
#define ID_AA64MMFR0_SNSMem_NONE (UL(0x0) << ID_AA64MMFR0_SNSMem_SHIFT)
#define ID_AA64MMFR0_SNSMem_DISTINCT (UL(0x1) << ID_AA64MMFR0_SNSMem_SHIFT)
/* BigEndEL0: mixed-endian support at EL0 only */
#define ID_AA64MMFR0_BigEndEL0_SHIFT 16
#define ID_AA64MMFR0_BigEndEL0_WIDTH 4
#define ID_AA64MMFR0_BigEndEL0_MASK (UL(0xf) << ID_AA64MMFR0_BigEndEL0_SHIFT)
#define ID_AA64MMFR0_BigEndEL0_VAL(x) ((x) & ID_AA64MMFR0_BigEndEL0_MASK)
#define ID_AA64MMFR0_BigEndEL0_FIXED (UL(0x0) << ID_AA64MMFR0_BigEndEL0_SHIFT)
#define ID_AA64MMFR0_BigEndEL0_MIXED (UL(0x1) << ID_AA64MMFR0_BigEndEL0_SHIFT)
/*
 * TGran16/64/4: stage 1 granule support.  Note the 16K field uses
 * 0x0 = unsupported, while the 64K/4K fields use 0x0 = supported and
 * 0xf = unsupported; _LPA2 values add 52-bit addresses (FEAT_LPA2).
 */
#define ID_AA64MMFR0_TGran16_SHIFT 20
#define ID_AA64MMFR0_TGran16_WIDTH 4
#define ID_AA64MMFR0_TGran16_MASK (UL(0xf) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran16_VAL(x) ((x) & ID_AA64MMFR0_TGran16_MASK)
#define ID_AA64MMFR0_TGran16_NONE (UL(0x0) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran16_IMPL (UL(0x1) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran16_LPA2 (UL(0x2) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran64_SHIFT 24
#define ID_AA64MMFR0_TGran64_WIDTH 4
#define ID_AA64MMFR0_TGran64_MASK (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
#define ID_AA64MMFR0_TGran64_VAL(x) ((x) & ID_AA64MMFR0_TGran64_MASK)
#define ID_AA64MMFR0_TGran64_IMPL (UL(0x0) << ID_AA64MMFR0_TGran64_SHIFT)
#define ID_AA64MMFR0_TGran64_NONE (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
#define ID_AA64MMFR0_TGran4_SHIFT 28
#define ID_AA64MMFR0_TGran4_WIDTH 4
#define ID_AA64MMFR0_TGran4_MASK (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran4_VAL(x) ((x) & ID_AA64MMFR0_TGran4_MASK)
#define ID_AA64MMFR0_TGran4_IMPL (UL(0x0) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran4_LPA2 (UL(0x1) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran4_NONE (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
/*
 * TGran*_2: stage 2 granule support; value 0x0 means "same as the
 * corresponding stage 1 field above".
 */
#define ID_AA64MMFR0_TGran16_2_SHIFT 32
#define ID_AA64MMFR0_TGran16_2_WIDTH 4
#define ID_AA64MMFR0_TGran16_2_MASK (UL(0xf) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran16_2_VAL(x) ((x) & ID_AA64MMFR0_TGran16_2_MASK)
#define ID_AA64MMFR0_TGran16_2_TGran16 (UL(0x0) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran16_2_NONE (UL(0x1) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran16_2_IMPL (UL(0x2) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran16_2_LPA2 (UL(0x3) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_SHIFT 36
#define ID_AA64MMFR0_TGran64_2_WIDTH 4
#define ID_AA64MMFR0_TGran64_2_MASK (UL(0xf) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_VAL(x) ((x) & ID_AA64MMFR0_TGran64_2_MASK)
#define ID_AA64MMFR0_TGran64_2_TGran64 (UL(0x0) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_NONE (UL(0x1) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_IMPL (UL(0x2) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_SHIFT 40
#define ID_AA64MMFR0_TGran4_2_WIDTH 4
#define ID_AA64MMFR0_TGran4_2_MASK (UL(0xf) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_VAL(x) ((x) & ID_AA64MMFR0_TGran4_2_MASK)
#define ID_AA64MMFR0_TGran4_2_TGran4 (UL(0x0) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_NONE (UL(0x1) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_IMPL (UL(0x2) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_LPA2 (UL(0x3) << ID_AA64MMFR0_TGran4_2_SHIFT)
/* ExS: context-synchronizing exception entry/exit configurable (FEAT_ExS) */
#define ID_AA64MMFR0_ExS_SHIFT 44
#define ID_AA64MMFR0_ExS_WIDTH 4
#define ID_AA64MMFR0_ExS_MASK (UL(0xf) << ID_AA64MMFR0_ExS_SHIFT)
#define ID_AA64MMFR0_ExS_VAL(x) ((x) & ID_AA64MMFR0_ExS_MASK)
#define ID_AA64MMFR0_ExS_ALL (UL(0x0) << ID_AA64MMFR0_ExS_SHIFT)
#define ID_AA64MMFR0_ExS_IMPL (UL(0x1) << ID_AA64MMFR0_ExS_SHIFT)
/* FGT: fine-grained trap controls (FEAT_FGT / FGT2) */
#define ID_AA64MMFR0_FGT_SHIFT 56
#define ID_AA64MMFR0_FGT_WIDTH 4
#define ID_AA64MMFR0_FGT_MASK (UL(0xf) << ID_AA64MMFR0_FGT_SHIFT)
#define ID_AA64MMFR0_FGT_VAL(x) ((x) & ID_AA64MMFR0_FGT_MASK)
#define ID_AA64MMFR0_FGT_NONE (UL(0x0) << ID_AA64MMFR0_FGT_SHIFT)
#define ID_AA64MMFR0_FGT_8_6 (UL(0x1) << ID_AA64MMFR0_FGT_SHIFT)
#define ID_AA64MMFR0_FGT_8_9 (UL(0x2) << ID_AA64MMFR0_FGT_SHIFT)
/* ECV: enhanced counter virtualization (FEAT_ECV) */
#define ID_AA64MMFR0_ECV_SHIFT 60
#define ID_AA64MMFR0_ECV_WIDTH 4
#define ID_AA64MMFR0_ECV_MASK (UL(0xf) << ID_AA64MMFR0_ECV_SHIFT)
#define ID_AA64MMFR0_ECV_VAL(x) ((x) & ID_AA64MMFR0_ECV_MASK)
#define ID_AA64MMFR0_ECV_NONE (UL(0x0) << ID_AA64MMFR0_ECV_SHIFT)
#define ID_AA64MMFR0_ECV_IMPL (UL(0x1) << ID_AA64MMFR0_ECV_SHIFT)
#define ID_AA64MMFR0_ECV_CNTHCTL (UL(0x2) << ID_AA64MMFR0_ECV_SHIFT)
+
/* ID_AA64MMFR1_EL1 - AArch64 Memory Model Feature Register 1 */
#define ID_AA64MMFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR1_EL1)
#define ID_AA64MMFR1_EL1_ISS ISS_MSR_REG(ID_AA64MMFR1_EL1)
#define ID_AA64MMFR1_EL1_op0 3
#define ID_AA64MMFR1_EL1_op1 0
#define ID_AA64MMFR1_EL1_CRn 0
#define ID_AA64MMFR1_EL1_CRm 7
#define ID_AA64MMFR1_EL1_op2 1
/* HAFDBS: hardware Access Flag and (0x2) Dirty state management */
#define ID_AA64MMFR1_HAFDBS_SHIFT 0
#define ID_AA64MMFR1_HAFDBS_WIDTH 4
#define ID_AA64MMFR1_HAFDBS_MASK (UL(0xf) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_HAFDBS_VAL(x) ((x) & ID_AA64MMFR1_HAFDBS_MASK)
#define ID_AA64MMFR1_HAFDBS_NONE (UL(0x0) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_HAFDBS_AF (UL(0x1) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_HAFDBS_AF_DBS (UL(0x2) << ID_AA64MMFR1_HAFDBS_SHIFT)
/* VMIDBits: number of VMID bits (8 or 16) */
#define ID_AA64MMFR1_VMIDBits_SHIFT 4
#define ID_AA64MMFR1_VMIDBits_WIDTH 4
#define ID_AA64MMFR1_VMIDBits_MASK (UL(0xf) << ID_AA64MMFR1_VMIDBits_SHIFT)
#define ID_AA64MMFR1_VMIDBits_VAL(x) ((x) & ID_AA64MMFR1_VMIDBits_MASK)
#define ID_AA64MMFR1_VMIDBits_8 (UL(0x0) << ID_AA64MMFR1_VMIDBits_SHIFT)
#define ID_AA64MMFR1_VMIDBits_16 (UL(0x2) << ID_AA64MMFR1_VMIDBits_SHIFT)
/* VH: Virtualization Host Extensions (FEAT_VHE) */
#define ID_AA64MMFR1_VH_SHIFT 8
#define ID_AA64MMFR1_VH_WIDTH 4
#define ID_AA64MMFR1_VH_MASK (UL(0xf) << ID_AA64MMFR1_VH_SHIFT)
#define ID_AA64MMFR1_VH_VAL(x) ((x) & ID_AA64MMFR1_VH_MASK)
#define ID_AA64MMFR1_VH_NONE (UL(0x0) << ID_AA64MMFR1_VH_SHIFT)
#define ID_AA64MMFR1_VH_IMPL (UL(0x1) << ID_AA64MMFR1_VH_SHIFT)
/* HPDS: hierarchical permission disable (FEAT_HPDS / HPDS2 page table bits) */
#define ID_AA64MMFR1_HPDS_SHIFT 12
#define ID_AA64MMFR1_HPDS_WIDTH 4
#define ID_AA64MMFR1_HPDS_MASK (UL(0xf) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_HPDS_VAL(x) ((x) & ID_AA64MMFR1_HPDS_MASK)
#define ID_AA64MMFR1_HPDS_NONE (UL(0x0) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_HPDS_HPD (UL(0x1) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_HPDS_TTPBHA (UL(0x2) << ID_AA64MMFR1_HPDS_SHIFT)
/* LO: LORegions limited ordering regions (FEAT_LOR) */
#define ID_AA64MMFR1_LO_SHIFT 16
#define ID_AA64MMFR1_LO_WIDTH 4
#define ID_AA64MMFR1_LO_MASK (UL(0xf) << ID_AA64MMFR1_LO_SHIFT)
#define ID_AA64MMFR1_LO_VAL(x) ((x) & ID_AA64MMFR1_LO_MASK)
#define ID_AA64MMFR1_LO_NONE (UL(0x0) << ID_AA64MMFR1_LO_SHIFT)
#define ID_AA64MMFR1_LO_IMPL (UL(0x1) << ID_AA64MMFR1_LO_SHIFT)
/* PAN: Privileged Access Never (FEAT_PAN, PAN2, PAN3/EPAN) */
#define ID_AA64MMFR1_PAN_SHIFT 20
#define ID_AA64MMFR1_PAN_WIDTH 4
#define ID_AA64MMFR1_PAN_MASK (UL(0xf) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_VAL(x) ((x) & ID_AA64MMFR1_PAN_MASK)
#define ID_AA64MMFR1_PAN_NONE (UL(0x0) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_IMPL (UL(0x1) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_ATS1E1 (UL(0x2) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_EPAN (UL(0x3) << ID_AA64MMFR1_PAN_SHIFT)
/* SpecSEI: whether speculative reads can generate SError interrupts */
#define ID_AA64MMFR1_SpecSEI_SHIFT 24
#define ID_AA64MMFR1_SpecSEI_WIDTH 4
#define ID_AA64MMFR1_SpecSEI_MASK (UL(0xf) << ID_AA64MMFR1_SpecSEI_SHIFT)
#define ID_AA64MMFR1_SpecSEI_VAL(x) ((x) & ID_AA64MMFR1_SpecSEI_MASK)
#define ID_AA64MMFR1_SpecSEI_NONE (UL(0x0) << ID_AA64MMFR1_SpecSEI_SHIFT)
#define ID_AA64MMFR1_SpecSEI_IMPL (UL(0x1) << ID_AA64MMFR1_SpecSEI_SHIFT)
/* XNX: distinct EL0/EL1 execute-never control at stage 2 (FEAT_XNX) */
#define ID_AA64MMFR1_XNX_SHIFT 28
#define ID_AA64MMFR1_XNX_WIDTH 4
#define ID_AA64MMFR1_XNX_MASK (UL(0xf) << ID_AA64MMFR1_XNX_SHIFT)
#define ID_AA64MMFR1_XNX_VAL(x) ((x) & ID_AA64MMFR1_XNX_MASK)
#define ID_AA64MMFR1_XNX_NONE (UL(0x0) << ID_AA64MMFR1_XNX_SHIFT)
#define ID_AA64MMFR1_XNX_IMPL (UL(0x1) << ID_AA64MMFR1_XNX_SHIFT)
/* TWED: delayed trapping of WFE (FEAT_TWED) */
#define ID_AA64MMFR1_TWED_SHIFT 32
#define ID_AA64MMFR1_TWED_WIDTH 4
#define ID_AA64MMFR1_TWED_MASK (UL(0xf) << ID_AA64MMFR1_TWED_SHIFT)
#define ID_AA64MMFR1_TWED_VAL(x) ((x) & ID_AA64MMFR1_TWED_MASK)
#define ID_AA64MMFR1_TWED_NONE (UL(0x0) << ID_AA64MMFR1_TWED_SHIFT)
#define ID_AA64MMFR1_TWED_IMPL (UL(0x1) << ID_AA64MMFR1_TWED_SHIFT)
/* ETS: enhanced translation synchronization (FEAT_ETS2; 0x0/0x1 both "none") */
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_ETS_WIDTH 4
#define ID_AA64MMFR1_ETS_MASK (UL(0xf) << ID_AA64MMFR1_ETS_SHIFT)
#define ID_AA64MMFR1_ETS_VAL(x) ((x) & ID_AA64MMFR1_ETS_MASK)
#define ID_AA64MMFR1_ETS_NONE (UL(0x0) << ID_AA64MMFR1_ETS_SHIFT)
#define ID_AA64MMFR1_ETS_NONE2 (UL(0x1) << ID_AA64MMFR1_ETS_SHIFT)
#define ID_AA64MMFR1_ETS_IMPL (UL(0x2) << ID_AA64MMFR1_ETS_SHIFT)
/* HCX: HCRX_EL2 extended hypervisor configuration register (FEAT_HCX) */
#define ID_AA64MMFR1_HCX_SHIFT 40
#define ID_AA64MMFR1_HCX_WIDTH 4
#define ID_AA64MMFR1_HCX_MASK (UL(0xf) << ID_AA64MMFR1_HCX_SHIFT)
#define ID_AA64MMFR1_HCX_VAL(x) ((x) & ID_AA64MMFR1_HCX_MASK)
#define ID_AA64MMFR1_HCX_NONE (UL(0x0) << ID_AA64MMFR1_HCX_SHIFT)
#define ID_AA64MMFR1_HCX_IMPL (UL(0x1) << ID_AA64MMFR1_HCX_SHIFT)
/* AFP: alternate floating-point behavior (FEAT_AFP) */
#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_AFP_WIDTH 4
#define ID_AA64MMFR1_AFP_MASK (UL(0xf) << ID_AA64MMFR1_AFP_SHIFT)
#define ID_AA64MMFR1_AFP_VAL(x) ((x) & ID_AA64MMFR1_AFP_MASK)
#define ID_AA64MMFR1_AFP_NONE (UL(0x0) << ID_AA64MMFR1_AFP_SHIFT)
#define ID_AA64MMFR1_AFP_IMPL (UL(0x1) << ID_AA64MMFR1_AFP_SHIFT)
/* nTLBPA: intermediate TLB caching behavior (FEAT_nTLBPA) */
#define ID_AA64MMFR1_nTLBPA_SHIFT 48
#define ID_AA64MMFR1_nTLBPA_WIDTH 4
#define ID_AA64MMFR1_nTLBPA_MASK (UL(0xf) << ID_AA64MMFR1_nTLBPA_SHIFT)
#define ID_AA64MMFR1_nTLBPA_VAL(x) ((x) & ID_AA64MMFR1_nTLBPA_MASK)
#define ID_AA64MMFR1_nTLBPA_NONE (UL(0x0) << ID_AA64MMFR1_nTLBPA_SHIFT)
#define ID_AA64MMFR1_nTLBPA_IMPL (UL(0x1) << ID_AA64MMFR1_nTLBPA_SHIFT)
/* TIDCP1: trap IMPLEMENTATION DEFINED registers at EL0 (FEAT_TIDCP1) */
#define ID_AA64MMFR1_TIDCP1_SHIFT 52
#define ID_AA64MMFR1_TIDCP1_WIDTH 4
#define ID_AA64MMFR1_TIDCP1_MASK (UL(0xf) << ID_AA64MMFR1_TIDCP1_SHIFT)
#define ID_AA64MMFR1_TIDCP1_VAL(x) ((x) & ID_AA64MMFR1_TIDCP1_MASK)
#define ID_AA64MMFR1_TIDCP1_NONE (UL(0x0) << ID_AA64MMFR1_TIDCP1_SHIFT)
#define ID_AA64MMFR1_TIDCP1_IMPL (UL(0x1) << ID_AA64MMFR1_TIDCP1_SHIFT)
/* CMOVW: cache-maintenance instruction permission (FEAT_CMOW) */
#define ID_AA64MMFR1_CMOVW_SHIFT 56
#define ID_AA64MMFR1_CMOVW_WIDTH 4
#define ID_AA64MMFR1_CMOVW_MASK (UL(0xf) << ID_AA64MMFR1_CMOVW_SHIFT)
#define ID_AA64MMFR1_CMOVW_VAL(x) ((x) & ID_AA64MMFR1_CMOVW_MASK)
#define ID_AA64MMFR1_CMOVW_NONE (UL(0x0) << ID_AA64MMFR1_CMOVW_SHIFT)
#define ID_AA64MMFR1_CMOVW_IMPL (UL(0x1) << ID_AA64MMFR1_CMOVW_SHIFT)
/* ECBHB: exploitative-control branch history behavior (FEAT_ECBHB) */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
#define ID_AA64MMFR1_ECBHB_WIDTH 4
#define ID_AA64MMFR1_ECBHB_MASK (UL(0xf) << ID_AA64MMFR1_ECBHB_SHIFT)
#define ID_AA64MMFR1_ECBHB_VAL(x) ((x) & ID_AA64MMFR1_ECBHB_MASK)
#define ID_AA64MMFR1_ECBHB_NONE (UL(0x0) << ID_AA64MMFR1_ECBHB_SHIFT)
#define ID_AA64MMFR1_ECBHB_IMPL (UL(0x1) << ID_AA64MMFR1_ECBHB_SHIFT)
+
/* ID_AA64MMFR2_EL1 - AArch64 Memory Model Feature Register 2 */
#define ID_AA64MMFR2_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR2_EL1)
#define ID_AA64MMFR2_EL1_ISS ISS_MSR_REG(ID_AA64MMFR2_EL1)
#define ID_AA64MMFR2_EL1_op0 3
#define ID_AA64MMFR2_EL1_op1 0
#define ID_AA64MMFR2_EL1_CRn 0
#define ID_AA64MMFR2_EL1_CRm 7
#define ID_AA64MMFR2_EL1_op2 2
/* CnP: common-not-private translation table entries (FEAT_TTCNP) */
#define ID_AA64MMFR2_CnP_SHIFT 0
#define ID_AA64MMFR2_CnP_WIDTH 4
#define ID_AA64MMFR2_CnP_MASK (UL(0xf) << ID_AA64MMFR2_CnP_SHIFT)
#define ID_AA64MMFR2_CnP_VAL(x) ((x) & ID_AA64MMFR2_CnP_MASK)
#define ID_AA64MMFR2_CnP_NONE (UL(0x0) << ID_AA64MMFR2_CnP_SHIFT)
#define ID_AA64MMFR2_CnP_IMPL (UL(0x1) << ID_AA64MMFR2_CnP_SHIFT)
/* UAO: User Access Override (FEAT_UAO) */
#define ID_AA64MMFR2_UAO_SHIFT 4
#define ID_AA64MMFR2_UAO_WIDTH 4
#define ID_AA64MMFR2_UAO_MASK (UL(0xf) << ID_AA64MMFR2_UAO_SHIFT)
#define ID_AA64MMFR2_UAO_VAL(x) ((x) & ID_AA64MMFR2_UAO_MASK)
#define ID_AA64MMFR2_UAO_NONE (UL(0x0) << ID_AA64MMFR2_UAO_SHIFT)
#define ID_AA64MMFR2_UAO_IMPL (UL(0x1) << ID_AA64MMFR2_UAO_SHIFT)
/* LSM: LSMAOE/nTLSMD load/store-multiple controls (FEAT_LSMAOC) */
#define ID_AA64MMFR2_LSM_SHIFT 8
#define ID_AA64MMFR2_LSM_WIDTH 4
#define ID_AA64MMFR2_LSM_MASK (UL(0xf) << ID_AA64MMFR2_LSM_SHIFT)
#define ID_AA64MMFR2_LSM_VAL(x) ((x) & ID_AA64MMFR2_LSM_MASK)
#define ID_AA64MMFR2_LSM_NONE (UL(0x0) << ID_AA64MMFR2_LSM_SHIFT)
#define ID_AA64MMFR2_LSM_IMPL (UL(0x1) << ID_AA64MMFR2_LSM_SHIFT)
/* IESB: implicit error synchronization barrier (FEAT_IESB) */
#define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_IESB_WIDTH 4
#define ID_AA64MMFR2_IESB_MASK (UL(0xf) << ID_AA64MMFR2_IESB_SHIFT)
#define ID_AA64MMFR2_IESB_VAL(x) ((x) & ID_AA64MMFR2_IESB_MASK)
#define ID_AA64MMFR2_IESB_NONE (UL(0x0) << ID_AA64MMFR2_IESB_SHIFT)
#define ID_AA64MMFR2_IESB_IMPL (UL(0x1) << ID_AA64MMFR2_IESB_SHIFT)
/* VARange: virtual address range, 48 or 52 bits (FEAT_LVA) */
#define ID_AA64MMFR2_VARange_SHIFT 16
#define ID_AA64MMFR2_VARange_WIDTH 4
#define ID_AA64MMFR2_VARange_MASK (UL(0xf) << ID_AA64MMFR2_VARange_SHIFT)
#define ID_AA64MMFR2_VARange_VAL(x) ((x) & ID_AA64MMFR2_VARange_MASK)
#define ID_AA64MMFR2_VARange_48 (UL(0x0) << ID_AA64MMFR2_VARange_SHIFT)
#define ID_AA64MMFR2_VARange_52 (UL(0x1) << ID_AA64MMFR2_VARange_SHIFT)
/* CCIDX: cache index field width in CCSIDR_EL1, 32- or 64-bit (FEAT_CCIDX) */
#define ID_AA64MMFR2_CCIDX_SHIFT 20
#define ID_AA64MMFR2_CCIDX_WIDTH 4
#define ID_AA64MMFR2_CCIDX_MASK (UL(0xf) << ID_AA64MMFR2_CCIDX_SHIFT)
#define ID_AA64MMFR2_CCIDX_VAL(x) ((x) & ID_AA64MMFR2_CCIDX_MASK)
#define ID_AA64MMFR2_CCIDX_32 (UL(0x0) << ID_AA64MMFR2_CCIDX_SHIFT)
#define ID_AA64MMFR2_CCIDX_64 (UL(0x1) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define ID_AA64MMFR2_NV_SHIFT 24
+#define ID_AA64MMFR2_NV_WIDTH 4
+#define ID_AA64MMFR2_NV_MASK (UL(0xf) << ID_AA64MMFR2_NV_SHIFT)
+#define ID_AA64MMFR2_NV_VAL(x) ((x) & ID_AA64MMFR2_NV_MASK)
+#define ID_AA64MMFR2_NV_NONE (UL(0x0) << ID_AA64MMFR2_NV_SHIFT)
+#define ID_AA64MMFR2_NV_8_3 (UL(0x1) << ID_AA64MMFR2_NV_SHIFT)
+#define ID_AA64MMFR2_NV_8_4 (UL(0x2) << ID_AA64MMFR2_NV_SHIFT)
+#define ID_AA64MMFR2_ST_SHIFT 28
+#define ID_AA64MMFR2_ST_WIDTH 4
+#define ID_AA64MMFR2_ST_MASK (UL(0xf) << ID_AA64MMFR2_ST_SHIFT)
+#define ID_AA64MMFR2_ST_VAL(x) ((x) & ID_AA64MMFR2_ST_MASK)
+#define ID_AA64MMFR2_ST_NONE (UL(0x0) << ID_AA64MMFR2_ST_SHIFT)
+#define ID_AA64MMFR2_ST_IMPL (UL(0x1) << ID_AA64MMFR2_ST_SHIFT)
+#define ID_AA64MMFR2_AT_SHIFT 32
+#define ID_AA64MMFR2_AT_WIDTH 4
+#define ID_AA64MMFR2_AT_MASK (UL(0xf) << ID_AA64MMFR2_AT_SHIFT)
+#define ID_AA64MMFR2_AT_VAL(x) ((x) & ID_AA64MMFR2_AT_MASK)
+#define ID_AA64MMFR2_AT_NONE (UL(0x0) << ID_AA64MMFR2_AT_SHIFT)
+#define ID_AA64MMFR2_AT_IMPL (UL(0x1) << ID_AA64MMFR2_AT_SHIFT)
+#define ID_AA64MMFR2_IDS_SHIFT 36
+#define ID_AA64MMFR2_IDS_WIDTH 4
+#define ID_AA64MMFR2_IDS_MASK (UL(0xf) << ID_AA64MMFR2_IDS_SHIFT)
+#define ID_AA64MMFR2_IDS_VAL(x) ((x) & ID_AA64MMFR2_IDS_MASK)
+#define ID_AA64MMFR2_IDS_NONE (UL(0x0) << ID_AA64MMFR2_IDS_SHIFT)
+#define ID_AA64MMFR2_IDS_IMPL (UL(0x1) << ID_AA64MMFR2_IDS_SHIFT)
+#define ID_AA64MMFR2_FWB_SHIFT 40
+#define ID_AA64MMFR2_FWB_WIDTH 4
+#define ID_AA64MMFR2_FWB_MASK (UL(0xf) << ID_AA64MMFR2_FWB_SHIFT)
+#define ID_AA64MMFR2_FWB_VAL(x) ((x) & ID_AA64MMFR2_FWB_MASK)
+#define ID_AA64MMFR2_FWB_NONE (UL(0x0) << ID_AA64MMFR2_FWB_SHIFT)
+#define ID_AA64MMFR2_FWB_IMPL (UL(0x1) << ID_AA64MMFR2_FWB_SHIFT)
+#define ID_AA64MMFR2_TTL_SHIFT 48
+#define ID_AA64MMFR2_TTL_WIDTH 4
+#define ID_AA64MMFR2_TTL_MASK (UL(0xf) << ID_AA64MMFR2_TTL_SHIFT)
+#define ID_AA64MMFR2_TTL_VAL(x) ((x) & ID_AA64MMFR2_TTL_MASK)
+#define ID_AA64MMFR2_TTL_NONE (UL(0x0) << ID_AA64MMFR2_TTL_SHIFT)
+#define ID_AA64MMFR2_TTL_IMPL (UL(0x1) << ID_AA64MMFR2_TTL_SHIFT)
+#define ID_AA64MMFR2_BBM_SHIFT 52
+#define ID_AA64MMFR2_BBM_WIDTH 4
+#define ID_AA64MMFR2_BBM_MASK (UL(0xf) << ID_AA64MMFR2_BBM_SHIFT)
+#define ID_AA64MMFR2_BBM_VAL(x) ((x) & ID_AA64MMFR2_BBM_MASK)
+#define ID_AA64MMFR2_BBM_LEVEL0 (UL(0x0) << ID_AA64MMFR2_BBM_SHIFT)
+#define ID_AA64MMFR2_BBM_LEVEL1 (UL(0x1) << ID_AA64MMFR2_BBM_SHIFT)
+#define ID_AA64MMFR2_BBM_LEVEL2 (UL(0x2) << ID_AA64MMFR2_BBM_SHIFT)
+#define ID_AA64MMFR2_EVT_SHIFT 56
+#define ID_AA64MMFR2_EVT_WIDTH 4
+#define ID_AA64MMFR2_EVT_MASK (UL(0xf) << ID_AA64MMFR2_EVT_SHIFT)
+#define ID_AA64MMFR2_EVT_VAL(x) ((x) & ID_AA64MMFR2_EVT_MASK)
+#define ID_AA64MMFR2_EVT_NONE (UL(0x0) << ID_AA64MMFR2_EVT_SHIFT)
+#define ID_AA64MMFR2_EVT_8_2 (UL(0x1) << ID_AA64MMFR2_EVT_SHIFT)
+#define ID_AA64MMFR2_EVT_8_5 (UL(0x2) << ID_AA64MMFR2_EVT_SHIFT)
+#define ID_AA64MMFR2_E0PD_SHIFT 60
+#define ID_AA64MMFR2_E0PD_WIDTH 4
+#define ID_AA64MMFR2_E0PD_MASK (UL(0xf) << ID_AA64MMFR2_E0PD_SHIFT)
+#define ID_AA64MMFR2_E0PD_VAL(x) ((x) & ID_AA64MMFR2_E0PD_MASK)
+#define ID_AA64MMFR2_E0PD_NONE (UL(0x0) << ID_AA64MMFR2_E0PD_SHIFT)
+#define ID_AA64MMFR2_E0PD_IMPL (UL(0x1) << ID_AA64MMFR2_E0PD_SHIFT)
+
+/* ID_AA64MMFR3_EL1 - AArch64 Memory Model Feature Register 3 */
+#define ID_AA64MMFR3_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR3_EL1)
+#define ID_AA64MMFR3_EL1_ISS ISS_MSR_REG(ID_AA64MMFR3_EL1)
+#define ID_AA64MMFR3_EL1_op0 3
+#define ID_AA64MMFR3_EL1_op1 0
+#define ID_AA64MMFR3_EL1_CRn 0
+#define ID_AA64MMFR3_EL1_CRm 7
+#define ID_AA64MMFR3_EL1_op2 3
+#define ID_AA64MMFR3_TCRX_SHIFT 0
+#define ID_AA64MMFR3_TCRX_WIDTH 4
+#define ID_AA64MMFR3_TCRX_MASK (UL(0xf) << ID_AA64MMFR3_TCRX_SHIFT)
+#define ID_AA64MMFR3_TCRX_VAL(x) ((x) & ID_AA64MMFR3_TCRX_MASK)
+#define ID_AA64MMFR3_TCRX_NONE (UL(0x0) << ID_AA64MMFR3_TCRX_SHIFT)
+#define ID_AA64MMFR3_TCRX_IMPL (UL(0x1) << ID_AA64MMFR3_TCRX_SHIFT)
+#define ID_AA64MMFR3_SCTLRX_SHIFT 4
+#define ID_AA64MMFR3_SCTLRX_WIDTH 4
+#define ID_AA64MMFR3_SCTLRX_MASK (UL(0xf) << ID_AA64MMFR3_SCTLRX_SHIFT)
+#define ID_AA64MMFR3_SCTLRX_VAL(x) ((x) & ID_AA64MMFR3_SCTLRX_MASK)
+#define ID_AA64MMFR3_SCTLRX_NONE (UL(0x0) << ID_AA64MMFR3_SCTLRX_SHIFT)
+#define ID_AA64MMFR3_SCTLRX_IMPL (UL(0x1) << ID_AA64MMFR3_SCTLRX_SHIFT)
+#define ID_AA64MMFR3_S1PIE_SHIFT 8
+#define ID_AA64MMFR3_S1PIE_WIDTH 4
+#define ID_AA64MMFR3_S1PIE_MASK (UL(0xf) << ID_AA64MMFR3_S1PIE_SHIFT)
+#define ID_AA64MMFR3_S1PIE_VAL(x) ((x) & ID_AA64MMFR3_S1PIE_MASK)
+#define ID_AA64MMFR3_S1PIE_NONE (UL(0x0) << ID_AA64MMFR3_S1PIE_SHIFT)
+#define ID_AA64MMFR3_S1PIE_IMPL (UL(0x1) << ID_AA64MMFR3_S1PIE_SHIFT)
+#define ID_AA64MMFR3_S2PIE_SHIFT 12
+#define ID_AA64MMFR3_S2PIE_WIDTH 4
+#define ID_AA64MMFR3_S2PIE_MASK (UL(0xf) << ID_AA64MMFR3_S2PIE_SHIFT)
+#define ID_AA64MMFR3_S2PIE_VAL(x) ((x) & ID_AA64MMFR3_S2PIE_MASK)
+#define ID_AA64MMFR3_S2PIE_NONE (UL(0x0) << ID_AA64MMFR3_S2PIE_SHIFT)
+#define ID_AA64MMFR3_S2PIE_IMPL (UL(0x1) << ID_AA64MMFR3_S2PIE_SHIFT)
+#define ID_AA64MMFR3_S1POE_SHIFT 16
+#define ID_AA64MMFR3_S1POE_WIDTH 4
+#define ID_AA64MMFR3_S1POE_MASK (UL(0xf) << ID_AA64MMFR3_S1POE_SHIFT)
+#define ID_AA64MMFR3_S1POE_VAL(x) ((x) & ID_AA64MMFR3_S1POE_MASK)
+#define ID_AA64MMFR3_S1POE_NONE (UL(0x0) << ID_AA64MMFR3_S1POE_SHIFT)
+#define ID_AA64MMFR3_S1POE_IMPL (UL(0x1) << ID_AA64MMFR3_S1POE_SHIFT)
+#define ID_AA64MMFR3_S2POE_SHIFT 20
+#define ID_AA64MMFR3_S2POE_WIDTH 4
+#define ID_AA64MMFR3_S2POE_MASK (UL(0xf) << ID_AA64MMFR3_S2POE_SHIFT)
+#define ID_AA64MMFR3_S2POE_VAL(x) ((x) & ID_AA64MMFR3_S2POE_MASK)
+#define ID_AA64MMFR3_S2POE_NONE (UL(0x0) << ID_AA64MMFR3_S2POE_SHIFT)
+#define ID_AA64MMFR3_S2POE_IMPL (UL(0x1) << ID_AA64MMFR3_S2POE_SHIFT)
+#define ID_AA64MMFR3_AIE_SHIFT 24
+#define ID_AA64MMFR3_AIE_WIDTH 4
+#define ID_AA64MMFR3_AIE_MASK (UL(0xf) << ID_AA64MMFR3_AIE_SHIFT)
+#define ID_AA64MMFR3_AIE_VAL(x) ((x) & ID_AA64MMFR3_AIE_MASK)
+#define ID_AA64MMFR3_AIE_NONE (UL(0x0) << ID_AA64MMFR3_AIE_SHIFT)
+#define ID_AA64MMFR3_AIE_IMPL (UL(0x1) << ID_AA64MMFR3_AIE_SHIFT)
+#define ID_AA64MMFR3_MEC_SHIFT 28
+#define ID_AA64MMFR3_MEC_WIDTH 4
+#define ID_AA64MMFR3_MEC_MASK (UL(0xf) << ID_AA64MMFR3_MEC_SHIFT)
+#define ID_AA64MMFR3_MEC_VAL(x) ((x) & ID_AA64MMFR3_MEC_MASK)
+#define ID_AA64MMFR3_MEC_NONE (UL(0x0) << ID_AA64MMFR3_MEC_SHIFT)
+#define ID_AA64MMFR3_MEC_IMPL (UL(0x1) << ID_AA64MMFR3_MEC_SHIFT)
+#define ID_AA64MMFR3_SNERR_SHIFT 40
+#define ID_AA64MMFR3_SNERR_WIDTH 4
+#define ID_AA64MMFR3_SNERR_MASK (UL(0xf) << ID_AA64MMFR3_SNERR_SHIFT)
+#define ID_AA64MMFR3_SNERR_VAL(x) ((x) & ID_AA64MMFR3_SNERR_MASK)
+#define ID_AA64MMFR3_SNERR_NONE (UL(0x0) << ID_AA64MMFR3_SNERR_SHIFT)
+#define ID_AA64MMFR3_SNERR_ALL (UL(0x1) << ID_AA64MMFR3_SNERR_SHIFT)
+#define ID_AA64MMFR3_ANERR_SHIFT 44
+#define ID_AA64MMFR3_ANERR_WIDTH 4
+#define ID_AA64MMFR3_ANERR_MASK (UL(0xf) << ID_AA64MMFR3_ANERR_SHIFT)
+#define ID_AA64MMFR3_ANERR_VAL(x) ((x) & ID_AA64MMFR3_ANERR_MASK)
+#define ID_AA64MMFR3_ANERR_NONE (UL(0x0) << ID_AA64MMFR3_ANERR_SHIFT)
+#define ID_AA64MMFR3_ANERR_SOME (UL(0x1) << ID_AA64MMFR3_ANERR_SHIFT)
+#define ID_AA64MMFR3_SDERR_SHIFT 52
+#define ID_AA64MMFR3_SDERR_WIDTH 4
+#define ID_AA64MMFR3_SDERR_MASK (UL(0xf) << ID_AA64MMFR3_SDERR_SHIFT)
+#define ID_AA64MMFR3_SDERR_VAL(x) ((x) & ID_AA64MMFR3_SDERR_MASK)
+#define ID_AA64MMFR3_SDERR_NONE (UL(0x0) << ID_AA64MMFR3_SDERR_SHIFT)
+#define ID_AA64MMFR3_SDERR_ALL (UL(0x1) << ID_AA64MMFR3_SDERR_SHIFT)
+#define ID_AA64MMFR3_ADERR_SHIFT 56
+#define ID_AA64MMFR3_ADERR_WIDTH 4
+#define ID_AA64MMFR3_ADERR_MASK (UL(0xf) << ID_AA64MMFR3_ADERR_SHIFT)
+#define ID_AA64MMFR3_ADERR_VAL(x) ((x) & ID_AA64MMFR3_ADERR_MASK)
+#define ID_AA64MMFR3_ADERR_NONE (UL(0x0) << ID_AA64MMFR3_ADERR_SHIFT)
+#define ID_AA64MMFR3_ADERR_SOME (UL(0x1) << ID_AA64MMFR3_ADERR_SHIFT)
+#define ID_AA64MMFR3_Spec_FPACC_SHIFT 60
+#define ID_AA64MMFR3_Spec_FPACC_WIDTH 4
+#define ID_AA64MMFR3_Spec_FPACC_MASK (UL(0xf) << ID_AA64MMFR3_Spec_FPACC_SHIFT)
+#define ID_AA64MMFR3_Spec_FPACC_VAL(x) ((x) & ID_AA64MMFR3_Spec_FPACC_MASK)
+#define ID_AA64MMFR3_Spec_FPACC_NONE (UL(0x0) << ID_AA64MMFR3_Spec_FPACC_SHIFT)
+#define ID_AA64MMFR3_Spec_FPACC_IMPL (UL(0x1) << ID_AA64MMFR3_Spec_FPACC_SHIFT)
+
+/* ID_AA64MMFR4_EL1 - AArch64 Memory Model Feature Register 4 (no fields decoded yet) */
+#define ID_AA64MMFR4_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR4_EL1)
+#define ID_AA64MMFR4_EL1_ISS ISS_MSR_REG(ID_AA64MMFR4_EL1)
+#define ID_AA64MMFR4_EL1_op0 3
+#define ID_AA64MMFR4_EL1_op1 0
+#define ID_AA64MMFR4_EL1_CRn 0
+#define ID_AA64MMFR4_EL1_CRm 7
+#define ID_AA64MMFR4_EL1_op2 4
+
+/* ID_AA64PFR0_EL1 - AArch64 Processor Feature Register 0 */
+#define ID_AA64PFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64PFR0_EL1)
+#define ID_AA64PFR0_EL1_ISS ISS_MSR_REG(ID_AA64PFR0_EL1)
+#define ID_AA64PFR0_EL1_op0 3
+#define ID_AA64PFR0_EL1_op1 0
+#define ID_AA64PFR0_EL1_CRn 0
+#define ID_AA64PFR0_EL1_CRm 4
+#define ID_AA64PFR0_EL1_op2 0
+#define ID_AA64PFR0_EL0_SHIFT 0
+#define ID_AA64PFR0_EL0_WIDTH 4
+#define ID_AA64PFR0_EL0_MASK (UL(0xf) << ID_AA64PFR0_EL0_SHIFT)
+#define ID_AA64PFR0_EL0_VAL(x) ((x) & ID_AA64PFR0_EL0_MASK)
+#define ID_AA64PFR0_EL0_64 (UL(0x1) << ID_AA64PFR0_EL0_SHIFT)
+#define ID_AA64PFR0_EL0_64_32 (UL(0x2) << ID_AA64PFR0_EL0_SHIFT)
+#define ID_AA64PFR0_EL1_SHIFT 4
+#define ID_AA64PFR0_EL1_WIDTH 4
+#define ID_AA64PFR0_EL1_MASK (UL(0xf) << ID_AA64PFR0_EL1_SHIFT)
+#define ID_AA64PFR0_EL1_VAL(x) ((x) & ID_AA64PFR0_EL1_MASK)
+#define ID_AA64PFR0_EL1_64 (UL(0x1) << ID_AA64PFR0_EL1_SHIFT)
+#define ID_AA64PFR0_EL1_64_32 (UL(0x2) << ID_AA64PFR0_EL1_SHIFT)
+#define ID_AA64PFR0_EL2_SHIFT 8
+#define ID_AA64PFR0_EL2_WIDTH 4
+#define ID_AA64PFR0_EL2_MASK (UL(0xf) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL2_VAL(x) ((x) & ID_AA64PFR0_EL2_MASK)
+#define ID_AA64PFR0_EL2_NONE (UL(0x0) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL2_64 (UL(0x1) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL2_64_32 (UL(0x2) << ID_AA64PFR0_EL2_SHIFT)
+#define ID_AA64PFR0_EL3_SHIFT 12
+#define ID_AA64PFR0_EL3_WIDTH 4
+#define ID_AA64PFR0_EL3_MASK (UL(0xf) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_EL3_VAL(x) ((x) & ID_AA64PFR0_EL3_MASK)
+#define ID_AA64PFR0_EL3_NONE (UL(0x0) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_EL3_64 (UL(0x1) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_EL3_64_32 (UL(0x2) << ID_AA64PFR0_EL3_SHIFT)
+#define ID_AA64PFR0_FP_SHIFT 16
+#define ID_AA64PFR0_FP_WIDTH 4
+#define ID_AA64PFR0_FP_MASK (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_FP_VAL(x) ((x) & ID_AA64PFR0_FP_MASK)
+#define ID_AA64PFR0_FP_IMPL (UL(0x0) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_FP_HP (UL(0x1) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_FP_NONE (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+#define ID_AA64PFR0_AdvSIMD_WIDTH 4
+#define ID_AA64PFR0_AdvSIMD_MASK (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_VAL(x) ((x) & ID_AA64PFR0_AdvSIMD_MASK)
+#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_AdvSIMD_NONE (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define ID_AA64PFR0_GIC_BITS 0x4 /* Number of bits in GIC field */
+#define ID_AA64PFR0_GIC_SHIFT 24
+#define ID_AA64PFR0_GIC_WIDTH 4
+#define ID_AA64PFR0_GIC_MASK (UL(0xf) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_GIC_VAL(x) ((x) & ID_AA64PFR0_GIC_MASK)
+#define ID_AA64PFR0_GIC_CPUIF_NONE (UL(0x0) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_GIC_CPUIF_EN (UL(0x1) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_GIC_CPUIF_4_1 (UL(0x3) << ID_AA64PFR0_GIC_SHIFT)
+#define ID_AA64PFR0_RAS_SHIFT 28
+#define ID_AA64PFR0_RAS_WIDTH 4
+#define ID_AA64PFR0_RAS_MASK (UL(0xf) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_VAL(x) ((x) & ID_AA64PFR0_RAS_MASK)
+#define ID_AA64PFR0_RAS_NONE (UL(0x0) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_IMPL (UL(0x1) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_8_4 (UL(0x2) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_8_9 (UL(0x3) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_SVE_WIDTH 4
+#define ID_AA64PFR0_SVE_MASK (UL(0xf) << ID_AA64PFR0_SVE_SHIFT)
+#define ID_AA64PFR0_SVE_VAL(x) ((x) & ID_AA64PFR0_SVE_MASK)
+#define ID_AA64PFR0_SVE_NONE (UL(0x0) << ID_AA64PFR0_SVE_SHIFT)
+#define ID_AA64PFR0_SVE_IMPL (UL(0x1) << ID_AA64PFR0_SVE_SHIFT)
+#define ID_AA64PFR0_SEL2_SHIFT 36
+#define ID_AA64PFR0_SEL2_WIDTH 4
+#define ID_AA64PFR0_SEL2_MASK (UL(0xf) << ID_AA64PFR0_SEL2_SHIFT)
+#define ID_AA64PFR0_SEL2_VAL(x) ((x) & ID_AA64PFR0_SEL2_MASK)
+#define ID_AA64PFR0_SEL2_NONE (UL(0x0) << ID_AA64PFR0_SEL2_SHIFT)
+#define ID_AA64PFR0_SEL2_IMPL (UL(0x1) << ID_AA64PFR0_SEL2_SHIFT)
+#define ID_AA64PFR0_MPAM_SHIFT 40
+#define ID_AA64PFR0_MPAM_WIDTH 4
+#define ID_AA64PFR0_MPAM_MASK (UL(0xf) << ID_AA64PFR0_MPAM_SHIFT)
+#define ID_AA64PFR0_MPAM_VAL(x) ((x) & ID_AA64PFR0_MPAM_MASK)
+#define ID_AA64PFR0_MPAM_NONE (UL(0x0) << ID_AA64PFR0_MPAM_SHIFT)
+#define ID_AA64PFR0_MPAM_IMPL (UL(0x1) << ID_AA64PFR0_MPAM_SHIFT)
+#define ID_AA64PFR0_AMU_SHIFT 44
+#define ID_AA64PFR0_AMU_WIDTH 4
+#define ID_AA64PFR0_AMU_MASK (UL(0xf) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_AMU_VAL(x) ((x) & ID_AA64PFR0_AMU_MASK)
+#define ID_AA64PFR0_AMU_NONE (UL(0x0) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_AMU_V1 (UL(0x1) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_AMU_V1_1 (UL(0x2) << ID_AA64PFR0_AMU_SHIFT)
+#define ID_AA64PFR0_DIT_SHIFT 48
+#define ID_AA64PFR0_DIT_WIDTH 4
+#define ID_AA64PFR0_DIT_MASK (UL(0xf) << ID_AA64PFR0_DIT_SHIFT)
+#define ID_AA64PFR0_DIT_VAL(x) ((x) & ID_AA64PFR0_DIT_MASK)
+#define ID_AA64PFR0_DIT_NONE (UL(0x0) << ID_AA64PFR0_DIT_SHIFT)
+#define ID_AA64PFR0_DIT_PSTATE (UL(0x1) << ID_AA64PFR0_DIT_SHIFT)
+#define ID_AA64PFR0_RME_SHIFT 52
+#define ID_AA64PFR0_RME_WIDTH 4
+#define ID_AA64PFR0_RME_MASK (UL(0xf) << ID_AA64PFR0_RME_SHIFT)
+#define ID_AA64PFR0_RME_VAL(x) ((x) & ID_AA64PFR0_RME_MASK)
+#define ID_AA64PFR0_RME_NONE (UL(0x0) << ID_AA64PFR0_RME_SHIFT)
+#define ID_AA64PFR0_RME_IMPL (UL(0x1) << ID_AA64PFR0_RME_SHIFT)
+#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_CSV2_WIDTH 4
+#define ID_AA64PFR0_CSV2_MASK (UL(0xf) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_VAL(x) ((x) & ID_AA64PFR0_CSV2_MASK)
+#define ID_AA64PFR0_CSV2_NONE (UL(0x0) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_ISOLATED (UL(0x1) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_SCXTNUM (UL(0x2) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV2_3 (UL(0x3) << ID_AA64PFR0_CSV2_SHIFT)
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV3_WIDTH 4
+#define ID_AA64PFR0_CSV3_MASK (UL(0xf) << ID_AA64PFR0_CSV3_SHIFT)
+#define ID_AA64PFR0_CSV3_VAL(x) ((x) & ID_AA64PFR0_CSV3_MASK)
+#define ID_AA64PFR0_CSV3_NONE (UL(0x0) << ID_AA64PFR0_CSV3_SHIFT)
+#define ID_AA64PFR0_CSV3_ISOLATED (UL(0x1) << ID_AA64PFR0_CSV3_SHIFT)
+
+/* ID_AA64PFR1_EL1 - AArch64 Processor Feature Register 1 */
+#define ID_AA64PFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64PFR1_EL1)
+#define ID_AA64PFR1_EL1_ISS ISS_MSR_REG(ID_AA64PFR1_EL1)
+#define ID_AA64PFR1_EL1_op0 3
+#define ID_AA64PFR1_EL1_op1 0
+#define ID_AA64PFR1_EL1_CRn 0
+#define ID_AA64PFR1_EL1_CRm 4
+#define ID_AA64PFR1_EL1_op2 1
+#define ID_AA64PFR1_BT_SHIFT 0
+#define ID_AA64PFR1_BT_WIDTH 4
+#define ID_AA64PFR1_BT_MASK (UL(0xf) << ID_AA64PFR1_BT_SHIFT)
+#define ID_AA64PFR1_BT_VAL(x) ((x) & ID_AA64PFR1_BT_MASK)
+#define ID_AA64PFR1_BT_NONE (UL(0x0) << ID_AA64PFR1_BT_SHIFT)
+#define ID_AA64PFR1_BT_IMPL (UL(0x1) << ID_AA64PFR1_BT_SHIFT)
+#define ID_AA64PFR1_SSBS_SHIFT 4
+#define ID_AA64PFR1_SSBS_WIDTH 4
+#define ID_AA64PFR1_SSBS_MASK (UL(0xf) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_SSBS_VAL(x) ((x) & ID_AA64PFR1_SSBS_MASK)
+#define ID_AA64PFR1_SSBS_NONE (UL(0x0) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_SSBS_PSTATE (UL(0x1) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_SSBS_PSTATE_MSR (UL(0x2) << ID_AA64PFR1_SSBS_SHIFT)
+#define ID_AA64PFR1_MTE_SHIFT 8
+#define ID_AA64PFR1_MTE_WIDTH 4
+#define ID_AA64PFR1_MTE_MASK (UL(0xf) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_VAL(x) ((x) & ID_AA64PFR1_MTE_MASK)
+#define ID_AA64PFR1_MTE_NONE (UL(0x0) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_MTE (UL(0x1) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_MTE2 (UL(0x2) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_MTE_MTE3 (UL(0x3) << ID_AA64PFR1_MTE_SHIFT)
+#define ID_AA64PFR1_RAS_frac_SHIFT 12
+#define ID_AA64PFR1_RAS_frac_WIDTH 4
+#define ID_AA64PFR1_RAS_frac_MASK (UL(0xf) << ID_AA64PFR1_RAS_frac_SHIFT)
+#define ID_AA64PFR1_RAS_frac_VAL(x) ((x) & ID_AA64PFR1_RAS_frac_MASK)
+#define ID_AA64PFR1_RAS_frac_p0 (UL(0x0) << ID_AA64PFR1_RAS_frac_SHIFT)
+#define ID_AA64PFR1_RAS_frac_p1 (UL(0x1) << ID_AA64PFR1_RAS_frac_SHIFT)
+#define ID_AA64PFR1_MPAM_frac_SHIFT 16
+#define ID_AA64PFR1_MPAM_frac_WIDTH 4
+#define ID_AA64PFR1_MPAM_frac_MASK (UL(0xf) << ID_AA64PFR1_MPAM_frac_SHIFT)
+#define ID_AA64PFR1_MPAM_frac_VAL(x) ((x) & ID_AA64PFR1_MPAM_frac_MASK)
+#define ID_AA64PFR1_MPAM_frac_p0 (UL(0x0) << ID_AA64PFR1_MPAM_frac_SHIFT)
+#define ID_AA64PFR1_MPAM_frac_p1 (UL(0x1) << ID_AA64PFR1_MPAM_frac_SHIFT)
+#define ID_AA64PFR1_SME_SHIFT 24
+#define ID_AA64PFR1_SME_WIDTH 4
+#define ID_AA64PFR1_SME_MASK (UL(0xf) << ID_AA64PFR1_SME_SHIFT)
+#define ID_AA64PFR1_SME_VAL(x) ((x) & ID_AA64PFR1_SME_MASK)
+#define ID_AA64PFR1_SME_NONE (UL(0x0) << ID_AA64PFR1_SME_SHIFT)
+#define ID_AA64PFR1_SME_SME (UL(0x1) << ID_AA64PFR1_SME_SHIFT)
+#define ID_AA64PFR1_SME_SME2 (UL(0x2) << ID_AA64PFR1_SME_SHIFT)
+#define ID_AA64PFR1_RNDR_trap_SHIFT 28
+#define ID_AA64PFR1_RNDR_trap_WIDTH 4
+#define ID_AA64PFR1_RNDR_trap_MASK (UL(0xf) << ID_AA64PFR1_RNDR_trap_SHIFT)
+#define ID_AA64PFR1_RNDR_trap_VAL(x) ((x) & ID_AA64PFR1_RNDR_trap_MASK)
+#define ID_AA64PFR1_RNDR_trap_NONE (UL(0x0) << ID_AA64PFR1_RNDR_trap_SHIFT)
+#define ID_AA64PFR1_RNDR_trap_IMPL (UL(0x1) << ID_AA64PFR1_RNDR_trap_SHIFT)
+#define ID_AA64PFR1_CSV2_frac_SHIFT 32
+#define ID_AA64PFR1_CSV2_frac_WIDTH 4
+#define ID_AA64PFR1_CSV2_frac_MASK (UL(0xf) << ID_AA64PFR1_CSV2_frac_SHIFT)
+#define ID_AA64PFR1_CSV2_frac_VAL(x) ((x) & ID_AA64PFR1_CSV2_frac_MASK)
+#define ID_AA64PFR1_CSV2_frac_p0 (UL(0x0) << ID_AA64PFR1_CSV2_frac_SHIFT)
+#define ID_AA64PFR1_CSV2_frac_p1 (UL(0x1) << ID_AA64PFR1_CSV2_frac_SHIFT)
+#define ID_AA64PFR1_CSV2_frac_p2 (UL(0x2) << ID_AA64PFR1_CSV2_frac_SHIFT)
+#define ID_AA64PFR1_NMI_SHIFT 36
+#define ID_AA64PFR1_NMI_WIDTH 4
+#define ID_AA64PFR1_NMI_MASK (UL(0xf) << ID_AA64PFR1_NMI_SHIFT)
+#define ID_AA64PFR1_NMI_VAL(x) ((x) & ID_AA64PFR1_NMI_MASK)
+#define ID_AA64PFR1_NMI_NONE (UL(0x0) << ID_AA64PFR1_NMI_SHIFT)
+#define ID_AA64PFR1_NMI_IMPL (UL(0x1) << ID_AA64PFR1_NMI_SHIFT)
+#define ID_AA64PFR1_MTE_frac_SHIFT 40
+#define ID_AA64PFR1_MTE_frac_WIDTH 4
+#define ID_AA64PFR1_MTE_frac_MASK (UL(0xf) << ID_AA64PFR1_MTE_frac_SHIFT)
+#define ID_AA64PFR1_MTE_frac_VAL(x) ((x) & ID_AA64PFR1_MTE_frac_MASK)
+/* Note: for MTE_frac the all-zeroes value means implemented, all-ones means not */
+#define ID_AA64PFR1_MTE_frac_IMPL (UL(0x0) << ID_AA64PFR1_MTE_frac_SHIFT)
+#define ID_AA64PFR1_MTE_frac_NONE (UL(0xf) << ID_AA64PFR1_MTE_frac_SHIFT)
+#define ID_AA64PFR1_THE_SHIFT 48
+#define ID_AA64PFR1_THE_WIDTH 4
+#define ID_AA64PFR1_THE_MASK (UL(0xf) << ID_AA64PFR1_THE_SHIFT)
+#define ID_AA64PFR1_THE_VAL(x) ((x) & ID_AA64PFR1_THE_MASK)
+#define ID_AA64PFR1_THE_NONE (UL(0x0) << ID_AA64PFR1_THE_SHIFT)
+#define ID_AA64PFR1_THE_IMPL (UL(0x1) << ID_AA64PFR1_THE_SHIFT)
+#define ID_AA64PFR1_MTEX_SHIFT 52
+#define ID_AA64PFR1_MTEX_WIDTH 4
+#define ID_AA64PFR1_MTEX_MASK (UL(0xf) << ID_AA64PFR1_MTEX_SHIFT)
+#define ID_AA64PFR1_MTEX_VAL(x) ((x) & ID_AA64PFR1_MTEX_MASK)
+#define ID_AA64PFR1_MTEX_NONE (UL(0x0) << ID_AA64PFR1_MTEX_SHIFT)
+#define ID_AA64PFR1_MTEX_IMPL (UL(0x1) << ID_AA64PFR1_MTEX_SHIFT)
+#define ID_AA64PFR1_DF2_SHIFT 56
+#define ID_AA64PFR1_DF2_WIDTH 4
+#define ID_AA64PFR1_DF2_MASK (UL(0xf) << ID_AA64PFR1_DF2_SHIFT)
+#define ID_AA64PFR1_DF2_VAL(x) ((x) & ID_AA64PFR1_DF2_MASK)
+#define ID_AA64PFR1_DF2_NONE (UL(0x0) << ID_AA64PFR1_DF2_SHIFT)
+#define ID_AA64PFR1_DF2_IMPL (UL(0x1) << ID_AA64PFR1_DF2_SHIFT)
+#define ID_AA64PFR1_PFAR_SHIFT 60
+#define ID_AA64PFR1_PFAR_WIDTH 4
+#define ID_AA64PFR1_PFAR_MASK (UL(0xf) << ID_AA64PFR1_PFAR_SHIFT)
+#define ID_AA64PFR1_PFAR_VAL(x) ((x) & ID_AA64PFR1_PFAR_MASK)
+#define ID_AA64PFR1_PFAR_NONE (UL(0x0) << ID_AA64PFR1_PFAR_SHIFT)
+#define ID_AA64PFR1_PFAR_IMPL (UL(0x1) << ID_AA64PFR1_PFAR_SHIFT)
+
+/* ID_AA64PFR2_EL1 - AArch64 Processor Feature Register 2 (no fields decoded yet) */
+#define ID_AA64PFR2_EL1_REG MRS_REG_ALT_NAME(ID_AA64PFR2_EL1)
+#define ID_AA64PFR2_EL1_ISS ISS_MSR_REG(ID_AA64PFR2_EL1)
+#define ID_AA64PFR2_EL1_op0 3
+#define ID_AA64PFR2_EL1_op1 0
+#define ID_AA64PFR2_EL1_CRn 0
+#define ID_AA64PFR2_EL1_CRm 4
+#define ID_AA64PFR2_EL1_op2 2
+
+/* ID_AA64ZFR0_EL1 - SVE Feature ID Register 0 */
+#define ID_AA64ZFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64ZFR0_EL1)
+#define ID_AA64ZFR0_EL1_ISS ISS_MSR_REG(ID_AA64ZFR0_EL1)
+#define ID_AA64ZFR0_EL1_op0 3
+#define ID_AA64ZFR0_EL1_op1 0
+#define ID_AA64ZFR0_EL1_CRn 0
+#define ID_AA64ZFR0_EL1_CRm 4
+#define ID_AA64ZFR0_EL1_op2 4
+#define ID_AA64ZFR0_SVEver_SHIFT 0
+#define ID_AA64ZFR0_SVEver_WIDTH 4
+#define ID_AA64ZFR0_SVEver_MASK (UL(0xf) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_SVEver_VAL(x) ((x) & ID_AA64ZFR0_SVEver_MASK)
+#define ID_AA64ZFR0_SVEver_SVE1 (UL(0x0) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_SVEver_SVE2 (UL(0x1) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_SVEver_SVE2P1 (UL(0x2) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_AES_SHIFT 4
+#define ID_AA64ZFR0_AES_WIDTH 4
+#define ID_AA64ZFR0_AES_MASK (UL(0xf) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_AES_VAL(x) ((x) & ID_AA64ZFR0_AES_MASK)
+#define ID_AA64ZFR0_AES_NONE (UL(0x0) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_AES_BASE (UL(0x1) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_AES_PMULL (UL(0x2) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_BitPerm_SHIFT 16
+#define ID_AA64ZFR0_BitPerm_WIDTH 4
+#define ID_AA64ZFR0_BitPerm_MASK (UL(0xf) << ID_AA64ZFR0_BitPerm_SHIFT)
+#define ID_AA64ZFR0_BitPerm_VAL(x) ((x) & ID_AA64ZFR0_BitPerm_MASK)
+#define ID_AA64ZFR0_BitPerm_NONE (UL(0x0) << ID_AA64ZFR0_BitPerm_SHIFT)
+#define ID_AA64ZFR0_BitPerm_IMPL (UL(0x1) << ID_AA64ZFR0_BitPerm_SHIFT)
+#define ID_AA64ZFR0_BF16_SHIFT 20
+#define ID_AA64ZFR0_BF16_WIDTH 4
+#define ID_AA64ZFR0_BF16_MASK (UL(0xf) << ID_AA64ZFR0_BF16_SHIFT)
+#define ID_AA64ZFR0_BF16_VAL(x) ((x) & ID_AA64ZFR0_BF16_MASK)
+#define ID_AA64ZFR0_BF16_NONE (UL(0x0) << ID_AA64ZFR0_BF16_SHIFT)
+#define ID_AA64ZFR0_BF16_BASE (UL(0x1) << ID_AA64ZFR0_BF16_SHIFT)
+/* FEAT_EBF16 is field value 0b0010; 0b0001 is base FEAT_BF16 (was wrongly 0x1) */
+#define ID_AA64ZFR0_BF16_EBF (UL(0x2) << ID_AA64ZFR0_BF16_SHIFT)
+/* ID_AA64ZFR0_EL1, continued */
+#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_SHA3_WIDTH 4
+#define ID_AA64ZFR0_SHA3_MASK (UL(0xf) << ID_AA64ZFR0_SHA3_SHIFT)
+#define ID_AA64ZFR0_SHA3_VAL(x) ((x) & ID_AA64ZFR0_SHA3_MASK)
+#define ID_AA64ZFR0_SHA3_NONE (UL(0x0) << ID_AA64ZFR0_SHA3_SHIFT)
+#define ID_AA64ZFR0_SHA3_IMPL (UL(0x1) << ID_AA64ZFR0_SHA3_SHIFT)
+#define ID_AA64ZFR0_SM4_SHIFT 40
+#define ID_AA64ZFR0_SM4_WIDTH 4
+#define ID_AA64ZFR0_SM4_MASK (UL(0xf) << ID_AA64ZFR0_SM4_SHIFT)
+#define ID_AA64ZFR0_SM4_VAL(x) ((x) & ID_AA64ZFR0_SM4_MASK)
+#define ID_AA64ZFR0_SM4_NONE (UL(0x0) << ID_AA64ZFR0_SM4_SHIFT)
+#define ID_AA64ZFR0_SM4_IMPL (UL(0x1) << ID_AA64ZFR0_SM4_SHIFT)
+#define ID_AA64ZFR0_I8MM_SHIFT 44
+#define ID_AA64ZFR0_I8MM_WIDTH 4
+#define ID_AA64ZFR0_I8MM_MASK (UL(0xf) << ID_AA64ZFR0_I8MM_SHIFT)
+#define ID_AA64ZFR0_I8MM_VAL(x) ((x) & ID_AA64ZFR0_I8MM_MASK)
+#define ID_AA64ZFR0_I8MM_NONE (UL(0x0) << ID_AA64ZFR0_I8MM_SHIFT)
+#define ID_AA64ZFR0_I8MM_IMPL (UL(0x1) << ID_AA64ZFR0_I8MM_SHIFT)
+#define ID_AA64ZFR0_F32MM_SHIFT 52
+#define ID_AA64ZFR0_F32MM_WIDTH 4
+#define ID_AA64ZFR0_F32MM_MASK (UL(0xf) << ID_AA64ZFR0_F32MM_SHIFT)
+#define ID_AA64ZFR0_F32MM_VAL(x) ((x) & ID_AA64ZFR0_F32MM_MASK)
+#define ID_AA64ZFR0_F32MM_NONE (UL(0x0) << ID_AA64ZFR0_F32MM_SHIFT)
+#define ID_AA64ZFR0_F32MM_IMPL (UL(0x1) << ID_AA64ZFR0_F32MM_SHIFT)
+#define ID_AA64ZFR0_F64MM_SHIFT 56
+#define ID_AA64ZFR0_F64MM_WIDTH 4
+#define ID_AA64ZFR0_F64MM_MASK (UL(0xf) << ID_AA64ZFR0_F64MM_SHIFT)
+#define ID_AA64ZFR0_F64MM_VAL(x) ((x) & ID_AA64ZFR0_F64MM_MASK)
+#define ID_AA64ZFR0_F64MM_NONE (UL(0x0) << ID_AA64ZFR0_F64MM_SHIFT)
+#define ID_AA64ZFR0_F64MM_IMPL (UL(0x1) << ID_AA64ZFR0_F64MM_SHIFT)
+
+/* ID_ISAR5_EL1 - AArch32 Instruction Set Attribute Register 5 */
+#define ID_ISAR5_EL1_ISS ISS_MSR_REG(ID_ISAR5_EL1)
+#define ID_ISAR5_EL1_op0 0x3
+#define ID_ISAR5_EL1_op1 0x0
+#define ID_ISAR5_EL1_CRn 0x0
+#define ID_ISAR5_EL1_CRm 0x2
+#define ID_ISAR5_EL1_op2 0x5
+#define ID_ISAR5_SEVL_SHIFT 0
+#define ID_ISAR5_SEVL_WIDTH 4
+#define ID_ISAR5_SEVL_MASK (UL(0xf) << ID_ISAR5_SEVL_SHIFT)
+#define ID_ISAR5_SEVL_VAL(x) ((x) & ID_ISAR5_SEVL_MASK)
+#define ID_ISAR5_SEVL_NOP (UL(0x0) << ID_ISAR5_SEVL_SHIFT)
+#define ID_ISAR5_SEVL_IMPL (UL(0x1) << ID_ISAR5_SEVL_SHIFT)
+#define ID_ISAR5_AES_SHIFT 4
+#define ID_ISAR5_AES_WIDTH 4
+#define ID_ISAR5_AES_MASK (UL(0xf) << ID_ISAR5_AES_SHIFT)
+#define ID_ISAR5_AES_VAL(x) ((x) & ID_ISAR5_AES_MASK)
+#define ID_ISAR5_AES_NONE (UL(0x0) << ID_ISAR5_AES_SHIFT)
+#define ID_ISAR5_AES_BASE (UL(0x1) << ID_ISAR5_AES_SHIFT)
+#define ID_ISAR5_AES_VMULL (UL(0x2) << ID_ISAR5_AES_SHIFT)
+#define ID_ISAR5_SHA1_SHIFT 8
+#define ID_ISAR5_SHA1_WIDTH 4
+#define ID_ISAR5_SHA1_MASK (UL(0xf) << ID_ISAR5_SHA1_SHIFT)
+#define ID_ISAR5_SHA1_VAL(x) ((x) & ID_ISAR5_SHA1_MASK)
+#define ID_ISAR5_SHA1_NONE (UL(0x0) << ID_ISAR5_SHA1_SHIFT)
+#define ID_ISAR5_SHA1_IMPL (UL(0x1) << ID_ISAR5_SHA1_SHIFT)
+#define ID_ISAR5_SHA2_SHIFT 12
+#define ID_ISAR5_SHA2_WIDTH 4
+#define ID_ISAR5_SHA2_MASK (UL(0xf) << ID_ISAR5_SHA2_SHIFT)
+#define ID_ISAR5_SHA2_VAL(x) ((x) & ID_ISAR5_SHA2_MASK)
+#define ID_ISAR5_SHA2_NONE (UL(0x0) << ID_ISAR5_SHA2_SHIFT)
+#define ID_ISAR5_SHA2_IMPL (UL(0x1) << ID_ISAR5_SHA2_SHIFT)
+#define ID_ISAR5_CRC32_SHIFT 16
+#define ID_ISAR5_CRC32_WIDTH 4
+#define ID_ISAR5_CRC32_MASK (UL(0xf) << ID_ISAR5_CRC32_SHIFT)
+#define ID_ISAR5_CRC32_VAL(x) ((x) & ID_ISAR5_CRC32_MASK)
+#define ID_ISAR5_CRC32_NONE (UL(0x0) << ID_ISAR5_CRC32_SHIFT)
+#define ID_ISAR5_CRC32_IMPL (UL(0x1) << ID_ISAR5_CRC32_SHIFT)
+#define ID_ISAR5_RDM_SHIFT 24
+#define ID_ISAR5_RDM_WIDTH 4
+#define ID_ISAR5_RDM_MASK (UL(0xf) << ID_ISAR5_RDM_SHIFT)
+#define ID_ISAR5_RDM_VAL(x) ((x) & ID_ISAR5_RDM_MASK)
+#define ID_ISAR5_RDM_NONE (UL(0x0) << ID_ISAR5_RDM_SHIFT)
+#define ID_ISAR5_RDM_IMPL (UL(0x1) << ID_ISAR5_RDM_SHIFT)
+#define ID_ISAR5_VCMA_SHIFT 28
+#define ID_ISAR5_VCMA_WIDTH 4
+#define ID_ISAR5_VCMA_MASK (UL(0xf) << ID_ISAR5_VCMA_SHIFT)
+#define ID_ISAR5_VCMA_VAL(x) ((x) & ID_ISAR5_VCMA_MASK)
+#define ID_ISAR5_VCMA_NONE (UL(0x0) << ID_ISAR5_VCMA_SHIFT)
+#define ID_ISAR5_VCMA_IMPL (UL(0x1) << ID_ISAR5_VCMA_SHIFT)
+
+/* MAIR_EL1 - Memory Attribute Indirection Register */
+#define MAIR_EL1_REG MRS_REG_ALT_NAME(MAIR_EL1)
+#define MAIR_EL1_op0 3
+#define MAIR_EL1_op1 0
+#define MAIR_EL1_CRn 10
+#define MAIR_EL1_CRm 2
+#define MAIR_EL1_op2 0
+/*
+ * MAIR holds eight 8-bit attribute fields, one per byte; 'idx' selects the
+ * field. Bug fix: the mask macro referenced 'n' instead of its parameter
+ * 'idx', so it only compiled (with the wrong value) when an unrelated 'n'
+ * happened to be in scope.
+ */
+#define MAIR_ATTR_MASK(idx) (UL(0xff) << ((idx) * 8))
+#define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
+#define MAIR_DEVICE_nGnRnE UL(0x00)
+#define MAIR_DEVICE_nGnRE UL(0x04)
+#define MAIR_NORMAL_NC UL(0x44)
+#define MAIR_NORMAL_WT UL(0xbb)
+#define MAIR_NORMAL_WB UL(0xff)
+
+/* MAIR_EL12 - alias of MAIR_EL1 accessible from EL2 (VHE register redirection) */
+#define MAIR_EL12_REG MRS_REG_ALT_NAME(MAIR_EL12)
+#define MAIR_EL12_op0 3
+#define MAIR_EL12_op1 5
+#define MAIR_EL12_CRn 10
+#define MAIR_EL12_CRm 2
+#define MAIR_EL12_op2 0
+
+/* MDCCINT_EL1 - Monitor DCC Interrupt Enable Register */
+#define MDCCINT_EL1_op0 2
+#define MDCCINT_EL1_op1 0
+#define MDCCINT_EL1_CRn 0
+#define MDCCINT_EL1_CRm 2
+#define MDCCINT_EL1_op2 0
+
+/* MDCCSR_EL0 - Monitor DCC Status Register */
+#define MDCCSR_EL0_op0 2
+#define MDCCSR_EL0_op1 3
+#define MDCCSR_EL0_CRn 0
+#define MDCCSR_EL0_CRm 1
+#define MDCCSR_EL0_op2 0
+
+/* MDSCR_EL1 - Monitor Debug System Control Register */
+#define MDSCR_EL1_op0 2
+#define MDSCR_EL1_op1 0
+#define MDSCR_EL1_CRn 0
+#define MDSCR_EL1_CRm 2
+#define MDSCR_EL1_op2 2
+#define MDSCR_SS_SHIFT 0 /* Software step enable */
+#define MDSCR_SS (UL(0x1) << MDSCR_SS_SHIFT)
+#define MDSCR_KDE_SHIFT 13 /* Kernel (local EL1) debug enable */
+#define MDSCR_KDE (UL(0x1) << MDSCR_KDE_SHIFT)
+#define MDSCR_MDE_SHIFT 15 /* Monitor debug events (breakpoint/watchpoint) enable */
+#define MDSCR_MDE (UL(0x1) << MDSCR_MDE_SHIFT)
+
+/* MIDR_EL1 - Main ID Register */
+#define MIDR_EL1_op0 3
+#define MIDR_EL1_op1 0
+#define MIDR_EL1_CRn 0
+#define MIDR_EL1_CRm 0
+#define MIDR_EL1_op2 0
+
+/* MPIDR_EL1 - Multiprocessor Affinity Register */
+#define MPIDR_EL1_op0 3
+#define MPIDR_EL1_op1 0
+#define MPIDR_EL1_CRn 0
+#define MPIDR_EL1_CRm 0
+#define MPIDR_EL1_op2 5
+#define MPIDR_AFF0_SHIFT 0
+#define MPIDR_AFF0_MASK (UL(0xff) << MPIDR_AFF0_SHIFT)
+#define MPIDR_AFF0_VAL(x) ((x) & MPIDR_AFF0_MASK)
+#define MPIDR_AFF1_SHIFT 8
+#define MPIDR_AFF1_MASK (UL(0xff) << MPIDR_AFF1_SHIFT)
+#define MPIDR_AFF1_VAL(x) ((x) & MPIDR_AFF1_MASK)
+#define MPIDR_AFF2_SHIFT 16
+#define MPIDR_AFF2_MASK (UL(0xff) << MPIDR_AFF2_SHIFT)
+#define MPIDR_AFF2_VAL(x) ((x) & MPIDR_AFF2_MASK)
+#define MPIDR_MT_SHIFT 24 /* MT: lowest affinity level is multithreaded */
+#define MPIDR_MT_MASK (UL(0x1) << MPIDR_MT_SHIFT)
+#define MPIDR_U_SHIFT 30 /* U: uniprocessor system */
+#define MPIDR_U_MASK (UL(0x1) << MPIDR_U_SHIFT)
+#define MPIDR_AFF3_SHIFT 32
+#define MPIDR_AFF3_MASK (UL(0xff) << MPIDR_AFF3_SHIFT)
+#define MPIDR_AFF3_VAL(x) ((x) & MPIDR_AFF3_MASK)
+
+/* MVFR0_EL1 - AArch32 Media and VFP Feature Register 0 */
+#define MVFR0_EL1_ISS ISS_MSR_REG(MVFR0_EL1)
+#define MVFR0_EL1_op0 0x3
+#define MVFR0_EL1_op1 0x0
+#define MVFR0_EL1_CRn 0x0
+#define MVFR0_EL1_CRm 0x3
+#define MVFR0_EL1_op2 0x0
+#define MVFR0_SIMDReg_SHIFT 0
+#define MVFR0_SIMDReg_WIDTH 4
+#define MVFR0_SIMDReg_MASK (UL(0xf) << MVFR0_SIMDReg_SHIFT)
+#define MVFR0_SIMDReg_VAL(x) ((x) & MVFR0_SIMDReg_MASK)
+#define MVFR0_SIMDReg_NONE (UL(0x0) << MVFR0_SIMDReg_SHIFT)
+#define MVFR0_SIMDReg_FP (UL(0x1) << MVFR0_SIMDReg_SHIFT)
+#define MVFR0_SIMDReg_AdvSIMD (UL(0x2) << MVFR0_SIMDReg_SHIFT)
+#define MVFR0_FPSP_SHIFT 4
+#define MVFR0_FPSP_WIDTH 4
+#define MVFR0_FPSP_MASK (UL(0xf) << MVFR0_FPSP_SHIFT)
+#define MVFR0_FPSP_VAL(x) ((x) & MVFR0_FPSP_MASK)
+#define MVFR0_FPSP_NONE (UL(0x0) << MVFR0_FPSP_SHIFT)
+#define MVFR0_FPSP_VFP_v2 (UL(0x1) << MVFR0_FPSP_SHIFT)
+#define MVFR0_FPSP_VFP_v3_v4 (UL(0x2) << MVFR0_FPSP_SHIFT)
+#define MVFR0_FPDP_SHIFT 8
+#define MVFR0_FPDP_WIDTH 4
+#define MVFR0_FPDP_MASK (UL(0xf) << MVFR0_FPDP_SHIFT)
+#define MVFR0_FPDP_VAL(x) ((x) & MVFR0_FPDP_MASK)
+#define MVFR0_FPDP_NONE (UL(0x0) << MVFR0_FPDP_SHIFT)
+#define MVFR0_FPDP_VFP_v2 (UL(0x1) << MVFR0_FPDP_SHIFT)
+#define MVFR0_FPDP_VFP_v3_v4 (UL(0x2) << MVFR0_FPDP_SHIFT)
+#define MVFR0_FPTrap_SHIFT 12
+#define MVFR0_FPTrap_WIDTH 4
+#define MVFR0_FPTrap_MASK (UL(0xf) << MVFR0_FPTrap_SHIFT)
+#define MVFR0_FPTrap_VAL(x) ((x) & MVFR0_FPTrap_MASK)
+#define MVFR0_FPTrap_NONE (UL(0x0) << MVFR0_FPTrap_SHIFT)
+#define MVFR0_FPTrap_IMPL (UL(0x1) << MVFR0_FPTrap_SHIFT)
+#define MVFR0_FPDivide_SHIFT 16
+#define MVFR0_FPDivide_WIDTH 4
+#define MVFR0_FPDivide_MASK (UL(0xf) << MVFR0_FPDivide_SHIFT)
+#define MVFR0_FPDivide_VAL(x) ((x) & MVFR0_FPDivide_MASK)
+#define MVFR0_FPDivide_NONE (UL(0x0) << MVFR0_FPDivide_SHIFT)
+#define MVFR0_FPDivide_IMPL (UL(0x1) << MVFR0_FPDivide_SHIFT)
+#define MVFR0_FPSqrt_SHIFT 20
+#define MVFR0_FPSqrt_WIDTH 4
+#define MVFR0_FPSqrt_MASK (UL(0xf) << MVFR0_FPSqrt_SHIFT)
+#define MVFR0_FPSqrt_VAL(x) ((x) & MVFR0_FPSqrt_MASK)
+#define MVFR0_FPSqrt_NONE (UL(0x0) << MVFR0_FPSqrt_SHIFT)
+#define MVFR0_FPSqrt_IMPL (UL(0x1) << MVFR0_FPSqrt_SHIFT)
+#define MVFR0_FPShVec_SHIFT 24
+#define MVFR0_FPShVec_WIDTH 4
+#define MVFR0_FPShVec_MASK (UL(0xf) << MVFR0_FPShVec_SHIFT)
+#define MVFR0_FPShVec_VAL(x) ((x) & MVFR0_FPShVec_MASK)
+#define MVFR0_FPShVec_NONE (UL(0x0) << MVFR0_FPShVec_SHIFT)
+#define MVFR0_FPShVec_IMPL (UL(0x1) << MVFR0_FPShVec_SHIFT)
+#define MVFR0_FPRound_SHIFT 28
+#define MVFR0_FPRound_WIDTH 4
+#define MVFR0_FPRound_MASK (UL(0xf) << MVFR0_FPRound_SHIFT)
+#define MVFR0_FPRound_VAL(x) ((x) & MVFR0_FPRound_MASK)
+#define MVFR0_FPRound_NONE (UL(0x0) << MVFR0_FPRound_SHIFT)
+#define MVFR0_FPRound_IMPL (UL(0x1) << MVFR0_FPRound_SHIFT)
+
+/* MVFR1_EL1 */
+#define MVFR1_EL1_ISS ISS_MSR_REG(MVFR1_EL1)
+#define MVFR1_EL1_op0 0x3
+#define MVFR1_EL1_op1 0x0
+#define MVFR1_EL1_CRn 0x0
+#define MVFR1_EL1_CRm 0x3
+#define MVFR1_EL1_op2 0x1
+#define MVFR1_FPFtZ_SHIFT 0
+#define MVFR1_FPFtZ_WIDTH 4
+#define MVFR1_FPFtZ_MASK (UL(0xf) << MVFR1_FPFtZ_SHIFT)
+#define MVFR1_FPFtZ_VAL(x) ((x) & MVFR1_FPFtZ_MASK)
+#define MVFR1_FPFtZ_NONE (UL(0x0) << MVFR1_FPFtZ_SHIFT)
+#define MVFR1_FPFtZ_IMPL (UL(0x1) << MVFR1_FPFtZ_SHIFT)
+#define MVFR1_FPDNaN_SHIFT 4
+#define MVFR1_FPDNaN_WIDTH 4
+#define MVFR1_FPDNaN_MASK (UL(0xf) << MVFR1_FPDNaN_SHIFT)
+#define MVFR1_FPDNaN_VAL(x) ((x) & MVFR1_FPDNaN_MASK)
+#define MVFR1_FPDNaN_NONE (UL(0x0) << MVFR1_FPDNaN_SHIFT)
+#define MVFR1_FPDNaN_IMPL (UL(0x1) << MVFR1_FPDNaN_SHIFT)
+#define MVFR1_SIMDLS_SHIFT 8
+#define MVFR1_SIMDLS_WIDTH 4
+#define MVFR1_SIMDLS_MASK (UL(0xf) << MVFR1_SIMDLS_SHIFT)
+#define MVFR1_SIMDLS_VAL(x) ((x) & MVFR1_SIMDLS_MASK)
+#define MVFR1_SIMDLS_NONE (UL(0x0) << MVFR1_SIMDLS_SHIFT)
+#define MVFR1_SIMDLS_IMPL (UL(0x1) << MVFR1_SIMDLS_SHIFT)
+#define MVFR1_SIMDInt_SHIFT 12
+#define MVFR1_SIMDInt_WIDTH 4
+#define MVFR1_SIMDInt_MASK (UL(0xf) << MVFR1_SIMDInt_SHIFT)
+#define MVFR1_SIMDInt_VAL(x) ((x) & MVFR1_SIMDInt_MASK)
+#define MVFR1_SIMDInt_NONE (UL(0x0) << MVFR1_SIMDInt_SHIFT)
+#define MVFR1_SIMDInt_IMPL (UL(0x1) << MVFR1_SIMDInt_SHIFT)
+#define MVFR1_SIMDSP_SHIFT 16
+#define MVFR1_SIMDSP_WIDTH 4
+#define MVFR1_SIMDSP_MASK (UL(0xf) << MVFR1_SIMDSP_SHIFT)
+#define MVFR1_SIMDSP_VAL(x) ((x) & MVFR1_SIMDSP_MASK)
+#define MVFR1_SIMDSP_NONE (UL(0x0) << MVFR1_SIMDSP_SHIFT)
+#define MVFR1_SIMDSP_IMPL (UL(0x1) << MVFR1_SIMDSP_SHIFT)
+#define MVFR1_SIMDHP_SHIFT 20
+#define MVFR1_SIMDHP_WIDTH 4
+#define MVFR1_SIMDHP_MASK (UL(0xf) << MVFR1_SIMDHP_SHIFT)
+#define MVFR1_SIMDHP_VAL(x) ((x) & MVFR1_SIMDHP_MASK)
+#define MVFR1_SIMDHP_NONE (UL(0x0) << MVFR1_SIMDHP_SHIFT)
+#define MVFR1_SIMDHP_CONV_SP (UL(0x1) << MVFR1_SIMDHP_SHIFT)
+#define MVFR1_SIMDHP_ARITH (UL(0x2) << MVFR1_SIMDHP_SHIFT)
+#define MVFR1_FPHP_SHIFT 24
+#define MVFR1_FPHP_WIDTH 4
+#define MVFR1_FPHP_MASK (UL(0xf) << MVFR1_FPHP_SHIFT)
+#define MVFR1_FPHP_VAL(x) ((x) & MVFR1_FPHP_MASK)
+#define MVFR1_FPHP_NONE (UL(0x0) << MVFR1_FPHP_SHIFT)
+#define MVFR1_FPHP_CONV_SP (UL(0x1) << MVFR1_FPHP_SHIFT)
+#define MVFR1_FPHP_CONV_DP (UL(0x2) << MVFR1_FPHP_SHIFT)
+#define MVFR1_FPHP_ARITH (UL(0x3) << MVFR1_FPHP_SHIFT)
+#define MVFR1_SIMDFMAC_SHIFT 28
+#define MVFR1_SIMDFMAC_WIDTH 4
+#define MVFR1_SIMDFMAC_MASK (UL(0xf) << MVFR1_SIMDFMAC_SHIFT)
+#define MVFR1_SIMDFMAC_VAL(x) ((x) & MVFR1_SIMDFMAC_MASK)
+#define MVFR1_SIMDFMAC_NONE (UL(0x0) << MVFR1_SIMDFMAC_SHIFT)
+#define MVFR1_SIMDFMAC_IMPL (UL(0x1) << MVFR1_SIMDFMAC_SHIFT)
+
+/* OSDLR_EL1 */
+#define OSDLR_EL1_op0 2
+#define OSDLR_EL1_op1 0
+#define OSDLR_EL1_CRn 1
+#define OSDLR_EL1_CRm 3
+#define OSDLR_EL1_op2 4
+
+/* OSLAR_EL1 */
+#define OSLAR_EL1_op0 2
+#define OSLAR_EL1_op1 0
+#define OSLAR_EL1_CRn 1
+#define OSLAR_EL1_CRm 0
+#define OSLAR_EL1_op2 4
+
+/* OSLSR_EL1 */
+#define OSLSR_EL1_op0 2
+#define OSLSR_EL1_op1 0
+#define OSLSR_EL1_CRn 1
+#define OSLSR_EL1_CRm 1
+#define OSLSR_EL1_op2 4
+
+/* PAR_EL1 - Physical Address Register */
+#define PAR_F_SHIFT 0
+#define PAR_F (0x1 << PAR_F_SHIFT)
+#define PAR_SUCCESS(x) (((x) & PAR_F) == 0)
+/* When PAR_F == 0 (success) */
+#define PAR_LOW_MASK 0xfff
+#define PAR_SH_SHIFT 7
+#define PAR_SH_MASK (0x3 << PAR_SH_SHIFT)
+#define PAR_NS_SHIFT 9
+#define PAR_NS_MASK (0x3 << PAR_NS_SHIFT)
+#define PAR_PA_SHIFT 12
+#define PAR_PA_MASK 0x000ffffffffff000
+#define PAR_ATTR_SHIFT 56
+#define PAR_ATTR_MASK (0xfful << PAR_ATTR_SHIFT)
+/* When PAR_F == 1 (aborted) */
+#define PAR_FST_SHIFT 1
+#define PAR_FST_MASK (0x3f << PAR_FST_SHIFT)
+#define PAR_PTW_SHIFT 8
+#define PAR_PTW_MASK (0x1 << PAR_PTW_SHIFT)
+#define PAR_S_SHIFT 9
+#define PAR_S_MASK (0x1 << PAR_S_SHIFT)
+
+/* PMBIDR_EL1 */
+#define PMBIDR_EL1_REG MRS_REG_ALT_NAME(PMBIDR_EL1)
+#define PMBIDR_EL1_op0 3
+#define PMBIDR_EL1_op1 0
+#define PMBIDR_EL1_CRn 9
+#define PMBIDR_EL1_CRm 10
+#define PMBIDR_EL1_op2 7
+#define PMBIDR_Align_SHIFT 0
+#define PMBIDR_Align_MASK (UL(0xf) << PMBIDR_Align_SHIFT)
+#define PMBIDR_P_SHIFT 4
+#define PMBIDR_P (UL(0x1) << PMBIDR_P_SHIFT)
+#define PMBIDR_F_SHIFT 5
+#define PMBIDR_F (UL(0x1) << PMBIDR_F_SHIFT)
+
+/* PMBLIMITR_EL1 */
+#define PMBLIMITR_EL1_REG MRS_REG_ALT_NAME(PMBLIMITR_EL1)
+#define PMBLIMITR_EL1_op0 3
+#define PMBLIMITR_EL1_op1 0
+#define PMBLIMITR_EL1_CRn 9
+#define PMBLIMITR_EL1_CRm 10
+#define PMBLIMITR_EL1_op2 0
+#define PMBLIMITR_E_SHIFT 0
+#define PMBLIMITR_E (UL(0x1) << PMBLIMITR_E_SHIFT)
+#define PMBLIMITR_FM_SHIFT 1
+#define PMBLIMITR_FM_MASK (UL(0x3) << PMBLIMITR_FM_SHIFT)
+#define PMBLIMITR_PMFZ_SHIFT 5
+#define PMBLIMITR_PMFZ (UL(0x1) << PMBLIMITR_PMFZ_SHIFT)
+#define PMBLIMITR_LIMIT_SHIFT 12
+#define PMBLIMITR_LIMIT_MASK \
+ (UL(0xfffffffffffff) << PMBLIMITR_LIMIT_SHIFT)
+
+/* PMBPTR_EL1 */
+#define PMBPTR_EL1_REG MRS_REG_ALT_NAME(PMBPTR_EL1)
+#define PMBPTR_EL1_op0 3
+#define PMBPTR_EL1_op1 0
+#define PMBPTR_EL1_CRn 9
+#define PMBPTR_EL1_CRm 10
+#define PMBPTR_EL1_op2 1
+#define PMBPTR_PTR_SHIFT 0
+#define PMBPTR_PTR_MASK \
+ (UL(0xffffffffffffffff) << PMBPTR_PTR_SHIFT)
+
+/* PMBSR_EL1 */
+#define PMBSR_EL1_REG MRS_REG_ALT_NAME(PMBSR_EL1)
+#define PMBSR_EL1_op0 3
+#define PMBSR_EL1_op1 0
+#define PMBSR_EL1_CRn 9
+#define PMBSR_EL1_CRm 10
+#define PMBSR_EL1_op2 3
+#define PMBSR_MSS_SHIFT 0
+#define PMBSR_MSS_MASK (UL(0xffff) << PMBSR_MSS_SHIFT)
+#define PMBSR_MSS_BSC_MASK (UL(0x3f) << PMBSR_MSS_SHIFT)
+#define PMBSR_MSS_FSC_MASK (UL(0x3f) << PMBSR_MSS_SHIFT)
+#define PMBSR_COLL_SHIFT 16
+#define PMBSR_COLL (UL(0x1) << PMBSR_COLL_SHIFT)
+#define PMBSR_S_SHIFT 17
+#define PMBSR_S (UL(0x1) << PMBSR_S_SHIFT)
+#define PMBSR_EA_SHIFT 18
+#define PMBSR_EA (UL(0x1) << PMBSR_EA_SHIFT)
+#define PMBSR_DL_SHIFT 19
+#define PMBSR_DL (UL(0x1) << PMBSR_DL_SHIFT)
+#define PMBSR_EC_SHIFT 26
+#define PMBSR_EC_MASK (UL(0x3f) << PMBSR_EC_SHIFT)
+
+/* PMCCFILTR_EL0 */
+#define PMCCFILTR_EL0_op0 3
+#define PMCCFILTR_EL0_op1 3
+#define PMCCFILTR_EL0_CRn 14
+#define PMCCFILTR_EL0_CRm 15
+#define PMCCFILTR_EL0_op2 7
+
+/* PMCCNTR_EL0 */
+#define PMCCNTR_EL0_op0 3
+#define PMCCNTR_EL0_op1 3
+#define PMCCNTR_EL0_CRn 9
+#define PMCCNTR_EL0_CRm 13
+#define PMCCNTR_EL0_op2 0
+
+/* PMCEID0_EL0 */
+#define PMCEID0_EL0_op0 3
+#define PMCEID0_EL0_op1 3
+#define PMCEID0_EL0_CRn 9
+#define PMCEID0_EL0_CRm 12
+#define PMCEID0_EL0_op2 6
+
+/* PMCEID1_EL0 */
+#define PMCEID1_EL0_op0 3
+#define PMCEID1_EL0_op1 3
+#define PMCEID1_EL0_CRn 9
+#define PMCEID1_EL0_CRm 12
+#define PMCEID1_EL0_op2 7
+
+/* PMCNTENCLR_EL0 */
+#define PMCNTENCLR_EL0_op0 3
+#define PMCNTENCLR_EL0_op1 3
+#define PMCNTENCLR_EL0_CRn 9
+#define PMCNTENCLR_EL0_CRm 12
+#define PMCNTENCLR_EL0_op2 2
+
+/* PMCNTENSET_EL0 */
+#define PMCNTENSET_EL0_op0 3
+#define PMCNTENSET_EL0_op1 3
+#define PMCNTENSET_EL0_CRn 9
+#define PMCNTENSET_EL0_CRm 12
+#define PMCNTENSET_EL0_op2 1
+
+/* PMCR_EL0 - Performance Monitoring Counters */
+#define PMCR_EL0_op0 3
+#define PMCR_EL0_op1 3
+#define PMCR_EL0_CRn 9
+#define PMCR_EL0_CRm 12
+#define PMCR_EL0_op2 0
+#define PMCR_E (1ul << 0) /* Enable all counters */
+#define PMCR_P (1ul << 1) /* Reset all counters */
+#define PMCR_C (1ul << 2) /* Clock counter reset */
+#define PMCR_D (1ul << 3) /* CNTR counts every 64 clk cycles */
+#define PMCR_X (1ul << 4) /* Export to ext. monitoring (ETM) */
+#define PMCR_DP (1ul << 5) /* Disable CCNT if non-invasive debug*/
+#define PMCR_LC (1ul << 6) /* Long cycle count enable */
+#define PMCR_LP (1ul << 7) /* Long event count enable */
+#define PMCR_FZO (1ul << 9) /* Freeze-on-overflow */
+#define PMCR_N_SHIFT 11 /* Number of counters implemented */
+#define PMCR_N_MASK (0x1ful << PMCR_N_SHIFT)
+#define PMCR_IDCODE_SHIFT 16 /* Identification code */
+#define PMCR_IDCODE_MASK (0xfful << PMCR_IDCODE_SHIFT)
+#define PMCR_IDCODE_CORTEX_A57 0x01
+#define PMCR_IDCODE_CORTEX_A72 0x02
+#define PMCR_IDCODE_CORTEX_A53 0x03
+#define PMCR_IDCODE_CORTEX_A73 0x04
+#define PMCR_IDCODE_CORTEX_A35 0x0a
+#define PMCR_IDCODE_CORTEX_A76 0x0b
+#define PMCR_IDCODE_NEOVERSE_N1 0x0c
+#define PMCR_IDCODE_CORTEX_A77 0x10
+#define PMCR_IDCODE_CORTEX_A55 0x45
+#define PMCR_IDCODE_NEOVERSE_E1 0x46
+#define PMCR_IDCODE_CORTEX_A75 0x4a
+#define PMCR_IMP_SHIFT 24 /* Implementer code */
+#define PMCR_IMP_MASK (0xfful << PMCR_IMP_SHIFT)
+#define PMCR_IMP_ARM 0x41
+#define PMCR_FZS (1ul << 32) /* Freeze-on-SPE event */
+
+/* PMEVCNTR<n>_EL0 */
+#define PMEVCNTR_EL0_op0 3
+#define PMEVCNTR_EL0_op1 3
+#define PMEVCNTR_EL0_CRn 14
+#define PMEVCNTR_EL0_CRm 8
+/*
+ * PMEVCNTRn_EL0_CRm[1:0] holds the upper 2 bits of 'n'
+ * PMEVCNTRn_EL0_op2 holds the lower 3 bits of 'n'
+ */
+
+/* PMEVTYPER<n>_EL0 - Performance Monitoring Event Type */
+#define PMEVTYPER_EL0_op0 3
+#define PMEVTYPER_EL0_op1 3
+#define PMEVTYPER_EL0_CRn 14
+#define PMEVTYPER_EL0_CRm 12
+/*
+ * PMEVTYPERn_EL0_CRm[1:0] holds the upper 2 bits of 'n'
+ * PMEVTYPERn_EL0_op2 holds the lower 3 bits of 'n'
+ */
+#define PMEVTYPER_EVTCOUNT_MASK 0x000003ff /* ARMv8.0 */
+#define PMEVTYPER_EVTCOUNT_8_1_MASK 0x0000ffff /* ARMv8.1+ */
+#define PMEVTYPER_MT (1 << 25) /* Multithreading */
+#define PMEVTYPER_M (1 << 26) /* Secure EL3 filtering */
+#define PMEVTYPER_NSH (1 << 27) /* Non-secure hypervisor filtering */
+#define PMEVTYPER_NSU (1 << 28) /* Non-secure user filtering */
+#define PMEVTYPER_NSK (1 << 29) /* Non-secure kernel filtering */
+#define PMEVTYPER_U (1 << 30) /* User filtering */
+#define PMEVTYPER_P (1 << 31) /* Privileged filtering */
+
+/* PMINTENCLR_EL1 */
+#define PMINTENCLR_EL1_op0 3
+#define PMINTENCLR_EL1_op1 0
+#define PMINTENCLR_EL1_CRn 9
+#define PMINTENCLR_EL1_CRm 14
+#define PMINTENCLR_EL1_op2 2
+
+/* PMINTENSET_EL1 */
+#define PMINTENSET_EL1_op0 3
+#define PMINTENSET_EL1_op1 0
+#define PMINTENSET_EL1_CRn 9
+#define PMINTENSET_EL1_CRm 14
+#define PMINTENSET_EL1_op2 1
+
+/* PMMIR_EL1 */
+#define PMMIR_EL1_op0 3
+#define PMMIR_EL1_op1 0
+#define PMMIR_EL1_CRn 9
+#define PMMIR_EL1_CRm 14
+#define PMMIR_EL1_op2 6
+
+/* PMOVSCLR_EL0 */
+#define PMOVSCLR_EL0_op0 3
+#define PMOVSCLR_EL0_op1 3
+#define PMOVSCLR_EL0_CRn 9
+#define PMOVSCLR_EL0_CRm 12
+#define PMOVSCLR_EL0_op2 3
+
+/* PMOVSSET_EL0 */
+#define PMOVSSET_EL0_op0 3
+#define PMOVSSET_EL0_op1 3
+#define PMOVSSET_EL0_CRn 9
+#define PMOVSSET_EL0_CRm 14
+#define PMOVSSET_EL0_op2 3
+
+/* PMSCR_EL1 */
+#define PMSCR_EL1_REG MRS_REG_ALT_NAME(PMSCR_EL1)
+#define PMSCR_EL1_op0 3
+#define PMSCR_EL1_op1 0
+#define PMSCR_EL1_CRn 9
+#define PMSCR_EL1_CRm 9
+#define PMSCR_EL1_op2 0
+#define PMSCR_E0SPE_SHIFT 0
+#define PMSCR_E0SPE (UL(0x1) << PMSCR_E0SPE_SHIFT)
+#define PMSCR_E1SPE_SHIFT 1
+#define PMSCR_E1SPE (UL(0x1) << PMSCR_E1SPE_SHIFT)
+#define PMSCR_CX_SHIFT 3
+#define PMSCR_CX (UL(0x1) << PMSCR_CX_SHIFT)
+#define PMSCR_PA_SHIFT 4
+#define PMSCR_PA (UL(0x1) << PMSCR_PA_SHIFT)
+#define PMSCR_TS_SHIFT 5
+#define PMSCR_TS (UL(0x1) << PMSCR_TS_SHIFT)
+#define PMSCR_PCT_SHIFT 6
+#define PMSCR_PCT_MASK (UL(0x3) << PMSCR_PCT_SHIFT)
+
+/* PMSELR_EL0 */
+#define PMSELR_EL0_op0 3
+#define PMSELR_EL0_op1 3
+#define PMSELR_EL0_CRn 9
+#define PMSELR_EL0_CRm 12
+#define PMSELR_EL0_op2 5
+#define PMSELR_SEL_MASK 0x1f
+
+/* PMSEVFR_EL1 */
+#define PMSEVFR_EL1_REG MRS_REG_ALT_NAME(PMSEVFR_EL1)
+#define PMSEVFR_EL1_op0 3
+#define PMSEVFR_EL1_op1 0
+#define PMSEVFR_EL1_CRn 9
+#define PMSEVFR_EL1_CRm 9
+#define PMSEVFR_EL1_op2 5
+
+/* PMSFCR_EL1 */
+#define PMSFCR_EL1_REG MRS_REG_ALT_NAME(PMSFCR_EL1)
+#define PMSFCR_EL1_op0 3
+#define PMSFCR_EL1_op1 0
+#define PMSFCR_EL1_CRn 9
+#define PMSFCR_EL1_CRm 9
+#define PMSFCR_EL1_op2 4
+#define PMSFCR_FE_SHIFT 0
+#define PMSFCR_FE (UL(0x1) << PMSFCR_FE_SHIFT)
+#define PMSFCR_FT_SHIFT 1
+#define PMSFCR_FT (UL(0x1) << PMSFCR_FT_SHIFT)
+#define PMSFCR_FL_SHIFT 2
+#define PMSFCR_FL (UL(0x1) << PMSFCR_FL_SHIFT)
+#define PMSFCR_FnE_SHIFT 3
+#define PMSFCR_FnE (UL(0x1) << PMSFCR_FnE_SHIFT)
+#define PMSFCR_B_SHIFT 16
+#define PMSFCR_B (UL(0x1) << PMSFCR_B_SHIFT)
+#define PMSFCR_LD_SHIFT 17
+#define PMSFCR_LD (UL(0x1) << PMSFCR_LD_SHIFT)
+#define PMSFCR_ST_SHIFT 18
+#define PMSFCR_ST (UL(0x1) << PMSFCR_ST_SHIFT)
+
+/* PMSICR_EL1 */
+#define PMSICR_EL1_REG MRS_REG_ALT_NAME(PMSICR_EL1)
+#define PMSICR_EL1_op0 3
+#define PMSICR_EL1_op1 0
+#define PMSICR_EL1_CRn 9
+#define PMSICR_EL1_CRm 9
+#define PMSICR_EL1_op2 2
+#define PMSICR_COUNT_SHIFT 0
+#define PMSICR_COUNT_MASK (UL(0xffffffff) << PMSICR_COUNT_SHIFT)
+#define PMSICR_ECOUNT_SHIFT 56
+#define PMSICR_ECOUNT_MASK (UL(0xff) << PMSICR_ECOUNT_SHIFT)
+
+/* PMSIDR_EL1 */
+#define PMSIDR_EL1_REG MRS_REG_ALT_NAME(PMSIDR_EL1)
+#define PMSIDR_EL1_op0 3
+#define PMSIDR_EL1_op1 0
+#define PMSIDR_EL1_CRn 9
+#define PMSIDR_EL1_CRm 9
+#define PMSIDR_EL1_op2 7
+#define PMSIDR_FE_SHIFT 0
+#define PMSIDR_FE (UL(0x1) << PMSIDR_FE_SHIFT)
+#define PMSIDR_FT_SHIFT 1
+#define PMSIDR_FT (UL(0x1) << PMSIDR_FT_SHIFT)
+#define PMSIDR_FL_SHIFT 2
+#define PMSIDR_FL (UL(0x1) << PMSIDR_FL_SHIFT)
+#define PMSIDR_ArchInst_SHIFT 3
+#define PMSIDR_ArchInst (UL(0x1) << PMSIDR_ArchInst_SHIFT)
+#define PMSIDR_LDS_SHIFT 4
+#define PMSIDR_LDS (UL(0x1) << PMSIDR_LDS_SHIFT)
+#define PMSIDR_ERnd_SHIFT 5
+#define PMSIDR_ERnd (UL(0x1) << PMSIDR_ERnd_SHIFT)
+#define PMSIDR_FnE_SHIFT 6
+#define PMSIDR_FnE (UL(0x1) << PMSIDR_FnE_SHIFT)
+#define PMSIDR_Interval_SHIFT 8
+#define PMSIDR_Interval_MASK (UL(0xf) << PMSIDR_Interval_SHIFT)
+#define PMSIDR_MaxSize_SHIFT 12
+#define PMSIDR_MaxSize_MASK (UL(0xf) << PMSIDR_MaxSize_SHIFT)
+#define PMSIDR_CountSize_SHIFT 16
+#define PMSIDR_CountSize_MASK (UL(0xf) << PMSIDR_CountSize_SHIFT)
+#define PMSIDR_Format_SHIFT 20
+#define PMSIDR_Format_MASK (UL(0xf) << PMSIDR_Format_SHIFT)
+#define PMSIDR_PBT_SHIFT 24
+#define PMSIDR_PBT (UL(0x1) << PMSIDR_PBT_SHIFT)
+
+/* PMSIRR_EL1 */
+#define PMSIRR_EL1_REG MRS_REG_ALT_NAME(PMSIRR_EL1)
+#define PMSIRR_EL1_op0 3
+#define PMSIRR_EL1_op1 0
+#define PMSIRR_EL1_CRn 9
+#define PMSIRR_EL1_CRm 9
+#define PMSIRR_EL1_op2 3
+#define PMSIRR_RND_SHIFT 0
+#define PMSIRR_RND (UL(0x1) << PMSIRR_RND_SHIFT)
+#define PMSIRR_INTERVAL_SHIFT 8
+#define PMSIRR_INTERVAL_MASK (UL(0xffffff) << PMSIRR_INTERVAL_SHIFT)
+
+/* PMSLATFR_EL1 */
+#define PMSLATFR_EL1_REG MRS_REG_ALT_NAME(PMSLATFR_EL1)
+#define PMSLATFR_EL1_op0 3
+#define PMSLATFR_EL1_op1 0
+#define PMSLATFR_EL1_CRn 9
+#define PMSLATFR_EL1_CRm 9
+#define PMSLATFR_EL1_op2 6
+#define PMSLATFR_MINLAT_SHIFT 0
+#define PMSLATFR_MINLAT_MASK (UL(0xfff) << PMSLATFR_MINLAT_SHIFT)
+
+/* PMSNEVFR_EL1 */
+#define PMSNEVFR_EL1_REG MRS_REG_ALT_NAME(PMSNEVFR_EL1)
+#define PMSNEVFR_EL1_op0 3
+#define PMSNEVFR_EL1_op1 0
+#define PMSNEVFR_EL1_CRn 9
+#define PMSNEVFR_EL1_CRm 9
+#define PMSNEVFR_EL1_op2 1
+
+/* PMSWINC_EL0 */
+#define PMSWINC_EL0_op0 3
+#define PMSWINC_EL0_op1 3
+#define PMSWINC_EL0_CRn 9
+#define PMSWINC_EL0_CRm 12
+#define PMSWINC_EL0_op2 4
+
+/* PMUSERENR_EL0 */
+#define PMUSERENR_EL0_op0 3
+#define PMUSERENR_EL0_op1 3
+#define PMUSERENR_EL0_CRn 9
+#define PMUSERENR_EL0_CRm 14
+#define PMUSERENR_EL0_op2 0
+
+/* PMXEVCNTR_EL0 */
+#define PMXEVCNTR_EL0_op0 3
+#define PMXEVCNTR_EL0_op1 3
+#define PMXEVCNTR_EL0_CRn 9
+#define PMXEVCNTR_EL0_CRm 13
+#define PMXEVCNTR_EL0_op2 2
+
+/* PMXEVTYPER_EL0 */
+#define PMXEVTYPER_EL0_op0 3
+#define PMXEVTYPER_EL0_op1 3
+#define PMXEVTYPER_EL0_CRn 9
+#define PMXEVTYPER_EL0_CRm 13
+#define PMXEVTYPER_EL0_op2 1
+
+/* RNDRRS */
+#define RNDRRS_REG MRS_REG_ALT_NAME(RNDRRS)
+#define RNDRRS_op0 3
+#define RNDRRS_op1 3
+#define RNDRRS_CRn 2
+#define RNDRRS_CRm 4
+#define RNDRRS_op2 1
+
+/* SCTLR_EL1 - System Control Register */
+#define SCTLR_EL1_REG MRS_REG_ALT_NAME(SCTLR_EL1)
+#define SCTLR_EL1_op0 3
+#define SCTLR_EL1_op1 0
+#define SCTLR_EL1_CRn 1
+#define SCTLR_EL1_CRm 0
+#define SCTLR_EL1_op2 0
+#define SCTLR_RES1 0x30d00800 /* Reserved ARMv8.0, write 1 */
+#define SCTLR_M (UL(0x1) << 0)
+#define SCTLR_A (UL(0x1) << 1)
+#define SCTLR_C (UL(0x1) << 2)
+#define SCTLR_SA (UL(0x1) << 3)
+#define SCTLR_SA0 (UL(0x1) << 4)
+#define SCTLR_CP15BEN (UL(0x1) << 5)
+#define SCTLR_nAA (UL(0x1) << 6)
+#define SCTLR_ITD (UL(0x1) << 7)
+#define SCTLR_SED (UL(0x1) << 8)
+#define SCTLR_UMA (UL(0x1) << 9)
+#define SCTLR_EnRCTX (UL(0x1) << 10)
+#define SCTLR_EOS (UL(0x1) << 11)
+#define SCTLR_I (UL(0x1) << 12)
+#define SCTLR_EnDB (UL(0x1) << 13)
+#define SCTLR_DZE (UL(0x1) << 14)
+#define SCTLR_UCT (UL(0x1) << 15)
+#define SCTLR_nTWI (UL(0x1) << 16)
+/* Bit 17 is reserved */
+#define SCTLR_nTWE (UL(0x1) << 18)
+#define SCTLR_WXN (UL(0x1) << 19)
+#define SCTLR_TSCXT (UL(0x1) << 20)
+#define SCTLR_IESB (UL(0x1) << 21)
+#define SCTLR_EIS (UL(0x1) << 22)
+#define SCTLR_SPAN (UL(0x1) << 23)
+#define SCTLR_E0E (UL(0x1) << 24)
+#define SCTLR_EE (UL(0x1) << 25)
+#define SCTLR_UCI (UL(0x1) << 26)
+#define SCTLR_EnDA (UL(0x1) << 27)
+#define SCTLR_nTLSMD (UL(0x1) << 28)
+#define SCTLR_LSMAOE (UL(0x1) << 29)
+#define SCTLR_EnIB (UL(0x1) << 30)
+#define SCTLR_EnIA (UL(0x1) << 31)
+/* Bits 34:32 are reserved */
+#define SCTLR_BT0 (UL(0x1) << 35)
+#define SCTLR_BT1 (UL(0x1) << 36)
+#define SCTLR_ITFSB (UL(0x1) << 37)
+#define SCTLR_TCF0_MASK (UL(0x3) << 38)
+#define SCTLR_TCF_MASK (UL(0x3) << 40)
+#define SCTLR_ATA0 (UL(0x1) << 42)
+#define SCTLR_ATA (UL(0x1) << 43)
+#define SCTLR_DSSBS (UL(0x1) << 44)
+#define SCTLR_TWEDEn (UL(0x1) << 45)
+#define SCTLR_TWEDEL_MASK (UL(0xf) << 46)
+/* Bits 53:50 are reserved */
+#define SCTLR_EnASR (UL(0x1) << 54)
+#define SCTLR_EnAS0 (UL(0x1) << 55)
+#define SCTLR_EnALS (UL(0x1) << 56)
+#define SCTLR_EPAN (UL(0x1) << 57)
+
+/* SCTLR_EL12 */
+#define SCTLR_EL12_REG MRS_REG_ALT_NAME(SCTLR_EL12)
+#define SCTLR_EL12_op0 3
+#define SCTLR_EL12_op1 5
+#define SCTLR_EL12_CRn 1
+#define SCTLR_EL12_CRm 0
+#define SCTLR_EL12_op2 0
+
+/* SPSR_EL1 */
+#define SPSR_EL1_REG MRS_REG_ALT_NAME(SPSR_EL1)
+#define SPSR_EL1_op0 3
+#define SPSR_EL1_op1 0
+#define SPSR_EL1_CRn 4
+#define SPSR_EL1_CRm 0
+#define SPSR_EL1_op2 0
+/*
+ * When the exception is taken in AArch64:
+ * M[3:2] is the exception level
+ * M[1] is unused
+ * M[0] is the SP select:
+ * 0: always SP0
+ * 1: current EL's SP
+ */
+#define PSR_M_EL0t 0x00000000UL
+#define PSR_M_EL1t 0x00000004UL
+#define PSR_M_EL1h 0x00000005UL
+#define PSR_M_EL2t 0x00000008UL
+#define PSR_M_EL2h 0x00000009UL
+#define PSR_M_64 0x00000000UL
+#define PSR_M_32 0x00000010UL
+#define PSR_M_MASK 0x0000000fUL
+
+#define PSR_T 0x00000020UL
+
+#define PSR_AARCH32 0x00000010UL
+#define PSR_F 0x00000040UL
+#define PSR_I 0x00000080UL
+#define PSR_A 0x00000100UL
+#define PSR_D 0x00000200UL
+#define PSR_DAIF (PSR_D | PSR_A | PSR_I | PSR_F)
+/* The default DAIF mask. These bits are valid in spsr_el1 and daif */
+#define PSR_DAIF_DEFAULT (0)
+#define PSR_DAIF_INTR (PSR_I | PSR_F)
+#define PSR_BTYPE 0x00000c00UL
+#define PSR_SSBS 0x00001000UL
+#define PSR_ALLINT 0x00002000UL
+#define PSR_IL 0x00100000UL
+#define PSR_SS 0x00200000UL
+#define PSR_PAN 0x00400000UL
+#define PSR_UAO 0x00800000UL
+#define PSR_DIT 0x01000000UL
+#define PSR_TCO 0x02000000UL
+#define PSR_V 0x10000000UL
+#define PSR_C 0x20000000UL
+#define PSR_Z 0x40000000UL
+#define PSR_N 0x80000000UL
+#define PSR_FLAGS 0xf0000000UL
+/* PSR fields that can be set from 32-bit and 64-bit processes */
+#define PSR_SETTABLE_32 PSR_FLAGS
+#define PSR_SETTABLE_64 (PSR_FLAGS | PSR_SS)
+
+/* SPSR_EL12 */
+#define SPSR_EL12_REG MRS_REG_ALT_NAME(SPSR_EL12)
+#define SPSR_EL12_op0 3
+#define SPSR_EL12_op1 5
+#define SPSR_EL12_CRn 4
+#define SPSR_EL12_CRm 0
+#define SPSR_EL12_op2 0
+
+/* REVIDR_EL1 - Revision ID Register */
+#define REVIDR_EL1_op0 3
+#define REVIDR_EL1_op1 0
+#define REVIDR_EL1_CRn 0
+#define REVIDR_EL1_CRm 0
+#define REVIDR_EL1_op2 6
+
+/* TCR_EL1 - Translation Control Register */
+#define TCR_EL1_REG MRS_REG_ALT_NAME(TCR_EL1)
+#define TCR_EL1_op0 3
+#define TCR_EL1_op1 0
+#define TCR_EL1_CRn 2
+#define TCR_EL1_CRm 0
+#define TCR_EL1_op2 2
+/* Bits 63:59 are reserved */
+#define TCR_DS_SHIFT 59
+#define TCR_DS (UL(1) << TCR_DS_SHIFT)
+#define TCR_TCMA1_SHIFT 58
+#define TCR_TCMA1 (UL(1) << TCR_TCMA1_SHIFT)
+#define TCR_TCMA0_SHIFT 57
+#define TCR_TCMA0 (UL(1) << TCR_TCMA0_SHIFT)
+#define TCR_E0PD1_SHIFT 56
+#define TCR_E0PD1 (UL(1) << TCR_E0PD1_SHIFT)
+#define TCR_E0PD0_SHIFT 55
+#define TCR_E0PD0 (UL(1) << TCR_E0PD0_SHIFT)
+#define TCR_NFD1_SHIFT 54
+#define TCR_NFD1 (UL(1) << TCR_NFD1_SHIFT)
+#define TCR_NFD0_SHIFT 53
+#define TCR_NFD0 (UL(1) << TCR_NFD0_SHIFT)
+#define TCR_TBID1_SHIFT 52
+#define TCR_TBID1 (UL(1) << TCR_TBID1_SHIFT)
+#define TCR_TBID0_SHIFT 51
+#define TCR_TBID0 (UL(1) << TCR_TBID0_SHIFT)
+#define TCR_HWU162_SHIFT 50
+#define TCR_HWU162 (UL(1) << TCR_HWU162_SHIFT)
+#define TCR_HWU161_SHIFT 49
+#define TCR_HWU161 (UL(1) << TCR_HWU161_SHIFT)
+#define TCR_HWU160_SHIFT 48
+#define TCR_HWU160 (UL(1) << TCR_HWU160_SHIFT)
+#define TCR_HWU159_SHIFT 47
+#define TCR_HWU159 (UL(1) << TCR_HWU159_SHIFT)
+#define TCR_HWU1 \
+ (TCR_HWU159 | TCR_HWU160 | TCR_HWU161 | TCR_HWU162)
+#define TCR_HWU062_SHIFT 46
+#define TCR_HWU062 (UL(1) << TCR_HWU062_SHIFT)
+#define TCR_HWU061_SHIFT 45
+#define TCR_HWU061 (UL(1) << TCR_HWU061_SHIFT)
+#define TCR_HWU060_SHIFT 44
+#define TCR_HWU060 (UL(1) << TCR_HWU060_SHIFT)
+#define TCR_HWU059_SHIFT 43
+#define TCR_HWU059 (UL(1) << TCR_HWU059_SHIFT)
+#define TCR_HWU0 \
+ (TCR_HWU059 | TCR_HWU060 | TCR_HWU061 | TCR_HWU062)
+#define TCR_HPD1_SHIFT 42
+#define TCR_HPD1 (UL(1) << TCR_HPD1_SHIFT)
+#define TCR_HPD0_SHIFT 41
+#define TCR_HPD0 (UL(1) << TCR_HPD0_SHIFT)
+#define TCR_HD_SHIFT 40
+#define TCR_HD (UL(1) << TCR_HD_SHIFT)
+#define TCR_HA_SHIFT 39
+#define TCR_HA (UL(1) << TCR_HA_SHIFT)
+#define TCR_TBI1_SHIFT 38
+#define TCR_TBI1 (UL(1) << TCR_TBI1_SHIFT)
+#define TCR_TBI0_SHIFT 37
+#define TCR_TBI0 (UL(1) << TCR_TBI0_SHIFT)
+#define TCR_ASID_SHIFT 36
+#define TCR_ASID_WIDTH 1
+#define TCR_ASID_16 (UL(1) << TCR_ASID_SHIFT)
+/* Bit 35 is reserved */
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_WIDTH 3
+#define TCR_IPS_32BIT (UL(0) << TCR_IPS_SHIFT)
+#define TCR_IPS_36BIT (UL(1) << TCR_IPS_SHIFT)
+#define TCR_IPS_40BIT (UL(2) << TCR_IPS_SHIFT)
+#define TCR_IPS_42BIT (UL(3) << TCR_IPS_SHIFT)
+#define TCR_IPS_44BIT (UL(4) << TCR_IPS_SHIFT)
+#define TCR_IPS_48BIT (UL(5) << TCR_IPS_SHIFT)
+#define TCR_TG1_SHIFT 30
+#define TCR_TG1_MASK (UL(3) << TCR_TG1_SHIFT)
+#define TCR_TG1_16K (UL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
+#define TCR_SH1_SHIFT 28
+#define TCR_SH1_IS (UL(3) << TCR_SH1_SHIFT)
+#define TCR_ORGN1_SHIFT 26
+#define TCR_ORGN1_WBWA (UL(1) << TCR_ORGN1_SHIFT)
+#define TCR_IRGN1_SHIFT 24
+#define TCR_IRGN1_WBWA (UL(1) << TCR_IRGN1_SHIFT)
+#define TCR_EPD1_SHIFT 23
+#define TCR_EPD1 (UL(1) << TCR_EPD1_SHIFT)
+#define TCR_A1_SHIFT 22
+#define TCR_A1 (UL(1) << TCR_A1_SHIFT)
+#define TCR_T1SZ_SHIFT 16
+#define TCR_T1SZ_MASK (UL(0x3f) << TCR_T1SZ_SHIFT)
+#define TCR_T1SZ(x) ((x) << TCR_T1SZ_SHIFT)
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)
+#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_IS (UL(3) << TCR_SH0_SHIFT)
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)
+#define TCR_EPD0_SHIFT 7
+#define TCR_EPD0 (UL(1) << TCR_EPD0_SHIFT)
+/* Bit 6 is reserved */
+#define TCR_T0SZ_SHIFT 0
+#define TCR_T0SZ_MASK (UL(0x3f) << TCR_T0SZ_SHIFT)
+#define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT)
+#define TCR_TxSZ(x) (TCR_T1SZ(x) | TCR_T0SZ(x))
+
+/* TCR_EL12 */
+#define TCR_EL12_REG MRS_REG_ALT_NAME(TCR_EL12)
+#define TCR_EL12_op0 3
+#define TCR_EL12_op1 5
+#define TCR_EL12_CRn 2
+#define TCR_EL12_CRm 0
+#define TCR_EL12_op2 2
+
+/* TTBR0_EL1 & TTBR1_EL1 - Translation Table Base Register 0 & 1 */
+#define TTBR_ASID_SHIFT 48
+#define TTBR_ASID_MASK (0xfffful << TTBR_ASID_SHIFT)
+#define TTBR_BADDR 0x0000fffffffffffeul
+#define TTBR_CnP_SHIFT 0
+#define TTBR_CnP (1ul << TTBR_CnP_SHIFT)
+
+/* TTBR0_EL1 */
+#define TTBR0_EL1_REG MRS_REG_ALT_NAME(TTBR0_EL1)
+#define TTBR0_EL1_op0 3
+#define TTBR0_EL1_op1 0
+#define TTBR0_EL1_CRn 2
+#define TTBR0_EL1_CRm 0
+#define TTBR0_EL1_op2 0
+
+/* TTBR0_EL12 */
+#define TTBR0_EL12_REG MRS_REG_ALT_NAME(TTBR0_EL12)
+#define TTBR0_EL12_op0 3
+#define TTBR0_EL12_op1 5
+#define TTBR0_EL12_CRn 2
+#define TTBR0_EL12_CRm 0
+#define TTBR0_EL12_op2 0
+
+/* TTBR1_EL1 */
+#define TTBR1_EL1_REG MRS_REG_ALT_NAME(TTBR1_EL1)
+#define TTBR1_EL1_op0 3
+#define TTBR1_EL1_op1 0
+#define TTBR1_EL1_CRn 2
+#define TTBR1_EL1_CRm 0
+#define TTBR1_EL1_op2 1
+
+/* TTBR1_EL12 */
+#define TTBR1_EL12_REG MRS_REG_ALT_NAME(TTBR1_EL12)
+#define TTBR1_EL12_op0 3
+#define TTBR1_EL12_op1 5
+#define TTBR1_EL12_CRn 2
+#define TTBR1_EL12_CRm 0
+#define TTBR1_EL12_op2 1
+
+/* VBAR_EL1 */
+#define VBAR_EL1_REG MRS_REG_ALT_NAME(VBAR_EL1)
+#define VBAR_EL1_op0 3
+#define VBAR_EL1_op1 0
+#define VBAR_EL1_CRn 12
+#define VBAR_EL1_CRm 0
+#define VBAR_EL1_op2 0
+
+/* VBAR_EL12 */
+#define VBAR_EL12_REG MRS_REG_ALT_NAME(VBAR_EL12)
+#define VBAR_EL12_op0 3
+#define VBAR_EL12_op1 5
+#define VBAR_EL12_CRn 12
+#define VBAR_EL12_CRm 0
+#define VBAR_EL12_op2 0
+
+/* ZCR_EL1 - SVE Control Register */
+#define ZCR_EL1_REG MRS_REG_ALT_NAME(ZCR_EL1)
+#define ZCR_EL1_op0 3
+#define ZCR_EL1_op1 0
+#define ZCR_EL1_CRn 1
+#define ZCR_EL1_CRm 2
+#define ZCR_EL1_op2 0
+#define ZCR_LEN_SHIFT 0
+#define ZCR_LEN_MASK (0xf << ZCR_LEN_SHIFT)
+#define ZCR_LEN_BYTES(x) ((((x) & ZCR_LEN_MASK) + 1) * 16)
+
+#endif /* !_MACHINE_ARMREG_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/asan.h b/sys/arm64/include/asan.h
new file mode 100644
index 000000000000..149565ea04f5
--- /dev/null
+++ b/sys/arm64/include/asan.h
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Mark Johnston under sponsorship from the
+ * FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_ASAN_H_
+#define _MACHINE_ASAN_H_
+
+#ifdef KASAN
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_page.h>
+#include <machine/vmparam.h>
+
+static inline vm_offset_t
+kasan_md_addr_to_shad(vm_offset_t addr)
+{
+ return (((addr - VM_MIN_KERNEL_ADDRESS) >> KASAN_SHADOW_SCALE_SHIFT) +
+ KASAN_MIN_ADDRESS);
+}
+
+static inline bool
+kasan_md_unsupported(vm_offset_t addr)
+{
+ return (addr < VM_MIN_KERNEL_ADDRESS || addr >= virtual_end);
+}
+
+static inline void
+kasan_md_init(void)
+{
+
+}
+
+static inline void
+kasan_md_init_early(vm_offset_t bootstack, size_t size)
+{
+
+ kasan_shadow_map(bootstack, size);
+}
+
+#endif /* KASAN */
+#endif /* !_MACHINE_ASAN_H_ */
diff --git a/sys/arm64/include/asm.h b/sys/arm64/include/asm.h
new file mode 100644
index 000000000000..4f373dc4b7e1
--- /dev/null
+++ b/sys/arm64/include/asm.h
@@ -0,0 +1,237 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/asm.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_ASM_H_
+#define _MACHINE_ASM_H_
+
+#undef __FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s) .ident s
+#else
+#define __FBSDID(s) /* nothing */
+#endif
+
+#define _C_LABEL(x) x
+
+#ifdef KDTRACE_HOOKS
+#define DTRACE_NOP nop
+#else
+#define DTRACE_NOP
+#endif
+
+#define LENTRY(sym) \
+ .text; .align 2; .type sym,#function; sym: \
+ .cfi_startproc; BTI_C; DTRACE_NOP
+#define ENTRY(sym) \
+ .globl sym; LENTRY(sym)
+#define EENTRY(sym) \
+ .globl sym; .text; .align 2; .type sym,#function; sym:
+#define LEND(sym) .ltorg; .cfi_endproc; .size sym, . - sym
+#define END(sym) LEND(sym)
+#define EEND(sym)
+
+#define WEAK_REFERENCE(sym, alias) \
+ .weak alias; \
+ .set alias,sym
+
+#define UINT64_C(x) (x)
+
+#if defined(PIC)
+#define PIC_SYM(x,y) x ## @ ## y
+#else
+#define PIC_SYM(x,y) x
+#endif
+
+/* Alias for link register x30 */
+#define lr x30
+
+/*
+ * Check whether a given cpu feature is present, in the case it is not we jump
+ * to the given label. The tmp register should be a register able to hold the
+ * temporary data.
+ */
+#define CHECK_CPU_FEAT(tmp, feat_reg, feat, label) \
+ mrs tmp, ##feat_reg##_el1; \
+ ubfx tmp, tmp, ##feat_reg##_##feat##_SHIFT, ##feat_reg##_##feat##_WIDTH; \
+ cbz tmp, label
+
+/*
+ * Sets the trap fault handler. The exception handler will return to the
+ * address in the handler register on a data abort or the xzr register to
+ * clear the handler. The tmp parameter should be a register able to hold
+ * the temporary data.
+ */
+#define SET_FAULT_HANDLER(handler, tmp) \
+ ldr tmp, [x18, #PC_CURTHREAD]; /* Load curthread */ \
+ ldr tmp, [tmp, #TD_PCB]; /* Load the pcb */ \
+ str handler, [tmp, #PCB_ONFAULT] /* Set the handler */
+
+#define ENTER_USER_ACCESS(reg, tmp) \
+ ldr tmp, =has_pan; /* Get the addr of has_pan */ \
+ ldr reg, [tmp]; /* Read it */ \
+ cbz reg, 997f; /* If no PAN skip */ \
+ .arch_extension pan; \
+ msr pan, #0; /* Disable PAN checks */ \
+ .arch_extension nopan; \
+ 997:
+
+#define EXIT_USER_ACCESS(reg) \
+ cbz reg, 998f; /* If no PAN skip */ \
+ .arch_extension pan; \
+ msr pan, #1; /* Enable PAN checks */ \
+ .arch_extension nopan; \
+ 998:
+
+#define EXIT_USER_ACCESS_CHECK(reg, tmp) \
+ ldr tmp, =has_pan; /* Get the addr of has_pan */ \
+ ldr reg, [tmp]; /* Read it */ \
+ cbz reg, 999f; /* If no PAN skip */ \
+ .arch_extension pan; \
+ msr pan, #1; /* Enable PAN checks */ \
+ .arch_extension nopan; \
+ 999:
+
+/*
+ * Some AArch64 CPUs speculate past an eret instruction. As the user may
+ * control the registers at this point add a speculation barrier usable on
+ * all AArch64 CPUs after the eret instruction.
+ * TODO: ARMv8.5 adds a specific instruction for this, we could use that
+ * if we know we are running on something that supports it.
+ */
+#define ERET \
+ eret; \
+ dsb sy; \
+ isb
+
+/*
+ * When a CPU that implements FEAT_BTI uses a BR/BLR instruction (or the
+ * pointer authentication variants, e.g. BLRAA) and the target location
+ * has the GP attribute in its page table, then the target of the BR/BLR
+ * needs to be a valid BTI landing pad.
+ *
+ * BTI_C should be used at the start of a function and is used in the
+ * ENTRY macro. It can be replaced by PACIASP or PACIBSP, however these
+ * also need an appropriate authenticate instruction before returning.
+ *
+ * BTI_J should be used as the target instruction when branching with a
+ * BR instruction within a function.
+ *
+ * When using a BR to branch to a new function, e.g. a tail call, then
+ * the target register should be x16 or x17 so it is compatible with
+ * the BTI_C instruction.
+ *
+ * As these instructions are in the hint space they are a NOP when
+ * the CPU doesn't implement FEAT_BTI so are safe to use.
+ */
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+#define BTI_C hint #34
+#define BTI_J hint #36
+#else
+#define BTI_C
+#define BTI_J
+#endif
+
+/*
+ * To help protect against ROP attacks we can use Pointer Authentication
+ * to sign the return address before pushing it to the stack.
+ *
+ * PAC_LR_SIGN can be used at the start of a function to sign the link
+ * register with the stack pointer as the modifier. As this is in the hint
+ * space it is safe to use on CPUs that don't implement pointer
+ * authentication. It can be used in place of the BTI_C instruction above as
+ * a valid BTI landing pad instruction.
+ *
+ * PAC_LR_AUTH is used to authenticate the link register using the stack
+ * pointer as the modifier. It should be used in any function that uses
+ * PAC_LR_SIGN. The stack pointer must be identical in each case.
+ */
+#ifdef __ARM_FEATURE_PAC_DEFAULT
+#define PAC_LR_SIGN hint #25 /* paciasp */
+#define PAC_LR_AUTH hint #29 /* autiasp */
+#else
+#define PAC_LR_SIGN
+#define PAC_LR_AUTH
+#endif
+
+/*
+ * GNU_PROPERTY_AARCH64_FEATURE_1_NOTE can be used to insert a note that
+ * the current assembly file is built with Pointer Authentication (PAC) or
+ * Branch Target Identification support (BTI). As the linker requires all
+ * object files in an executable or library to have the GNU property
+ * note to emit it in the created elf file we need to add a note to all
+ * assembly files that support BTI so the kernel and dynamic linker can
+ * mark memory used by the file as guarded.
+ *
+ * The GNU_PROPERTY_AARCH64_FEATURE_1_VAL macro encodes the combination
+ * of PAC and BTI that have been enabled. It can be used as follows:
+ * GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL);
+ *
+ * To use this you need to include <sys/elf_common.h> for
+ * GNU_PROPERTY_AARCH64_FEATURE_1_*
+ */
+#if defined(__ARM_FEATURE_BTI_DEFAULT)
+#if defined(__ARM_FEATURE_PAC_DEFAULT)
+/* BTI, PAC */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_VAL \
+ (GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC)
+#else
+/* BTI, no PAC */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_VAL \
+ (GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
+#endif
+#elif defined(__ARM_FEATURE_PAC_DEFAULT)
+/* No BTI, PAC */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_VAL \
+ (GNU_PROPERTY_AARCH64_FEATURE_1_PAC)
+#else
+/* No BTI, no PAC */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_VAL 0
+#endif
+
+#if defined(__ARM_FEATURE_BTI_DEFAULT) || defined(__ARM_FEATURE_PAC_DEFAULT)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(x) \
+ .section .note.gnu.property, "a"; \
+ .balign 8; \
+ .4byte 0x4; /* sizeof(vendor) */ \
+ .4byte 0x10; /* sizeof(note data) */ \
+ .4byte (NT_GNU_PROPERTY_TYPE_0); \
+ .asciz "GNU"; /* vendor */ \
+ /* note data: */ \
+ .4byte (GNU_PROPERTY_AARCH64_FEATURE_1_AND); \
+ .4byte 0x4; /* sizeof(property) */ \
+ .4byte (x); /* property */ \
+ .4byte 0
+#else
+#define GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(x)
+#endif
+
+#endif /* _MACHINE_ASM_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
new file mode 100644
index 000000000000..998a49c02e60
--- /dev/null
+++ b/sys/arm64/include/atomic.h
@@ -0,0 +1,679 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/atomic.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+#define isb() __asm __volatile("isb" : : : "memory")
+
+/*
+ * Options for DMB and DSB:
+ * oshld Outer Shareable, load
+ * oshst Outer Shareable, store
+ * osh Outer Shareable, all
+ * nshld Non-shareable, load
+ * nshst Non-shareable, store
+ * nsh Non-shareable, all
+ * ishld Inner Shareable, load
+ * ishst Inner Shareable, store
+ * ish Inner Shareable, all
+ * ld Full system, load
+ * st Full system, store
+ * sy Full system, all
+ */
+#define dsb(opt) __asm __volatile("dsb " __STRING(opt) : : : "memory")
+#define dmb(opt) __asm __volatile("dmb " __STRING(opt) : : : "memory")
+
+#define mb() dmb(sy) /* Full system memory barrier all */
+#define wmb() dmb(st) /* Full system memory barrier store */
+#define rmb() dmb(ld) /* Full system memory barrier load */
+
+#ifdef _KERNEL
+extern _Bool lse_supported;
+#endif
+
+#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
+#include <sys/atomic_san.h>
+#else
+
+#include <sys/atomic_common.h>
+
+#if defined(__ARM_FEATURE_ATOMICS)
+#define _ATOMIC_LSE_SUPPORTED 1
+#elif defined(_KERNEL)
+#ifdef LSE_ATOMICS
+#define _ATOMIC_LSE_SUPPORTED 1
+#else
+#define _ATOMIC_LSE_SUPPORTED lse_supported
+#endif
+#else
+#define _ATOMIC_LSE_SUPPORTED 0
+#endif
+
+#define _ATOMIC_OP_PROTO(t, op, bar, flav) \
+static __inline void \
+atomic_##op##_##bar##t##flav(volatile uint##t##_t *p, uint##t##_t val)
+
+#define _ATOMIC_OP_IMPL(t, w, s, op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \
+_ATOMIC_OP_PROTO(t, op, bar, _llsc) \
+{ \
+ uint##t##_t tmp; \
+ int res; \
+ \
+ pre; \
+ __asm __volatile( \
+ "1: ld"#a"xr"#s" %"#w"0, [%2]\n" \
+ " "#llsc_asm_op" %"#w"0, %"#w"0, %"#w"3\n" \
+ " st"#l"xr"#s" %w1, %"#w"0, [%2]\n" \
+ " cbnz %w1, 1b\n" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+} \
+ \
+_ATOMIC_OP_PROTO(t, op, bar, _lse) \
+{ \
+ uint##t##_t tmp; \
+ \
+ pre; \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "ld"#lse_asm_op#a#l#s" %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (tmp) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+} \
+ \
+_ATOMIC_OP_PROTO(t, op, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ atomic_##op##_##bar##t##_lse(p, val); \
+ else \
+ atomic_##op##_##bar##t##_llsc(p, val); \
+}
+
+#define __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \
+ _ATOMIC_OP_IMPL(8, w, b, op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l) \
+ _ATOMIC_OP_IMPL(16, w, h, op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l) \
+ _ATOMIC_OP_IMPL(32, w, , op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l) \
+ _ATOMIC_OP_IMPL(64, , , op, llsc_asm_op, lse_asm_op, pre, \
+ bar, a, l)
+
+#define _ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre) \
+ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, , , ) \
+ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, acq_, a, ) \
+ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, rel_, , l)
+
+_ATOMIC_OP(add, add, add, )
+_ATOMIC_OP(clear, bic, clr, )
+_ATOMIC_OP(set, orr, set, )
+_ATOMIC_OP(subtract, add, add, val = -val)
+
+#define _ATOMIC_CMPSET_PROTO(t, bar, flav) \
+static __inline int \
+atomic_cmpset_##bar##t##flav(volatile uint##t##_t *p, \
+ uint##t##_t cmpval, uint##t##_t newval)
+
+#define _ATOMIC_FCMPSET_PROTO(t, bar, flav) \
+static __inline int \
+atomic_fcmpset_##bar##t##flav(volatile uint##t##_t *p, \
+ uint##t##_t *cmpval, uint##t##_t newval)
+
+#define _ATOMIC_CMPSET_IMPL(t, w, s, bar, a, l) \
+_ATOMIC_CMPSET_PROTO(t, bar, _llsc) \
+{ \
+ uint##t##_t tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: mov %w1, #1\n" \
+ " ld"#a"xr"#s" %"#w"0, [%2]\n" \
+ " cmp %"#w"0, %"#w"3\n" \
+ " b.ne 2f\n" \
+ " st"#l"xr"#s" %w1, %"#w"4, [%2]\n" \
+ " cbnz %w1, 1b\n" \
+ "2:" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (cmpval), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ \
+ return (!res); \
+} \
+ \
+_ATOMIC_CMPSET_PROTO(t, bar, _lse) \
+{ \
+ uint##t##_t oldval; \
+ int res; \
+ \
+ oldval = cmpval; \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n" \
+ "cmp %"#w"1, %"#w"2\n" \
+ "cset %w0, eq\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (res), "+&r" (cmpval) \
+ : "r" (oldval), "r" (p), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ \
+ return (res); \
+} \
+ \
+_ATOMIC_CMPSET_PROTO(t, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_cmpset_##bar##t##_lse(p, cmpval, \
+ newval)); \
+ else \
+ return (atomic_cmpset_##bar##t##_llsc(p, cmpval, \
+ newval)); \
+} \
+ \
+_ATOMIC_FCMPSET_PROTO(t, bar, _llsc) \
+{ \
+ uint##t##_t _cmpval, tmp; \
+ int res; \
+ \
+ _cmpval = *cmpval; \
+ __asm __volatile( \
+ " mov %w1, #1\n" \
+ " ld"#a"xr"#s" %"#w"0, [%2]\n" \
+ " cmp %"#w"0, %"#w"3\n" \
+ " b.ne 1f\n" \
+ " st"#l"xr"#s" %w1, %"#w"4, [%2]\n" \
+ "1:" \
+ : "=&r"(tmp), "=&r"(res) \
+ : "r" (p), "r" (_cmpval), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ *cmpval = tmp; \
+ \
+ return (!res); \
+} \
+ \
+_ATOMIC_FCMPSET_PROTO(t, bar, _lse) \
+{ \
+ uint##t##_t _cmpval, tmp; \
+ int res; \
+ \
+ _cmpval = tmp = *cmpval; \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n" \
+ "cmp %"#w"1, %"#w"2\n" \
+ "cset %w0, eq\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (res), "+&r" (tmp) \
+ : "r" (_cmpval), "r" (p), "r" (newval) \
+ : "cc", "memory" \
+ ); \
+ *cmpval = tmp; \
+ \
+ return (res); \
+} \
+ \
+_ATOMIC_FCMPSET_PROTO(t, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_fcmpset_##bar##t##_lse(p, cmpval, \
+ newval)); \
+ else \
+ return (atomic_fcmpset_##bar##t##_llsc(p, cmpval, \
+ newval)); \
+}
+
+#define _ATOMIC_CMPSET(bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(8, w, b, bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(16, w, h, bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(32, w, , bar, a, l) \
+ _ATOMIC_CMPSET_IMPL(64, , , bar, a, l)
+
+#define atomic_cmpset_8 atomic_cmpset_8
+#define atomic_fcmpset_8 atomic_fcmpset_8
+#define atomic_cmpset_16 atomic_cmpset_16
+#define atomic_fcmpset_16 atomic_fcmpset_16
+
+_ATOMIC_CMPSET( , , )
+_ATOMIC_CMPSET(acq_, a, )
+_ATOMIC_CMPSET(rel_, ,l)
+
+#define _ATOMIC_FETCHADD_PROTO(t, flav) \
+static __inline uint##t##_t \
+atomic_fetchadd_##t##flav(volatile uint##t##_t *p, uint##t##_t val)
+
+#define _ATOMIC_FETCHADD_IMPL(t, w) \
+_ATOMIC_FETCHADD_PROTO(t, _llsc) \
+{ \
+ uint##t##_t ret, tmp; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ldxr %"#w"2, [%3]\n" \
+ " add %"#w"0, %"#w"2, %"#w"4\n" \
+ " stxr %w1, %"#w"0, [%3]\n" \
+ " cbnz %w1, 1b\n" \
+ : "=&r" (tmp), "=&r" (res), "=&r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_FETCHADD_PROTO(t, _lse) \
+{ \
+ uint##t##_t ret; \
+ \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "ldadd %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_FETCHADD_PROTO(t, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_fetchadd_##t##_lse(p, val)); \
+ else \
+ return (atomic_fetchadd_##t##_llsc(p, val)); \
+}
+
+_ATOMIC_FETCHADD_IMPL(32, w)
+_ATOMIC_FETCHADD_IMPL(64, )
+
+#define _ATOMIC_SWAP_PROTO(t, flav) \
+static __inline uint##t##_t \
+atomic_swap_##t##flav(volatile uint##t##_t *p, uint##t##_t val)
+
+#define _ATOMIC_READANDCLEAR_PROTO(t, flav) \
+static __inline uint##t##_t \
+atomic_readandclear_##t##flav(volatile uint##t##_t *p)
+
+#define _ATOMIC_SWAP_IMPL(t, w, zreg) \
+_ATOMIC_SWAP_PROTO(t, _llsc) \
+{ \
+ uint##t##_t ret; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ldxr %"#w"1, [%2]\n" \
+ " stxr %w0, %"#w"3, [%2]\n" \
+ " cbnz %w0, 1b\n" \
+ : "=&r" (res), "=&r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_SWAP_PROTO(t, _lse) \
+{ \
+ uint##t##_t ret; \
+ \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "swp %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (ret) \
+ : "r" (p), "r" (val) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_SWAP_PROTO(t, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_swap_##t##_lse(p, val)); \
+ else \
+ return (atomic_swap_##t##_llsc(p, val)); \
+} \
+ \
+_ATOMIC_READANDCLEAR_PROTO(t, _llsc) \
+{ \
+ uint##t##_t ret; \
+ int res; \
+ \
+ __asm __volatile( \
+ "1: ldxr %"#w"1, [%2]\n" \
+ " stxr %w0, "#zreg", [%2]\n" \
+ " cbnz %w0, 1b\n" \
+ : "=&r" (res), "=&r" (ret) \
+ : "r" (p) \
+ : "memory" \
+ ); \
+ \
+ return (ret); \
+} \
+ \
+_ATOMIC_READANDCLEAR_PROTO(t, _lse) \
+{ \
+ return (atomic_swap_##t##_lse(p, 0)); \
+} \
+ \
+_ATOMIC_READANDCLEAR_PROTO(t, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_readandclear_##t##_lse(p)); \
+ else \
+ return (atomic_readandclear_##t##_llsc(p)); \
+}
+
+_ATOMIC_SWAP_IMPL(32, w, wzr)
+_ATOMIC_SWAP_IMPL(64, , xzr)
+
+#define _ATOMIC_TEST_OP_PROTO(t, op, bar, flav) \
+static __inline int \
+atomic_testand##op##_##bar##t##flav(volatile uint##t##_t *p, u_int val)
+
+#define _ATOMIC_TEST_OP_IMPL(t, w, op, llsc_asm_op, lse_asm_op, bar, a) \
+_ATOMIC_TEST_OP_PROTO(t, op, bar, _llsc) \
+{ \
+ uint##t##_t mask, old, tmp; \
+ int res; \
+ \
+ mask = ((uint##t##_t)1) << (val & (t - 1)); \
+ __asm __volatile( \
+ "1: ld"#a"xr %"#w"2, [%3]\n" \
+ " "#llsc_asm_op" %"#w"0, %"#w"2, %"#w"4\n" \
+ " stxr %w1, %"#w"0, [%3]\n" \
+ " cbnz %w1, 1b\n" \
+ : "=&r" (tmp), "=&r" (res), "=&r" (old) \
+ : "r" (p), "r" (mask) \
+ : "memory" \
+ ); \
+ \
+ return ((old & mask) != 0); \
+} \
+ \
+_ATOMIC_TEST_OP_PROTO(t, op, bar, _lse) \
+{ \
+ uint##t##_t mask, old; \
+ \
+ mask = ((uint##t##_t)1) << (val & (t - 1)); \
+ __asm __volatile( \
+ ".arch_extension lse\n" \
+ "ld"#lse_asm_op#a" %"#w"2, %"#w"0, [%1]\n" \
+ ".arch_extension nolse\n" \
+ : "=r" (old) \
+ : "r" (p), "r" (mask) \
+ : "memory" \
+ ); \
+ \
+ return ((old & mask) != 0); \
+} \
+ \
+_ATOMIC_TEST_OP_PROTO(t, op, bar, ) \
+{ \
+ if (_ATOMIC_LSE_SUPPORTED) \
+ return (atomic_testand##op##_##bar##t##_lse(p, val)); \
+ else \
+ return (atomic_testand##op##_##bar##t##_llsc(p, val)); \
+}
+
+#define _ATOMIC_TEST_OP(op, llsc_asm_op, lse_asm_op) \
+ _ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op, , ) \
+ _ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op, acq_, a) \
+ _ATOMIC_TEST_OP_IMPL(64, , op, llsc_asm_op, lse_asm_op, , ) \
+ _ATOMIC_TEST_OP_IMPL(64, , op, llsc_asm_op, lse_asm_op, acq_, a)
+
+_ATOMIC_TEST_OP(clear, bic, clr)
+_ATOMIC_TEST_OP(set, orr, set)
+
+#define _ATOMIC_LOAD_ACQ_IMPL(t, w, s) \
+static __inline uint##t##_t \
+atomic_load_acq_##t(const volatile uint##t##_t *p) \
+{ \
+ uint##t##_t ret; \
+ \
+ __asm __volatile( \
+ "ldar"#s" %"#w"0, [%1]\n" \
+ : "=&r" (ret) \
+ : "r" (p) \
+ : "memory"); \
+ \
+ return (ret); \
+}
+
+#define atomic_load_acq_8 atomic_load_acq_8
+#define atomic_load_acq_16 atomic_load_acq_16
+_ATOMIC_LOAD_ACQ_IMPL(8, w, b)
+_ATOMIC_LOAD_ACQ_IMPL(16, w, h)
+_ATOMIC_LOAD_ACQ_IMPL(32, w, )
+_ATOMIC_LOAD_ACQ_IMPL(64, , )
+
+#define _ATOMIC_STORE_REL_IMPL(t, w, s) \
+static __inline void \
+atomic_store_rel_##t(volatile uint##t##_t *p, uint##t##_t val) \
+{ \
+ __asm __volatile( \
+ "stlr"#s" %"#w"0, [%1]\n" \
+ : \
+ : "r" (val), "r" (p) \
+ : "memory"); \
+}
+
+_ATOMIC_STORE_REL_IMPL(8, w, b)
+_ATOMIC_STORE_REL_IMPL(16, w, h)
+_ATOMIC_STORE_REL_IMPL(32, w, )
+_ATOMIC_STORE_REL_IMPL(64, , )
+
+#define atomic_add_char atomic_add_8
+#define atomic_fcmpset_char atomic_fcmpset_8
+#define atomic_clear_char atomic_clear_8
+#define atomic_cmpset_char atomic_cmpset_8
+#define atomic_fetchadd_char atomic_fetchadd_8
+#define atomic_readandclear_char atomic_readandclear_8
+#define atomic_set_char atomic_set_8
+#define atomic_swap_char atomic_swap_8
+#define atomic_subtract_char atomic_subtract_8
+#define atomic_testandclear_char atomic_testandclear_8
+#define atomic_testandset_char atomic_testandset_8
+
+#define atomic_add_acq_char atomic_add_acq_8
+#define atomic_fcmpset_acq_char atomic_fcmpset_acq_8
+#define atomic_clear_acq_char atomic_clear_acq_8
+#define atomic_cmpset_acq_char atomic_cmpset_acq_8
+#define atomic_load_acq_char atomic_load_acq_8
+#define atomic_set_acq_char atomic_set_acq_8
+#define atomic_subtract_acq_char atomic_subtract_acq_8
+#define atomic_testandset_acq_char atomic_testandset_acq_8
+
+#define atomic_add_rel_char atomic_add_rel_8
+#define atomic_fcmpset_rel_char atomic_fcmpset_rel_8
+#define atomic_clear_rel_char atomic_clear_rel_8
+#define atomic_cmpset_rel_char atomic_cmpset_rel_8
+#define atomic_set_rel_char atomic_set_rel_8
+#define atomic_subtract_rel_char atomic_subtract_rel_8
+#define atomic_store_rel_char atomic_store_rel_8
+
+#define atomic_add_short atomic_add_16
+#define atomic_fcmpset_short atomic_fcmpset_16
+#define atomic_clear_short atomic_clear_16
+#define atomic_cmpset_short atomic_cmpset_16
+#define atomic_fetchadd_short atomic_fetchadd_16
+#define atomic_readandclear_short atomic_readandclear_16
+#define atomic_set_short atomic_set_16
+#define atomic_swap_short atomic_swap_16
+#define atomic_subtract_short atomic_subtract_16
+#define atomic_testandclear_short atomic_testandclear_16
+#define atomic_testandset_short atomic_testandset_16
+
+#define atomic_add_acq_short atomic_add_acq_16
+#define atomic_fcmpset_acq_short atomic_fcmpset_acq_16
+#define atomic_clear_acq_short atomic_clear_acq_16
+#define atomic_cmpset_acq_short atomic_cmpset_acq_16
+#define atomic_load_acq_short atomic_load_acq_16
+#define atomic_set_acq_short atomic_set_acq_16
+#define atomic_subtract_acq_short atomic_subtract_acq_16
+#define atomic_testandset_acq_short atomic_testandset_acq_16
+
+#define atomic_add_rel_short atomic_add_rel_16
+#define atomic_fcmpset_rel_short atomic_fcmpset_rel_16
+#define atomic_clear_rel_short atomic_clear_rel_16
+#define atomic_cmpset_rel_short atomic_cmpset_rel_16
+#define atomic_set_rel_short atomic_set_rel_16
+#define atomic_subtract_rel_short atomic_subtract_rel_16
+#define atomic_store_rel_short atomic_store_rel_16
+
+#define atomic_add_int atomic_add_32
+#define atomic_fcmpset_int atomic_fcmpset_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_cmpset_int atomic_cmpset_32
+#define atomic_fetchadd_int atomic_fetchadd_32
+#define atomic_readandclear_int atomic_readandclear_32
+#define atomic_set_int atomic_set_32
+#define atomic_swap_int atomic_swap_32
+#define atomic_subtract_int atomic_subtract_32
+#define atomic_testandclear_int atomic_testandclear_32
+#define atomic_testandset_int atomic_testandset_32
+
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_fcmpset_acq_int atomic_fcmpset_acq_32
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_load_acq_int atomic_load_acq_32
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+#define atomic_testandset_acq_int atomic_testandset_acq_32
+
+#define atomic_add_rel_int atomic_add_rel_32
+#define atomic_fcmpset_rel_int atomic_fcmpset_rel_32
+#define atomic_clear_rel_int atomic_clear_rel_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_set_rel_int atomic_set_rel_32
+#define atomic_subtract_rel_int atomic_subtract_rel_32
+#define atomic_store_rel_int atomic_store_rel_32
+
+#define atomic_add_long atomic_add_64
+#define atomic_fcmpset_long atomic_fcmpset_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_cmpset_long atomic_cmpset_64
+#define atomic_fetchadd_long atomic_fetchadd_64
+#define atomic_readandclear_long atomic_readandclear_64
+#define atomic_set_long atomic_set_64
+#define atomic_swap_long atomic_swap_64
+#define atomic_subtract_long atomic_subtract_64
+#define atomic_testandclear_long atomic_testandclear_64
+#define atomic_testandset_long atomic_testandset_64
+
+#define atomic_add_ptr atomic_add_64
+#define atomic_fcmpset_ptr atomic_fcmpset_64
+#define atomic_clear_ptr atomic_clear_64
+#define atomic_cmpset_ptr atomic_cmpset_64
+#define atomic_fetchadd_ptr atomic_fetchadd_64
+#define atomic_readandclear_ptr atomic_readandclear_64
+#define atomic_set_ptr atomic_set_64
+#define atomic_swap_ptr atomic_swap_64
+#define atomic_subtract_ptr atomic_subtract_64
+#define atomic_testandclear_ptr atomic_testandclear_64
+#define atomic_testandset_ptr atomic_testandset_64
+
+#define atomic_add_acq_long atomic_add_acq_64
+#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
+#define atomic_clear_acq_long atomic_clear_acq_64
+#define atomic_cmpset_acq_long atomic_cmpset_acq_64
+#define atomic_load_acq_long atomic_load_acq_64
+#define atomic_set_acq_long atomic_set_acq_64
+#define atomic_subtract_acq_long atomic_subtract_acq_64
+#define atomic_testandset_acq_long atomic_testandset_acq_64
+
+#define atomic_add_acq_ptr atomic_add_acq_64
+#define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_64
+#define atomic_clear_acq_ptr atomic_clear_acq_64
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_64
+#define atomic_load_acq_ptr atomic_load_acq_64
+#define atomic_set_acq_ptr atomic_set_acq_64
+#define atomic_subtract_acq_ptr atomic_subtract_acq_64
+
+#define atomic_add_rel_long atomic_add_rel_64
+#define atomic_fcmpset_rel_long atomic_fcmpset_rel_64
+#define atomic_clear_rel_long atomic_clear_rel_64
+#define atomic_cmpset_rel_long atomic_cmpset_rel_64
+#define atomic_set_rel_long atomic_set_rel_64
+#define atomic_subtract_rel_long atomic_subtract_rel_64
+#define atomic_store_rel_long atomic_store_rel_64
+
+#define atomic_add_rel_ptr atomic_add_rel_64
+#define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_64
+#define atomic_clear_rel_ptr atomic_clear_rel_64
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_64
+#define atomic_set_rel_ptr atomic_set_rel_64
+#define atomic_subtract_rel_ptr atomic_subtract_rel_64
+#define atomic_store_rel_ptr atomic_store_rel_64
+
+static __inline void
+atomic_thread_fence_acq(void)
+{
+
+ dmb(ld);
+}
+
+static __inline void
+atomic_thread_fence_rel(void)
+{
+
+ dmb(sy);
+}
+
+static __inline void
+atomic_thread_fence_acq_rel(void)
+{
+
+ dmb(sy);
+}
+
+static __inline void
+atomic_thread_fence_seq_cst(void)
+{
+
+ dmb(sy);
+}
+
+#endif /* SAN_NEEDS_INTERCEPTORS && !SAN_RUNTIME */
+#endif /* _MACHINE_ATOMIC_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/bus.h b/sys/arm64/include/bus.h
new file mode 100644
index 000000000000..2e2ef2f6d008
--- /dev/null
+++ b/sys/arm64/include/bus.h
@@ -0,0 +1,528 @@
+/* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: sys/arm/include/bus.h
+ */
+
+#ifdef __arm__
+#include <arm/bus.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+#include <machine/_bus.h>
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFFUL
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFFUL
+#define BUS_SPACE_MAXADDR_36BIT 0xFFFFFFFFFUL
+#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFFUL
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE_40BIT 0xFFFFFFFFFFUL
+
+#define BUS_SPACE_MAXADDR 0xFFFFFFFFFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE 0xFFFFFFFFFFFFFFFFUL
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
+#define BUS_SPACE_MAP_NONPOSTED 0x08
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+#define BUS_SPACE_BARRIER_READ 0x01
+#define BUS_SPACE_BARRIER_WRITE 0x02
+
+/*
+ * Bus space tag: a per-bus table of indirect access methods plus an
+ * opaque cookie handed back to every method.  The bus_space_*()
+ * dispatch macros below index into this structure.
+ */
+struct bus_space {
+	/* cookie */
+	void *bs_cookie;
+
+	/* mapping/unmapping */
+	int (*bs_map) (void *, bus_addr_t, bus_size_t,
+	    int, bus_space_handle_t *);
+	void (*bs_unmap) (void *, bus_space_handle_t, bus_size_t);
+	int (*bs_subregion) (void *, bus_space_handle_t,
+	    bus_size_t, bus_size_t, bus_space_handle_t *);
+
+	/* allocation/deallocation */
+	int (*bs_alloc) (void *, bus_addr_t, bus_addr_t,
+	    bus_size_t, bus_size_t, bus_size_t, int,
+	    bus_addr_t *, bus_space_handle_t *);
+	void (*bs_free) (void *, bus_space_handle_t,
+	    bus_size_t);
+
+	/* get kernel virtual address */
+	/* NOTE(review): no method follows the comment above; looks
+	 * retained from the arm bus.h this file derives from. */
+	/* barrier */
+	void (*bs_barrier) (void *, bus_space_handle_t,
+	    bus_size_t, bus_size_t, int);
+
+	/* read single */
+	u_int8_t (*bs_r_1) (void *, bus_space_handle_t, bus_size_t);
+	u_int16_t (*bs_r_2) (void *, bus_space_handle_t, bus_size_t);
+	u_int32_t (*bs_r_4) (void *, bus_space_handle_t, bus_size_t);
+	u_int64_t (*bs_r_8) (void *, bus_space_handle_t, bus_size_t);
+
+	/* read multiple */
+	void (*bs_rm_1) (void *, bus_space_handle_t, bus_size_t,
+	    u_int8_t *, bus_size_t);
+	void (*bs_rm_2) (void *, bus_space_handle_t, bus_size_t,
+	    u_int16_t *, bus_size_t);
+	void (*bs_rm_4) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t *, bus_size_t);
+	void (*bs_rm_8) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t *, bus_size_t);
+
+	/* read region */
+	void (*bs_rr_1) (void *, bus_space_handle_t,
+	    bus_size_t, u_int8_t *, bus_size_t);
+	void (*bs_rr_2) (void *, bus_space_handle_t,
+	    bus_size_t, u_int16_t *, bus_size_t);
+	void (*bs_rr_4) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t *, bus_size_t);
+	void (*bs_rr_8) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t *, bus_size_t);
+
+	/* write single */
+	void (*bs_w_1) (void *, bus_space_handle_t,
+	    bus_size_t, u_int8_t);
+	void (*bs_w_2) (void *, bus_space_handle_t,
+	    bus_size_t, u_int16_t);
+	void (*bs_w_4) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t);
+	void (*bs_w_8) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t);
+
+	/* write multiple */
+	void (*bs_wm_1) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int8_t *, bus_size_t);
+	void (*bs_wm_2) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int16_t *, bus_size_t);
+	void (*bs_wm_4) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int32_t *, bus_size_t);
+	void (*bs_wm_8) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int64_t *, bus_size_t);
+
+	/* write region */
+	void (*bs_wr_1) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int8_t *, bus_size_t);
+	void (*bs_wr_2) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int16_t *, bus_size_t);
+	void (*bs_wr_4) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int32_t *, bus_size_t);
+	void (*bs_wr_8) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int64_t *, bus_size_t);
+
+	/* set multiple */
+	void (*bs_sm_1) (void *, bus_space_handle_t,
+	    bus_size_t, u_int8_t, bus_size_t);
+	void (*bs_sm_2) (void *, bus_space_handle_t,
+	    bus_size_t, u_int16_t, bus_size_t);
+	void (*bs_sm_4) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t, bus_size_t);
+	void (*bs_sm_8) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t, bus_size_t);
+
+	/* set region */
+	void (*bs_sr_1) (void *, bus_space_handle_t,
+	    bus_size_t, u_int8_t, bus_size_t);
+	void (*bs_sr_2) (void *, bus_space_handle_t,
+	    bus_size_t, u_int16_t, bus_size_t);
+	void (*bs_sr_4) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t, bus_size_t);
+	void (*bs_sr_8) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t, bus_size_t);
+
+	/* copy */
+	void (*bs_c_1) (void *, bus_space_handle_t, bus_size_t,
+	    bus_space_handle_t, bus_size_t, bus_size_t);
+	void (*bs_c_2) (void *, bus_space_handle_t, bus_size_t,
+	    bus_space_handle_t, bus_size_t, bus_size_t);
+	void (*bs_c_4) (void *, bus_space_handle_t, bus_size_t,
+	    bus_space_handle_t, bus_size_t, bus_size_t);
+	void (*bs_c_8) (void *, bus_space_handle_t, bus_size_t,
+	    bus_space_handle_t, bus_size_t, bus_size_t);
+
+	/* read single stream */
+	u_int8_t (*bs_r_1_s) (void *, bus_space_handle_t, bus_size_t);
+	u_int16_t (*bs_r_2_s) (void *, bus_space_handle_t, bus_size_t);
+	u_int32_t (*bs_r_4_s) (void *, bus_space_handle_t, bus_size_t);
+	u_int64_t (*bs_r_8_s) (void *, bus_space_handle_t, bus_size_t);
+
+	/* read multiple stream */
+	void (*bs_rm_1_s) (void *, bus_space_handle_t, bus_size_t,
+	    u_int8_t *, bus_size_t);
+	void (*bs_rm_2_s) (void *, bus_space_handle_t, bus_size_t,
+	    u_int16_t *, bus_size_t);
+	void (*bs_rm_4_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t *, bus_size_t);
+	void (*bs_rm_8_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t *, bus_size_t);
+
+	/* read region stream */
+	void (*bs_rr_1_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int8_t *, bus_size_t);
+	void (*bs_rr_2_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int16_t *, bus_size_t);
+	void (*bs_rr_4_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t *, bus_size_t);
+	void (*bs_rr_8_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t *, bus_size_t);
+
+	/* write single stream */
+	void (*bs_w_1_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int8_t);
+	void (*bs_w_2_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int16_t);
+	void (*bs_w_4_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int32_t);
+	void (*bs_w_8_s) (void *, bus_space_handle_t,
+	    bus_size_t, u_int64_t);
+
+	/* write multiple stream */
+	void (*bs_wm_1_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int8_t *, bus_size_t);
+	void (*bs_wm_2_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int16_t *, bus_size_t);
+	void (*bs_wm_4_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int32_t *, bus_size_t);
+	void (*bs_wm_8_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int64_t *, bus_size_t);
+
+	/* write region stream */
+	void (*bs_wr_1_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int8_t *, bus_size_t);
+	void (*bs_wr_2_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int16_t *, bus_size_t);
+	void (*bs_wr_4_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int32_t *, bus_size_t);
+	void (*bs_wr_8_s) (void *, bus_space_handle_t,
+	    bus_size_t, const u_int64_t *, bus_size_t);
+
+	/* peek */
+	int (*bs_peek_1)(void *, bus_space_handle_t,
+	    bus_size_t , uint8_t *);
+	int (*bs_peek_2)(void *, bus_space_handle_t,
+	    bus_size_t , uint16_t *);
+	int (*bs_peek_4)(void *, bus_space_handle_t,
+	    bus_size_t , uint32_t *);
+	int (*bs_peek_8)(void *, bus_space_handle_t,
+	    bus_size_t , uint64_t *);
+
+	/* poke */
+	int (*bs_poke_1)(void *, bus_space_handle_t,
+	    bus_size_t, uint8_t);
+	int (*bs_poke_2)(void *, bus_space_handle_t,
+	    bus_size_t, uint16_t);
+	int (*bs_poke_4)(void *, bus_space_handle_t,
+	    bus_size_t, uint32_t);
+	int (*bs_poke_8)(void *, bus_space_handle_t,
+	    bus_size_t, uint64_t);
+};
+
+#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
+#include <sys/bus_san.h>
+#else
+
+/*
+ * Utility macros; INTERNAL USE ONLY.
+ */
+#define __bs_c(a,b) __CONCAT(a,b)
+#define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size)
+
+#define __bs_rs(sz, t, h, o) \
+ (*(t)->__bs_opname(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws(sz, t, h, o, v) \
+ (*(t)->__bs_opname(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+
+#define __bs_opname_s(op,size) __bs_c(__bs_c(__bs_c(__bs_c(bs_,op),_),size),_s)
+#define __bs_rs_s(sz, t, h, o) \
+ (*(t)->__bs_opname_s(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws_s(sz, t, h, o, v) \
+ (*(t)->__bs_opname_s(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_peek(sz, t, h, o, vp) \
+ (*(t)->__bs_opname(peek, sz))((t)->bs_cookie, h, o, vp)
+#define __bs_poke(sz, t, h, o, v) \
+ (*(t)->__bs_opname(poke, sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle_s(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, a, c)
+
+/*
+ * Mapping and unmapping operations.
+ */
+#define bus_space_map(t, a, s, c, hp) \
+ (*(t)->bs_map)((t)->bs_cookie, (a), (s), (c), (hp))
+#define bus_space_unmap(t, h, s) \
+ (*(t)->bs_unmap)((t)->bs_cookie, (h), (s))
+#define bus_space_subregion(t, h, o, s, hp) \
+ (*(t)->bs_subregion)((t)->bs_cookie, (h), (o), (s), (hp))
+
+/*
+ * Allocation and deallocation operations.
+ */
+#define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \
+ (*(t)->bs_alloc)((t)->bs_cookie, (rs), (re), (s), (a), (b), \
+ (c), (ap), (hp))
+#define bus_space_free(t, h, s) \
+ (*(t)->bs_free)((t)->bs_cookie, (h), (s))
+
+/*
+ * Bus barrier operations.
+ */
+#define bus_space_barrier(t, h, o, l, f) \
+ (*(t)->bs_barrier)((t)->bs_cookie, (h), (o), (l), (f))
+
+/*
+ * Bus read (single) operations.
+ */
+#define bus_space_read_1(t, h, o) __bs_rs(1,(t),(h),(o))
+#define bus_space_read_2(t, h, o) __bs_rs(2,(t),(h),(o))
+#define bus_space_read_4(t, h, o) __bs_rs(4,(t),(h),(o))
+#define bus_space_read_8(t, h, o) __bs_rs(8,(t),(h),(o))
+
+#define bus_space_read_stream_1(t, h, o) __bs_rs_s(1,(t), (h), (o))
+#define bus_space_read_stream_2(t, h, o) __bs_rs_s(2,(t), (h), (o))
+#define bus_space_read_stream_4(t, h, o) __bs_rs_s(4,(t), (h), (o))
+#define bus_space_read_stream_8(t, h, o) __bs_rs_s(8,(t), (h), (o))
+
+/*
+ * Bus read multiple operations.
+ */
+#define bus_space_read_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c))
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,8,(t),(h),(o),(a),(c))
+
+/*
+ * Bus read region operations.
+ */
+#define bus_space_read_region_1(t, h, o, a, c) \
+ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_2(t, h, o, a, c) \
+ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_4(t, h, o, a, c) \
+ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_8(t, h, o, a, c) \
+ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c))
+
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,8,(t),(h),(o),(a),(c))
+
+/*
+ * Bus write (single) operations.
+ */
+#define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v))
+#define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v))
+#define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v))
+#define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v))
+
+#define bus_space_write_stream_1(t, h, o, v) __bs_ws_s(1,(t),(h),(o),(v))
+#define bus_space_write_stream_2(t, h, o, v) __bs_ws_s(2,(t),(h),(o),(v))
+#define bus_space_write_stream_4(t, h, o, v) __bs_ws_s(4,(t),(h),(o),(v))
+#define bus_space_write_stream_8(t, h, o, v) __bs_ws_s(8,(t),(h),(o),(v))
+
+/*
+ * Bus write multiple operations.
+ */
+#define bus_space_write_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c))
+
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,8,(t),(h),(o),(a),(c))
+
+/*
+ * Bus write region operations.
+ */
+#define bus_space_write_region_1(t, h, o, a, c) \
+ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_2(t, h, o, a, c) \
+ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_4(t, h, o, a, c) \
+ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_8(t, h, o, a, c) \
+ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c))
+
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,8,(t),(h),(o),(a),(c))
+
+/*
+ * Set multiple operations.
+ */
+#define bus_space_set_multi_1(t, h, o, v, c) \
+ __bs_set(sm,1,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_2(t, h, o, v, c) \
+ __bs_set(sm,2,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_4(t, h, o, v, c) \
+ __bs_set(sm,4,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_8(t, h, o, v, c) \
+ __bs_set(sm,8,(t),(h),(o),(v),(c))
+
+#define bus_space_set_multi_stream_1(t, h, o, v, c) \
+ bus_space_set_multi_1((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_2(t, h, o, v, c) \
+ bus_space_set_multi_2((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_4(t, h, o, v, c) \
+ bus_space_set_multi_4((t), (h), (o), (v), (c))
+#define bus_space_set_multi_stream_8(t, h, o, v, c) \
+ bus_space_set_multi_8((t), (h), (o), (v), (c))
+
+/*
+ * Set region operations.
+ */
+#define bus_space_set_region_1(t, h, o, v, c) \
+ __bs_set(sr,1,(t),(h),(o),(v),(c))
+#define bus_space_set_region_2(t, h, o, v, c) \
+ __bs_set(sr,2,(t),(h),(o),(v),(c))
+#define bus_space_set_region_4(t, h, o, v, c) \
+ __bs_set(sr,4,(t),(h),(o),(v),(c))
+#define bus_space_set_region_8(t, h, o, v, c) \
+ __bs_set(sr,8,(t),(h),(o),(v),(c))
+
+#define bus_space_set_region_stream_1(t, h, o, v, c) \
+ bus_space_set_region_1((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_2(t, h, o, v, c) \
+ bus_space_set_region_2((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_4(t, h, o, v, c) \
+ bus_space_set_region_4((t), (h), (o), (v), (c))
+#define bus_space_set_region_stream_8(t, h, o, v, c) \
+ bus_space_set_region_8((t), (h), (o), (v), (c))
+
+/*
+ * Copy operations.
+ */
+#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \
+ __bs_copy(1, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \
+ __bs_copy(2, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \
+ __bs_copy(4, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \
+ __bs_copy(8, t, h1, o1, h2, o2, c)
+
+/*
+ * Poke (checked write) operations.
+ */
+#define bus_space_poke_1(t, h, o, v) __bs_poke(1, (t), (h), (o), (v))
+#define bus_space_poke_2(t, h, o, v) __bs_poke(2, (t), (h), (o), (v))
+#define bus_space_poke_4(t, h, o, v) __bs_poke(4, (t), (h), (o), (v))
+#define bus_space_poke_8(t, h, o, v) __bs_poke(8, (t), (h), (o), (v))
+
+/*
+ * Peek (checked read) operations.
+ */
+#define bus_space_peek_1(t, h, o, vp) __bs_peek(1, (t), (h), (o), (vp))
+#define bus_space_peek_2(t, h, o, vp) __bs_peek(2, (t), (h), (o), (vp))
+#define bus_space_peek_4(t, h, o, vp) __bs_peek(4, (t), (h), (o), (vp))
+#define bus_space_peek_8(t, h, o, vp) __bs_peek(8, (t), (h), (o), (vp))
+
+#endif /* !SAN_NEEDS_INTERCEPTORS */
+
+#include <machine/bus_dma.h>
+
+#endif /* _MACHINE_BUS_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/bus_dma.h b/sys/arm64/include/bus_dma.h
new file mode 100644
index 000000000000..d9c37eb2641b
--- /dev/null
+++ b/sys/arm64/include/bus_dma.h
@@ -0,0 +1,164 @@
+
+#ifndef _MACHINE_BUS_DMA_H_
+#define _MACHINE_BUS_DMA_H_
+
+#define WANT_INLINE_DMAMAP
+#include <sys/bus_dma.h>
+
+#include <machine/bus_dma_impl.h>
+
+/*
+ * Is DMA address 1:1 mapping of physical address
+ *
+ * Delegates to the tag implementation's id_mapped() method for the
+ * physical range [buf, buf + buflen).
+ */
+static inline bool
+bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+{
+	struct bus_dma_tag_common *tc;
+
+	/* Every tag begins with a struct bus_dma_tag_common. */
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->id_mapped(dmat, buf, buflen));
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ *
+ * Delegates to the tag implementation's map_create() method.
+ */
+static inline int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->map_create(dmat, flags, mapp));
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ *
+ * Delegates to the tag implementation's map_destroy() method.
+ */
+static inline int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->map_destroy(dmat, map));
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ *
+ * Delegates to the tag implementation's mem_alloc() method.
+ */
+static inline int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+    bus_dmamap_t *mapp)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp));
+}
+
+/*
+ * Free a piece of memory and its associated dmamap, that was allocated
+ * via bus_dmamem_alloc.
+ *
+ * Delegates to the tag implementation's mem_free() method.
+ */
+static inline void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	tc->impl->mem_free(dmat, vaddr, map);
+}
+
+/*
+ * Release the mapping held by map.
+ *
+ * Delegates to the tag implementation's map_unload() method.
+ */
+static inline void
+bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	tc->impl->map_unload(dmat, map);
+}
+
+/*
+ * Perform the bus_dmamap_sync(9) operation 'op' on the buffers
+ * described by map.  Delegates to the implementation's map_sync()
+ * method.
+ */
+static inline void
+bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	tc->impl->map_sync(dmat, map, op);
+}
+
+/*
+ * Load a physical address range [buf, buf + buflen) into the map,
+ * appending to segs at *segp.  Delegates to the implementation's
+ * load_phys() method.
+ */
+static inline int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs,
+	    segp));
+}
+
+/*
+ * Load an array of vm_page pointers (ma), tlen bytes starting at
+ * offset ma_offs into the first page.  Delegates to the
+ * implementation's load_ma() method.
+ */
+static inline int
+_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
+    bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags,
+	    segs, segp));
+}
+
+/*
+ * Load a virtually-addressed buffer; pmap identifies the address
+ * space buf belongs to.  Delegates to the implementation's
+ * load_buffer() method.
+ */
+static inline int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs,
+	    segp));
+}
+
+/*
+ * Hand the callback and memory descriptor to the implementation's
+ * map_waitok() method; used for loads that may be deferred -- see
+ * bus_dma(9).
+ */
+static inline void
+_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	tc->impl->map_waitok(dmat, map, mem, callback, callback_arg);
+}
+
+/*
+ * Complete a load, returning the segment array to pass to the
+ * caller's callback.  Delegates to the implementation's
+ * map_complete() method.
+ */
+static inline bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_segment_t *segs, int nsegs, int error)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	return (tc->impl->map_complete(dmat, map, segs, nsegs, error));
+}
+
+#ifdef KMSAN
+/*
+ * Dispatch to the implementation's load_kmsan() method -- presumably
+ * updates KMSAN shadow state for the memory described by mem; confirm
+ * against the bounce implementation.
+ */
+static inline void
+_bus_dmamap_load_kmsan(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem)
+{
+	struct bus_dma_tag_common *tc;
+
+	tc = (struct bus_dma_tag_common *)dmat;
+	/*
+	 * load_kmsan() returns void: a 'return' carrying a (void)
+	 * expression in a void function violates C11 6.8.6.4 and is
+	 * only accepted as a compiler extension, so call it plainly.
+	 */
+	tc->impl->load_kmsan(map, mem);
+}
+#endif
+
+#endif /* !_MACHINE_BUS_DMA_H_ */
diff --git a/sys/arm64/include/bus_dma_impl.h b/sys/arm64/include/bus_dma_impl.h
new file mode 100644
index 000000000000..1fb5e10c92b0
--- /dev/null
+++ b/sys/arm64/include/bus_dma_impl.h
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_BUS_DMA_IMPL_H_
+#define _MACHINE_BUS_DMA_IMPL_H_
+
+/*
+ * Common prefix of every bus_dma tag.  A bus_dma_tag_t always starts
+ * with this structure, so the inline wrappers in <machine/bus_dma.h>
+ * can cast the tag and dispatch through 'impl'.  The constraint
+ * fields mirror the corresponding tag_create() parameters.
+ */
+struct bus_dma_tag_common {
+	struct bus_dma_impl *impl;	/* method table for this tag */
+	bus_size_t alignment;		/* required segment alignment */
+	bus_addr_t boundary;		/* boundary no segment may cross */
+	bus_addr_t lowaddr;		/* see bus_dma_tag_create(9) */
+	bus_addr_t highaddr;		/* see bus_dma_tag_create(9) */
+	bus_size_t maxsize;		/* max total transfer size */
+	u_int nsegments;		/* max number of segments */
+	bus_size_t maxsegsz;		/* max size of one segment */
+	int flags;
+	bus_dma_lock_t *lockfunc;	/* lock helper for deferred loads */
+	void *lockfuncarg;
+	int domain;			/* memory domain for allocations */
+};
+
+/*
+ * Method table implemented by each bus_dma backend (e.g.
+ * bus_dma_bounce_impl below).  The inline wrappers in
+ * <machine/bus_dma.h> dispatch through these pointers; see that
+ * header for per-method semantics.
+ */
+struct bus_dma_impl {
+	int (*tag_create)(bus_dma_tag_t parent,
+	    bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+	    bus_addr_t highaddr, bus_size_t maxsize, int nsegments,
+	    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+	    void *lockfuncarg, bus_dma_tag_t *dmat);
+	int (*tag_destroy)(bus_dma_tag_t dmat);
+	int (*tag_set_domain)(bus_dma_tag_t);
+	bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
+	int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+	int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
+	int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
+	    bus_dmamap_t *mapp);
+	void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+	int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    vm_paddr_t buf, bus_size_t buflen, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    void *buf, bus_size_t buflen, struct pmap *pmap, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    struct memdesc *mem, bus_dmamap_callback_t *callback,
+	    void *callback_arg);
+	bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    bus_dma_segment_t *segs, int nsegs, int error);
+	void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map);
+	void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    bus_dmasync_op_t op);
+#ifdef KMSAN
+	void (*load_kmsan)(bus_dmamap_t map, struct memdesc *mem);
+#endif
+};
+
+int common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+ bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, size_t sz, void **dmat);
+
+extern struct bus_dma_impl bus_dma_bounce_impl;
+
+#endif
diff --git a/sys/arm64/include/clock.h b/sys/arm64/include/clock.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/sys/arm64/include/clock.h
diff --git a/sys/arm64/include/cmn600_reg.h b/sys/arm64/include/cmn600_reg.h
new file mode 100644
index 000000000000..f4c27d794868
--- /dev/null
+++ b/sys/arm64/include/cmn600_reg.h
@@ -0,0 +1,807 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 ARM Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_CMN600_REG_H_
+#define _MACHINE_CMN600_REG_H_
+
+#define CMN600_COUNTERS_N 8
+#define CMN600_UNIT_MAX 4
+#define CMN600_PMU_DEFAULT_UNITS_N 2
+#define CMN600_COMMON_PMU_EVENT_SEL 0x2000 /* rw */
+#define CMN600_COMMON_PMU_EVENT_SEL_OCC_SHIFT 32
+#define CMN600_COMMON_PMU_EVENT_SEL_OCC_MASK (0x7UL << 32)
+
+struct cmn600_pmc {
+ void *arg;
+ int domain;
+};
+
+int cmn600_pmc_nunits(void);
+int cmn600_pmc_getunit(int unit, void **arg, int *domain);
+
+int cmn600_pmu_intr_cb(void *arg, int (*handler)(struct trapframe *tf,
+ int unit, int i));
+
+int pmu_cmn600_alloc_localpmc(void *arg, int nodeid, int node_type,
+ int *local_counter);
+int pmu_cmn600_free_localpmc(void *arg, int nodeid, int node_type,
+ int local_counter);
+int pmu_cmn600_rev(void *arg);
+uint32_t pmu_cmn600_rd4(void *arg, int nodeid, int node_type, off_t reg);
+int pmu_cmn600_wr4(void *arg, int nodeid, int node_type, off_t reg,
+ uint32_t val);
+uint64_t pmu_cmn600_rd8(void *arg, int nodeid, int node_type, off_t reg);
+int pmu_cmn600_wr8(void *arg, int nodeid, int node_type, off_t reg,
+ uint64_t val);
+int pmu_cmn600_set8(void *arg, int nodeid, int node_type, off_t reg,
+ uint64_t val);
+int pmu_cmn600_clr8(void *arg, int nodeid, int node_type, off_t reg,
+ uint64_t val);
+int pmu_cmn600_md8(void *arg, int nodeid, int node_type, off_t reg,
+ uint64_t mask, uint64_t val);
+
+/* Configuration master registers */
+#define POR_CFGM_NODE_INFO 0x0000 /* ro */
+#define POR_CFGM_NODE_INFO_LOGICAL_ID_MASK 0xffff00000000UL
+#define POR_CFGM_NODE_INFO_LOGICAL_ID_SHIFT 32
+#define POR_CFGM_NODE_INFO_NODE_ID_MASK 0xffff0000
+#define POR_CFGM_NODE_INFO_NODE_ID_SHIFT 16
+#define POR_CFGM_NODE_INFO_NODE_TYPE_MASK 0xffff
+#define POR_CFGM_NODE_INFO_NODE_TYPE_SHIFT 0
+
+#define NODE_ID_SUB_MASK 0x3
+#define NODE_ID_SUB_SHIFT 0
+#define NODE_ID_PORT_MASK 0x4
+#define NODE_ID_PORT_SHIFT 2
+#define NODE_ID_X2B_MASK (0x3 << 3)
+#define NODE_ID_X2B_SHIFT 3
+#define NODE_ID_Y2B_MASK (0x3 << 5)
+#define NODE_ID_Y2B_SHIFT 5
+#define NODE_ID_X3B_MASK (0x7 << 3)
+#define NODE_ID_X3B_SHIFT 3
+#define NODE_ID_Y3B_MASK (0x7 << 6)
+#define NODE_ID_Y3B_SHIFT 6
+
+#define NODE_TYPE_INVALID 0x000
+#define NODE_TYPE_DVM 0x001
+#define NODE_TYPE_CFG 0x002
+#define NODE_TYPE_DTC 0x003
+#define NODE_TYPE_HN_I 0x004
+#define NODE_TYPE_HN_F 0x005
+#define NODE_TYPE_XP 0x006
+#define NODE_TYPE_SBSX 0x007
+#define NODE_TYPE_RN_I 0x00A
+#define NODE_TYPE_RN_D 0x00D
+#define NODE_TYPE_RN_SAM 0x00F
+#define NODE_TYPE_CXRA 0x100
+#define NODE_TYPE_CXHA 0x101
+#define NODE_TYPE_CXLA 0x102
+
+#define POR_CFGM_PERIPH_ID_0_PERIPH_ID_1 0x0008 /* ro */
+#define POR_CFGM_PERIPH_ID_2_PERIPH_ID_3 0x0010 /* ro */
+#define POR_CFGM_PERIPH_ID_2_REV_SHIFT 4
+#define POR_CFGM_PERIPH_ID_2_REV_MASK 0xf0
+#define POR_CFGM_PERIPH_ID_2_REV_R1P0 0
+#define POR_CFGM_PERIPH_ID_2_REV_R1P1 1
+#define POR_CFGM_PERIPH_ID_2_REV_R1P2 2
+#define POR_CFGM_PERIPH_ID_2_REV_R1P3 3
+#define POR_CFGM_PERIPH_ID_2_REV_R2P0 4
+#define POR_CFGM_PERIPH_ID_4_PERIPH_ID_5 0x0018 /* ro */
+#define POR_CFGM_PERIPH_ID_6_PERIPH_ID_7 0x0020 /* ro */
+#define POR_CFGM_PERIPH_ID_32(x) (0x0008 + ((x) * 4)) /* ro 32 */
+#define POR_CFGM_COMPONENT_ID_0_COMPONENT_ID_1 0x0028 /* ro */
+#define POR_CFGM_COMPONENT_ID_2_COMPONENT_ID_3 0x0030 /* ro */
+#define POR_CFGM_CHILD_INFO 0x0080 /* ro */
+#define POR_CFGM_CHILD_INFO_CHILD_PTR_OFFSET_MASK 0xffff0000
+#define POR_CFGM_CHILD_INFO_CHILD_PTR_OFFSET_SHIFT 16
+#define POR_CFGM_CHILD_INFO_CHILD_COUNT_MASK 0x0000ffff
+#define POR_CFGM_CHILD_INFO_CHILD_COUNT_SHIFT 0
+#define POR_CFGM_SECURE_ACCESS 0x0980 /* rw */
+#define POR_CFGM_ERRGSR0 0x3000 /* ro */
+#define POR_CFGM_ERRGSR1 0x3008 /* ro */
+#define POR_CFGM_ERRGSR2 0x3010 /* ro */
+#define POR_CFGM_ERRGSR3 0x3018 /* ro */
+#define POR_CFGM_ERRGSR4 0x3020 /* ro */
+#define POR_CFGM_ERRGSR5 0x3080 /* ro */
+#define POR_CFGM_ERRGSR6 0x3088 /* ro */
+#define POR_CFGM_ERRGSR7 0x3090 /* ro */
+#define POR_CFGM_ERRGSR8 0x3098 /* ro */
+#define POR_CFGM_ERRGSR9 0x30a0 /* ro */
+#define POR_CFGM_ERRGSR(x) (0x3000 + ((x) * 8)) /* ro */
+#define POR_CFGM_ERRGSR0_ns 0x3100 /* ro */
+#define POR_CFGM_ERRGSR1_ns 0x3108 /* ro */
+#define POR_CFGM_ERRGSR2_ns 0x3110 /* ro */
+#define POR_CFGM_ERRGSR3_ns 0x3118 /* ro */
+#define POR_CFGM_ERRGSR4_ns 0x3120 /* ro */
+#define POR_CFGM_ERRGSR5_ns 0x3180 /* ro */
+#define POR_CFGM_ERRGSR6_ns 0x3188 /* ro */
+#define POR_CFGM_ERRGSR7_ns 0x3190 /* ro */
+#define POR_CFGM_ERRGSR8_ns 0x3198 /* ro */
+#define POR_CFGM_ERRGSR9_ns 0x31a0 /* ro */
+#define POR_CFGM_ERRGSR_ns(x) (0x3100 + ((x) * 8)) /* ro */
+#define POR_CFGM_ERRDEVAFF 0x3fa8 /* ro */
+#define POR_CFGM_ERRDEVARCH 0x3fb8 /* ro */
+#define POR_CFGM_ERRIDR 0x3fc8 /* ro */
+#define POR_CFGM_ERRPIDR45 0x3fd0 /* ro */
+#define POR_CFGM_ERRPIDR67 0x3fd8 /* ro */
+#define POR_CFGM_ERRPIDR01 0x3fe0 /* ro */
+#define POR_CFGM_ERRPIDR23 0x3fe8 /* ro */
+#define POR_CFGM_ERRCIDR01 0x3ff0 /* ro */
+#define POR_CFGM_ERRCIDR23 0x3ff8 /* ro */
+#define POR_INFO_GLOBAL 0x0900 /* ro */
+#define POR_INFO_GLOBAL_CHIC_MODE_EN (1UL << 49) /* CHI-C mode enable */
+#define POR_INFO_GLOBAL_R2_ENABLE (1UL << 48) /* CMN R2 feature enable */
+#define POR_INFO_GLOBAL_RNSAM_NUM_ADD_HASHED_TGT_SHIFT 36 /* Number of additional hashed target ID's supported by the RN SAM, beyond the local HNF count */
+#define POR_INFO_GLOBAL_RNSAM_NUM_ADD_HASHED_TGT_MASK (0x3fUL << 36)
+#define POR_INFO_GLOBAL_NUM_REMOTE_RNF_SHIFT 28 /* Number of remote RN-F devices in the system when the CML feature is enabled */
+#define POR_INFO_GLOBAL_NUM_REMOTE_RNF_MASK (0xffUL << 28)
+#define POR_INFO_GLOBAL_FLIT_PARITY_EN (1 << 25) /* Indicates whether parity checking is enabled in the transport layer on all flits sent on the interconnect */
+#define POR_INFO_GLOBAL_DATACHECK_EN (1 << 24) /* Indicates whether datacheck feature is enabled for CHI DAT flit */
+#define POR_INFO_GLOBAL_PHYSICAL_ADDRESS_WIDTH_SHIFT 16 /* Physical address width */
+#define POR_INFO_GLOBAL_PHYSICAL_ADDRESS_WIDTH_MASK (0xff << 16)
+#define POR_INFO_GLOBAL_CHI_REQ_ADDR_WIDTH_SHIFT 8 /* REQ address width */
+#define POR_INFO_GLOBAL_CHI_REQ_ADDR_WIDTH_MASK (0xff << 8)
+#define POR_INFO_GLOBAL_CHI_REQ_RSVDC_WIDTH_SHIFT 0 /* RSVDC field width in CHI REQ flit */
+#define POR_INFO_GLOBAL_CHI_REQ_RSVDC_WIDTH_MASK 0xff
+
+#define POR_PPU_INT_ENABLE 0x1000 /* rw */
+#define POR_PPU_INT_STATUS 0x1008 /* w1c */
+#define POR_PPU_QACTIVE_HYST 0x1010 /* rw */
+#define POR_CFGM_CHILD_POINTER_0		0x0100	/* ro */
+#define POR_CFGM_CHILD_POINTER(x)	(POR_CFGM_CHILD_POINTER_0 + ((x) * 8))
+#define POR_CFGM_CHILD_POINTER_EXT		(1U << 31)
+#define POR_CFGM_CHILD_POINTER_BASE_MASK	0x0fffffffUL
+
+/* DN registers */
+#define POR_DN_NODE_INFO 0x0000 /* ro */
+#define POR_DN_CHILD_INFO 0x0080 /* ro */
+#define POR_DN_BUILD_INFO 0x0900 /* ro */
+#define POR_DN_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_DN_AUX_CTL 0x0a00 /* rw */
+#define POR_DN_VMF0_CTRL 0x0c00 /* rw */
+#define POR_DN_VMF0_RNF0 0x0c08 /* rw */
+#define POR_DN_VMF0_RND 0x0c10 /* rw */
+#define POR_DN_VMF0_CXRA 0x0c18 /* rw */
+#define POR_DN_VMF1_CTRL 0x0c20 /* rw */
+#define POR_DN_VMF1_RNF0 0x0c28 /* rw */
+#define POR_DN_VMF1_RND 0x0c30 /* rw */
+#define POR_DN_VMF1_CXRA 0x0c38 /* rw */
+#define POR_DN_VMF2_CTRL 0x0c40 /* rw */
+#define POR_DN_VMF2_RNF0 0x0c48 /* rw */
+#define POR_DN_VMF2_RND 0x0c50 /* rw */
+#define POR_DN_VMF2_CXRA 0x0c58 /* rw */
+#define POR_DN_VMF3_CTRL 0x0c60 /* rw */
+#define POR_DN_VMF3_RNF0 0x0c68 /* rw */
+#define POR_DN_VMF3_RND 0x0c70 /* rw */
+#define POR_DN_VMF3_CXRA 0x0c78 /* rw */
+#define POR_DN_VMF4_CTRL 0x0c80 /* rw */
+#define POR_DN_VMF4_RNF0 0x0c88 /* rw */
+#define POR_DN_VMF4_RND 0x0c90 /* rw */
+#define POR_DN_VMF4_CXRA 0x0c98 /* rw */
+#define POR_DN_VMF5_CTRL 0x0ca0 /* rw */
+#define POR_DN_VMF5_RNF0 0x0ca8 /* rw */
+#define POR_DN_VMF5_RND 0x0cb0 /* rw */
+#define POR_DN_VMF5_CXRA 0x0cb8 /* rw */
+#define POR_DN_VMF6_CTRL 0x0cc0 /* rw */
+#define POR_DN_VMF6_RNF0 0x0cc8 /* rw */
+#define POR_DN_VMF6_RND 0x0cd0 /* rw */
+#define POR_DN_VMF6_CXRA 0x0cd8 /* rw */
+#define POR_DN_VMF7_CTRL 0x0ce0 /* rw */
+#define POR_DN_VMF7_RNF0 0x0ce8 /* rw */
+#define POR_DN_VMF7_RND 0x0cf0 /* rw */
+#define POR_DN_VMF7_CXRA 0x0cf8 /* rw */
+#define POR_DN_VMF8_CTRL 0x0d00 /* rw */
+#define POR_DN_VMF8_RNF0 0x0d08 /* rw */
+#define POR_DN_VMF8_RND 0x0d10 /* rw */
+#define POR_DN_VMF8_CXRA 0x0d18 /* rw */
+#define POR_DN_VMF9_CTRL 0x0d20 /* rw */
+#define POR_DN_VMF9_RNF0 0x0d28 /* rw */
+#define POR_DN_VMF9_RND 0x0d30 /* rw */
+#define POR_DN_VMF9_CXRA 0x0d38 /* rw */
+#define POR_DN_VMF10_CTRL 0x0d40 /* rw */
+#define POR_DN_VMF10_RNF0 0x0d48 /* rw */
+#define POR_DN_VMF10_RND 0x0d50 /* rw */
+#define POR_DN_VMF10_CXRA 0x0d58 /* rw */
+#define POR_DN_VMF11_CTRL 0x0d60 /* rw */
+#define POR_DN_VMF11_RNF0 0x0d68 /* rw */
+#define POR_DN_VMF11_RND 0x0d70 /* rw */
+#define POR_DN_VMF11_CXRA 0x0d78 /* rw */
+#define POR_DN_VMF12_CTRL 0x0d80 /* rw */
+#define POR_DN_VMF12_RNF0 0x0d88 /* rw */
+#define POR_DN_VMF12_RND 0x0d90 /* rw */
+#define POR_DN_VMF12_CXRA 0x0d98 /* rw */
+#define POR_DN_VMF13_CTRL 0x0da0 /* rw */
+#define POR_DN_VMF13_RNF0 0x0da8 /* rw */
+#define POR_DN_VMF13_RND 0x0db0 /* rw */
+#define POR_DN_VMF13_CXRA 0x0db8 /* rw */
+#define POR_DN_VMF14_CTRL 0x0dc0 /* rw */
+#define POR_DN_VMF14_RNF0 0x0dc8 /* rw */
+#define POR_DN_VMF14_RND 0x0dd0 /* rw */
+#define POR_DN_VMF14_CXRA 0x0dd8 /* rw */
+#define POR_DN_VMF15_CTRL 0x0de0 /* rw */
+#define POR_DN_VMF15_RNF0 0x0de8 /* rw */
+#define POR_DN_VMF15_RND 0x0df0 /* rw */
+#define POR_DN_VMF15_CXRA 0x0df8 /* rw */
+#define POR_DN_PMU_EVENT_SEL			0x2000	/* rw */
+#define POR_DN_PMU_EVENT_SEL_OCCUP1_ID_SHIFT	32
+#define POR_DN_PMU_EVENT_SEL_OCCUP1_ID_MASK	(0xfUL << 32)
+#define POR_DN_PMU_EVENT_SEL_OCCUP1_ID_ALL	0
+#define POR_DN_PMU_EVENT_SEL_OCCUP1_ID_DVM_OPS	1
+#define POR_DN_PMU_EVENT_SEL_OCCUP1_ID_DVM_SYNCS 2
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID3_SHIFT	24
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID3_MASK	(0x3f << 24)
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID2_SHIFT	16
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID2_MASK	(0x3f << 16)
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID1_SHIFT	8
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID1_MASK	(0x3f << 8)
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID0_SHIFT	0
+#define POR_DN_PMU_EVENT_SEL_EVENT_ID0_MASK	0x3f
+
+/* Debug and trace register */
+#define POR_DT_NODE_INFO 0x0000 /* ro */
+#define POR_DT_CHILD_INFO 0x0080 /* ro */
+#define POR_DT_SECURE_ACCESS 0x0980 /* rw */
+#define POR_DT_DTC_CTL 0x0a00 /* rw */
+#define POR_DT_DTC_CTL_DT_EN (1 << 0)
+#define POR_DT_TRIGGER_STATUS 0x0a10 /* ro */
+#define POR_DT_TRIGGER_STATUS_CLR 0x0a20 /* wo */
+#define POR_DT_TRACE_CONTROL 0x0a30 /* rw */
+#define POR_DT_TRACEID 0x0a48 /* rw */
+#define POR_DT_PMEVCNTAB 0x2000 /* rw */
+#define POR_DT_PMEVCNTCD 0x2010 /* rw */
+#define POR_DT_PMEVCNTEF 0x2020 /* rw */
+#define POR_DT_PMEVCNTGH 0x2030 /* rw */
+#define POR_DT_PMEVCNT(x) (0x2000 + ((x) * 0x10))
+#define POR_DT_PMEVCNT_EVENCNT_SHIFT 0
+#define POR_DT_PMEVCNT_ODDCNT_SHIFT 32
+#define POR_DT_PMCCNTR 0x2040 /* rw */
+#define POR_DT_PMEVCNTSRAB 0x2050 /* rw */
+#define POR_DT_PMEVCNTSRCD 0x2060 /* rw */
+#define POR_DT_PMEVCNTSREF 0x2070 /* rw */
+#define POR_DT_PMEVCNTSRGH 0x2080 /* rw */
+#define POR_DT_PMCCNTRSR 0x2090 /* rw */
+#define POR_DT_PMCR 0x2100 /* rw */
+#define POR_DT_PMCR_OVFL_INTR_EN (1 << 6)
+#define POR_DT_PMCR_CNTR_RST (1 << 5)
+#define POR_DT_PMCR_CNTCFG_SHIFT 1
+#define POR_DT_PMCR_CNTCFG_MASK (0xf << POR_DT_PMCR_CNTCFG_SHIFT)
+#define POR_DT_PMCR_PMU_EN (1 << 0)
+#define POR_DT_PMOVSR 0x2118 /* ro */
+#define POR_DT_PMOVSR_CLR 0x2120 /* wo */
+#define POR_DT_PMOVSR_EVENT_COUNTERS 0xffUL
+#define POR_DT_PMOVSR_CYCLE_COUNTER 0x100UL
+#define POR_DT_PMOVSR_ALL \
+ (POR_DT_PMOVSR_EVENT_COUNTERS | POR_DT_PMOVSR_CYCLE_COUNTER)
+#define POR_DT_PMSSR 0x2128 /* ro */
+#define POR_DT_PMSRR 0x2130 /* wo */
+#define POR_DT_CLAIM 0x2da0 /* rw */
+#define POR_DT_DEVAFF 0x2da8 /* ro */
+#define POR_DT_LSR 0x2db0 /* ro */
+#define POR_DT_AUTHSTATUS_DEVARCH 0x2db8 /* ro */
+#define POR_DT_DEVID 0x2dc0 /* ro */
+#define POR_DT_DEVTYPE 0x2dc8 /* ro */
+#define POR_DT_PIDR45 0x2dd0 /* ro */
+#define POR_DT_PIDR67 0x2dd8 /* ro */
+#define POR_DT_PIDR01 0x2de0 /* ro */
+#define POR_DT_PIDR23 0x2de8 /* ro */
+#define POR_DT_CIDR01 0x2df0 /* ro */
+#define POR_DT_CIDR23 0x2df8 /* ro */
+
+/* HN-F registers */
+#define POR_HNF_NODE_INFO 0x0000 /* ro */
+#define POR_HNF_CHILD_INFO 0x0080 /* ro */
+#define POR_HNF_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_HNF_UNIT_INFO 0x0900 /* ro */
+#define POR_HNF_CFG_CTL 0x0a00 /* rw */
+#define POR_HNF_AUX_CTL 0x0a08 /* rw */
+#define POR_HNF_R2_AUX_CTL 0x0a10 /* rw */
+#define POR_HNF_PPU_PWPR 0x1000 /* rw */
+#define POR_HNF_PPU_PWSR 0x1008 /* ro */
+#define POR_HNF_PPU_MISR 0x1014 /* ro */
+#define POR_HNF_PPU_IDR0 0x1fb0 /* ro */
+#define POR_HNF_PPU_IDR1 0x1fb4 /* ro */
+#define POR_HNF_PPU_IIDR 0x1fc8 /* ro */
+#define POR_HNF_PPU_AIDR 0x1fcc /* ro */
+#define POR_HNF_PPU_DYN_RET_THRESHOLD 0x1100 /* rw */
+#define POR_HNF_QOS_BAND 0x0a80 /* ro */
+#define POR_HNF_QOS_RESERVATION 0x0a88 /* rw */
+#define POR_HNF_RN_STARVATION 0x0a90 /* rw */
+#define POR_HNF_ERRFR 0x3000 /* ro */
+#define POR_HNF_ERRCTLR 0x3008 /* rw */
+#define POR_HNF_ERRSTATUS 0x3010 /* w1c */
+#define POR_HNF_ERRADDR 0x3018 /* rw */
+#define POR_HNF_ERRMISC 0x3020 /* rw */
+#define POR_HNF_ERR_INJ 0x3030 /* rw */
+#define POR_HNF_BYTE_PAR_ERR_INJ 0x3038 /* wo */
+#define POR_HNF_ERRFR_NS 0x3100 /* ro */
+#define POR_HNF_ERRCTLR_NS 0x3108 /* rw */
+#define POR_HNF_ERRSTATUS_NS 0x3110 /* w1c */
+#define POR_HNF_ERRADDR_NS 0x3118 /* rw */
+#define POR_HNF_ERRMISC_NS 0x3120 /* rw */
+#define POR_HNF_SLC_LOCK_WAYS 0x0c00 /* rw */
+#define POR_HNF_SLC_LOCK_BASE0 0x0c08 /* rw */
+#define POR_HNF_SLC_LOCK_BASE1 0x0c10 /* rw */
+#define POR_HNF_SLC_LOCK_BASE2 0x0c18 /* rw */
+#define POR_HNF_SLC_LOCK_BASE3 0x0c20 /* rw */
+#define POR_HNF_RNF_REGION_VEC1 0x0c28 /* rw */
+#define POR_HNF_RNI_REGION_VEC 0x0c30 /* rw */
+#define POR_HNF_RNF_REGION_VEC 0x0c38 /* rw */
+#define POR_HNF_RND_REGION_VEC 0x0c40 /* rw */
+#define POR_HNF_SLCWAY_PARTITION0_RNF_VEC 0x0c48 /* rw */
+#define POR_HNF_SLCWAY_PARTITION1_RNF_VEC 0x0c50 /* rw */
+#define POR_HNF_SLCWAY_PARTITION2_RNF_VEC 0x0c58 /* rw */
+#define POR_HNF_SLCWAY_PARTITION3_RNF_VEC 0x0c60 /* rw */
+#define POR_HNF_SLCWAY_PARTITION0_RNF_VEC1 0x0cb0 /* rw */
+#define POR_HNF_SLCWAY_PARTITION1_RNF_VEC1 0x0cb8 /* rw */
+#define POR_HNF_SLCWAY_PARTITION2_RNF_VEC1 0x0cc0 /* rw */
+#define POR_HNF_SLCWAY_PARTITION3_RNF_VEC1 0x0cc8 /* rw */
+#define POR_HNF_SLCWAY_PARTITION0_RNI_VEC 0x0c68 /* rw */
+#define POR_HNF_SLCWAY_PARTITION1_RNI_VEC 0x0c70 /* rw */
+#define POR_HNF_SLCWAY_PARTITION2_RNI_VEC 0x0c78 /* rw */
+#define POR_HNF_SLCWAY_PARTITION3_RNI_VEC 0x0c80 /* rw */
+#define POR_HNF_SLCWAY_PARTITION0_RND_VEC 0x0c88 /* rw */
+#define POR_HNF_SLCWAY_PARTITION1_RND_VEC 0x0c90 /* rw */
+#define POR_HNF_SLCWAY_PARTITION2_RND_VEC 0x0c98 /* rw */
+#define POR_HNF_SLCWAY_PARTITION3_RND_VEC 0x0ca0 /* rw */
+#define POR_HNF_RN_REGION_LOCK 0x0ca8 /* rw */
+#define POR_HNF_SAM_CONTROL 0x0d00 /* rw */
+#define POR_HNF_SAM_MEMREGION0 0x0d08 /* rw */
+#define POR_HNF_SAM_MEMREGION1 0x0d10 /* rw */
+#define POR_HNF_SAM_SN_PROPERTIES 0x0d18 /* rw */
+#define POR_HNF_SAM_6SN_NODEID 0x0d20 /* rw */
+#define POR_HNF_RN_PHYS_ID(x) (0x0d28 + 8 * (x)) /* rw */
+#define POR_HNF_RN_PHYS_ID63 0x0f90 /* rw */
+#define POR_HNF_SF_CXG_BLOCKED_WAYS 0x0f00 /* rw */
+#define POR_HNF_CML_PORT_AGGR_GRP0_ADD_MASK 0x0f10 /* rw */
+#define POR_HNF_CML_PORT_AGGR_GRP1_ADD_MASK 0x0f18 /* rw */
+#define POR_HNF_CML_PORT_AGGR_GRP0_REG 0x0f28 /* rw */
+#define POR_HNF_CML_PORT_AGGR_GRP1_REG 0x0f30 /* rw */
+#define HN_SAM_HASH_ADDR_MASK_REG 0x0f40 /* rw */
+#define HN_SAM_REGION_CMP_ADDR_MASK_REG 0x0f48 /* rw */
+#define POR_HNF_ABF_LO_ADDR 0x0f50 /* rw */
+#define POR_HNF_ABF_HI_ADDR 0x0f58 /* rw */
+#define POR_HNF_ABF_PR 0x0f60 /* rw */
+#define POR_HNF_ABF_SR 0x0f68 /* ro */
+#define POR_HNF_LDID_MAP_TABLE_REG0 0x0f98 /* rw */
+#define POR_HNF_LDID_MAP_TABLE_REG1 0x0fa0 /* rw */
+#define POR_HNF_LDID_MAP_TABLE_REG2 0x0fa8 /* rw */
+#define POR_HNF_LDID_MAP_TABLE_REG3 0x0fb0 /* rw */
+#define POR_HNF_CFG_SLCSF_DBGRD 0x0b80 /* wo */
+#define POR_HNF_SLC_CACHE_ACCESS_SLC_TAG 0x0b88 /* ro */
+#define POR_HNF_SLC_CACHE_ACCESS_SLC_DATA 0x0b90 /* ro */
+#define POR_HNF_SLC_CACHE_ACCESS_SF_TAG 0x0b98 /* ro */
+#define POR_HNF_SLC_CACHE_ACCESS_SF_TAG1 0x0ba0 /* ro */
+#define POR_HNF_SLC_CACHE_ACCESS_SF_TAG2 0x0ba8 /* ro */
+#define POR_HNF_PMU_EVENT_SEL 0x2000 /* rw */
+
+/* HN-I registers */
+#define POR_HNI_NODE_INFO 0x0000 /* ro */
+#define POR_HNI_CHILD_INFO 0x0080 /* ro */
+#define POR_HNI_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_HNI_UNIT_INFO 0x0900 /* ro */
+#define POR_HNI_SAM_ADDRREGION0_CFG 0x0c00 /* rw */
+#define POR_HNI_SAM_ADDRREGION1_CFG 0x0c08 /* rw */
+#define POR_HNI_SAM_ADDRREGION2_CFG 0x0c10 /* rw */
+#define POR_HNI_SAM_ADDRREGION3_CFG 0x0c18 /* rw */
+#define POR_HNI_CFG_CTL 0x0a00 /* rw */
+#define POR_HNI_AUX_CTL 0x0a08 /* rw */
+#define POR_HNI_ERRFR 0x3000 /* ro */
+#define POR_HNI_ERRCTLR 0x3008 /* rw */
+#define POR_HNI_ERRSTATUS 0x3010 /* w1c */
+#define POR_HNI_ERRADDR 0x3018 /* rw */
+#define POR_HNI_ERRMISC 0x3020 /* rw */
+#define POR_HNI_ERRFR_NS 0x3100 /* ro */
+#define POR_HNI_ERRCTLR_NS 0x3108 /* rw */
+#define POR_HNI_ERRSTATUS_NS 0x3110 /* w1c */
+#define POR_HNI_ERRADDR_NS 0x3118 /* rw */
+#define POR_HNI_ERRMISC_NS 0x3120 /* rw */
+#define POR_HNI_PMU_EVENT_SEL 0x2000 /* rw */
+
+/* XP registers */
+#define POR_MXP_NODE_INFO 0x0000 /* ro */
+#define POR_MXP_DEVICE_PORT_CONNECT_INFO_P0 0x0008 /* ro */
+#define POR_MXP_DEVICE_PORT_CONNECT_INFO_P1 0x0010 /* ro */
+#define POR_MXP_MESH_PORT_CONNECT_INFO_EAST 0x0018 /* ro */
+#define POR_MXP_MESH_PORT_CONNECT_INFO_NORTH 0x0020 /* ro */
+#define POR_MXP_CHILD_INFO 0x0080 /* ro */
+#define POR_MXP_CHILD_POINTER_0 0x0100 /* ro */
+#define POR_MXP_CHILD_POINTER_1 0x0108 /* ro */
+#define POR_MXP_CHILD_POINTER_2 0x0110 /* ro */
+#define POR_MXP_CHILD_POINTER_3 0x0118 /* ro */
+#define POR_MXP_CHILD_POINTER_4 0x0120 /* ro */
+#define POR_MXP_CHILD_POINTER_5 0x0128 /* ro */
+#define POR_MXP_CHILD_POINTER_6 0x0130 /* ro */
+#define POR_MXP_CHILD_POINTER_7 0x0138 /* ro */
+#define POR_MXP_CHILD_POINTER_8 0x0140 /* ro */
+#define POR_MXP_CHILD_POINTER_9 0x0148 /* ro */
+#define POR_MXP_CHILD_POINTER_10 0x0150 /* ro */
+#define POR_MXP_CHILD_POINTER_11 0x0158 /* ro */
+#define POR_MXP_CHILD_POINTER_12 0x0160 /* ro */
+#define POR_MXP_CHILD_POINTER_13 0x0168 /* ro */
+#define POR_MXP_CHILD_POINTER_14 0x0170 /* ro */
+#define POR_MXP_CHILD_POINTER_15 0x0178 /* ro */
+#define POR_MXP_P0_INFO 0x0900 /* ro */
+#define POR_MXP_P1_INFO 0x0908 /* ro */
+#define POR_MXP_PX_INFO_DEV_TYPE_RN_I 0x01
+#define POR_MXP_PX_INFO_DEV_TYPE_RN_D 0x02
+#define POR_MXP_PX_INFO_DEV_TYPE_RN_F_CHIB 0x04
+#define POR_MXP_PX_INFO_DEV_TYPE_RN_F_CHIB_ESAM 0x05
+#define POR_MXP_PX_INFO_DEV_TYPE_RN_F_CHIA 0x06
+#define POR_MXP_PX_INFO_DEV_TYPE_RN_F_CHIA_ESAM 0x07
+#define POR_MXP_PX_INFO_DEV_TYPE_HN_T 0x08
+#define POR_MXP_PX_INFO_DEV_TYPE_HN_I 0x09
+#define POR_MXP_PX_INFO_DEV_TYPE_HN_D 0x0a
+#define POR_MXP_PX_INFO_DEV_TYPE_SN_F 0x0c
+#define POR_MXP_PX_INFO_DEV_TYPE_SBSX 0x0d
+#define POR_MXP_PX_INFO_DEV_TYPE_HN_F 0x0e
+#define POR_MXP_PX_INFO_DEV_TYPE_CXHA 0x11
+#define POR_MXP_PX_INFO_DEV_TYPE_CXRA 0x12
+#define POR_MXP_PX_INFO_DEV_TYPE_CXRH 0x13
+
+#define POR_MXP_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_MXP_AUX_CTL 0x0a00 /* rw */
+#define POR_MXP_P0_QOS_CONTROL 0x0a80 /* rw */
+#define POR_MXP_P0_QOS_LAT_TGT 0x0a88 /* rw */
+#define POR_MXP_P0_QOS_LAT_SCALE 0x0a90 /* rw */
+#define POR_MXP_P0_QOS_LAT_RANGE 0x0a98 /* rw */
+#define POR_MXP_P1_QOS_CONTROL 0x0aa0 /* rw */
+#define POR_MXP_P1_QOS_LAT_TGT 0x0aa8 /* rw */
+#define POR_MXP_P1_QOS_LAT_SCALE 0x0ab0 /* rw */
+#define POR_MXP_P1_QOS_LAT_RANGE 0x0ab8 /* rw */
+#define POR_MXP_PMU_EVENT_SEL 0x2000 /* rw */
+
+#define POR_MXP_ERRFR 0x3000 /* ro */
+#define POR_MXP_ERRCTLR 0x3008 /* rw */
+#define POR_MXP_ERRSTATUS 0x3010 /* w1c */
+#define POR_MXP_ERRMISC 0x3028 /* rw */
+#define POR_MXP_P0_BYTE_PAR_ERR_INJ 0x3030 /* wo */
+#define POR_MXP_P1_BYTE_PAR_ERR_INJ 0x3038 /* wo */
+#define POR_MXP_ERRFR_NS 0x3100 /* ro */
+#define POR_MXP_ERRCTLR_NS 0x3108 /* rw */
+#define POR_MXP_ERRSTATUS_NS 0x3110 /* w1c */
+#define POR_MXP_ERRMISC_NS 0x3128 /* rw */
+#define POR_MXP_P0_SYSCOREQ_CTL 0x1000 /* rw */
+#define POR_MXP_P1_SYSCOREQ_CTL 0x1008 /* rw */
+#define POR_MXP_P0_SYSCOACK_STATUS 0x1010 /* ro */
+#define POR_MXP_P1_SYSCOACK_STATUS 0x1018 /* ro */
+#define POR_DTM_CONTROL 0x2100 /* rw */
+#define POR_DTM_CONTROL_TRACE_NO_ATB (1 << 3)
+#define POR_DTM_CONTROL_SAMPLE_PROFILE_ENABLE (1 << 2)
+#define POR_DTM_CONTROL_TRACE_TAG_ENABLE (1 << 1)
+#define POR_DTM_CONTROL_DTM_ENABLE (1 << 0)
+#define POR_DTM_FIFO_ENTRY_READY 0x2118 /* w1c */
+#define POR_DTM_FIFO_ENTRY0_0 0x2120 /* ro */
+#define POR_DTM_FIFO_ENTRY0_1 0x2128 /* ro */
+#define POR_DTM_FIFO_ENTRY0_2 0x2130 /* ro */
+#define POR_DTM_FIFO_ENTRY1_0 0x2138 /* ro */
+#define POR_DTM_FIFO_ENTRY1_1 0x2140 /* ro */
+#define POR_DTM_FIFO_ENTRY1_2 0x2148 /* ro */
+#define POR_DTM_FIFO_ENTRY2_0 0x2150 /* ro */
+#define POR_DTM_FIFO_ENTRY2_1 0x2158 /* ro */
+#define POR_DTM_FIFO_ENTRY2_2 0x2160 /* ro */
+#define POR_DTM_FIFO_ENTRY3_0 0x2168 /* ro */
+#define POR_DTM_FIFO_ENTRY3_1 0x2170 /* ro */
+#define POR_DTM_FIFO_ENTRY3_2 0x2178 /* ro */
+#define POR_DTM_WP0_CONFIG 0x21a0 /* rw */
+#define POR_DTM_WP0_VAL 0x21a8 /* rw */
+#define POR_DTM_WP0_MASK 0x21b0 /* rw */
+#define POR_DTM_WP1_CONFIG 0x21b8 /* rw */
+#define POR_DTM_WP1_VAL 0x21c0 /* rw */
+#define POR_DTM_WP1_MASK 0x21c8 /* rw */
+#define POR_DTM_WP2_CONFIG 0x21d0 /* rw */
+#define POR_DTM_WP2_VAL 0x21d8 /* rw */
+#define POR_DTM_WP2_MASK 0x21e0 /* rw */
+#define POR_DTM_WP3_CONFIG 0x21e8 /* rw */
+#define POR_DTM_WP3_VAL 0x21f0 /* rw */
+#define POR_DTM_WP3_MASK 0x21f8 /* rw */
+#define POR_DTM_PMSICR 0x2200 /* rw */
+#define POR_DTM_PMSIRR 0x2208 /* rw */
+#define POR_DTM_PMU_CONFIG 0x2210 /* rw */
+#define POR_DTM_PMU_CONFIG_PMU_EN (1 << 0)
+#define POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT 32
+#define POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_WIDTH 8
+#define POR_DTM_PMEVCNT 0x2220 /* rw */
+#define POR_DTM_PMEVCNT_CNTR_WIDTH 16
+#define POR_DTM_PMEVCNTSR 0x2240 /* rw */
+
+/* RN-D registers */
+#define POR_RND_NODE_INFO 0x0000 /* ro */
+#define POR_RND_CHILD_INFO 0x0080 /* ro */
+#define POR_RND_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_RND_UNIT_INFO 0x0900 /* ro */
+#define POR_RND_CFG_CTL 0x0a00 /* rw */
+#define POR_RND_AUX_CTL 0x0a08 /* rw */
+#define POR_RND_S0_PORT_CONTROL 0x0a10 /* rw */
+#define POR_RND_S1_PORT_CONTROL 0x0a18 /* rw */
+#define POR_RND_S2_PORT_CONTROL 0x0a20 /* rw */
+#define POR_RND_S0_QOS_CONTROL 0x0a80 /* rw */
+#define POR_RND_S0_QOS_LAT_TGT 0x0a88 /* rw */
+#define POR_RND_S0_QOS_LAT_SCALE 0x0a90 /* rw */
+#define POR_RND_S0_QOS_LAT_RANGE 0x0a98 /* rw */
+#define POR_RND_S1_QOS_CONTROL 0x0aa0 /* rw */
+#define POR_RND_S1_QOS_LAT_TGT 0x0aa8 /* rw */
+#define POR_RND_S1_QOS_LAT_SCALE 0x0ab0 /* rw */
+#define POR_RND_S1_QOS_LAT_RANGE 0x0ab8 /* rw */
+#define POR_RND_S2_QOS_CONTROL 0x0ac0 /* rw */
+#define POR_RND_S2_QOS_LAT_TGT 0x0ac8 /* rw */
+#define POR_RND_S2_QOS_LAT_SCALE 0x0ad0 /* rw */
+#define POR_RND_S2_QOS_LAT_RANGE 0x0ad8 /* rw */
+#define POR_RND_PMU_EVENT_SEL 0x2000 /* rw */
+#define POR_RND_SYSCOREQ_CTL 0x1000 /* rw */
+#define POR_RND_SYSCOACK_STATUS 0x1008 /* ro */
+
+/* RN-I registers */
+#define POR_RNI_NODE_INFO 0x0000 /* ro */
+#define POR_RNI_CHILD_INFO 0x0080 /* ro */
+#define POR_RNI_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_RNI_UNIT_INFO 0x0900 /* ro */
+#define POR_RNI_CFG_CTL 0x0a00 /* rw */
+#define POR_RNI_AUX_CTL 0x0a08 /* rw */
+#define POR_RNI_S0_PORT_CONTROL 0x0a10 /* rw */
+#define POR_RNI_S1_PORT_CONTROL 0x0a18 /* rw */
+#define POR_RNI_S2_PORT_CONTROL 0x0a20 /* rw */
+#define POR_RNI_S0_QOS_CONTROL 0x0a80 /* rw */
+#define POR_RNI_S0_QOS_LAT_TGT 0x0a88 /* rw */
+#define POR_RNI_S0_QOS_LAT_SCALE 0x0a90 /* rw */
+#define POR_RNI_S0_QOS_LAT_RANGE 0x0a98 /* rw */
+#define POR_RNI_S1_QOS_CONTROL 0x0aa0 /* rw */
+#define POR_RNI_S1_QOS_LAT_TGT 0x0aa8 /* rw */
+#define POR_RNI_S1_QOS_LAT_SCALE 0x0ab0 /* rw */
+#define POR_RNI_S1_QOS_LAT_RANGE 0x0ab8 /* rw */
+#define POR_RNI_S2_QOS_CONTROL 0x0ac0 /* rw */
+#define POR_RNI_S2_QOS_LAT_TGT 0x0ac8 /* rw */
+#define POR_RNI_S2_QOS_LAT_SCALE 0x0ad0 /* rw */
+#define POR_RNI_S2_QOS_LAT_RANGE 0x0ad8 /* rw */
+#define POR_RNI_PMU_EVENT_SEL 0x2000 /* rw */
+
+/* RN SAM registers */
+#define POR_RNSAM_NODE_INFO 0x0000 /* ro */
+#define POR_RNSAM_CHILD_INFO 0x0080 /* ro */
+#define POR_RNSAM_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_RNSAM_UNIT_INFO 0x0900 /* ro */
+#define RNSAM_STATUS 0x0c00 /* rw */
+#define NON_HASH_MEM_REGION_REG0 0x0c08 /* rw */
+#define NON_HASH_MEM_REGION_REG1 0x0c10 /* rw */
+#define NON_HASH_MEM_REGION_REG2 0x0c18 /* rw */
+#define NON_HASH_MEM_REGION_REG3 0x0c20 /* rw */
+#define NON_HASH_TGT_NODEID0 0x0c30 /* rw */
+#define NON_HASH_TGT_NODEID1 0x0c38 /* rw */
+#define NON_HASH_TGT_NODEID2 0x0c40 /* rw */
+#define SYS_CACHE_GRP_REGION0 0x0c48 /* rw */
+#define SYS_CACHE_GRP_REGION1 0x0c50 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG0 0x0c58 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG1 0x0c60 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG2 0x0c68 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG3 0x0c70 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG4 0x0c78 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG5 0x0c80 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG6 0x0c88 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG7 0x0c90 /* rw */
+#define SYS_CACHE_GRP_NONHASH_NODEID 0x0c98 /* rw */
+#define SYS_CACHE_GROUP_HN_COUNT 0x0d00 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG0 0x0d08 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG1 0x0d10 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG2 0x0d18 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG3 0x0d20 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG4 0x0d28 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG5 0x0d30 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG6 0x0d38 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG7 0x0d40 /* rw */
+#define SYS_CACHE_GRP_SN_SAM_CFG0 0x0d48 /* rw */
+#define SYS_CACHE_GRP_SN_SAM_CFG1 0x0d50 /* rw */
+#define GIC_MEM_REGION_REG 0x0d58 /* rw */
+#define SYS_CACHE_GRP_SN_ATTR 0x0d60 /* rw */
+#define SYS_CACHE_GRP_HN_CPA_EN_REG 0x0d68 /* rw */
+#define SYS_CACHE_GRP_HN_CPA_GRP_REG 0x0d70 /* rw */
+#define CML_PORT_AGGR_MODE_CTRL_REG 0x0e00 /* rw */
+#define CML_PORT_AGGR_GRP0_ADD_MASK 0x0e08 /* rw */
+#define CML_PORT_AGGR_GRP1_ADD_MASK 0x0e10 /* rw */
+#define CML_PORT_AGGR_GRP0_REG 0x0e40 /* rw */
+#define CML_PORT_AGGR_GRP1_REG 0x0e48 /* rw */
+#define SYS_CACHE_GRP_SECONDARY_REG0 0x0f00 /* rw */
+#define SYS_CACHE_GRP_SECONDARY_REG1 0x0f08 /* rw */
+#define SYS_CACHE_GRP_CAL_MODE_REG 0x0f10 /* rw */
+#define RNSAM_HASH_ADDR_MASK_REG 0x0f18 /* rw */
+#define RNSAM_REGION_CMP_ADDR_MASK_REG 0x0f20 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG8 0x0f58 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG9 0x0f60 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG10 0x0f68 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG11 0x0f70 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG12 0x0f78 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG13 0x0f80 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG14 0x0f88 /* rw */
+#define SYS_CACHE_GRP_HN_NODEID_REG15 0x0f90 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG8 0x1008 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG9 0x1010 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG10 0x1018 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG11 0x1020 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG12 0x1028 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG13 0x1030 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG14 0x1038 /* rw */
+#define SYS_CACHE_GRP_SN_NODEID_REG15 0x1040 /* rw */
+
+/* SBSX registers */
+#define POR_SBSX_NODE_INFO 0x0000 /* ro */
+#define POR_SBSX_CHILD_INFO 0x0080 /* ro */
+#define POR_SBSX_UNIT_INFO 0x0900 /* ro */
+#define POR_SBSX_AUX_CTL 0x0a08 /* rw */
+#define POR_SBSX_ERRFR 0x3000 /* ro */
+#define POR_SBSX_ERRCTLR 0x3008 /* rw */
+#define POR_SBSX_ERRSTATUS 0x3010 /* w1c */
+#define POR_SBSX_ERRADDR 0x3018 /* rw */
+#define POR_SBSX_ERRMISC 0x3020 /* rw */
+#define POR_SBSX_ERRFR_NS 0x3100 /* ro */
+#define POR_SBSX_ERRCTLR_NS 0x3108 /* rw */
+#define POR_SBSX_ERRSTATUS_NS 0x3110 /* w1c */
+#define POR_SBSX_ERRADDR_NS 0x3118 /* rw */
+#define POR_SBSX_ERRMISC_NS 0x3120 /* rw */
+#define POR_SBSX_PMU_EVENT_SEL 0x2000 /* rw */
+
+/* CXHA registers */
+#define POR_CXG_HA_NODE_INFO 0x0000 /* ro */
+#define POR_CXG_HA_ID 0x0008 /* rw */
+#define POR_CXG_HA_CHILD_INFO 0x0080 /* ro */
+#define POR_CXG_HA_AUX_CTL 0x0a08 /* rw */
+#define POR_CXG_HA_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_CXG_HA_UNIT_INFO 0x0900 /* ro */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG0 0x0c00 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG1 0x0c08 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG2 0x0c10 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG3 0x0c18 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG4 0x0c20 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG5 0x0c28 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG6 0x0c30 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_REG7 0x0c38 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG0 0x0c40 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG1 0x0c48 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG2 0x0c50 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG3 0x0c58 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG4 0x0c60 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG5 0x0c68 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG6 0x0c70 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_REG7 0x0c78 /* rw */
+#define POR_CXG_HA_AGENTID_TO_LINKID_VAL 0x0d00 /* rw */
+#define POR_CXG_HA_RNF_RAID_TO_LDID_VAL 0x0d08 /* rw */
+#define POR_CXG_HA_PMU_EVENT_SEL 0x2000 /* rw */
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID3_SHIFT 24
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID3_MASK (0x3f << 24)
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID2_SHIFT 16
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID2_MASK (0x3f << 16)
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID1_SHIFT 8
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID1_MASK (0x3f << 8)
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID0_SHIFT 0
+#define POR_CXG_HA_PMU_EVENT_SEL_EVENT_ID0_MASK 0x3f
+
+#define POR_CXG_HA_CXPRTCL_LINK0_CTL 0x1000 /* rw */
+#define POR_CXG_HA_CXPRTCL_LINK0_STATUS 0x1008 /* ro */
+#define POR_CXG_HA_CXPRTCL_LINK1_CTL 0x1010 /* rw */
+#define POR_CXG_HA_CXPRTCL_LINK1_STATUS 0x1018 /* ro */
+#define POR_CXG_HA_CXPRTCL_LINK2_CTL 0x1020 /* rw */
+#define POR_CXG_HA_CXPRTCL_LINK2_STATUS 0x1028 /* ro */
+#define POR_CXG_HA_ERRFR 0x3000 /* ro */
+#define POR_CXG_HA_ERRCTLR 0x3008 /* rw */
+#define POR_CXG_HA_ERRSTATUS 0x3010 /* w1c */
+#define POR_CXG_HA_ERRADDR 0x3018 /* rw */
+#define POR_CXG_HA_ERRMISC 0x3020 /* rw */
+#define POR_CXG_HA_ERRFR_NS 0x3100 /* ro */
+#define POR_CXG_HA_ERRCTLR_NS 0x3108 /* rw */
+#define POR_CXG_HA_ERRSTATUS_NS 0x3110 /* w1c */
+#define POR_CXG_HA_ERRADDR_NS 0x3118 /* rw */
+#define POR_CXG_HA_ERRMISC_NS 0x3120 /* rw */
+
+/* CXRA registers */
+#define POR_CXG_RA_NODE_INFO 0x0000 /* ro */
+#define POR_CXG_RA_CHILD_INFO 0x0080 /* ro */
+#define POR_CXG_RA_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_CXG_RA_UNIT_INFO 0x0900 /* ro */
+#define POR_CXG_RA_CFG_CTL 0x0a00 /* rw */
+#define EN_CXLA_PMUCMD_PROP (1 << 8)
+#define POR_CXG_RA_AUX_CTL 0x0a08 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG0 0x0da8 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG1 0x0db0 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG2 0x0db8 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG3 0x0dc0 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG4 0x0dc8 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG5 0x0dd0 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG6 0x0dd8 /* rw */
+#define POR_CXG_RA_SAM_ADDR_REGION_REG7 0x0de0 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION0_LIMIT_REG 0x0e00 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION1_LIMIT_REG 0x0e08 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION2_LIMIT_REG 0x0e10 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION3_LIMIT_REG 0x0e18 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION4_LIMIT_REG 0x0e20 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION5_LIMIT_REG 0x0e28 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION6_LIMIT_REG 0x0e30 /* rw */
+#define POR_CXG_RA_SAM_MEM_REGION7_LIMIT_REG 0x0e38 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG0 0x0e60 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG1 0x0e68 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG2 0x0e70 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG3 0x0e78 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG4 0x0e80 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG5 0x0e88 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG6 0x0e90 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_REG7 0x0e98 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG0 0x0ea0 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG1 0x0ea8 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG2 0x0eb0 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG3 0x0eb8 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG4 0x0ec0 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG5 0x0ec8 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG6 0x0ed0 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_REG7 0x0ed8 /* rw */
+#define POR_CXG_RA_RNI_LDID_TO_RAID_REG0 0x0ee0 /* rw */
+#define POR_CXG_RA_RNI_LDID_TO_RAID_REG1 0x0ee8 /* rw */
+#define POR_CXG_RA_RNI_LDID_TO_RAID_REG2 0x0ef0 /* rw */
+#define POR_CXG_RA_RNI_LDID_TO_RAID_REG3 0x0ef8 /* rw */
+#define POR_CXG_RA_RND_LDID_TO_RAID_REG0 0x0f00 /* rw */
+#define POR_CXG_RA_RND_LDID_TO_RAID_REG1 0x0f08 /* rw */
+#define POR_CXG_RA_RND_LDID_TO_RAID_REG2 0x0f10 /* rw */
+#define POR_CXG_RA_RND_LDID_TO_RAID_REG3 0x0f18 /* rw */
+#define POR_CXG_RA_AGENTID_TO_LINKID_VAL 0x0f20 /* rw */
+#define POR_CXG_RA_RNF_LDID_TO_RAID_VAL 0x0f28 /* rw */
+#define POR_CXG_RA_RNI_LDID_TO_RAID_VAL 0x0f30 /* rw */
+#define POR_CXG_RA_RND_LDID_TO_RAID_VAL 0x0f38 /* rw */
+#define POR_CXG_RA_PMU_EVENT_SEL 0x2000 /* rw */
+#define POR_CXG_RA_CXPRTCL_LINK0_CTL 0x1000 /* rw */
+#define POR_CXG_RA_CXPRTCL_LINK0_STATUS 0x1008 /* ro */
+#define POR_CXG_RA_CXPRTCL_LINK1_CTL 0x1010 /* rw */
+#define POR_CXG_RA_CXPRTCL_LINK1_STATUS 0x1018 /* ro */
+#define POR_CXG_RA_CXPRTCL_LINK2_CTL 0x1020 /* rw */
+#define POR_CXG_RA_CXPRTCL_LINK2_STATUS 0x1028 /* ro */
+
+/* CXLA registers */
+#define POR_CXLA_NODE_INFO 0x0000 /* ro */
+#define POR_CXLA_CHILD_INFO 0x0080 /* ro */
+#define POR_CXLA_SECURE_REGISTER_GROUPS_OVERRIDE 0x0980 /* rw */
+#define POR_CXLA_UNIT_INFO 0x0900 /* ro */
+#define POR_CXLA_AUX_CTL 0x0a08 /* rw */
+#define POR_CXLA_CCIX_PROP_CAPABILITIES 0x0c00 /* ro */
+#define POR_CXLA_CCIX_PROP_CONFIGURED 0x0c08 /* rw */
+#define POR_CXLA_TX_CXS_ATTR_CAPABILITIES 0x0c10 /* ro */
+#define POR_CXLA_RX_CXS_ATTR_CAPABILITIES 0x0c18 /* ro */
+#define POR_CXLA_AGENTID_TO_LINKID_REG0 0x0c30 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG1 0x0c38 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG2 0x0c40 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG3 0x0c48 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG4 0x0c50 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG5 0x0c58 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG6 0x0c60 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_REG7 0x0c68 /* rw */
+#define POR_CXLA_AGENTID_TO_LINKID_VAL 0x0c70 /* rw */
+#define POR_CXLA_LINKID_TO_PCIE_BUS_NUM 0x0c78 /* rw */
+#define POR_CXLA_PERMSG_PYLD_0_63 0x0d00 /* rw */
+#define POR_CXLA_PERMSG_PYLD_64_127 0x0d08 /* rw */
+#define POR_CXLA_PERMSG_PYLD_128_191 0x0d10 /* rw */
+#define POR_CXLA_PERMSG_PYLD_192_255 0x0d18 /* rw */
+#define POR_CXLA_PERMSG_CTL 0x0d20 /* rw */
+#define POR_CXLA_ERR_AGENT_ID 0x0d28 /* rw */
+#define POR_CXLA_PMU_EVENT_SEL 0x2000 /* rw */
+#define POR_CXLA_PMU_CONFIG 0x2210 /* rw */
+#define POR_CXLA_PMEVCNT 0x2220 /* rw */
+#define POR_CXLA_PMEVCNTSR 0x2240 /* rw */
+
+#endif /* _MACHINE_CMN600_REG_H_ */
diff --git a/sys/arm64/include/counter.h b/sys/arm64/include/counter.h
new file mode 100644
index 000000000000..0a27e1902d86
--- /dev/null
+++ b/sys/arm64/include/counter.h
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/counter.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_COUNTER_H_
+#define _MACHINE_COUNTER_H_
+
+#include <sys/pcpu.h>
+#include <machine/atomic.h>
+
+#define EARLY_COUNTER &pcpu0.pc_early_dummy_counter
+
+#define counter_enter() do {} while (0)
+#define counter_exit() do {} while (0)
+
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+ return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
+}
+
+/* Sum the per-CPU slots of counter "p"; note the reads are done one CPU */
+/* at a time, so the total is not an atomic snapshot of the counter. */
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+	uint64_t r;
+	int i;
+
+	r = 0;
+	CPU_FOREACH(i)
+		r += counter_u64_read_one(p, i); /* p is already uint64_t * */
+	return (r);
+}
+
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+ *((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
+ PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+ smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
+ smp_no_rendezvous_barrier, c);
+}
+#endif
+
+#define counter_u64_add_protected(c, inc) counter_u64_add(c, inc)
+
+static inline void
+counter_u64_add(counter_u64_t c, int64_t inc)
+{
+
+ atomic_add_64((uint64_t *)zpcpu_get(c), inc);
+}
+
+#endif /* ! _MACHINE_COUNTER_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
new file mode 100644
index 000000000000..935e3754bf25
--- /dev/null
+++ b/sys/arm64/include/cpu.h
@@ -0,0 +1,306 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * Copyright (c) 2014-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * from: FreeBSD: src/sys/i386/include/cpu.h,v 1.62 2001/06/29
+ */
+
+#ifdef __arm__
+#include <arm/cpu.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_CPU_H_
+#define _MACHINE_CPU_H_
+
+#if !defined(__ASSEMBLER__)
+#include <machine/atomic.h>
+#include <machine/frame.h>
+#endif
+#include <machine/armreg.h>
+
+#define TRAPF_PC(tfp) ((tfp)->tf_elr)
+#define TRAPF_USERMODE(tfp) (((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)
+
+#define cpu_getstack(td) ((td)->td_frame->tf_sp)
+#define cpu_setstack(td, sp) ((td)->td_frame->tf_sp = (sp))
+#define cpu_spinwait() __asm __volatile("yield" ::: "memory")
+#define cpu_lock_delay() DELAY(1)
+
+/* Extract CPU affinity levels 0-3 */
+#define CPU_AFF0(mpidr) (u_int)(((mpidr) >> 0) & 0xff)
+#define CPU_AFF1(mpidr) (u_int)(((mpidr) >> 8) & 0xff)
+#define CPU_AFF2(mpidr) (u_int)(((mpidr) >> 16) & 0xff)
+#define CPU_AFF3(mpidr) (u_int)(((mpidr) >> 32) & 0xff)
+#define CPU_AFF0_MASK 0xffUL
+#define CPU_AFF1_MASK 0xff00UL
+#define CPU_AFF2_MASK 0xff0000UL
+#define CPU_AFF3_MASK 0xff00000000UL
+#define CPU_AFF_MASK (CPU_AFF0_MASK | CPU_AFF1_MASK | \
+ CPU_AFF2_MASK| CPU_AFF3_MASK) /* Mask affinity fields in MPIDR_EL1 */
+
+#ifdef _KERNEL
+
+#define CPU_IMPL_ARM 0x41
+#define CPU_IMPL_BROADCOM 0x42
+#define CPU_IMPL_CAVIUM 0x43
+#define CPU_IMPL_DEC 0x44
+#define CPU_IMPL_FUJITSU 0x46
+#define CPU_IMPL_HISILICON 0x48
+#define CPU_IMPL_INFINEON 0x49
+#define CPU_IMPL_FREESCALE 0x4D
+#define CPU_IMPL_NVIDIA 0x4E
+#define CPU_IMPL_APM 0x50
+#define CPU_IMPL_QUALCOMM 0x51
+#define CPU_IMPL_MARVELL 0x56
+#define CPU_IMPL_APPLE 0x61
+#define CPU_IMPL_INTEL 0x69
+#define CPU_IMPL_AMPERE 0xC0
+#define CPU_IMPL_MICROSOFT 0x6D
+
+/* ARM Part numbers */
+#define CPU_PART_FOUNDATION 0xD00
+#define CPU_PART_CORTEX_A34 0xD02
+#define CPU_PART_CORTEX_A53 0xD03
+#define CPU_PART_CORTEX_A35 0xD04
+#define CPU_PART_CORTEX_A55 0xD05
+#define CPU_PART_CORTEX_A65 0xD06
+#define CPU_PART_CORTEX_A57 0xD07
+#define CPU_PART_CORTEX_A72 0xD08
+#define CPU_PART_CORTEX_A73 0xD09
+#define CPU_PART_CORTEX_A75 0xD0A
+#define CPU_PART_CORTEX_A76 0xD0B
+#define CPU_PART_NEOVERSE_N1 0xD0C
+#define CPU_PART_CORTEX_A77 0xD0D
+#define CPU_PART_CORTEX_A76AE 0xD0E
+#define CPU_PART_AEM_V8 0xD0F
+#define CPU_PART_NEOVERSE_V1 0xD40
+#define CPU_PART_CORTEX_A78 0xD41
+#define CPU_PART_CORTEX_A78AE 0xD42
+#define CPU_PART_CORTEX_A65AE 0xD43
+#define CPU_PART_CORTEX_X1 0xD44
+#define CPU_PART_CORTEX_A510 0xD46
+#define CPU_PART_CORTEX_A710 0xD47
+#define CPU_PART_CORTEX_X2 0xD48
+#define CPU_PART_NEOVERSE_N2 0xD49
+#define CPU_PART_NEOVERSE_E1 0xD4A
+#define CPU_PART_CORTEX_A78C 0xD4B
+#define CPU_PART_CORTEX_X1C 0xD4C
+#define CPU_PART_CORTEX_A715 0xD4D
+#define CPU_PART_CORTEX_X3 0xD4E
+#define CPU_PART_NEOVERSE_V2 0xD4F
+#define CPU_PART_CORTEX_A520 0xD80
+#define CPU_PART_CORTEX_A720 0xD81
+#define CPU_PART_CORTEX_X4 0xD82
+#define CPU_PART_NEOVERSE_V3AE 0xD83
+#define CPU_PART_NEOVERSE_V3 0xD84
+#define CPU_PART_CORTEX_X925 0xD85
+#define CPU_PART_CORTEX_A725 0xD87
+#define CPU_PART_NEOVERSE_N3 0xD8E
+
+/* Cavium Part numbers */
+#define CPU_PART_THUNDERX 0x0A1
+#define CPU_PART_THUNDERX_81XX 0x0A2
+#define CPU_PART_THUNDERX_83XX 0x0A3
+#define CPU_PART_THUNDERX2 0x0AF
+
+#define CPU_REV_THUNDERX_1_0 0x00
+#define CPU_REV_THUNDERX_1_1 0x01
+
+#define CPU_REV_THUNDERX2_0 0x00
+
+/* APM (now Ampere) Part number */
+#define CPU_PART_EMAG8180 0x000
+
+/* Ampere Part numbers */
+#define CPU_PART_AMPERE1 0xAC3
+#define CPU_PART_AMPERE1A 0xAC4
+
+/* Microsoft Part numbers */
+#define CPU_PART_AZURE_COBALT_100 0xD49
+
+/* Qualcomm */
+#define CPU_PART_KRYO400_GOLD 0x804
+#define CPU_PART_KRYO400_SILVER 0x805
+
+/* Apple part numbers */
+#define CPU_PART_M1_ICESTORM 0x022
+#define CPU_PART_M1_FIRESTORM 0x023
+#define CPU_PART_M1_ICESTORM_PRO 0x024
+#define CPU_PART_M1_FIRESTORM_PRO 0x025
+#define CPU_PART_M1_ICESTORM_MAX 0x028
+#define CPU_PART_M1_FIRESTORM_MAX 0x029
+#define CPU_PART_M2_BLIZZARD 0x032
+#define CPU_PART_M2_AVALANCHE 0x033
+#define CPU_PART_M2_BLIZZARD_PRO 0x034
+#define CPU_PART_M2_AVALANCHE_PRO 0x035
+#define CPU_PART_M2_BLIZZARD_MAX 0x038
+#define CPU_PART_M2_AVALANCHE_MAX 0x039
+
+#define CPU_IMPL(midr) (((midr) >> 24) & 0xff)
+#define CPU_PART(midr) (((midr) >> 4) & 0xfff)
+#define CPU_VAR(midr) (((midr) >> 20) & 0xf)
+#define CPU_ARCH(midr) (((midr) >> 16) & 0xf)
+#define CPU_REV(midr) (((midr) >> 0) & 0xf)
+
+#define CPU_IMPL_TO_MIDR(val) (((val) & 0xff) << 24)
+#define CPU_PART_TO_MIDR(val) (((val) & 0xfff) << 4)
+#define CPU_VAR_TO_MIDR(val) (((val) & 0xf) << 20)
+#define CPU_ARCH_TO_MIDR(val) (((val) & 0xf) << 16)
+#define CPU_REV_TO_MIDR(val) (((val) & 0xf) << 0)
+
+#define CPU_IMPL_MASK (0xff << 24)
+#define CPU_PART_MASK (0xfff << 4)
+#define CPU_VAR_MASK (0xf << 20)
+#define CPU_ARCH_MASK (0xf << 16)
+#define CPU_REV_MASK (0xf << 0)
+
+#define CPU_ID_RAW(impl, part, var, rev) \
+ (CPU_IMPL_TO_MIDR((impl)) | \
+ CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) | \
+ CPU_REV_TO_MIDR((rev)))
+
+#define CPU_MATCH(mask, impl, part, var, rev) \
+ (((mask) & PCPU_GET(midr)) == \
+ ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
+
+#define CPU_MATCH_RAW(mask, devid) \
+ (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+
+/*
+ * Chip-specific errata. This defines are intended to be
+ * booleans used within if statements. When an appropriate
+ * kernel option is disabled, these defines must be defined
+ * as 0 to allow the compiler to remove a dead code thus
+ * produce better optimized kernel image.
+ */
+/*
+ * Vendor: Cavium
+ * Chip: ThunderX
+ * Revision(s): Pass 1.0, Pass 1.1
+ */
+#ifdef THUNDERX_PASS_1_1_ERRATA
+#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 \
+ (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \
+ CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_0) || \
+ CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \
+ CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_1))
+#else
+#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 0
+#endif
+
+#if !defined(__ASSEMBLER__)
+extern char btext[];
+extern char etext[];
+
+extern uint64_t __cpu_affinity[];
+
+struct arm64_addr_mask;
+extern struct arm64_addr_mask elf64_addr_mask;
+
+typedef void (*cpu_reset_hook_t)(void);
+extern cpu_reset_hook_t cpu_reset_hook;
+
+void cpu_halt(void) __dead2;
+void cpu_reset(void) __dead2;
+void fork_trampoline(void);
+void identify_cache(uint64_t);
+void identify_cpu(u_int);
+void install_cpu_errata(void);
+
+/* Pointer Authentication Code (PAC) support */
+void ptrauth_init(void);
+void ptrauth_fork(struct thread *, struct thread *);
+void ptrauth_exec(struct thread *);
+void ptrauth_copy_thread(struct thread *, struct thread *);
+void ptrauth_thread_alloc(struct thread *);
+void ptrauth_thread0(struct thread *);
+#ifdef SMP
+void ptrauth_mp_start(uint64_t);
+#endif
+
+/* Functions to read the sanitised view of the special registers */
+void update_special_regs(u_int);
+void update_special_reg_iss(u_int, uint64_t, uint64_t);
+#define update_special_reg(reg, clear, set) \
+ update_special_reg_iss(reg ## _ISS, clear, set)
+bool get_kernel_reg_iss(u_int, uint64_t *);
+#define get_kernel_reg(reg, valp) \
+ get_kernel_reg_iss(reg ## _ISS, valp)
+bool get_kernel_reg_iss_masked(u_int, uint64_t *, uint64_t);
+#define get_kernel_reg_masked(reg, valp, mask) \
+ get_kernel_reg_iss_masked(reg ## _ISS, valp, mask)
+bool get_user_reg_iss(u_int, uint64_t *, bool);
+#define get_user_reg(reg, valp, fbsd) \
+ get_user_reg_iss(reg ## _ISS, valp, fbsd)
+
+void cpu_desc_init(void);
+
+#define CPU_AFFINITY(cpu) __cpu_affinity[(cpu)]
+#define CPU_CURRENT_SOCKET \
+ (CPU_AFF2(CPU_AFFINITY(PCPU_GET(cpuid))))
+
+static __inline uint64_t
+get_cyclecount(void)
+{
+ uint64_t ret;
+
+ ret = READ_SPECIALREG(cntvct_el0);
+
+ return (ret);
+}
+
+#define ADDRESS_TRANSLATE_FUNC(stage) \
+static inline uint64_t \
+arm64_address_translate_ ##stage (uint64_t addr) \
+{ \
+ uint64_t ret; \
+ \
+ __asm __volatile( \
+ "at " __STRING(stage) ", %1 \n" \
+ "isb \n" \
+ "mrs %0, par_el1" : "=r"(ret) : "r"(addr)); \
+ \
+ return (ret); \
+}
+
+ADDRESS_TRANSLATE_FUNC(s1e0r)
+ADDRESS_TRANSLATE_FUNC(s1e0w)
+ADDRESS_TRANSLATE_FUNC(s1e1r)
+ADDRESS_TRANSLATE_FUNC(s1e1w)
+
+#endif /* !__ASSEMBLER__ */
+#endif
+
+#endif /* !_MACHINE_CPU_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
new file mode 100644
index 000000000000..9fe6a9dd95d9
--- /dev/null
+++ b/sys/arm64/include/cpu_feat.h
@@ -0,0 +1,88 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_CPU_FEAT_H_
+#define _MACHINE_CPU_FEAT_H_
+
+#include <sys/linker_set.h>
+
+typedef enum {
+	ERRATA_UNKNOWN, /* Erratum status has not been determined yet */
+	ERRATA_NONE, /* No errata for this feature on this system. */
+	ERRATA_AFFECTED, /* There is errata on this system. */
+	ERRATA_FW_MITIGAION, /* (sic, misspelling kept: public API) Errata */
+	 /* present with a firmware mitigation; the */
+	 /* mitigation may need a kernel component. */
+} cpu_feat_errata;
+
+#define CPU_FEAT_STAGE_MASK 0x00000001
+#define CPU_FEAT_EARLY_BOOT 0x00000000
+#define CPU_FEAT_AFTER_DEV 0x00000001
+
+#define CPU_FEAT_SCOPE_MASK 0x00000010
+#define CPU_FEAT_PER_CPU 0x00000000
+#define CPU_FEAT_SYSTEM 0x00000010
+
+struct cpu_feat;
+
+typedef bool (cpu_feat_check)(const struct cpu_feat *, u_int);
+typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
+ u_int **, u_int *);
+typedef void (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
+ u_int *, u_int);
+
+struct cpu_feat {
+ const char *feat_name;
+ cpu_feat_check *feat_check;
+ cpu_feat_has_errata *feat_has_errata;
+ cpu_feat_enable *feat_enable;
+ uint32_t feat_flags;
+};
+SET_DECLARE(cpu_feat_set, struct cpu_feat);
+
+/*
+ * Allow drivers to mark an erratum as worked around, e.g. the Errata
+ * Management ABI may know the workaround isn't needed on a given system.
+ */
+typedef cpu_feat_errata (*cpu_feat_errata_check_fn)(const struct cpu_feat *,
+ u_int);
+void cpu_feat_register_errata_check(cpu_feat_errata_check_fn);
+
+void enable_cpu_feat(uint32_t);
+
+/* Return true if "erratum" appears in errata_list[0 .. errata_count - 1]. */
+static inline bool
+cpu_feat_has_erratum(u_int *errata_list, u_int errata_count, u_int erratum)
+{
+	for (u_int i = 0; i < errata_count; i++)
+		if (errata_list[i] == erratum)
+			return (true);
+
+	return (false);
+}
+
+#endif /* _MACHINE_CPU_FEAT_H_ */
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
new file mode 100644
index 000000000000..e6e1f682794e
--- /dev/null
+++ b/sys/arm64/include/cpufunc.h
@@ -0,0 +1,217 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/cpufunc.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define _MACHINE_CPUFUNC_H_
+
+static __inline void
+breakpoint(void)
+{
+
+ __asm("brk #0");
+}
+
+#ifdef _KERNEL
+#include <machine/armreg.h>
+
+static __inline register_t
+dbg_disable(void)
+{
+ uint32_t ret;
+
+ __asm __volatile(
+ "mrs %x0, daif \n"
+ "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
+ : "=&r" (ret));
+
+ return (ret);
+}
+
+static __inline void
+dbg_enable(void)
+{
+
+ __asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
+}
+
+static __inline register_t
+intr_disable(void)
+{
+ /* DAIF is a 32-bit register */
+ uint32_t ret;
+
+ __asm __volatile(
+ "mrs %x0, daif \n"
+ "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
+ : "=&r" (ret));
+
+ return (ret);
+}
+
+static __inline void
+intr_restore(register_t s)
+{
+
+ WRITE_SPECIALREG(daif, s);
+}
+
+static __inline void
+intr_enable(void)
+{
+
+ __asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")");
+}
+
+static __inline void
+serror_enable(void)
+{
+
+ __asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
+}
+
+static __inline register_t
+get_midr(void)
+{
+ uint64_t midr;
+
+ midr = READ_SPECIALREG(midr_el1);
+
+ return (midr);
+}
+
+static __inline register_t
+get_mpidr(void)
+{
+ uint64_t mpidr;
+
+ mpidr = READ_SPECIALREG(mpidr_el1);
+
+ return (mpidr);
+}
+
+static __inline void
+clrex(void)
+{
+
+ /*
+ * Ensure compiler barrier, otherwise the monitor clear might
+ * occur too late for us ?
+ */
+ __asm __volatile("clrex" : : : "memory");
+}
+
+static __inline void
+set_ttbr0(uint64_t ttbr0)
+{
+
+ __asm __volatile(
+ "msr ttbr0_el1, %0 \n"
+ "isb \n"
+ :
+ : "r" (ttbr0));
+}
+
+static __inline void
+invalidate_icache(void)
+{
+
+ __asm __volatile(
+ "ic ialluis \n"
+ "dsb ish \n"
+ "isb \n");
+}
+
+static __inline void
+invalidate_local_icache(void)
+{
+
+ __asm __volatile(
+ "ic iallu \n"
+ "dsb nsh \n"
+ "isb \n");
+}
+
+static __inline void
+wfet(uint64_t val)
+{
+ __asm __volatile(
+ "msr s0_3_c1_c0_0, %0\n"
+ :
+ : "r" ((val))
+ : "memory");
+}
+
+static __inline void
+wfit(uint64_t val)
+{
+ __asm __volatile(
+ "msr s0_3_c1_c0_1, %0\n"
+ :
+ : "r" ((val))
+ : "memory");
+}
+
+extern bool icache_aliasing;
+extern bool icache_vmid;
+
+extern int64_t dcache_line_size;
+extern int64_t icache_line_size;
+extern int64_t idcache_line_size;
+extern int64_t dczva_line_size;
+
+#define cpu_nullop() arm64_nullop()
+#define cpufunc_nullop() arm64_nullop()
+
+#define cpu_tlb_flushID() arm64_tlb_flushID()
+
+#define cpu_dcache_wbinv_range(a, s) arm64_dcache_wbinv_range((a), (s))
+#define cpu_dcache_inv_range(a, s) arm64_dcache_inv_range((a), (s))
+#define cpu_dcache_wb_range(a, s) arm64_dcache_wb_range((a), (s))
+
+extern void (*arm64_icache_sync_range)(void *, vm_size_t);
+
+#define cpu_icache_sync_range(a, s) arm64_icache_sync_range((a), (s))
+#define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
+
+void arm64_nullop(void);
+void arm64_tlb_flushID(void);
+void arm64_dic_idc_icache_sync_range(void *, vm_size_t);
+void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t);
+void arm64_aliasing_icache_sync_range(void *, vm_size_t);
+int arm64_icache_sync_range_checked(void *, vm_size_t);
+void arm64_dcache_wbinv_range(void *, vm_size_t);
+void arm64_dcache_inv_range(void *, vm_size_t);
+void arm64_dcache_wb_range(void *, vm_size_t);
+bool arm64_get_writable_addr(void *, void **);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/cpuinfo.h b/sys/arm64/include/cpuinfo.h
new file mode 100644
index 000000000000..004f611ccf80
--- /dev/null
+++ b/sys/arm64/include/cpuinfo.h
@@ -0,0 +1,5 @@
+#ifdef __arm__
+#include <arm/cpuinfo.h>
+#else /* !__arm__ */
+#error Do not include this header, used only for 32-bit compatibility
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/csan.h b/sys/arm64/include/csan.h
new file mode 100644
index 000000000000..fa12f9db3568
--- /dev/null
+++ b/sys/arm64/include/csan.h
@@ -0,0 +1,104 @@
+/* $NetBSD: csan.h,v 1.2 2019/11/06 06:57:22 maxv Exp $ */
+
+/*
+ * Copyright (c) 2019 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/cpufunc.h>
+#include <machine/stack.h>
+#include <machine/vmparam.h>
+
+static inline bool
+kcsan_md_unsupported(vm_offset_t addr)
+{
+ return false;
+}
+
+static inline bool
+kcsan_md_is_avail(void)
+{
+ return true;
+}
+
+static inline void
+kcsan_md_disable_intrs(uint64_t *state)
+{
+
+ *state = intr_disable();
+}
+
+static inline void
+kcsan_md_enable_intrs(uint64_t *state)
+{
+
+ intr_restore(*state);
+}
+
+static inline void
+kcsan_md_delay(uint64_t us)
+{
+ DELAY(us);
+}
+
+static void
+kcsan_md_unwind(void)
+{
+#ifdef DDB
+ c_db_sym_t sym;
+ db_expr_t offset;
+ const char *symname;
+#endif
+ struct unwind_state frame;
+ int nsym;
+
+ frame.fp = (uintptr_t)__builtin_frame_address(0);
+ frame.pc = (uintptr_t)kcsan_md_unwind;
+ nsym = 0;
+
+ while (1) {
+ if (!unwind_frame(curthread, &frame))
+ break;
+ if (!INKERNEL((vm_offset_t)frame.pc))
+ break;
+
+#ifdef DDB
+ sym = db_search_symbol((vm_offset_t)frame.pc, DB_STGY_PROC,
+ &offset);
+ db_symbol_values(sym, &symname, NULL);
+ printf("#%d %p in %s+%#lx\n", nsym, (void *)frame.pc,
+ symname, offset);
+#else
+ printf("#%d %p\n", nsym, (void *)frame.pc);
+#endif
+ nsym++;
+
+ if (nsym >= 15) {
+ break;
+ }
+ }
+}
diff --git a/sys/arm64/include/db_machdep.h b/sys/arm64/include/db_machdep.h
new file mode 100644
index 000000000000..5dc496ca851d
--- /dev/null
+++ b/sys/arm64/include/db_machdep.h
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_DB_MACHDEP_H_
+#define	_MACHINE_DB_MACHDEP_H_
+
+#include <machine/armreg.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+/* Map ddb's generic trap types onto the arm64 exception classes. */
+#define	T_BREAKPOINT	(EXCP_BRK)
+#define	T_HW_BREAKPOINT	(EXCP_BRKPT_EL1)
+#define	T_SINGLESTEP	(EXCP_SOFTSTP_EL1)
+#define	T_WATCHPOINT	(EXCP_WATCHPT_EL1)
+
+#define	HAS_HW_BREAKPOINT
+#define	NHBREAKPOINTS	16
+
+typedef vm_offset_t	db_addr_t;
+typedef long		db_expr_t;
+
+#define	PC_REGS()	((db_addr_t)kdb_thrctx->pcb_x[PCB_LR])
+
+/* A64 "BRK #0" encoding; every A64 instruction is 4 bytes long. */
+#define	BKPT_INST	(0xd4200000)
+#define	BKPT_SIZE	(4)
+#define	BKPT_SET(inst)	(BKPT_INST)
+
+/* Step past a breakpoint: advance both the trapframe and saved context. */
+#define	BKPT_SKIP do {				\
+	kdb_frame->tf_elr += BKPT_SIZE;		\
+	kdb_thrctx->pcb_x[PCB_LR] += BKPT_SIZE;	\
+} while (0)
+
+#define	db_clear_single_step	kdb_cpu_clear_singlestep
+#define	db_set_single_step	kdb_cpu_set_singlestep
+
+/* NOTE(review): 'type' is used unparenthesized below; callers pass a
+ * simple identifier so this is benign, but parenthesizing would be safer. */
+#define	IS_BREAKPOINT_TRAP(type, code)	\
+	(type == T_BREAKPOINT || type == T_HW_BREAKPOINT)
+#define	IS_SSTEP_TRAP(type, code)	(type == T_SINGLESTEP)
+#define	IS_WATCHPOINT_TRAP(type, code)	(type == T_WATCHPOINT)
+
+/* arm64 has no dedicated trap-return instruction to recognize. */
+#define	inst_trap_return(ins)	(0)
+/* ret */
+#define	inst_return(ins)	(((ins) & 0xfffffc1fu) == 0xd65f0000)
+#define	inst_call(ins)	(((ins) & 0xfc000000u) == 0x94000000u || /* BL */ \
+    ((ins) & 0xfffffc1fu) == 0xd63f0000u) /* BLR */
+
+/*
+ * NOTE(review): inst_load()/inst_store() ignore their 'ins' argument and
+ * re-read the instruction at PC_REGS() instead -- presumably intentional
+ * since ddb queries the current PC; confirm against the ddb callers.
+ */
+#define	inst_load(ins) ({ \
+	uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \
+	is_load_instr(tmp_instr); \
+})
+
+#define	inst_store(ins) ({ \
+	uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \
+	is_store_instr(tmp_instr); \
+})
+
+/*
+ * Match the A64 load-instruction encoding groups (literal, exclusive,
+ * pair, register-offset and the various immediate addressing forms).
+ * Each mask/value pair below selects one addressing variant; the extra
+ * inequality tests exclude store and prefetch encodings that share the
+ * same primary opcode bits.
+ */
+#define	is_load_instr(ins)	((((ins) & 0x3b000000u) == 0x18000000u) || /* literal */ \
+	(((ins) & 0x3f400000u) == 0x08400000u) || /* exclusive */ \
+	(((ins) & 0x3bc00000u) == 0x28400000u) || /* no-allocate pair */ \
+	((((ins) & 0x3b200c00u) == 0x38000400u) && \
+	    (((ins) & 0x3be00c00u) != 0x38000400u) && \
+	    (((ins) & 0xffe00c00u) != 0x3c800400u)) || /* immediate post-indexed */ \
+	((((ins) & 0x3b200c00u) == 0x38000c00u) && \
+	    (((ins) & 0x3be00c00u) != 0x38000c00u) && \
+	    (((ins) & 0xffe00c00u) != 0x3c800c00u)) || /* immediate pre-indexed */ \
+	((((ins) & 0x3b200c00u) == 0x38200800u) && \
+	    (((ins) & 0x3be00c00u) != 0x38200800u) && \
+	    (((ins) & 0xffe00c00u) != 0x3ca00c80u)) || /* register offset */ \
+	((((ins) & 0x3b200c00u) == 0x38000800u) && \
+	    (((ins) & 0x3be00c00u) != 0x38000800u)) || /* unprivileged */ \
+	((((ins) & 0x3b200c00u) == 0x38000000u) && \
+	    (((ins) & 0x3be00c00u) != 0x38000000u) && \
+	    (((ins) & 0xffe00c00u) != 0x3c800000u)) || /* unscaled immediate */ \
+	((((ins) & 0x3b000000u) == 0x39000000u) && \
+	    (((ins) & 0x3bc00000u) != 0x39000000u) && \
+	    (((ins) & 0xffc00000u) != 0x3d800000u)) || /* unsigned immediate */ \
+	(((ins) & 0x3bc00000u) == 0x28400000u) || /* pair (offset) */ \
+	(((ins) & 0x3bc00000u) == 0x28c00000u) || /* pair (post-indexed) */ \
+	(((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */
+
+/*
+ * Match the A64 store-instruction encoding groups, mirroring
+ * is_load_instr() above (exclusive, pair, register-offset and the
+ * immediate addressing forms).
+ */
+#define	is_store_instr(ins)	((((ins) & 0x3f400000u) == 0x08000000u) || /* exclusive */ \
+	(((ins) & 0x3bc00000u) == 0x28000000u) || /* no-allocate pair */ \
+	((((ins) & 0x3be00c00u) == 0x38000400u) || \
+	    (((ins) & 0xffe00c00u) == 0x3c800400u)) || /* immediate post-indexed */ \
+	((((ins) & 0x3be00c00u) == 0x38000c00u) || \
+	    (((ins) & 0xffe00c00u) == 0x3c800c00u)) || /* immediate pre-indexed */ \
+	((((ins) & 0x3be00c00u) == 0x38200800u) || \
+	    (((ins) & 0xffe00c00u) == 0x3ca00800u)) || /* register offset */ \
+	(((ins) & 0x3be00c00u) == 0x38000800u) || /* unprivileged */ \
+	((((ins) & 0x3be00c00u) == 0x38000000u) || \
+	    (((ins) & 0xffe00c00u) == 0x3c800000u)) || /* unscaled immediate */ \
+	((((ins) & 0x3bc00000u) == 0x39000000u) || \
+	    (((ins) & 0xffc00000u) == 0x3d800000u)) || /* unsigned immediate */ \
+	(((ins) & 0x3bc00000u) == 0x28000000u) || /* pair (offset) */ \
+	(((ins) & 0x3bc00000u) == 0x28800000u) || /* pair (post-indexed) */ \
+	(((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */
+
+/* Next sequential instruction; the branch-delay case returns pc unchanged. */
+#define	next_instr_address(pc, bd)	((bd) ? (pc) : ((pc) + 4))
+
+#define	DB_ELFSIZE	64
+
+#endif /* !_MACHINE_DB_MACHDEP_H_ */
diff --git a/sys/arm64/include/debug_monitor.h b/sys/arm64/include/debug_monitor.h
new file mode 100644
index 000000000000..8698f21a7c2a
--- /dev/null
+++ b/sys/arm64/include/debug_monitor.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2014 The FreeBSD Foundation
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_DEBUG_MONITOR_H_
+#define	_MACHINE_DEBUG_MONITOR_H_
+
+/* Architectural maximum numbers of breakpoint/watchpoint register pairs. */
+#define	DBG_BRP_MAX	16
+#define	DBG_WRP_MAX	16
+
+/*
+ * Saved state of the self-hosted debug registers: control/value register
+ * pairs for hardware breakpoints (bcr/bvr) and watchpoints (wcr/wvr).
+ */
+struct debug_monitor_state {
+	uint32_t dbg_enable_count;
+	uint32_t dbg_flags;
+#define	DBGMON_ENABLED		(1 << 0)
+#define	DBGMON_KERNEL		(1 << 1)
+	uint64_t dbg_bcr[DBG_BRP_MAX];
+	uint64_t dbg_bvr[DBG_BRP_MAX];
+	uint64_t dbg_wcr[DBG_WRP_MAX];
+	uint64_t dbg_wvr[DBG_WRP_MAX];
+};
+
+#ifdef _KERNEL
+
+/* Access type a hardware breakpoint/watchpoint should trigger on. */
+enum dbg_access_t {
+	HW_BREAKPOINT_X		= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+};
+
+void dbg_monitor_init(void);
+void dbg_register_sync(struct debug_monitor_state *);
+
+#ifdef DDB
+void dbg_show_breakpoint(void);
+void dbg_show_watchpoint(void);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_DEBUG_MONITOR_H_ */
diff --git a/sys/arm64/include/disassem.h b/sys/arm64/include/disassem.h
new file mode 100644
index 000000000000..27336f59e496
--- /dev/null
+++ b/sys/arm64/include/disassem.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2016 Cavium
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __DISASSEM_H_
+#define	__DISASSEM_H_
+
+/*
+ * Callbacks the disassembler uses to read instruction words and emit
+ * output, so it can run both under ddb and in other environments.
+ */
+struct disasm_interface {
+	u_int	(*di_readword)(vm_offset_t);
+	void	(*di_printaddr)(vm_offset_t);
+	int	(*di_printf)(const char *, ...) __printflike(1, 2);
+};
+
+/* Disassemble one instruction; returns the address of the next one. */
+vm_offset_t disasm(const struct disasm_interface *, vm_offset_t, int);
+
+#endif /* __DISASSEM_H_ */
diff --git a/sys/arm64/include/dump.h b/sys/arm64/include/dump.h
new file mode 100644
index 000000000000..9deffb75a4eb
--- /dev/null
+++ b/sys/arm64/include/dump.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2014 EMC Corp.
+ * Author: Conrad Meyer <conrad.meyer@isilon.com>
+ * Copyright (c) 2015 The FreeBSD Foundation.
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_DUMP_H_
+#define	_MACHINE_DUMP_H_
+
+#define	KERNELDUMP_ARCH_VERSION	KERNELDUMP_AARCH64_VERSION
+#define	EM_VALUE		EM_AARCH64
+/* XXX: I suppose 20 should be enough. */
+#define	DUMPSYS_MD_PA_NPAIRS	20
+#define	DUMPSYS_NUM_AUX_HDRS	1
+
+/* How often to check the dump progress bar? */
+#define	DUMPSYS_PB_CHECK_BITS	22	/* Every 4MB */
+
+void dumpsys_wbinv_all(void);
+int dumpsys_write_aux_headers(struct dumperinfo *di);
+
+/* The wrappers below delegate to the MI generic dumpsys implementation. */
+
+static inline void
+dumpsys_pa_init(void)
+{
+
+	dumpsys_gen_pa_init();
+}
+
+static inline struct dump_pa *
+dumpsys_pa_next(struct dump_pa *p)
+{
+
+	return (dumpsys_gen_pa_next(p));
+}
+
+static inline void
+dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
+{
+
+	dumpsys_gen_unmap_chunk(pa, s, va);
+}
+
+static inline int
+dumpsys(struct dumperinfo *di)
+{
+
+	return (dumpsys_generic(di));
+}
+
+#endif /* !_MACHINE_DUMP_H_ */
diff --git a/sys/arm64/include/efi.h b/sys/arm64/include/efi.h
new file mode 100644
index 000000000000..bfce872296a2
--- /dev/null
+++ b/sys/arm64/include/efi.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2017 Andrew Turner
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/efi.h>
+#else /* !__arm__ */
+
+#ifndef __ARM64_INCLUDE_EFI_H_
+#define	__ARM64_INCLUDE_EFI_H_
+
+#include <sys/types.h>
+
+/* No special calling-convention attribute is needed on arm64. */
+#define	EFIABI_ATTR
+
+#ifdef _KERNEL
+#define	ARCH_MAY_USE_EFI
+
+/* EFI time services need no extra locking on this architecture. */
+#define	EFI_TIME_LOCK()
+#define	EFI_TIME_UNLOCK()
+#define	EFI_TIME_OWNED()
+
+#define	EFI_RT_HANDLE_FAULTS_DEFAULT	1
+#endif
+
+/* Arguments and result of one EFI runtime-service call. */
+struct efirt_callinfo {
+	const char	*ec_name;
+	register_t	ec_efi_status;
+	register_t	ec_fptr;
+	register_t	ec_argcnt;
+	register_t	ec_arg1;
+	register_t	ec_arg2;
+	register_t	ec_arg3;
+	register_t	ec_arg4;
+	register_t	ec_arg5;
+};
+
+#endif /* __ARM64_INCLUDE_EFI_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/elf.h b/sys/arm64/include/elf.h
new file mode 100644
index 000000000000..d6328c143585
--- /dev/null
+++ b/sys/arm64/include/elf.h
@@ -0,0 +1,233 @@
+/*-
+ * Copyright (c) 1996-1997 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/elf.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_ELF_H_
+#define	_MACHINE_ELF_H_
+
+/*
+ * ELF definitions for the AArch64 architecture.
+ */
+
+#include <sys/elf32.h>	/* Definitions common to all 32 bit architectures. */
+#include <sys/elf64.h>	/* Definitions common to all 64 bit architectures. */
+
+#ifndef __ELF_WORD_SIZE
+#define	__ELF_WORD_SIZE	64	/* Used by <sys/elf_generic.h> */
+#endif
+
+#include <sys/elf_generic.h>
+
+/*
+ * Auxiliary vector entries for passing information to the interpreter.
+ */
+
+typedef struct {	/* Auxiliary vector entry on initial stack */
+	int	a_type;			/* Entry type. */
+	union {
+		int	a_val;		/* Integer value. */
+	} a_un;
+} Elf32_Auxinfo;
+
+typedef struct {	/* Auxiliary vector entry on initial stack */
+	long	a_type;			/* Entry type. */
+	union {
+		long	a_val;		/* Integer value. */
+		void	*a_ptr;		/* Address. */
+		void	(*a_fcn)(void);	/* Function pointer (not used). */
+	} a_un;
+} Elf64_Auxinfo;
+
+__ElfType(Auxinfo);
+
+/* When building the 32-bit compat view of this header, target ARM. */
+#ifdef _MACHINE_ELF_WANT_32BIT
+#define	ELF_ARCH	EM_ARM
+#else
+#define	ELF_ARCH	EM_AARCH64
+#endif
+
+#define	ELF_MACHINE_OK(x)	((x) == (ELF_ARCH))
+
+/* Define "machine" characteristics */
+#if __ELF_WORD_SIZE == 64
+#define	ELF_TARG_CLASS	ELFCLASS64
+#define	ELF_TARG_DATA	ELFDATA2LSB
+#define	ELF_TARG_MACH	EM_AARCH64
+#define	ELF_TARG_VER	1
+#else
+#define	ELF_TARG_CLASS	ELFCLASS32
+#define	ELF_TARG_DATA	ELFDATA2LSB
+#define	ELF_TARG_MACH	EM_ARM
+#define	ELF_TARG_VER	1
+#endif
+
+/* Base load address for PIE (ET_DYN) executables. */
+#if __ELF_WORD_SIZE == 32
+#define	ET_DYN_LOAD_ADDR	0x01001000
+#else
+#define	ET_DYN_LOAD_ADDR	0x100000
+#endif
+
+/* HWCAP: AT_HWCAP feature bits exposed to userland (Linux-compatible). */
+#define	HWCAP_FP		(1 << 0)
+#define	HWCAP_ASIMD		(1 << 1)
+#define	HWCAP_EVTSTRM		(1 << 2)
+#define	HWCAP_AES		(1 << 3)
+#define	HWCAP_PMULL		(1 << 4)
+#define	HWCAP_SHA1		(1 << 5)
+#define	HWCAP_SHA2		(1 << 6)
+#define	HWCAP_CRC32		(1 << 7)
+#define	HWCAP_ATOMICS		(1 << 8)
+#define	HWCAP_FPHP		(1 << 9)
+#define	HWCAP_ASIMDHP		(1 << 10)
+/*
+ * XXX: The following bits (from CPUID to FLAGM) were originally incorrect,
+ * but later changed to match the Linux definitions. No compatibility code is
+ * provided, as the fix was expected to result in near-zero fallout.
+ */
+#define	HWCAP_CPUID		(1 << 11)
+#define	HWCAP_ASIMDRDM		(1 << 12)
+#define	HWCAP_JSCVT		(1 << 13)
+#define	HWCAP_FCMA		(1 << 14)
+#define	HWCAP_LRCPC		(1 << 15)
+#define	HWCAP_DCPOP		(1 << 16)
+#define	HWCAP_SHA3		(1 << 17)
+#define	HWCAP_SM3		(1 << 18)
+#define	HWCAP_SM4		(1 << 19)
+#define	HWCAP_ASIMDDP		(1 << 20)
+#define	HWCAP_SHA512		(1 << 21)
+#define	HWCAP_SVE		(1 << 22)
+#define	HWCAP_ASIMDFHM		(1 << 23)
+#define	HWCAP_DIT		(1 << 24)
+#define	HWCAP_USCAT		(1 << 25)
+#define	HWCAP_ILRCPC		(1 << 26)
+#define	HWCAP_FLAGM		(1 << 27)
+#define	HWCAP_SSBS		(1 << 28)
+#define	HWCAP_SB		(1 << 29)
+#define	HWCAP_PACA		(1 << 30)
+/* Bits 31+ need the UL suffix to avoid signed-int overflow. */
+#define	HWCAP_PACG		(1UL << 31)
+#define	HWCAP_GCS		(1UL << 32)
+
+/* HWCAP2: AT_HWCAP2 feature bits exposed to userland (Linux-compatible). */
+#define	HWCAP2_DCPODP		(1 << 0)
+#define	HWCAP2_SVE2		(1 << 1)
+#define	HWCAP2_SVEAES		(1 << 2)
+#define	HWCAP2_SVEPMULL		(1 << 3)
+#define	HWCAP2_SVEBITPERM	(1 << 4)
+#define	HWCAP2_SVESHA3		(1 << 5)
+#define	HWCAP2_SVESM4		(1 << 6)
+#define	HWCAP2_FLAGM2		(1 << 7)
+#define	HWCAP2_FRINT		(1 << 8)
+#define	HWCAP2_SVEI8MM		(1 << 9)
+#define	HWCAP2_SVEF32MM		(1 << 10)
+#define	HWCAP2_SVEF64MM		(1 << 11)
+#define	HWCAP2_SVEBF16		(1 << 12)
+#define	HWCAP2_I8MM		(1 << 13)
+#define	HWCAP2_BF16		(1 << 14)
+#define	HWCAP2_DGH		(1 << 15)
+#define	HWCAP2_RNG		(1 << 16)
+#define	HWCAP2_BTI		(1 << 17)
+#define	HWCAP2_MTE		(1 << 18)
+#define	HWCAP2_ECV		(1 << 19)
+#define	HWCAP2_AFP		(1 << 20)
+#define	HWCAP2_RPRES		(1 << 21)
+#define	HWCAP2_MTE3		(1 << 22)
+#define	HWCAP2_SME		(1 << 23)
+#define	HWCAP2_SME_I16I64	(1 << 24)
+#define	HWCAP2_SME_F64F64	(1 << 25)
+#define	HWCAP2_SME_I8I32	(1 << 26)
+#define	HWCAP2_SME_F16F32	(1 << 27)
+#define	HWCAP2_SME_B16F32	(1 << 28)
+#define	HWCAP2_SME_F32F32	(1 << 29)
+#define	HWCAP2_SME_FA64		(1 << 30)
+/* Bits 31+ need the UL suffix to avoid signed-int overflow. */
+#define	HWCAP2_WFXT		(1UL << 31)
+#define	HWCAP2_EBF16		(1UL << 32)
+#define	HWCAP2_SVE_EBF16	(1UL << 33)
+#define	HWCAP2_CSSC		(1UL << 34)
+#define	HWCAP2_RPRFM		(1UL << 35)
+#define	HWCAP2_SVE2P1		(1UL << 36)
+#define	HWCAP2_SME2		(1UL << 37)
+#define	HWCAP2_SME2P1		(1UL << 38)
+#define	HWCAP2_SME_I16I32	(1UL << 39)
+#define	HWCAP2_SME_BI32I32	(1UL << 40)
+#define	HWCAP2_SME_B16B16	(1UL << 41)
+#define	HWCAP2_SME_F16F16	(1UL << 42)
+#define	HWCAP2_MOPS		(1UL << 43)
+#define	HWCAP2_HBC		(1UL << 44)
+#define	HWCAP2_SVE_B16B16	(1UL << 45)
+#define	HWCAP2_LRCPC3		(1UL << 46)
+#define	HWCAP2_LSE128		(1UL << 47)
+#define	HWCAP2_FPMR		(1UL << 48)
+#define	HWCAP2_LUT		(1UL << 49)
+#define	HWCAP2_FAMINMAX		(1UL << 50)
+#define	HWCAP2_F8CVT		(1UL << 51)
+#define	HWCAP2_F8FMA		(1UL << 52)
+#define	HWCAP2_F8DP4		(1UL << 53)
+#define	HWCAP2_F8DP2		(1UL << 54)
+#define	HWCAP2_F8E4M3		(1UL << 55)
+#define	HWCAP2_F8E5M2		(1UL << 56)
+#define	HWCAP2_SME_LUTV2	(1UL << 57)
+#define	HWCAP2_SME_F8F16	(1UL << 58)
+#define	HWCAP2_SME_F8F32	(1UL << 59)
+#define	HWCAP2_SME_SF8FMA	(1UL << 60)
+#define	HWCAP2_SME_SF8DP4	(1UL << 61)
+#define	HWCAP2_SME_SF8DP2	(1UL << 62)
+#define	HWCAP2_POE		(1UL << 63)
+
+#ifdef COMPAT_FREEBSD32
+/* ARM HWCAP: bits reported to 32-bit (COMPAT_FREEBSD32) processes. */
+#define	HWCAP32_HALF		0x00000002	/* Always set. */
+#define	HWCAP32_THUMB		0x00000004	/* Always set. */
+#define	HWCAP32_FAST_MULT	0x00000010	/* Always set. */
+#define	HWCAP32_VFP		0x00000040
+#define	HWCAP32_EDSP		0x00000080	/* Always set. */
+#define	HWCAP32_NEON		0x00001000
+#define	HWCAP32_VFPv3		0x00002000
+#define	HWCAP32_TLS		0x00008000	/* Always set. */
+#define	HWCAP32_VFPv4		0x00010000
+#define	HWCAP32_IDIVA		0x00020000	/* Always set. */
+#define	HWCAP32_IDIVT		0x00040000	/* Always set. */
+#define	HWCAP32_VFPD32		0x00080000	/* Always set. */
+#define	HWCAP32_LPAE		0x00100000	/* Always set. */
+
+/* Capabilities every arm64 CPU provides to 32-bit processes. */
+#define	HWCAP32_DEFAULT	\
+   (HWCAP32_HALF | HWCAP32_THUMB | HWCAP32_FAST_MULT | HWCAP32_EDSP |\
+    HWCAP32_TLS | HWCAP32_IDIVA | HWCAP32_IDIVT | HWCAP32_VFPD32 | \
+    HWCAP32_LPAE)
+
+/* ARM HWCAP2 */
+#define	HWCAP32_2_AES		0x00000001
+#define	HWCAP32_2_PMULL		0x00000002
+#define	HWCAP32_2_SHA1		0x00000004
+#define	HWCAP32_2_SHA2		0x00000008
+#define	HWCAP32_2_CRC32		0x00000010
+#endif
+
+#endif /* !_MACHINE_ELF_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/endian.h b/sys/arm64/include/endian.h
new file mode 100644
index 000000000000..7ecc020cdf74
--- /dev/null
+++ b/sys/arm64/include/endian.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ */
+
+#ifndef _MACHINE_ENDIAN_H_
+#define	_MACHINE_ENDIAN_H_
+
+/* All byte-order definitions come from the MI <sys/_endian.h>. */
+#include <sys/_types.h>
+#include <sys/_endian.h>
+
+#endif /* !_MACHINE_ENDIAN_H_ */
diff --git a/sys/arm64/include/exec.h b/sys/arm64/include/exec.h
new file mode 100644
index 000000000000..462b9ac6bfa4
--- /dev/null
+++ b/sys/arm64/include/exec.h
@@ -0,0 +1,6 @@
+
+#ifdef __arm__
+#include <arm/exec.h>
+#else /* !__arm__ */
+/* empty */
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/float.h b/sys/arm64/include/float.h
new file mode 100644
index 000000000000..ad1d7aeb8188
--- /dev/null
+++ b/sys/arm64/include/float.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/float.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define	_MACHINE_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+
+#define	FLT_RADIX	2		/* b */
+#define	FLT_ROUNDS	__flt_rounds()
+#if __ISO_C_VISIBLE >= 1999
+#define	FLT_EVAL_METHOD	0
+#define	DECIMAL_DIG	17		/* max precision in decimal digits */
+#endif
+
+/* IEEE 754 binary32 (single precision). */
+#define	FLT_MANT_DIG	24		/* p */
+#define	FLT_EPSILON	1.19209290E-07F	/* b**(1-p) */
+#define	FLT_DIG		6		/* floor((p-1)*log10(b))+(b == 10) */
+#define	FLT_MIN_EXP	(-125)		/* emin */
+#define	FLT_MIN		1.17549435E-38F	/* b**(emin-1) */
+#define	FLT_MIN_10_EXP	(-37)		/* ceil(log10(b**(emin-1))) */
+#define	FLT_MAX_EXP	128		/* emax */
+#define	FLT_MAX		3.40282347E+38F	/* (1-b**(-p))*b**emax */
+#define	FLT_MAX_10_EXP	38		/* floor(log10((1-b**(-p))*b**emax)) */
+#if __ISO_C_VISIBLE >= 2011
+#define	FLT_TRUE_MIN	1.40129846E-45F	/* b**(emin-p) */
+#define	FLT_DECIMAL_DIG	9		/* ceil(1+p*log10(b)) */
+#define	FLT_HAS_SUBNORM	1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+/* IEEE 754 binary64 (double precision). */
+#define	DBL_MANT_DIG	53
+#define	DBL_EPSILON	2.2204460492503131E-16
+#define	DBL_DIG		15
+#define	DBL_MIN_EXP	(-1021)
+#define	DBL_MIN		2.2250738585072014E-308
+#define	DBL_MIN_10_EXP	(-307)
+#define	DBL_MAX_EXP	1024
+#define	DBL_MAX		1.7976931348623157E+308
+#define	DBL_MAX_10_EXP	308
+#if __ISO_C_VISIBLE >= 2011
+#define	DBL_TRUE_MIN	4.9406564584124654E-324
+#define	DBL_DECIMAL_DIG	17
+#define	DBL_HAS_SUBNORM	1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+/* long double is IEEE 754 binary128 (quad precision) on arm64. */
+#define	LDBL_MANT_DIG	113
+#define	LDBL_EPSILON	1.925929944387235853055977942584927319E-34L
+#define	LDBL_DIG	33
+#define	LDBL_MIN_EXP	(-16381)
+#define	LDBL_MIN	3.362103143112093506262677817321752603E-4932L
+#define	LDBL_MIN_10_EXP	(-4931)
+#define	LDBL_MAX_EXP	(+16384)
+#define	LDBL_MAX	1.189731495357231765085759326628007016E+4932L
+#define	LDBL_MAX_10_EXP	(+4932)
+#if __ISO_C_VISIBLE >= 2011
+#define	LDBL_TRUE_MIN	6.475175119438025110924438958227646552E-4966L
+#define	LDBL_DECIMAL_DIG 36
+#define	LDBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#endif /* _MACHINE_FLOAT_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/floatingpoint.h b/sys/arm64/include/floatingpoint.h
new file mode 100644
index 000000000000..7569ce3f7f27
--- /dev/null
+++ b/sys/arm64/include/floatingpoint.h
@@ -0,0 +1,2 @@
+/* Historical alias: <floatingpoint.h> is just the ieeefp interface. */
+#include <machine/ieeefp.h>
diff --git a/sys/arm64/include/fpu.h b/sys/arm64/include/fpu.h
new file mode 100644
index 000000000000..ebfc4eb98e4a
--- /dev/null
+++ b/sys/arm64/include/fpu.h
@@ -0,0 +1,4 @@
+/*-
+ * This file is in the public domain.
+ */
+/* The arm64 FPU interface is provided by the VFP header. */
+#include <machine/vfp.h>
diff --git a/sys/arm64/include/frame.h b/sys/arm64/include/frame.h
new file mode 100644
index 000000000000..5593532ac287
--- /dev/null
+++ b/sys/arm64/include/frame.h
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/frame.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_FRAME_H_
+#define	_MACHINE_FRAME_H_
+
+#ifndef LOCORE
+
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+
+/*
+ * NOTE: keep this structure in sync with struct reg and struct mcontext.
+ */
+struct trapframe {
+	uint64_t tf_sp;		/* stack pointer */
+	uint64_t tf_lr;		/* link register (x30) */
+	uint64_t tf_elr;	/* exception link register (return PC) */
+	uint64_t tf_spsr;	/* saved program status register */
+	uint64_t tf_esr;	/* exception syndrome register */
+	uint64_t tf_far;	/* fault address register */
+	uint64_t tf_x[30];	/* general-purpose registers x0-x29 */
+};
+
+/*
+ * Signal frame, pushed onto the user stack.
+ */
+struct sigframe {
+	siginfo_t	sf_si;	/* actual saved siginfo */
+	ucontext_t	sf_uc;	/* actual saved ucontext */
+};
+
+/*
+ * There is no fixed frame layout, other than to be 16-byte aligned.
+ */
+struct frame {
+	int dummy;
+};
+
+#ifdef COMPAT_FREEBSD32
+/* Signal frame for 32-bit (COMPAT_FREEBSD32) processes. */
+struct sigframe32 {
+	struct __siginfo32	sf_si;
+	ucontext32_t		sf_uc;
+	mcontext32_vfp_t	sf_vfp;
+};
+#endif /* COMPAT_FREEBSD32 */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_FRAME_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/gdb_machdep.h b/sys/arm64/include/gdb_machdep.h
new file mode 100644
index 000000000000..5edfc2629980
--- /dev/null
+++ b/sys/arm64/include/gdb_machdep.h
@@ -0,0 +1,83 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Mitchell Horne under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_GDB_MACHDEP_H_
+#define _MACHINE_GDB_MACHDEP_H_
+
+#define GDB_BUFSZ 4096 /* gdb packet buffer; sized for a full 'g' reply */
+#define GDB_NREGS 68 /* registers reported in the 'g' packet */
+#define GDB_REG_X0 0
+#define GDB_REG_X19 19
+#define GDB_REG_X29 29
+#define GDB_REG_LR 30
+#define GDB_REG_SP 31
+#define GDB_REG_PC 32
+#define GDB_REG_CSPR 33 /* sic — presumably CPSR; confirm against gdb's target layout */
+#define GDB_REG_V0 34
+#define GDB_REG_V31 65
+#define GDB_REG_FPSR 66
+#define GDB_REG_FPCR 67
+_Static_assert(GDB_BUFSZ >= (GDB_NREGS * 16), "buffer fits 'g' regs");
+
+static __inline size_t
+gdb_cpu_regsz(int regnum) /* byte width of register 'regnum' in gdb's register file */
+{
+ if (regnum == GDB_REG_CSPR || regnum == GDB_REG_FPSR ||
+ regnum == GDB_REG_FPCR)
+ return (4); /* 32-bit status/control registers */
+ else if (regnum >= GDB_REG_V0 && regnum <= GDB_REG_V31)
+ return (16); /* 128-bit SIMD/FP vector registers */
+
+ return (8); /* all remaining registers are 64-bit */
+}
+
+static __inline int
+gdb_cpu_query(void) /* arch hook for gdb query packets; always 0 — presumably "unhandled" */
+{
+ return (0);
+}
+
+static __inline void *
+gdb_begin_write(void) /* returns opaque token later handed to gdb_end_write() */
+{
+ return (NULL); /* nothing to prepare on arm64 */
+}
+
+static __inline void
+gdb_end_write(void *arg __unused)
+{ /* no-op: gdb_begin_write() performs no work that needs undoing */
+}
+
+void *gdb_cpu_getreg(int, size_t *);
+void gdb_cpu_setreg(int, void *);
+int gdb_cpu_signal(int, int);
+void gdb_cpu_stop_reason(int, int);
+
+#endif /* !_MACHINE_GDB_MACHDEP_H_ */
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
new file mode 100644
index 000000000000..a32e1000d911
--- /dev/null
+++ b/sys/arm64/include/hypervisor.h
@@ -0,0 +1,342 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2021 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_HYPERVISOR_H_
+#define _MACHINE_HYPERVISOR_H_
+
+/*
+ * These registers are only useful when in hypervisor context,
+ * e.g. specific to EL2, or controlling the hypervisor.
+ */
+
+/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
+#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
+/* Valid if HCR_EL2.E2H == 0 */
+#define CNTHCTL_EL1PCTEN (1 << 0) /* Allow physical counter access */
+#define CNTHCTL_EL1PCEN (1 << 1) /* Allow physical timer access */
+/* Valid if HCR_EL2.E2H == 1 */
+#define CNTHCTL_E2H_EL0PCTEN (1 << 0) /* Allow EL0 physical counter access */
+#define CNTHCTL_E2H_EL0VCTEN (1 << 1) /* Allow EL0 virtual counter access */
+#define CNTHCTL_E2H_EL0VTEN (1 << 8)
+#define CNTHCTL_E2H_EL0PTEN (1 << 9)
+#define CNTHCTL_E2H_EL1PCTEN (1 << 10) /* Allow physical counter access */
+#define CNTHCTL_E2H_EL1PTEN (1 << 11) /* Allow physical timer access */
+/* Unconditionally valid */
+#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
+#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
+
+/* CPTR_EL2 - Architecture feature trap register */
+/* Valid if HCR_EL2.E2H == 0 */
+#define CPTR_TRAP_ALL 0xc01037ff /* Enable all traps */
+#define CPTR_RES0 0x7fefc800
+#define CPTR_RES1 0x000032ff
+#define CPTR_TZ 0x00000100
+#define CPTR_TFP 0x00000400
+#define CPTR_TTA 0x00100000
+/* Valid if HCR_EL2.E2H == 1 */
+#define CPTR_E2H_TRAP_ALL 0xd0000000
+#define CPTR_E2H_ZPEN 0x00030000
+#define CPTR_E2H_FPEN 0x00300000
+#define CPTR_E2H_TTA 0x10000000
+/* Unconditionally valid */
+#define CPTR_TCPAC 0x80000000
+
+/* HCR_EL2 - Hypervisor Config Register */
+#define HCR_VM (UL(0x1) << 0)
+#define HCR_SWIO (UL(0x1) << 1)
+#define HCR_PTW (UL(0x1) << 2)
+#define HCR_FMO (UL(0x1) << 3)
+#define HCR_IMO (UL(0x1) << 4)
+#define HCR_AMO (UL(0x1) << 5)
+#define HCR_VF (UL(0x1) << 6)
+#define HCR_VI (UL(0x1) << 7)
+#define HCR_VSE (UL(0x1) << 8)
+#define HCR_FB (UL(0x1) << 9)
+#define HCR_BSU_MASK (UL(0x3) << 10)
+#define HCR_BSU_IS (UL(0x1) << 10)
+#define HCR_BSU_OS (UL(0x2) << 10)
+#define HCR_BSU_FS (UL(0x3) << 10)
+#define HCR_DC (UL(0x1) << 12)
+#define HCR_TWI (UL(0x1) << 13)
+#define HCR_TWE (UL(0x1) << 14)
+#define HCR_TID0 (UL(0x1) << 15)
+#define HCR_TID1 (UL(0x1) << 16)
+#define HCR_TID2 (UL(0x1) << 17)
+#define HCR_TID3 (UL(0x1) << 18)
+#define HCR_TSC (UL(0x1) << 19)
+#define HCR_TIDCP (UL(0x1) << 20)
+#define HCR_TACR (UL(0x1) << 21)
+#define HCR_TSW (UL(0x1) << 22)
+#define HCR_TPCP (UL(0x1) << 23)
+#define HCR_TPU (UL(0x1) << 24)
+#define HCR_TTLB (UL(0x1) << 25)
+#define HCR_TVM (UL(0x1) << 26)
+#define HCR_TGE (UL(0x1) << 27)
+#define HCR_TDZ (UL(0x1) << 28)
+#define HCR_HCD (UL(0x1) << 29)
+#define HCR_TRVM (UL(0x1) << 30)
+#define HCR_RW (UL(0x1) << 31)
+#define HCR_CD (UL(0x1) << 32)
+#define HCR_ID (UL(0x1) << 33)
+#define HCR_E2H (UL(0x1) << 34)
+#define HCR_TLOR (UL(0x1) << 35)
+#define HCR_TERR (UL(0x1) << 36)
+#define HCR_TEA (UL(0x1) << 37)
+#define HCR_MIOCNCE (UL(0x1) << 38)
+/* Bit 39 is reserved */
+#define HCR_APK (UL(0x1) << 40)
+#define HCR_API (UL(0x1) << 41)
+#define HCR_NV (UL(0x1) << 42)
+#define HCR_NV1 (UL(0x1) << 43)
+#define HCR_AT (UL(0x1) << 44)
+#define HCR_NV2 (UL(0x1) << 45)
+#define HCR_FWB (UL(0x1) << 46)
+#define HCR_FIEN (UL(0x1) << 47)
+/* Bit 48 is reserved */
+#define HCR_TID4 (UL(0x1) << 49)
+#define HCR_TICAB (UL(0x1) << 50)
+#define HCR_AMVOFFEN (UL(0x1) << 51)
+#define HCR_TOCU (UL(0x1) << 52)
+#define HCR_EnSCXT (UL(0x1) << 53)
+#define HCR_TTLBIS (UL(0x1) << 54)
+#define HCR_TTLBOS (UL(0x1) << 55)
+#define HCR_ATA (UL(0x1) << 56)
+#define HCR_DCT (UL(0x1) << 57)
+#define HCR_TID5 (UL(0x1) << 58)
+#define HCR_TWEDEn (UL(0x1) << 59)
+#define HCR_TWEDEL_MASK (UL(0xf) << 60)
+
+/* HCRX_EL2 - Extended Hypervisor Configuration Register */
+#define HCRX_EL2_REG MRS_REG_ALT_NAME(HCRX_EL2)
+#define HCRX_EL2_op0 3
+#define HCRX_EL2_op1 4
+#define HCRX_EL2_CRn 1
+#define HCRX_EL2_CRm 2
+#define HCRX_EL2_op2 2
+
+#define HCRX_EnAS0 (UL(0x1) << 0)
+#define HCRX_EnALS (UL(0x1) << 1)
+#define HCRX_EnASR (UL(0x1) << 2)
+#define HCRX_FnXS (UL(0x1) << 3)
+#define HCRX_FGTnXS (UL(0x1) << 4)
+#define HCRX_SMPME (UL(0x1) << 5)
+#define HCRX_TALLINT (UL(0x1) << 6)
+#define HCRX_VINMI (UL(0x1) << 7)
+#define HCRX_VFNMI (UL(0x1) << 8)
+#define HCRX_CMOW (UL(0x1) << 9)
+#define HCRX_MCE2 (UL(0x1) << 10)
+#define HCRX_MSCEn (UL(0x1) << 11)
+/* Bits 12 & 13 are reserved */
+#define HCRX_TCR2En (UL(0x1) << 14)
+#define HCRX_SCTLR2En (UL(0x1) << 15)
+#define HCRX_PTTWI (UL(0x1) << 16)
+#define HCRX_D128En (UL(0x1) << 17)
+#define HCRX_EnSNERR (UL(0x1) << 18)
+#define HCRX_TMEA (UL(0x1) << 19)
+#define HCRX_EnSDERR (UL(0x1) << 20)
+#define HCRX_EnIDCP128 (UL(0x1) << 21)
+#define HCRX_GCSEn (UL(0x1) << 22)
+#define HCRX_EnFPM (UL(0x1) << 23)
+#define HCRX_PACMEn (UL(0x1) << 24)
+/* Bit 25 is reserved */
+#define HCRX_SRMASKEn (UL(0x1) << 26)
+
+/* HPFAR_EL2 - Hypervisor IPA Fault Address Register */
+#define HPFAR_EL2_FIPA_SHIFT 4
+#define HPFAR_EL2_FIPA_MASK 0xfffffffff0
+#define HPFAR_EL2_FIPA_GET(x) \
+ (((x) & HPFAR_EL2_FIPA_MASK) >> HPFAR_EL2_FIPA_SHIFT)
+/* HPFAR_EL2_FIPA holds the 4k page address */
+#define HPFAR_EL2_FIPA_ADDR(x) \
+ (HPFAR_EL2_FIPA_GET(x) << 12)
+/* The bits from FAR_EL2 we need to add to HPFAR_EL2_FIPA_ADDR */
+#define FAR_EL2_HPFAR_PAGE_MASK (0xffful)
+
+/* ICC_SRE_EL2 */
+#define ICC_SRE_EL2_SRE (1UL << 0)
+#define ICC_SRE_EL2_EN (1UL << 3)
+
+/* SCTLR_EL2 - System Control Register */
+#define SCTLR_EL2_RES1 0x30c50830
+#define SCTLR_EL2_M_SHIFT 0
+#define SCTLR_EL2_M (0x1UL << SCTLR_EL2_M_SHIFT)
+#define SCTLR_EL2_A_SHIFT 1
+#define SCTLR_EL2_A (0x1UL << SCTLR_EL2_A_SHIFT)
+#define SCTLR_EL2_C_SHIFT 2
+#define SCTLR_EL2_C (0x1UL << SCTLR_EL2_C_SHIFT)
+#define SCTLR_EL2_SA_SHIFT 3
+#define SCTLR_EL2_SA (0x1UL << SCTLR_EL2_SA_SHIFT)
+#define SCTLR_EL2_EOS_SHIFT 11
+#define SCTLR_EL2_EOS (0x1UL << SCTLR_EL2_EOS_SHIFT)
+#define SCTLR_EL2_I_SHIFT 12
+#define SCTLR_EL2_I (0x1UL << SCTLR_EL2_I_SHIFT)
+#define SCTLR_EL2_WXN_SHIFT 19
+#define SCTLR_EL2_WXN (0x1UL << SCTLR_EL2_WXN_SHIFT)
+#define SCTLR_EL2_EIS_SHIFT 22
+#define SCTLR_EL2_EIS (0x1UL << SCTLR_EL2_EIS_SHIFT)
+#define SCTLR_EL2_EE_SHIFT 25
+#define SCTLR_EL2_EE (0x1UL << SCTLR_EL2_EE_SHIFT)
+
+/* TCR_EL2 - Translation Control Register */
+#define TCR_EL2_RES1 ((0x1UL << 31) | (0x1UL << 23))
+#define TCR_EL2_T0SZ_SHIFT 0
+#define TCR_EL2_T0SZ_MASK (0x3fUL << TCR_EL2_T0SZ_SHIFT)
+#define TCR_EL2_T0SZ(x) ((x) << TCR_EL2_T0SZ_SHIFT)
+/* Bits 7:6 are reserved */
+#define TCR_EL2_IRGN0_SHIFT 8
+#define TCR_EL2_IRGN0_MASK (0x3UL << TCR_EL2_IRGN0_SHIFT)
+#define TCR_EL2_IRGN0_WBWA (1UL << TCR_EL2_IRGN0_SHIFT)
+#define TCR_EL2_ORGN0_SHIFT 10
+#define TCR_EL2_ORGN0_MASK (0x3UL << TCR_EL2_ORGN0_SHIFT)
+#define TCR_EL2_ORGN0_WBWA (1UL << TCR_EL2_ORGN0_SHIFT)
+#define TCR_EL2_SH0_SHIFT 12
+#define TCR_EL2_SH0_MASK (0x3UL << TCR_EL2_SH0_SHIFT)
+#define TCR_EL2_SH0_IS (3UL << TCR_EL2_SH0_SHIFT)
+#define TCR_EL2_TG0_SHIFT 14
+#define TCR_EL2_TG0_MASK (0x3UL << TCR_EL2_TG0_SHIFT)
+#define TCR_EL2_TG0_4K (0x0UL << TCR_EL2_TG0_SHIFT)
+#define TCR_EL2_TG0_64K (0x1UL << TCR_EL2_TG0_SHIFT)
+#define TCR_EL2_TG0_16K (0x2UL << TCR_EL2_TG0_SHIFT)
+#define TCR_EL2_PS_SHIFT 16
+#define TCR_EL2_PS_MASK (0xfUL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_32BITS (0UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_36BITS (1UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_40BITS (2UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_42BITS (3UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_44BITS (4UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_48BITS (5UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_52BITS (6UL << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_HPD_SHIFT 24
+#define TCR_EL2_HPD (1UL << TCR_EL2_HPD_SHIFT)
+#define TCR_EL2_HWU59_SHIFT 25
+#define TCR_EL2_HWU59 (1UL << TCR_EL2_HWU59_SHIFT)
+#define TCR_EL2_HWU60_SHIFT 26
+#define TCR_EL2_HWU60 (1UL << TCR_EL2_HWU60_SHIFT)
+#define TCR_EL2_HWU61_SHIFT 27
+#define TCR_EL2_HWU61 (1UL << TCR_EL2_HWU61_SHIFT)
+#define TCR_EL2_HWU62_SHIFT 28
+#define TCR_EL2_HWU62 (1UL << TCR_EL2_HWU62_SHIFT)
+#define TCR_EL2_HWU \
+ (TCR_EL2_HWU59 | TCR_EL2_HWU60 | TCR_EL2_HWU61 | TCR_EL2_HWU62)
+
+/* VMPIDR_EL2 - Virtualization Multiprocessor ID Register */
+#define VMPIDR_EL2_U 0x0000000040000000
+#define VMPIDR_EL2_MT 0x0000000001000000
+#define VMPIDR_EL2_RES1 0x0000000080000000
+
+/* VTCR_EL2 - Virtualization Translation Control Register */
+#define VTCR_EL2_RES1 (0x1UL << 31)
+#define VTCR_EL2_T0SZ_SHIFT 0
+#define VTCR_EL2_T0SZ_MASK (0x3fUL << VTCR_EL2_T0SZ_SHIFT)
+#define VTCR_EL2_T0SZ(x) ((x) << VTCR_EL2_T0SZ_SHIFT)
+#define VTCR_EL2_SL0_SHIFT 6
+#define VTCR_EL2_SL0_4K_LVL2 (0x0UL << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_4K_LVL1 (0x1UL << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_4K_LVL0 (0x2UL << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_16K_LVL2 (0x1UL << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_16K_LVL1 (0x2UL << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_16K_LVL0 (0x3UL << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_IRGN0_SHIFT 8
+#define VTCR_EL2_IRGN0_WBWA (0x1UL << VTCR_EL2_IRGN0_SHIFT)
+#define VTCR_EL2_ORGN0_SHIFT 10
+#define VTCR_EL2_ORGN0_WBWA (0x1UL << VTCR_EL2_ORGN0_SHIFT)
+#define VTCR_EL2_SH0_SHIFT 12
+#define VTCR_EL2_SH0_NS (0x0UL << VTCR_EL2_SH0_SHIFT)
+#define VTCR_EL2_SH0_OS (0x2UL << VTCR_EL2_SH0_SHIFT)
+#define VTCR_EL2_SH0_IS (0x3UL << VTCR_EL2_SH0_SHIFT)
+#define VTCR_EL2_TG0_SHIFT 14
+#define VTCR_EL2_TG0_4K (0x0UL << VTCR_EL2_TG0_SHIFT)
+#define VTCR_EL2_TG0_64K (0x1UL << VTCR_EL2_TG0_SHIFT)
+#define VTCR_EL2_TG0_16K (0x2UL << VTCR_EL2_TG0_SHIFT)
+#define VTCR_EL2_PS_SHIFT 16
+#define VTCR_EL2_PS_32BIT (0x0UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_36BIT (0x1UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_40BIT (0x2UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_42BIT (0x3UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_44BIT (0x4UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_48BIT (0x5UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_52BIT (0x6UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_DS_SHIFT 32
+#define VTCR_EL2_DS (0x1UL << VTCR_EL2_DS_SHIFT)
+
+/* VTTBR_EL2 - Virtualization Translation Table Base Register */
+#define VTTBR_VMID_MASK 0xffff000000000000
+#define VTTBR_VMID_SHIFT 48
+/* Assumed to be 0 by locore.S */
+#define VTTBR_HOST 0x0000000000000000
+
+/* MDCR_EL2 - Monitor Debug Configuration Register (EL2) */
+#define MDCR_EL2_HPMN_MASK 0x1f
+#define MDCR_EL2_HPMN_SHIFT 0
+#define MDCR_EL2_TPMCR_SHIFT 5
+#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
+#define MDCR_EL2_TPM_SHIFT 6
+#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
+#define MDCR_EL2_HPME_SHIFT 7
+#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
+#define MDCR_EL2_TDE_SHIFT 8
+#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
+#define MDCR_EL2_TDA_SHIFT 9
+#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
+#define MDCR_EL2_TDOSA_SHIFT 10
+#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
+#define MDCR_EL2_TDRA_SHIFT 11
+#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
+#define MDCR_E2PB_SHIFT 12
+#define MDCR_E2PB_MASK (0x3UL << MDCR_E2PB_SHIFT)
+#define MDCR_TPMS_SHIFT 14
+#define MDCR_TPMS (0x1UL << MDCR_TPMS_SHIFT)
+#define MDCR_EnSPM_SHIFT 15
+#define MDCR_EnSPM (0x1UL << MDCR_EnSPM_SHIFT)
+#define MDCR_HPMD_SHIFT 17
+#define MDCR_HPMD (0x1UL << MDCR_HPMD_SHIFT)
+#define MDCR_TTRF_SHIFT 19
+#define MDCR_TTRF (0x1UL << MDCR_TTRF_SHIFT)
+#define MDCR_HCCD_SHIFT 23
+#define MDCR_HCCD (0x1UL << MDCR_HCCD_SHIFT)
+#define MDCR_E2TB_SHIFT 24
+#define MDCR_E2TB_MASK (0x3UL << MDCR_E2TB_SHIFT)
+#define MDCR_HLP_SHIFT 26
+#define MDCR_HLP (0x1UL << MDCR_HLP_SHIFT)
+#define MDCR_TDCC_SHIFT 27
+#define MDCR_TDCC (0x1UL << MDCR_TDCC_SHIFT)
+#define MDCR_MTPME_SHIFT 28
+#define MDCR_MTPME (0x1UL << MDCR_MTPME_SHIFT)
+#define MDCR_HPMFZO_SHIFT 29
+#define MDCR_HPMFZO (0x1UL << MDCR_HPMFZO_SHIFT)
+#define MDCR_PMSSE_SHIFT 30
+#define MDCR_PMSSE_MASK (0x3UL << MDCR_PMSSE_SHIFT)
+#define MDCR_HPMFZS_SHIFT 36
+#define MDCR_HPMFZS (0x1UL << MDCR_HPMFZS_SHIFT)
+#define MDCR_PMEE_SHIFT 40
+#define MDCR_PMEE_MASK (0x3UL << MDCR_PMEE_SHIFT)
+#define MDCR_EBWE_SHIFT 43
+#define MDCR_EBWE (0x1UL << MDCR_EBWE_SHIFT)
+
+#endif /* !_MACHINE_HYPERVISOR_H_ */
diff --git a/sys/arm64/include/ieeefp.h b/sys/arm64/include/ieeefp.h
new file mode 100644
index 000000000000..7f2ba092c507
--- /dev/null
+++ b/sys/arm64/include/ieeefp.h
@@ -0,0 +1,48 @@
+/*-
+ * Based on sys/sparc64/include/ieeefp.h
+ * Public domain.
+ */
+
+#ifdef __arm__
+#include <arm/ieeefp.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_IEEEFP_H_
+#define _MACHINE_IEEEFP_H_
+
+/* Deprecated FPU control interface */
+
+/* FP exception codes */
+#define FP_EXCEPT_INV 8
+#define FP_EXCEPT_DZ 9
+#define FP_EXCEPT_OFL 10
+#define FP_EXCEPT_UFL 11
+#define FP_EXCEPT_IMP 12
+#define FP_EXCEPT_DNML 15
+
+typedef int fp_except_t;
+
+#define FP_X_INV (1 << FP_EXCEPT_INV) /* invalid operation exception */
+#define FP_X_DZ (1 << FP_EXCEPT_DZ) /* divide-by-zero exception */
+#define FP_X_OFL (1 << FP_EXCEPT_OFL) /* overflow exception */
+#define FP_X_UFL (1 << FP_EXCEPT_UFL) /* underflow exception */
+#define FP_X_IMP (1 << FP_EXCEPT_IMP) /* imprecise (loss of precision) */
+#define FP_X_DNML (1 << FP_EXCEPT_DNML) /* denormal exception */
+
+typedef enum {
+ FP_RN = (0 << 22), /* round to nearest representable number */
+ FP_RP = (1 << 22), /* round toward positive infinity */
+ FP_RM = (2 << 22), /* round toward negative infinity */
+ FP_RZ = (3 << 22) /* round to zero (truncate) */
+} fp_rnd_t;
+
+__BEGIN_DECLS
+extern fp_rnd_t fpgetround(void);
+extern fp_rnd_t fpsetround(fp_rnd_t);
+extern fp_except_t fpgetmask(void);
+extern fp_except_t fpsetmask(fp_except_t);
+__END_DECLS
+
+#endif /* _MACHINE_IEEEFP_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/ifunc.h b/sys/arm64/include/ifunc.h
new file mode 100644
index 000000000000..de452ad34c8f
--- /dev/null
+++ b/sys/arm64/include/ifunc.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2015-2018 The FreeBSD Foundation
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __ARM64_IFUNC_H
+#define __ARM64_IFUNC_H
+
+#define DEFINE_IFUNC(qual, ret_type, name, args) \
+ static ret_type (*name##_resolver(void))args __used; \
+ qual ret_type name args __attribute__((ifunc(#name "_resolver"))); \
+ static ret_type (*name##_resolver(void))args
+
+#define DEFINE_UIFUNC(qual, ret_type, name, args) \
+ static ret_type (*name##_resolver(uint64_t, uint64_t, \
+ uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, \
+ uint64_t))args __used; \
+ qual ret_type name args __attribute__((ifunc(#name "_resolver"))); \
+ static ret_type (*name##_resolver(uint64_t _arg1 __unused, \
+ uint64_t _arg2 __unused, uint64_t _arg3 __unused, \
+ uint64_t _arg4 __unused, uint64_t _arg5 __unused, \
+ uint64_t _arg6 __unused, uint64_t _arg7 __unused, \
+ uint64_t _arg8 __unused))args
+
+#endif
diff --git a/sys/arm64/include/in_cksum.h b/sys/arm64/include/in_cksum.h
new file mode 100644
index 000000000000..b206b55a4a30
--- /dev/null
+++ b/sys/arm64/include/in_cksum.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#ifdef _KERNEL
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+u_short in_addword(u_short sum, u_short b);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+u_int do_cksum(const void *, int);
+#if defined(IPVERSION) && (IPVERSION == 4)
+u_int in_cksum_hdr(const struct ip *);
+#endif
+
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_IN_CKSUM_H_ */
diff --git a/sys/arm64/include/intr.h b/sys/arm64/include/intr.h
new file mode 100644
index 000000000000..ef7fe56e3a13
--- /dev/null
+++ b/sys/arm64/include/intr.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+#ifndef LOCORE
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#endif
+
+static inline void
+arm_irq_memory_barrier(uintptr_t irq)
+{ /* no-op on arm64; irq unused — presumably kept for arm intr API compat */
+}
+#endif /* !LOCORE */
+
+#ifndef NIRQ
+#define NIRQ 16384 /* XXX - It should be an option. */
+#endif
+
+#ifdef DEV_ACPI
+#define ACPI_INTR_XREF 1
+#define ACPI_MSI_XREF 2
+#define ACPI_GPIO_XREF 3
+#endif
+
+#define INTR_ROOT_IRQ 0
+#define INTR_ROOT_FIQ 1
+#define INTR_ROOT_COUNT 2
+
+#endif /* _MACHINE_INTR_H_ */
diff --git a/sys/arm64/include/iodev.h b/sys/arm64/include/iodev.h
new file mode 100644
index 000000000000..dbe8855918a3
--- /dev/null
+++ b/sys/arm64/include/iodev.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_IODEV_H_
+#define _MACHINE_IODEV_H_
+
+#define iodev_read_1(a) \
+({ \
+ uint8_t val; \
+ __asm __volatile("ldrb %w0, [%1]" : "=&r" (val) : "r"(a)); \
+ val; \
+})
+
+#define iodev_read_2(a) \
+({ \
+ uint16_t val; \
+ __asm __volatile("ldrh %w0, [%1]" : "=&r" (val) : "r"(a)); \
+ val; \
+})
+
+#define iodev_read_4(a) \
+({ \
+ uint32_t val; \
+ __asm __volatile("ldr %w0, [%1]" : "=&r" (val) : "r"(a)); \
+ val; \
+})
+
+#define iodev_write_1(a, v) \
+ __asm __volatile("strb %w0, [%1]" :: "r" (v), "r"(a))
+
+#define iodev_write_2(a, v) \
+ __asm __volatile("strh %w0, [%1]" :: "r" (v), "r"(a))
+
+#define iodev_write_4(a, v) \
+ __asm __volatile("str %w0, [%1]" :: "r" (v), "r"(a))
+
+#endif /* _MACHINE_IODEV_H_ */
diff --git a/sys/arm64/include/iommu.h b/sys/arm64/include/iommu.h
new file mode 100644
index 000000000000..80500a083044
--- /dev/null
+++ b/sys/arm64/include/iommu.h
@@ -0,0 +1,10 @@
+/*-
+ * This file is in the public domain.
+ */
+
+#ifndef _MACHINE_IOMMU_H_
+#define _MACHINE_IOMMU_H_
+
+#include <arm64/iommu/iommu.h>
+
+#endif /* !_MACHINE_IOMMU_H_ */
diff --git a/sys/arm64/include/kdb.h b/sys/arm64/include/kdb.h
new file mode 100644
index 000000000000..3148b7df7d1b
--- /dev/null
+++ b/sys/arm64/include/kdb.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_KDB_H_
+#define _MACHINE_KDB_H_
+
+#include <machine/cpufunc.h>
+
+#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid]
+
+void kdb_cpu_clear_singlestep(void);
+void kdb_cpu_set_singlestep(void);
+int kdb_cpu_set_breakpoint(vm_offset_t addr);
+int kdb_cpu_clr_breakpoint(vm_offset_t addr);
+int kdb_cpu_set_watchpoint(vm_offset_t addr, size_t size, int access);
+int kdb_cpu_clr_watchpoint(vm_offset_t addr, size_t size);
+
+static __inline void
+kdb_cpu_sync_icache(unsigned char *addr, size_t size)
+{ /* make [addr, addr + size) coherent in the instruction cache */
+
+ cpu_icache_sync_range(addr, size);
+}
+
+static __inline void
+kdb_cpu_trap(int type, int code)
+{ /* no machine-dependent kdb trap handling needed; both args unused */
+}
+
+#endif /* _MACHINE_KDB_H_ */
diff --git a/sys/arm64/include/machdep.h b/sys/arm64/include/machdep.h
new file mode 100644
index 000000000000..4fa80219da42
--- /dev/null
+++ b/sys/arm64/include/machdep.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MACHDEP_H_
+#define _MACHINE_MACHDEP_H_
+
+#ifdef _KERNEL
+
+struct arm64_bootparams {
+ vm_offset_t modulep; /* preloaded metadata pointer — consumed by parse_boot_param() */
+ vm_offset_t kern_stack; /* VA of the initial kernel stack */
+ vm_paddr_t kern_ttbr0; /* PA of the kernel's TTBR0 translation table */
+ int boot_el; /* EL the kernel booted from */
+ int pad; /* explicit padding */
+};
+
+enum arm64_bus {
+ ARM64_BUS_NONE, /* enumeration method not yet determined */
+ ARM64_BUS_FDT, /* flattened device tree */
+ ARM64_BUS_ACPI, /* ACPI tables */
+};
+
+extern enum arm64_bus arm64_bus_method;
+
+void dbg_init(void);
+bool has_hyp(void);
+bool in_vhe(void);
+void initarm(struct arm64_bootparams *);
+vm_offset_t parse_boot_param(struct arm64_bootparams *abp);
+#ifdef FDT
+void parse_fdt_bootargs(void);
+#endif
+int memory_mapping_mode(vm_paddr_t pa);
+extern void (*pagezero)(void *);
+
+#ifdef SOCDEV_PA
+/*
+ * The virtual address SOCDEV_PA is mapped at.
+ * Only valid while the early pagetables are valid.
+ */
+extern uintptr_t socdev_va;
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_MACHDEP_H_ */
diff --git a/sys/arm64/include/md_var.h b/sys/arm64/include/md_var.h
new file mode 100644
index 000000000000..da136ff091db
--- /dev/null
+++ b/sys/arm64/include/md_var.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 1995 Bruce D. Evans.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/md_var.h,v 1.40 2001/07/12
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+extern long Maxmem;
+extern char sigcode[];
+extern int szsigcode;
+extern u_long elf_hwcap;
+extern u_long elf_hwcap2;
+extern u_long elf_hwcap3;
+extern u_long elf_hwcap4;
+extern u_long linux_elf_hwcap;
+extern u_long linux_elf_hwcap2;
+extern u_long linux_elf_hwcap3;
+extern u_long linux_elf_hwcap4;
+#ifdef COMPAT_FREEBSD32
+extern u_long elf32_hwcap;
+extern u_long elf32_hwcap2;
+#endif
+
+struct dumperinfo;
+struct minidumpstate;
+
+int cpu_minidumpsys(struct dumperinfo *, const struct minidumpstate *);
+void generic_bs_fault(void) __asm(__STRING(generic_bs_fault));
+void generic_bs_peek_1(void) __asm(__STRING(generic_bs_peek_1));
+void generic_bs_peek_2(void) __asm(__STRING(generic_bs_peek_2));
+void generic_bs_peek_4(void) __asm(__STRING(generic_bs_peek_4));
+void generic_bs_peek_8(void) __asm(__STRING(generic_bs_peek_8));
+void generic_bs_poke_1(void) __asm(__STRING(generic_bs_poke_1));
+void generic_bs_poke_2(void) __asm(__STRING(generic_bs_poke_2));
+void generic_bs_poke_4(void) __asm(__STRING(generic_bs_poke_4));
+void generic_bs_poke_8(void) __asm(__STRING(generic_bs_poke_8));
+
+#ifdef _MD_WANT_SWAPWORD
+/*
+ * XXX These are implemented primarily for swp/swpb emulation at the moment, and
+ * should be used sparingly with consideration -- they aren't implemented for
+ * any other platform. If we use them anywhere else, at a minimum they need
+ * KASAN/KMSAN interceptors added.
+ */
+int swapueword8(volatile uint8_t *base, uint8_t *val);
+int swapueword32(volatile uint32_t *base, uint32_t *val);
+#endif
+
+#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/sys/arm64/include/memdev.h b/sys/arm64/include/memdev.h
new file mode 100644
index 000000000000..f2d6e7fdcde1
--- /dev/null
+++ b/sys/arm64/include/memdev.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2004 Mark R V Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MEMDEV_H_
+#define _MACHINE_MEMDEV_H_
+
+#define CDEV_MINOR_MEM 0
+#define CDEV_MINOR_KMEM 1
+
+d_open_t memopen;
+d_read_t memrw;
+d_ioctl_t memioctl_md;
+d_mmap_t memmmap;
+
+#endif /* _MACHINE_MEMDEV_H_ */
diff --git a/sys/arm64/include/metadata.h b/sys/arm64/include/metadata.h
new file mode 100644
index 000000000000..30ec5115e670
--- /dev/null
+++ b/sys/arm64/include/metadata.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_METADATA_H_
+#define _MACHINE_METADATA_H_
+
+#define MODINFOMD_EFI_MAP 0x1001
+#define MODINFOMD_DTBP 0x1002
+#define MODINFOMD_EFI_FB 0x1003
+
+/*
+ * This is not the same as the UEFI standard EFI_MEMORY_ATTRIBUTES_TABLE, though
+ * memory_size / descriptor_size entries of EFI_MEMORY_DESCRIPTORS follow this table
+ * starting at a 16-byte alignment.
+ */
+struct efi_map_header {
+ size_t memory_size; /* Number of bytes that follow */
+ size_t descriptor_size; /* Size of each EFI_MEMORY_DESCRIPTOR */
+ uint32_t descriptor_version; /* Currently '1' */
+};
+
+struct efi_fb {
+ uint64_t fb_addr;
+ uint64_t fb_size;
+ uint32_t fb_height;
+ uint32_t fb_width;
+ uint32_t fb_stride;
+ uint32_t fb_mask_red;
+ uint32_t fb_mask_green;
+ uint32_t fb_mask_blue;
+ uint32_t fb_mask_reserved;
+};
+
+#endif /* !_MACHINE_METADATA_H_ */
diff --git a/sys/arm64/include/minidump.h b/sys/arm64/include/minidump.h
new file mode 100644
index 000000000000..d3640d381074
--- /dev/null
+++ b/sys/arm64/include/minidump.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From i386: FreeBSD: 157909 2006-04-21 04:28:43Z peter
+ */
+
+#ifndef _MACHINE_MINIDUMP_H_
+#define _MACHINE_MINIDUMP_H_ 1
+
+#define MINIDUMP_MAGIC "minidump FreeBSD/arm64"
+#define MINIDUMP_VERSION 3
+
+struct minidumphdr {
+ char magic[24];
+ uint32_t version;
+ uint32_t msgbufsize;
+ uint32_t bitmapsize;
+ uint32_t pmapsize;
+ uint64_t kernbase;
+ uint64_t dmapphys;
+ uint64_t dmapbase;
+ uint64_t dmapend;
+ uint32_t dumpavailsize;
+#define MINIDUMP_FLAG_PS_MASK (3 << 0)
+#define MINIDUMP_FLAG_PS_4K (0 << 0)
+#define MINIDUMP_FLAG_PS_16K (1 << 0)
+/* MINIDUMP_FLAG_PS_64K (2 << 0) */
+ uint32_t flags;
+};
+
+#endif /* _MACHINE_MINIDUMP_H_ */
diff --git a/sys/arm64/include/msan.h b/sys/arm64/include/msan.h
new file mode 100644
index 000000000000..48be55af44b6
--- /dev/null
+++ b/sys/arm64/include/msan.h
@@ -0,0 +1,91 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 The FreeBSD Foundation
+ * Copyright (c) 2023 Juniper Networks, Inc.
+ *
+ * This software was developed by Mark Johnston under sponsorship from the
+ * FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MSAN_H_
+#define _MACHINE_MSAN_H_
+
+#ifdef KMSAN
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_page.h>
+#include <machine/vmparam.h>
+
+typedef uint32_t msan_orig_t;
+
+/*
+ * Our 32-bit origin cells encode a 2-bit type and 30-bit pointer to a kernel
+ * instruction. The pointer is compressed by making it a positive offset
+ * relative to KERNBASE.
+ */
+#define KMSAN_ORIG_TYPE_SHIFT 30u
+#define KMSAN_ORIG_PTR_MASK ((1ul << KMSAN_ORIG_TYPE_SHIFT) - 1)
+
+static inline msan_orig_t
+kmsan_md_orig_encode(int type, uintptr_t ptr)
+{
+ return ((type << KMSAN_ORIG_TYPE_SHIFT) |
+ ((ptr & KMSAN_ORIG_PTR_MASK)));
+}
+
+static inline void
+kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
+{
+ *type = orig >> KMSAN_ORIG_TYPE_SHIFT;
+ *ptr = (orig & KMSAN_ORIG_PTR_MASK) | KERNBASE;
+}
+
+static inline vm_offset_t
+kmsan_md_addr_to_shad(vm_offset_t addr)
+{
+ return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_SHAD_MIN_ADDRESS);
+}
+
+static inline vm_offset_t
+kmsan_md_addr_to_orig(vm_offset_t addr)
+{
+ return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_ORIG_MIN_ADDRESS);
+}
+
+static inline bool
+kmsan_md_unsupported(vm_offset_t addr)
+{
+ /*
+ * It would be cheaper to use VM_MAX_KERNEL_ADDRESS as the upper bound,
+ * but we need to exclude device mappings above kernel_vm_end but within
+ * the kernel map.
+ */
+ return (addr < VM_MIN_KERNEL_ADDRESS || addr >= kernel_vm_end);
+}
+
+#endif /* KMSAN */
+
+#endif /* !_MACHINE_MSAN_H_ */
diff --git a/sys/arm64/include/ofw_machdep.h b/sys/arm64/include/ofw_machdep.h
new file mode 100644
index 000000000000..48ea08882981
--- /dev/null
+++ b/sys/arm64/include/ofw_machdep.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ *
+ * This software was developed by Semihalf under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_OFW_MACHDEP_H_
+#define _MACHINE_OFW_MACHDEP_H_
+
+#include <vm/vm.h>
+
+typedef uint32_t cell_t;
+
+struct mem_region {
+ vm_offset_t mr_start;
+ vm_size_t mr_size;
+};
+
+#endif /* _MACHINE_OFW_MACHDEP_H_ */
diff --git a/sys/arm64/include/param.h b/sys/arm64/include/param.h
new file mode 100644
index 000000000000..753035b7775e
--- /dev/null
+++ b/sys/arm64/include/param.h
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/param.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PARAM_H_
+#define _MACHINE_PARAM_H_
+
+/*
+ * Machine dependent constants for arm64.
+ */
+
+#include <machine/_align.h>
+
+#define STACKALIGNBYTES (16 - 1)
+#define STACKALIGN(p) ((uint64_t)(p) & ~STACKALIGNBYTES)
+
+#ifndef MACHINE
+#define MACHINE "arm64"
+#endif
+#ifndef MACHINE_ARCH
+#define MACHINE_ARCH "aarch64"
+#endif
+#ifndef MACHINE_ARCH32
+#define MACHINE_ARCH32 "armv7"
+#endif
+
+#ifdef SMP
+#ifndef MAXCPU
+#define MAXCPU 1024
+#endif
+#else
+#define MAXCPU 1
+#endif
+
+#ifndef MAXMEMDOM
+#define MAXMEMDOM 8
+#endif
+
+#define ALIGNBYTES _ALIGNBYTES
+#define ALIGN(p) _ALIGN(p)
+/*
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits).
+ */
+#define ALIGNED_POINTER(p, t) ((((u_long)(p)) & (sizeof(t) - 1)) == 0)
+
+/*
+ * CACHE_LINE_SIZE is the compile-time maximum cache line size for an
+ * architecture. It should be used with appropriate caution.
+ */
+#define CACHE_LINE_SHIFT 7
+#define CACHE_LINE_SIZE (1 << CACHE_LINE_SHIFT)
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+
+#define PAGE_SHIFT_16K 14
+#define PAGE_SIZE_16K (1 << PAGE_SHIFT_16K)
+
+#define PAGE_SHIFT_64K 16
+#define PAGE_SIZE_64K (1 << PAGE_SHIFT_64K)
+
+#define PAGE_SHIFT PAGE_SHIFT_4K
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (PAGE_SIZE - 1)
+
+#define MAXPAGESIZES 4 /* maximum number of supported page sizes */
+
+#ifndef KSTACK_PAGES
+#if defined(KASAN) || defined(KMSAN)
+#define KSTACK_PAGES 6
+#else
+#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */
+#endif
+#endif
+
+#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
+#define PCPU_PAGES 1
+
+#ifdef PERTHREAD_SSP
+#define NO_PERTHREAD_SSP __nostackprotector
+#else
+#define NO_PERTHREAD_SSP
+#endif
+
+/*
+ * Mach derived conversion macros
+ */
+#define arm64_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
+#define arm64_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
+
+#endif /* !_MACHINE_PARAM_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/pcb.h b/sys/arm64/include/pcb.h
new file mode 100644
index 000000000000..c0feb1149cf5
--- /dev/null
+++ b/sys/arm64/include/pcb.h
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/pcb.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PCB_H_
+#define _MACHINE_PCB_H_
+
+#ifndef LOCORE
+
+#include <machine/debug_monitor.h>
+#include <machine/vfp.h>
+
+struct trapframe;
+
+/* The first register in pcb_x is x19 */
+#define PCB_X_START 19
+
+#define PCB_X19 0
+#define PCB_X20 1
+#define PCB_FP 10
+#define PCB_LR 11
+
+struct pcb {
+ uint64_t pcb_x[12];
+ /* These two need to be in order as we access them together */
+ uint64_t pcb_sp;
+ uint64_t pcb_tpidr_el0;
+ uint64_t pcb_tpidrro_el0;
+
+ /* Fault handler, the error value is passed in x0 */
+ vm_offset_t pcb_onfault;
+
+ u_int pcb_flags;
+#define PCB_SINGLE_STEP_SHIFT 0
+#define PCB_SINGLE_STEP (1 << PCB_SINGLE_STEP_SHIFT)
+ u_int pcb_sve_len; /* The SVE vector length */
+
+ struct vfpstate *pcb_fpusaved;
+ int pcb_fpflags;
+#define PCB_FP_STARTED 0x00000001
+#define PCB_FP_SVEVALID 0x00000002
+#define PCB_FP_KERN 0x40000000
+#define PCB_FP_NOSAVE 0x80000000
+/* The bits passed to userspace in get_fpcontext */
+#define PCB_FP_USERMASK (PCB_FP_STARTED | PCB_FP_SVEVALID)
+ u_int pcb_vfpcpu; /* Last cpu this thread ran VFP code */
+ void *pcb_svesaved;
+ uint64_t pcb_reserved[4];
+
+ /*
+ * The userspace VFP state. The pcb_fpusaved pointer will point to
+ * this unless the kernel has allocated a VFP context.
+ * Place last to simplify the asm to access the rest of the struct.
+ */
+ struct vfpstate pcb_fpustate;
+
+ struct debug_monitor_state pcb_dbg_regs;
+};
+
+#ifdef _KERNEL
+void makectx(struct trapframe *tf, struct pcb *pcb);
+void savectx(struct pcb *pcb) __returns_twice;
+#endif
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PCB_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/pci_cfgreg.h b/sys/arm64/include/pci_cfgreg.h
new file mode 100644
index 000000000000..579dcd954c9b
--- /dev/null
+++ b/sys/arm64/include/pci_cfgreg.h
@@ -0,0 +1,33 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_PCI_CFGREG_H
+#define _MACHINE_PCI_CFGREG_H
+
+int pci_cfgregopen(void);
+uint32_t pci_cfgregread(int, int, int, int, int, int);
+void pci_cfgregwrite(int, int, int, int, int, uint32_t, int);
+
+#endif /* !_MACHINE_PCI_CFGREG_H */
diff --git a/sys/arm64/include/pcpu.h b/sys/arm64/include/pcpu.h
new file mode 100644
index 000000000000..09bd8fa8a966
--- /dev/null
+++ b/sys/arm64/include/pcpu.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 1999 Luoqi Chen <luoqi@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27
+ */
+
+#ifdef __arm__
+#include <arm/pcpu.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PCPU_H_
+#define _MACHINE_PCPU_H_
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+
+typedef int (*pcpu_bp_harden)(void);
+typedef int (*pcpu_ssbd)(int);
+struct debug_monitor_state;
+
+#define PCPU_MD_FIELDS \
+ u_int pc_acpi_id; /* ACPI CPU id */ \
+ u_int pc_midr; /* stored MIDR value */ \
+ uint64_t pc_clock; \
+ pcpu_bp_harden pc_bp_harden; \
+ pcpu_ssbd pc_ssbd; \
+ struct pmap *pc_curpmap; \
+ struct pmap *pc_curvmpmap; \
+ uint64_t pc_mpidr; \
+ u_int pc_bcast_tlbi_workaround; \
+ char __pad[197]
+
+#ifdef _KERNEL
+
+struct pcb;
+struct pcpu;
+
+register struct pcpu *pcpup __asm ("x18");
+
+static inline struct pcpu *
+get_pcpu(void)
+{
+ struct pcpu *pcpu;
+
+ __asm __volatile("mov %0, x18" : "=&r"(pcpu));
+ return (pcpu);
+}
+
+static inline struct thread *
+get_curthread(void)
+{
+ struct thread *td;
+
+ __asm __volatile("ldr %0, [x18]" : "=&r"(td));
+ return (td);
+}
+
+#define curthread get_curthread()
+
+#define PCPU_GET(member) (pcpup->pc_ ## member)
+#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
+#define PCPU_PTR(member) (&pcpup->pc_ ## member)
+#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
+
+#define PCPU_GET_MPIDR(pc) ((pc)->pc_mpidr)
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PCPU_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/pcpu_aux.h b/sys/arm64/include/pcpu_aux.h
new file mode 100644
index 000000000000..88a6f3ebadf7
--- /dev/null
+++ b/sys/arm64/include/pcpu_aux.h
@@ -0,0 +1,56 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 The FreeBSD Foundation
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/pcpu_aux.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PCPU_AUX_H_
+#define _MACHINE_PCPU_AUX_H_
+
+#ifndef _KERNEL
+#error "Not for userspace"
+#endif
+
+#ifndef _SYS_PCPU_H_
+#error "Do not include machine/pcpu_aux.h directly"
+#endif
+
+/*
+ * To minimize memory waste in per-cpu UMA zones, the page size should
+ * be a multiple of the size of struct pcpu.
+ */
+_Static_assert(PAGE_SIZE % sizeof(struct pcpu) == 0, "fix pcpu size");
+
+extern struct pcpu pcpu0;
+
+#endif /* _MACHINE_PCPU_AUX_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
new file mode 100644
index 000000000000..0f23f200f0f6
--- /dev/null
+++ b/sys/arm64/include/pmap.h
@@ -0,0 +1,200 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/pmap.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#include <machine/pte.h>
+
+#ifndef LOCORE
+
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <sys/_pv_entry.h>
+
+#include <vm/_vm_radix.h>
+
+#ifdef _KERNEL
+
+#define vtophys(va) pmap_kextract((vm_offset_t)(va))
+
+#endif
+
+#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
+#define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
+void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+
+/*
+ * Pmap stuff
+ */
+
+struct rangeset;
+
+struct md_page {
+ TAILQ_HEAD(,pv_entry) pv_list;
+ int pv_gen;
+ vm_memattr_t pv_memattr;
+};
+
+enum pmap_stage {
+ PM_INVALID,
+ PM_STAGE1,
+ PM_STAGE2,
+};
+
+struct pmap {
+ struct mtx pm_mtx;
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ uint64_t pm_ttbr;
+ vm_paddr_t pm_l0_paddr;
+ pd_entry_t *pm_l0;
+ TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
+ struct vm_radix pm_root; /* spare page table pages */
+ long pm_cookie; /* encodes the pmap's ASID */
+ struct asid_set *pm_asid_set; /* The ASID/VMID set to use */
+ enum pmap_stage pm_stage;
+ int pm_levels;
+ struct rangeset *pm_bti;
+ uint64_t pm_reserved[3];
+};
+typedef struct pmap *pmap_t;
+
+struct thread;
+
+#ifdef _KERNEL
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
+#define pmap_kernel() kernel_pmap
+
+extern bool pmap_lpa_enabled;
+
+#define PMAP_ASSERT_LOCKED(pmap) \
+ mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
+#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
+#define PMAP_LOCK_ASSERT(pmap, type) \
+ mtx_assert(&(pmap)->pm_mtx, (type))
+#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
+#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
+ NULL, MTX_DEF | MTX_DUPOK)
+#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx)
+#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
+#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
+#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
+
+#define ASID_RESERVED_FOR_PID_0 0
+#define ASID_RESERVED_FOR_EFI 1
+#define ASID_FIRST_AVAILABLE (ASID_RESERVED_FOR_EFI + 1)
+#define ASID_TO_OPERAND(asid) ({ \
+ KASSERT((asid) != -1, ("invalid ASID")); \
+ (uint64_t)(asid) << TTBR_ASID_SHIFT; \
+})
+
+#define PMAP_WANT_ACTIVE_CPUS_NAIVE
+
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
+
+extern pt_entry_t pmap_sh_attr;
+
+/*
+ * Macros to test if a mapping is mappable with an L1 Section mapping
+ * or an L2 Large Page mapping.
+ */
+#define L1_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
+
+#define pmap_vm_page_alloc_check(m)
+
+void pmap_activate_vm(pmap_t);
+void pmap_bootstrap_dmap(vm_size_t);
+void pmap_bootstrap(void);
+int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
+int pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot);
+void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
+void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
+bool pmap_klookup(vm_offset_t va, vm_paddr_t *pa);
+vm_paddr_t pmap_kextract(vm_offset_t va);
+void pmap_kremove(vm_offset_t);
+void pmap_kremove_device(vm_offset_t, vm_size_t);
+void *pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma);
+bool pmap_page_is_mapped(vm_page_t m);
+int pmap_pinit_stage(pmap_t, enum pmap_stage, int);
+bool pmap_ps_enabled(pmap_t pmap);
+uint64_t pmap_to_ttbr0(pmap_t pmap);
+void pmap_disable_promotion(vm_offset_t sva, vm_size_t size);
+void pmap_map_delete(pmap_t, vm_offset_t, vm_offset_t);
+
+void *pmap_mapdev(vm_paddr_t, vm_size_t);
+void *pmap_mapbios(vm_paddr_t, vm_size_t);
+void pmap_unmapdev(void *, vm_size_t);
+void pmap_unmapbios(void *, vm_size_t);
+
+bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+
+bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
+ pd_entry_t **, pt_entry_t **);
+
+int pmap_fault(pmap_t, uint64_t, uint64_t);
+
+struct pcb *pmap_switch(struct thread *);
+
+extern void (*pmap_clean_stage2_tlbi)(void);
+extern void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t,
+ bool);
+extern void (*pmap_stage2_invalidate_all)(uint64_t);
+
+int pmap_vmspace_copy(pmap_t, pmap_t);
+
+int pmap_bti_set(pmap_t, vm_offset_t, vm_offset_t);
+int pmap_bti_clear(pmap_t, vm_offset_t, vm_offset_t);
+
+#if defined(KASAN) || defined(KMSAN)
+struct arm64_bootparams;
+
+void pmap_bootstrap_san(void);
+void pmap_san_enter(vm_offset_t);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* !LOCORE */
+
+#endif /* !_MACHINE_PMAP_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/pmc_mdep.h b/sys/arm64/include/pmc_mdep.h
new file mode 100644
index 000000000000..97d0f30c9c09
--- /dev/null
+++ b/sys/arm64/include/pmc_mdep.h
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2009 Rui Paulo <rpaulo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_PMC_MDEP_H_
+#define _MACHINE_PMC_MDEP_H_
+
+#define PMC_MDEP_CLASS_INDEX_ARMV8 1
+#define PMC_MDEP_CLASS_INDEX_DMC620_CD2 2
+#define PMC_MDEP_CLASS_INDEX_DMC620_C 3
+#define PMC_MDEP_CLASS_INDEX_CMN600 4
+/*
+ * On the ARMv8 platform we support the following PMCs.
+ *
+ * ARMV8 ARM Cortex-A53/57/72 processors
+ */
+#include <dev/hwpmc/hwpmc_arm64.h>
+#include <dev/hwpmc/hwpmc_cmn600.h>
+#include <dev/hwpmc/hwpmc_dmc620.h>
+#include <dev/hwpmc/pmu_dmc620_reg.h>
+#include <machine/cmn600_reg.h>
+
+union pmc_md_op_pmcallocate {
+ struct {
+ uint32_t pm_md_config;
+ };
+ struct pmc_md_cmn600_pmu_op_pmcallocate pm_cmn600;
+ struct pmc_md_dmc620_pmu_op_pmcallocate pm_dmc620;
+ uint64_t __pad[4];
+};
+
+/* Logging */
+#define PMCLOG_READADDR PMCLOG_READ64
+#define PMCLOG_EMITADDR PMCLOG_EMIT64
+
+#ifdef _KERNEL
+union pmc_md_pmc {
+ struct pmc_md_arm64_pmc pm_arm64;
+ struct pmc_md_cmn600_pmc pm_cmn600;
+ struct pmc_md_dmc620_pmc pm_dmc620;
+};
+
+#define PMC_IN_KERNEL_STACK(va) kstack_contains(curthread, (va), sizeof(va))
+#define PMC_IN_KERNEL(va) INKERNEL((va))
+#define PMC_IN_USERSPACE(va) ((va) <= VM_MAXUSER_ADDRESS)
+#define PMC_TRAPFRAME_TO_PC(TF) ((TF)->tf_elr)
+#define PMC_TRAPFRAME_TO_FP(TF) ((TF)->tf_x[29])
+
+/*
+ * Prototypes
+ */
+struct pmc_mdep *pmc_arm64_initialize(void);
+void pmc_arm64_finalize(struct pmc_mdep *_md);
+
+/* Optional class for CMN-600 controller's PMU. */
+int pmc_cmn600_initialize(struct pmc_mdep *md);
+void pmc_cmn600_finalize(struct pmc_mdep *_md);
+int pmc_cmn600_nclasses(void);
+
+/* Optional class for DMC-620 controller's PMU. */
+int pmc_dmc620_initialize_cd2(struct pmc_mdep *md);
+void pmc_dmc620_finalize_cd2(struct pmc_mdep *_md);
+int pmc_dmc620_initialize_c(struct pmc_mdep *md);
+void pmc_dmc620_finalize_c(struct pmc_mdep *_md);
+int pmc_dmc620_nclasses(void);
+
+#endif /* _KERNEL */
+
+#endif /* !_MACHINE_PMC_MDEP_H_ */
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
new file mode 100644
index 000000000000..dc2fa2df654d
--- /dev/null
+++ b/sys/arm64/include/proc.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * from: FreeBSD: src/sys/i386/include/proc.h,v 1.11 2001/06/29
+ */
+
+#ifdef __arm__
+#include <arm/proc.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PROC_H_
+#define _MACHINE_PROC_H_
+
+struct ptrauth_key {
+ uint64_t pa_key_lo;
+ uint64_t pa_key_hi;
+};
+
+struct mdthread {
+ int md_spinlock_count; /* (k) */
+ register_t md_saved_daif; /* (k) */
+ uintptr_t md_canary;
+
+ /*
+ * The pointer authentication keys. These are shared within a process,
+ * however this may change for some keys as the PAuth ABI Extension to
+ * ELF for the Arm 64-bit Architecture [1] is currently (July 2021) at
+ * an Alpha release quality so may change.
+ *
+ * [1] https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst
+ */
+ struct {
+ struct ptrauth_key apia;
+ struct ptrauth_key apib;
+ struct ptrauth_key apda;
+ struct ptrauth_key apdb;
+ struct ptrauth_key apga;
+ } md_ptrauth_user;
+
+ struct {
+ struct ptrauth_key apia;
+ } md_ptrauth_kern;
+
+ uint64_t md_efirt_tmp;
+ int md_efirt_dis_pf;
+
+ int md_reserved0;
+ uint64_t md_reserved[2];
+};
+
+struct mdproc {
+ long md_dummy;
+};
+
+#define KINFO_PROC_SIZE 1088
+#define KINFO_PROC32_SIZE 816
+
+#endif /* !_MACHINE_PROC_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/procctl.h b/sys/arm64/include/procctl.h
new file mode 100644
index 000000000000..b340002b45ee
--- /dev/null
+++ b/sys/arm64/include/procctl.h
@@ -0,0 +1,3 @@
+/*-
+ * This file is in the public domain.
+ */
diff --git a/sys/arm64/include/profile.h b/sys/arm64/include/profile.h
new file mode 100644
index 000000000000..8a61ff995d83
--- /dev/null
+++ b/sys/arm64/include/profile.h
@@ -0,0 +1,98 @@
+/*-
+ * SPDX-License-Identifier: MIT-CMU
+ *
+ * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * from: NetBSD: profile.h,v 1.9 1997/04/06 08:47:37 cgd Exp
+ * from: FreeBSD: src/sys/alpha/include/profile.h,v 1.4 1999/12/29
+ */
+
+#ifdef __arm__
+#include <arm/profile.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PROFILE_H_
+#define _MACHINE_PROFILE_H_
+
+#define FUNCTION_ALIGNMENT 32
+
+typedef u_long fptrdiff_t;
+
+#ifndef _KERNEL
+
+#include <sys/cdefs.h>
+
+typedef __uintfptr_t uintfptr_t;
+
+#define _MCOUNT_DECL \
+static void _mcount(uintfptr_t frompc, uintfptr_t selfpc) __used; \
+static void _mcount
+
+/*
+ * Call into _mcount. On arm64 the .mcount is a function so callers will
+ * handle caller saved registers. As we don't directly touch any callee
+ * saved registers we can just load the two arguments and use a tail call
+ * into the MI _mcount function.
+ *
+ * When building with gcc frompc will be in x0, however this is not the
+ * case on clang. As such we need to load it from the stack. As long as
+ * the caller follows the ABI this will load the correct value.
+ */
+#define MCOUNT __asm( \
+" .text \n" \
+" .align 6 \n" \
+" .type .mcount,#function \n" \
+" .globl .mcount \n" \
+" .mcount: \n" \
+" .cfi_startproc \n" \
+ /* Allow this to work with BTI, see BTI_C in asm.h */ \
+" hint #34 \n" \
+ /* Load the caller return address as frompc */ \
+" ldr x0, [x29, #8] \n" \
+ /* Use our return address as selfpc */ \
+" mov x1, lr \n" \
+" b _mcount \n" \
+" .cfi_endproc \n" \
+" .size .mcount, . - .mcount \n" \
+ );
+#if 0
+/*
+ * If clang passed frompc correctly we could implement it like this, however
+ * all clang versions we care about would need to be fixed before we could
+ * make this change.
+ */
+void
+mcount(uintfptr_t frompc)
+{
+ _mcount(frompc, __builtin_return_address(0));
+}
+#endif
+
+#endif /* !_KERNEL */
+
+#endif /* !_MACHINE_PROFILE_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/psl.h b/sys/arm64/include/psl.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/sys/arm64/include/psl.h
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
new file mode 100644
index 000000000000..464d8c941c56
--- /dev/null
+++ b/sys/arm64/include/pte.h
@@ -0,0 +1,262 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/pte.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_PTE_H_
+#define _MACHINE_PTE_H_
+
+#ifndef LOCORE
+typedef uint64_t pd_entry_t; /* page directory entry */
+typedef uint64_t pt_entry_t; /* page table entry */
+#endif
+
+/* Table attributes */
+#define TATTR_MASK UINT64_C(0xfff8000000000000)
+#define TATTR_AP_TABLE_MASK (3UL << 61)
+#define TATTR_AP_TABLE_RO (2UL << 61)
+#define TATTR_AP_TABLE_NO_EL0 (1UL << 61)
+#define TATTR_UXN_TABLE (1UL << 60)
+#define TATTR_PXN_TABLE (1UL << 59)
+/* Bits 58:51 are ignored */
+
+/* Block and Page attributes */
+#define ATTR_MASK_H UINT64_C(0xfffc000000000000)
+#define ATTR_MASK_L UINT64_C(0x0000000000000fff)
+#define ATTR_MASK (ATTR_MASK_H | ATTR_MASK_L)
+
+/* Bits 58:55 are reserved for software */
+#define ATTR_SW_UNUSED1 (1UL << 58)
+#define ATTR_SW_NO_PROMOTE (1UL << 57)
+#define ATTR_SW_MANAGED (1UL << 56)
+#define ATTR_SW_WIRED (1UL << 55)
+
+#define ATTR_S1_UXN (1UL << 54)
+#define ATTR_S1_PXN (1UL << 53)
+#define ATTR_S1_XN (ATTR_S1_PXN | ATTR_S1_UXN)
+
+#define ATTR_S2_XN(x) ((x) << 53)
+#define ATTR_S2_XN_MASK ATTR_S2_XN(3UL)
+#define ATTR_S2_XN_NONE 0UL /* Allow execution at EL0 & EL1 */
+#define ATTR_S2_XN_EL1 1UL /* Allow execution at EL0 */
+#define ATTR_S2_XN_ALL 2UL /* No execution */
+#define ATTR_S2_XN_EL0 3UL /* Allow execution at EL1 */
+
+#define ATTR_CONTIGUOUS (1UL << 52)
+#define ATTR_DBM (1UL << 51)
+#define ATTR_S1_GP_SHIFT 50
+#define ATTR_S1_GP (1UL << ATTR_S1_GP_SHIFT)
+
+/*
+ * Largest possible output address field for a level 3 page. Block
+ * entries will use fewer low address bits, but these are res0 so
+ * should be safe to include.
+ *
+ * This is also safe to use for the next-level table address for
+ * table entries as they encode a physical address in the same way.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define ATTR_ADDR UINT64_C(0x0003fffffffff000)
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define ATTR_ADDR UINT64_C(0x0003ffffffffc000)
+#else
+#error Unsupported page size
+#endif
+
+#define ATTR_S1_nG (1 << 11)
+#define ATTR_AF (1 << 10)
+/* When TCR_EL1.DS == 0 */
+#define ATTR_SH(x) ((x) << 8)
+#define ATTR_SH_MASK ATTR_SH(3)
+#define ATTR_SH_NS 0 /* Non-shareable */
+#define ATTR_SH_OS 2 /* Outer-shareable */
+#define ATTR_SH_IS 3 /* Inner-shareable */
+/* When TCR_EL1.DS == 1 */
+#define ATTR_OA_51_50_SHIFT 8
+#define ATTR_OA_51_50_MASK (3 << ATTR_OA_51_50_SHIFT)
+#define ATTR_OA_51_50_DELTA (50 - 8) /* Delta from address to pte */
+
+#define ATTR_S1_AP_RW_BIT (1 << 7)
+#define ATTR_S1_AP(x) ((x) << 6)
+#define ATTR_S1_AP_MASK ATTR_S1_AP(3)
+#define ATTR_S1_AP_RW (0 << 1)
+#define ATTR_S1_AP_RO (1 << 1)
+#define ATTR_S1_AP_USER (1 << 0)
+#define ATTR_S1_NS (1 << 5)
+#define ATTR_S1_IDX(x) ((x) << 2)
+#define ATTR_S1_IDX_MASK (7 << 2)
+
+#define ATTR_S2_S2AP(x) ((x) << 6)
+#define ATTR_S2_S2AP_MASK 3
+#define ATTR_S2_S2AP_READ 1
+#define ATTR_S2_S2AP_WRITE 2
+
+#define ATTR_S2_MEMATTR(x) ((x) << 2)
+#define ATTR_S2_MEMATTR_MASK ATTR_S2_MEMATTR(0xf)
+#define ATTR_S2_MEMATTR_DEVICE_nGnRnE 0x0
+#define ATTR_S2_MEMATTR_NC 0x5
+#define ATTR_S2_MEMATTR_WT 0xa
+#define ATTR_S2_MEMATTR_WB 0xf
+
+#define ATTR_DESCR_MASK 3
+#define ATTR_DESCR_VALID 1
+#define ATTR_DESCR_TYPE_MASK 2
+#define ATTR_DESCR_TYPE_TABLE 2
+#define ATTR_DESCR_TYPE_PAGE 2
+#define ATTR_DESCR_TYPE_BLOCK 0
+
+/*
+ * Superpage promotion requires that the bits specified by the following
+ * mask all be identical in the constituent PTEs.
+ */
+#define ATTR_PROMOTE (ATTR_MASK & ~(ATTR_CONTIGUOUS | ATTR_AF))
+
+/* Read the output address or next-level table address from a PTE */
+#define PTE_TO_PHYS(x) ({ \
+ pt_entry_t _pte = (x); \
+ vm_paddr_t _pa; \
+ _pa = _pte & ATTR_ADDR; \
+ if (pmap_lpa_enabled) \
+ _pa |= (_pte & ATTR_OA_51_50_MASK) << ATTR_OA_51_50_DELTA; \
+ _pa; \
+})
+
+/*
+ * Convert a physical address to an output address or next-level
+ * table address in a PTE
+ */
+#define PHYS_TO_PTE(x) ({ \
+ vm_paddr_t _pa = (x); \
+ pt_entry_t _pte; \
+ _pte = _pa & ATTR_ADDR; \
+ if (pmap_lpa_enabled) \
+ _pte |= (_pa >> ATTR_OA_51_50_DELTA) & ATTR_OA_51_50_MASK; \
+ _pte; \
+})
+
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define L0_SHIFT 39
+#define L1_SHIFT 30
+#define L2_SHIFT 21
+#define L3_SHIFT 12
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define L0_SHIFT 47
+#define L1_SHIFT 36
+#define L2_SHIFT 25
+#define L3_SHIFT 14
+#else
+#error Unsupported page size
+#endif
+
+/* Level 0 table, 512GiB/128TiB per entry */
+#define L0_SIZE (UINT64_C(1) << L0_SHIFT)
+#define L0_OFFSET (L0_SIZE - 1ul)
+#define L0_INVAL 0x0 /* An invalid address */
+ /* 0x1 Level 0 doesn't support block translation */
+ /* 0x2 also marks an invalid address */
+#define L0_TABLE 0x3 /* A next-level table */
+
+/* Level 1 table, 1GiB/64GiB per entry */
+#define L1_SIZE (UINT64_C(1) << L1_SHIFT)
+#define L1_OFFSET (L1_SIZE - 1)
+#define L1_INVAL L0_INVAL
+#define L1_BLOCK 0x1
+#define L1_TABLE L0_TABLE
+
+/* Level 2 table, 2MiB/32MiB per entry */
+#define L2_SIZE (UINT64_C(1) << L2_SHIFT)
+#define L2_OFFSET (L2_SIZE - 1)
+#define L2_INVAL L1_INVAL
+#define L2_BLOCK 0x1
+#define L2_TABLE L1_TABLE
+
+/* Level 3 table, 4KiB/16KiB per entry */
+#define L3_SIZE (1 << L3_SHIFT)
+#define L3_OFFSET (L3_SIZE - 1)
+#define L3_INVAL 0x0
+ /* 0x1 is reserved */
+ /* 0x2 also marks an invalid address */
+#define L3_PAGE 0x3
+
+/*
+ * A substantial portion of this is to make sure that we can cope with 4K
+ * framebuffers in early boot, assuming a common 4K resolution @ 32-bit depth.
+ */
+#define PMAP_MAPDEV_EARLY_SIZE (L2_SIZE * 20)
+
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define L0_ENTRIES_SHIFT 9
+#define Ln_ENTRIES_SHIFT 9
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define L0_ENTRIES_SHIFT 1
+#define Ln_ENTRIES_SHIFT 11
+#else
+#error Unsupported page size
+#endif
+
+#define L0_ENTRIES (1 << L0_ENTRIES_SHIFT)
+#define L0_ADDR_MASK (L0_ENTRIES - 1)
+
+#define Ln_ENTRIES (1 << Ln_ENTRIES_SHIFT)
+#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
+#define Ln_TABLE_MASK ((1 << 12) - 1)
+
+/*
+ * The number of contiguous Level 3 entries (with ATTR_CONTIGUOUS set) that
+ * can be coalesced into a single TLB entry
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define L2C_ENTRIES 16
+#define L3C_ENTRIES 16
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define L2C_ENTRIES 32
+#define L3C_ENTRIES 128
+#else
+#error Unsupported page size
+#endif
+
+#define L2C_SIZE (L2C_ENTRIES * L2_SIZE)
+#define L2C_OFFSET (L2C_SIZE - 1)
+
+#define L3C_SIZE (L3C_ENTRIES * L3_SIZE)
+#define L3C_OFFSET (L3C_SIZE - 1)
+
+#define pmap_l0_index(va) (((va) >> L0_SHIFT) & L0_ADDR_MASK)
+#define pmap_l1_index(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
+#define pmap_l2_index(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
+#define pmap_l3_index(va) (((va) >> L3_SHIFT) & Ln_ADDR_MASK)
+
+#endif /* !_MACHINE_PTE_H_ */
+
+/* End of pte.h */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/ptrace.h b/sys/arm64/include/ptrace.h
new file mode 100644
index 000000000000..a83188d8d47b
--- /dev/null
+++ b/sys/arm64/include/ptrace.h
@@ -0,0 +1,10 @@
+
+#ifndef _MACHINE_PTRACE_H_
+#define _MACHINE_PTRACE_H_
+
+#define __HAVE_PTRACE_MACHDEP
+
+#define PT_GETVFPREGS32 (PT_FIRSTMACH + 0)
+#define PT_SETVFPREGS32 (PT_FIRSTMACH + 1)
+
+#endif /* _MACHINE_PTRACE_H_ */
diff --git a/sys/arm64/include/reg.h b/sys/arm64/include/reg.h
new file mode 100644
index 000000000000..4226385480e8
--- /dev/null
+++ b/sys/arm64/include/reg.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/reg.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_REG_H_
+#define _MACHINE_REG_H_
+
+#include <sys/_types.h>
+
+struct reg {
+ __uint64_t x[30];
+ __uint64_t lr;
+ __uint64_t sp;
+ __uint64_t elr;
+ __uint64_t spsr;
+};
+
+struct reg32 {
+ unsigned int r[13];
+ unsigned int r_sp;
+ unsigned int r_lr;
+ unsigned int r_pc;
+ unsigned int r_cpsr;
+};
+
+struct fpreg {
+ __uint128_t fp_q[32];
+ __uint32_t fp_sr;
+ __uint32_t fp_cr;
+};
+
+struct fpreg32 {
+ int dummy;
+};
+
+#define SVEREG_FLAG_REGS_MASK 0x0001
+#define SVEREG_FLAG_FP 0x0000
+#define SVEREG_FLAG_SVE 0x0001
+
+struct svereg_header {
+ __uint32_t sve_size;
+ __uint32_t sve_maxsize;
+ __uint16_t sve_vec_len;
+ __uint16_t sve_max_vec_len;
+ __uint16_t sve_flags;
+ __uint16_t sve_reserved;
+};
+
+struct dbreg {
+ __uint8_t db_debug_ver;
+ __uint8_t db_nbkpts;
+ __uint8_t db_nwtpts;
+ __uint8_t db_pad[5];
+
+ struct {
+ __uint64_t dbr_addr;
+ __uint32_t dbr_ctrl;
+ __uint32_t dbr_pad;
+ } db_breakregs[16];
+ struct {
+ __uint64_t dbw_addr;
+ __uint32_t dbw_ctrl;
+ __uint32_t dbw_pad;
+ } db_watchregs[16];
+};
+
+struct dbreg32 {
+ int dummy;
+};
+
+struct arm64_addr_mask {
+ __uint64_t code;
+ __uint64_t data;
+};
+
+#define __HAVE_REG32
+
+#endif /* !_MACHINE_REG_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/reloc.h b/sys/arm64/include/reloc.h
new file mode 100644
index 000000000000..4f9f327af86f
--- /dev/null
+++ b/sys/arm64/include/reloc.h
@@ -0,0 +1,6 @@
+
+#ifdef __arm__
+#include <arm/reloc.h>
+#else /* !__arm__ */
+/* empty */
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/resource.h b/sys/arm64/include/resource.h
new file mode 100644
index 000000000000..336fc11a435a
--- /dev/null
+++ b/sys/arm64/include/resource.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/resource.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define _MACHINE_RESOURCE_H_ 1
+
+/*
+ * Definitions of resource types for Intel Architecture machines
+ * with support for legacy ISA devices and drivers.
+ */
+
+#define SYS_RES_IRQ 1 /* interrupt lines */
+#define SYS_RES_DRQ 2 /* isa dma lines */
+#define SYS_RES_MEMORY 3 /* i/o memory */
+#define SYS_RES_IOPORT 4 /* i/o ports */
+#define SYS_RES_GPIO 5 /* general purpose i/o */
+#define PCI_RES_BUS 6 /* PCI bus numbers */
+
+#endif /* !_MACHINE_RESOURCE_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/sdt_machdep.h b/sys/arm64/include/sdt_machdep.h
new file mode 100644
index 000000000000..738d246832a2
--- /dev/null
+++ b/sys/arm64/include/sdt_machdep.h
@@ -0,0 +1,12 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Mark Johnston <markj@FreeBSD.org>
+ */
+
+#ifndef _SYS_SDT_MACHDEP_H_
+#define _SYS_SDT_MACHDEP_H_
+
+#define _SDT_ASM_PATCH_INSTR "nop"
+
+#endif /* _SYS_SDT_MACHDEP_H_ */
diff --git a/sys/arm64/include/setjmp.h b/sys/arm64/include/setjmp.h
new file mode 100644
index 000000000000..b0a0cd039237
--- /dev/null
+++ b/sys/arm64/include/setjmp.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/setjmp.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_SETJMP_H_
+#define _MACHINE_SETJMP_H_
+
+#include <sys/cdefs.h>
+
+/*
+ * We need to store:
+ * - A magic value to differentiate the buffers
+ * - The stack pointer
+ * - The link register
+ * - 11 general purpose registers
+ * - 8 floating point registers
+ * - The signal mask (128 bits)
+ * i.e. 24 64-bit words, round this up to 31(+1) 128-bit words to allow for
+ * CPU extensions with larger registers and stronger alignment requirements.
+ *
+ * The registers to save are: r19 to r29, and d8 to d15.
+ */
+#define _JBLEN 31
+#define _JB_SIGMASK 22
+
+/* This should only be needed in libc and may change */
+#ifdef __ASSEMBLER__
+#define _JB_MAGIC__SETJMP 0xfb5d25837d7ff700
+#define _JB_MAGIC_SETJMP 0xfb5d25837d7ff701
+#endif
+
+#ifndef __ASSEMBLER__
+/*
+ * jmp_buf and sigjmp_buf are encapsulated in different structs to force
+ * compile-time diagnostics for mismatches. The structs are the same
+ * internally to avoid some run-time errors for mismatches.
+ */
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XSI_VISIBLE
+typedef struct _sigjmp_buf { __int128_t _sjb[_JBLEN + 1]; } sigjmp_buf[1];
+#endif
+
+typedef struct _jmp_buf { __int128_t _jb[_JBLEN + 1]; } jmp_buf[1];
+#endif /* __ASSEMBLER__ */
+
+#endif /* !_MACHINE_SETJMP_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/sf_buf.h b/sys/arm64/include/sf_buf.h
new file mode 100644
index 000000000000..00ed0e9d7456
--- /dev/null
+++ b/sys/arm64/include/sf_buf.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/sf_buf.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_SF_BUF_H_
+#define _MACHINE_SF_BUF_H_
+
+/*
+ * On this machine, the only purpose for which sf_buf is used is to implement
+ * an opaque pointer required by the machine-independent parts of the kernel.
+ * That pointer references the vm_page that is "mapped" by the sf_buf. The
+ * actual mapping is provided by the direct virtual-to-physical mapping.
+ */
+static inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+
+ return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+}
+
+static inline vm_page_t
+sf_buf_page(struct sf_buf *sf)
+{
+
+ return ((vm_page_t)sf);
+}
+#endif /* !_MACHINE_SF_BUF_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/sigframe.h b/sys/arm64/include/sigframe.h
new file mode 100644
index 000000000000..aa24726c4e34
--- /dev/null
+++ b/sys/arm64/include/sigframe.h
@@ -0,0 +1 @@
+#include <machine/frame.h>
diff --git a/sys/arm64/include/signal.h b/sys/arm64/include/signal.h
new file mode 100644
index 000000000000..03cb50bc3822
--- /dev/null
+++ b/sys/arm64/include/signal.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * from: FreeBSD: src/sys/i386/include/signal.h,v 1.13 2000/11/09
+ * from: FreeBSD: src/sys/sparc64/include/signal.h,v 1.6 2001/09/30 18:52:17
+ */
+
+#ifdef __arm__
+#include <arm/signal.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_SIGNAL_H_
+#define _MACHINE_SIGNAL_H_
+
+#include <sys/cdefs.h>
+
+typedef long sig_atomic_t;
+
+#if __BSD_VISIBLE
+
+struct sigcontext {
+ int _dummy;
+};
+
+#endif
+
+#endif /* !_MACHINE_SIGNAL_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/smp.h b/sys/arm64/include/smp.h
new file mode 100644
index 000000000000..500cd1ef4f02
--- /dev/null
+++ b/sys/arm64/include/smp.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_SMP_H_
+#define _MACHINE_SMP_H_
+
+#include <machine/pcb.h>
+
+enum {
+ IPI_AST,
+ IPI_PREEMPT,
+ IPI_RENDEZVOUS,
+ IPI_STOP,
+ IPI_STOP_HARD,
+ IPI_HARDCLOCK,
+ INTR_IPI_COUNT,
+};
+
+void ipi_all_but_self(u_int ipi);
+void ipi_cpu(int cpu, u_int ipi);
+void ipi_selected(cpuset_t cpus, u_int ipi);
+
+#endif /* !_MACHINE_SMP_H_ */
diff --git a/sys/arm64/include/stack.h b/sys/arm64/include/stack.h
new file mode 100644
index 000000000000..3aa1e235884d
--- /dev/null
+++ b/sys/arm64/include/stack.h
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_STACK_H_
+#define _MACHINE_STACK_H_
+
+#define INKERNEL(va) \
+ ((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
+
+struct unwind_state {
+ uintptr_t fp;
+ uintptr_t pc;
+};
+
+bool unwind_frame(struct thread *, struct unwind_state *);
+
+#ifdef _SYS_PROC_H_
+
+#include <machine/pcb.h>
+
+#define GET_STACK_USAGE(total, used) do { \
+ struct thread *td = curthread; \
+ (total) = td->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb); \
+ (used) = td->td_kstack + (total) - (vm_offset_t)&td; \
+} while (0)
+
+static __inline bool
+kstack_contains(struct thread *td, vm_offset_t va, size_t len)
+{
+ return (va >= td->td_kstack && va + len >= va &&
+ va + len <= td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
+ sizeof(struct pcb));
+}
+#endif /* _SYS_PROC_H_ */
+
+#endif /* !_MACHINE_STACK_H_ */
diff --git a/sys/arm64/include/stdarg.h b/sys/arm64/include/stdarg.h
new file mode 100644
index 000000000000..aba008ef3774
--- /dev/null
+++ b/sys/arm64/include/stdarg.h
@@ -0,0 +1,37 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2017 Poul-Henning Kamp. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_STDARG_H_
+#define _MACHINE_STDARG_H_
+
+#include <sys/_stdarg.h>
+
+#ifndef va_start
+ #error this file needs to be ported to your compiler
+#endif
+
+#endif /* !_MACHINE_STDARG_H_ */
diff --git a/sys/arm64/include/sysarch.h b/sys/arm64/include/sysarch.h
new file mode 100644
index 000000000000..498e26f6d47e
--- /dev/null
+++ b/sys/arm64/include/sysarch.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/sysarch.h,v 1.14 2000/09/21
+ */
+
+#ifdef __arm__
+#include <arm/sysarch.h>
+#else /* !__arm__ */
+
+/*
+ * Architecture specific syscalls (arm64)
+ */
+#ifndef _MACHINE_SYSARCH_H_
+#define _MACHINE_SYSARCH_H_
+
+#include <sys/cdefs.h>
+
+#define ARM64_GUARD_PAGE 0x100
+
+struct arm64_guard_page_args {
+ __uintptr_t addr;
+ __size_t len;
+};
+
+#define ARM64_GET_SVE_VL 0x200
+/* Reserved ARM64_SET_SVE_VL 0x201 */
+
+#ifndef _KERNEL
+
+__BEGIN_DECLS
+int sysarch(int _number, void *_args);
+__END_DECLS
+
+#endif
+
+#endif /* !_MACHINE_SYSARCH_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/sysreg.h b/sys/arm64/include/sysreg.h
new file mode 100644
index 000000000000..4ec435f9dcc1
--- /dev/null
+++ b/sys/arm64/include/sysreg.h
@@ -0,0 +1,5 @@
+#ifdef __arm__
+#include <arm/sysreg.h>
+#else /* !__arm__ */
+#error Do not include this header, used only for 32-bit compatibility
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/tls.h b/sys/arm64/include/tls.h
new file mode 100644
index 000000000000..c9db2bdfd708
--- /dev/null
+++ b/sys/arm64/include/tls.h
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>.
+ * Copyright (c) 2014 the FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/tls.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_TLS_H_
+#define _MACHINE_TLS_H_
+
+#include <sys/_tls_variant_i.h>
+
+#define TLS_DTV_OFFSET 0
+#define TLS_TCB_ALIGN 16
+#define TLS_TP_OFFSET 0
+
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+ __asm __volatile("msr tpidr_el0, %x0" :: "r" (tcb));
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+ struct tcb *tcb;
+
+ __asm __volatile("mrs %x0, tpidr_el0" : "=r" (tcb));
+ return (tcb);
+}
+
+#endif /* !_MACHINE_TLS_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/trap.h b/sys/arm64/include/trap.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/sys/arm64/include/trap.h
diff --git a/sys/arm64/include/ucontext.h b/sys/arm64/include/ucontext.h
new file mode 100644
index 000000000000..a4f0ee243b3a
--- /dev/null
+++ b/sys/arm64/include/ucontext.h
@@ -0,0 +1,114 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/ucontext.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_UCONTEXT_H_
+#define _MACHINE_UCONTEXT_H_
+
+struct gpregs {
+ __register_t gp_x[30];
+ __register_t gp_lr;
+ __register_t gp_sp;
+ __register_t gp_elr;
+ __uint64_t gp_spsr;
+};
+
+struct fpregs {
+ __uint128_t fp_q[32];
+ __uint32_t fp_sr;
+ __uint32_t fp_cr;
+ int fp_flags;
+ int fp_pad;
+};
+
+/*
+ * Support for registers that don't fit into gpregs or fpregs, e.g. SVE.
+ * There are some registers that have been added so are optional. To support
+ * these create an array of headers that point at the register data.
+ */
+struct arm64_reg_context {
+ __uint32_t ctx_id;
+ __uint32_t ctx_size;
+};
+
+#define ARM64_CTX_END 0xa5a5a5a5
+#define ARM64_CTX_SVE 0x00657673
+
+struct sve_context {
+ struct arm64_reg_context sve_ctx;
+ __uint16_t sve_vector_len;
+ __uint16_t sve_flags;
+ __uint16_t sve_reserved[2];
+};
+
+struct __mcontext {
+ struct gpregs mc_gpregs;
+ struct fpregs mc_fpregs;
+ int mc_flags;
+#define _MC_FP_VALID 0x1 /* Set when mc_fpregs has valid data */
+ int mc_pad; /* Padding */
+ __uint64_t mc_ptr; /* Address of extra_regs struct */
+ __uint64_t mc_spare[7]; /* Space for expansion, set to zero */
+};
+
+
+typedef struct __mcontext mcontext_t;
+
+#ifdef COMPAT_FREEBSD32
+#include <compat/freebsd32/freebsd32_signal.h>
+typedef struct __mcontext32 {
+ uint32_t mc_gregset[17];
+ uint32_t mc_vfp_size;
+ uint32_t mc_vfp_ptr;
+ uint32_t mc_spare[33];
+} mcontext32_t;
+
+typedef struct __ucontext32 {
+ sigset_t uc_sigmask;
+ mcontext32_t uc_mcontext;
+ u_int32_t uc_link;
+ struct sigaltstack32 uc_stack;
+ u_int32_t uc_flags;
+ u_int32_t __spare__[4];
+} ucontext32_t;
+
+typedef struct __mcontext32_vfp {
+ __uint64_t mcv_reg[32];
+ __uint32_t mcv_fpscr;
+} mcontext32_vfp_t;
+
+#endif /* COMPAT_FREEBSD32 */
+
+#endif /* !_MACHINE_UCONTEXT_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/undefined.h b/sys/arm64/include/undefined.h
new file mode 100644
index 000000000000..71b2eed22a84
--- /dev/null
+++ b/sys/arm64/include/undefined.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2017 Andrew Turner
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE__UNDEFINED_H_
+#define _MACHINE__UNDEFINED_H_
+
+#ifdef _KERNEL
+
+typedef int (*undef_handler_t)(vm_offset_t, uint32_t, struct trapframe *,
+ uint32_t);
+typedef bool (*undef_sys_handler_t)(uint64_t, struct trapframe *);
+
+void undef_init(void);
+void install_sys_handler(undef_sys_handler_t);
+void *install_undef_handler(undef_handler_t);
+#ifdef COMPAT_FREEBSD32
+void *install_undef32_handler(undef_handler_t);
+#endif
+void remove_undef_handler(void *);
+bool undef_sys(uint64_t, struct trapframe *);
+int undef_insn(struct trapframe *);
+
+#endif /* _KERNEL */
+
+#endif
diff --git a/sys/arm64/include/vdso.h b/sys/arm64/include/vdso.h
new file mode 100644
index 000000000000..7ee0241874a4
--- /dev/null
+++ b/sys/arm64/include/vdso.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright 2012 Konstantin Belousov <kib@FreeBSD.ORG>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/vdso.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_VDSO_H_
+#define _MACHINE_VDSO_H_
+
+#define VDSO_TIMEHANDS_MD \
+ uint32_t th_physical; \
+ uint32_t th_res[7];
+
+#define VDSO_TH_ALGO_ARM_GENTIM VDSO_TH_ALGO_1
+
+#define VDSO_TIMEHANDS_MD32 VDSO_TIMEHANDS_MD
+
+#endif /* !_MACHINE_VDSO_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/vfp.h b/sys/arm64/include/vfp.h
new file mode 100644
index 000000000000..fc93908add0b
--- /dev/null
+++ b/sys/arm64/include/vfp.h
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __arm__
+#include <arm/vfp.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_VFP_H_
+#define _MACHINE_VFP_H_
+
+/* VFPCR */
+#define VFPCR_AHP (0x04000000) /* alt. half-precision: */
+#define VFPCR_DN (0x02000000) /* default NaN enable */
+#define VFPCR_FZ (0x01000000) /* flush to zero enabled */
+#define VFPCR_INIT 0 /* Default fpcr after exec */
+
+#define VFPCR_RMODE_OFF 22 /* rounding mode offset */
+#define VFPCR_RMODE_MASK (0x00c00000) /* rounding mode mask */
+#define VFPCR_RMODE_RN (0x00000000) /* round nearest */
+#define VFPCR_RMODE_RPI (0x00400000) /* round to plus infinity */
+#define VFPCR_RMODE_RNI (0x00800000) /* round to neg infinity */
+#define VFPCR_RMODE_RM (0x00c00000) /* round to zero */
+
+#define VFPCR_STRIDE_OFF 20 /* vector stride -1 */
+#define VFPCR_STRIDE_MASK (0x00300000)
+#define VFPCR_LEN_OFF 16 /* vector length -1 */
+#define VFPCR_LEN_MASK (0x00070000)
+#define VFPCR_IDE (0x00008000) /* input subnormal exc enable */
+#define VFPCR_IXE (0x00001000) /* inexact exception enable */
+#define VFPCR_UFE (0x00000800) /* underflow exception enable */
+#define VFPCR_OFE (0x00000400) /* overflow exception enable */
+#define VFPCR_DZE (0x00000200) /* div by zero exception en */
+#define VFPCR_IOE (0x00000100) /* invalid op exec enable */
+
+#ifndef LOCORE
+struct vfpstate {
+ __uint128_t vfp_regs[32];
+ uint32_t vfp_fpcr;
+ uint32_t vfp_fpsr;
+};
+
+#ifdef _KERNEL
+struct pcb;
+struct thread;
+
+void vfp_init_secondary(void);
+void vfp_enable(void);
+void vfp_disable(void);
+void vfp_discard(struct thread *);
+void vfp_store(struct vfpstate *);
+void vfp_restore(struct vfpstate *);
+void vfp_new_thread(struct thread *, struct thread *, bool);
+void vfp_reset_state(struct thread *, struct pcb *);
+void vfp_restore_state(void);
+void vfp_save_state(struct thread *, struct pcb *);
+void vfp_save_state_savectx(struct pcb *);
+void vfp_save_state_switch(struct thread *);
+void vfp_to_sve_sync(struct thread *);
+void sve_to_vfp_sync(struct thread *);
+
+size_t sve_max_buf_size(void);
+size_t sve_buf_size(struct thread *);
+bool sve_restore_state(struct thread *);
+
+struct fpu_kern_ctx;
+
+/*
+ * Flags for fpu_kern_alloc_ctx(), fpu_kern_enter() and fpu_kern_thread().
+ */
+#define FPU_KERN_NORMAL 0x0000
+#define FPU_KERN_NOWAIT 0x0001
+#define FPU_KERN_KTHR 0x0002
+#define FPU_KERN_NOCTX 0x0004
+
+struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int);
+void fpu_kern_free_ctx(struct fpu_kern_ctx *);
+void fpu_kern_enter(struct thread *, struct fpu_kern_ctx *, u_int);
+int fpu_kern_leave(struct thread *, struct fpu_kern_ctx *);
+int fpu_kern_thread(u_int);
+int is_fpu_kern_thread(u_int);
+
+struct vfpstate *fpu_save_area_alloc(void);
+void fpu_save_area_free(struct vfpstate *fsa);
+void fpu_save_area_reset(struct vfpstate *fsa);
+
+/* Convert to and from Aarch32 FPSCR to Aarch64 FPCR/FPSR */
+#define VFP_FPSCR_FROM_SRCR(vpsr, vpcr) ((vpsr) | ((vpcr) & 0x7c00000))
+#define VFP_FPSR_FROM_FPSCR(vpscr) ((vpscr) &~ 0x7c00000)
+#define VFP_FPCR_FROM_FPSCR(vpsrc) ((vpsrc) & 0x7c00000)
+
+#ifdef COMPAT_FREEBSD32
+void get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp);
+void set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp);
+#endif
+
+#endif
+
+#endif
+
+#endif /* !_MACHINE_VFP_H_ */
+
+#endif /* !__arm__ */
diff --git a/sys/arm64/include/vm.h b/sys/arm64/include/vm.h
new file mode 100644
index 000000000000..342143c57246
--- /dev/null
+++ b/sys/arm64/include/vm.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2009 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_VM_H_
+#define _MACHINE_VM_H_
+
+/* Memory attribute configuration. */
+#define VM_MEMATTR_DEVICE_nGnRnE 0
+#define VM_MEMATTR_UNCACHEABLE 1
+#define VM_MEMATTR_WRITE_BACK 2
+#define VM_MEMATTR_WRITE_THROUGH 3
+#define VM_MEMATTR_DEVICE_nGnRE 4
+
+#define VM_MEMATTR_DEVICE VM_MEMATTR_DEVICE_nGnRE
+#define VM_MEMATTR_DEVICE_NP VM_MEMATTR_DEVICE_nGnRnE
+
+#ifdef _KERNEL
+/* If defined, vmstat will try to use both of these in a switch statement. */
+#define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WRITE_THROUGH
+#endif
+
+#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
+
+#endif /* !_MACHINE_VM_H_ */
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
new file mode 100644
index 000000000000..1d783cdacb0d
--- /dev/null
+++ b/sys/arm64/include/vmm.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VMM_H_
+#define _VMM_H_
+
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "pte.h"
+#include "pmap.h"
+
+struct vcpu;
+
+enum vm_suspend_how {
+ VM_SUSPEND_NONE,
+ VM_SUSPEND_RESET,
+ VM_SUSPEND_POWEROFF,
+ VM_SUSPEND_HALT,
+ VM_SUSPEND_LAST
+};
+
+/*
+ * Identifiers for architecturally defined registers.
+ */
+enum vm_reg_name {
+ VM_REG_GUEST_X0 = 0,
+ VM_REG_GUEST_X1,
+ VM_REG_GUEST_X2,
+ VM_REG_GUEST_X3,
+ VM_REG_GUEST_X4,
+ VM_REG_GUEST_X5,
+ VM_REG_GUEST_X6,
+ VM_REG_GUEST_X7,
+ VM_REG_GUEST_X8,
+ VM_REG_GUEST_X9,
+ VM_REG_GUEST_X10,
+ VM_REG_GUEST_X11,
+ VM_REG_GUEST_X12,
+ VM_REG_GUEST_X13,
+ VM_REG_GUEST_X14,
+ VM_REG_GUEST_X15,
+ VM_REG_GUEST_X16,
+ VM_REG_GUEST_X17,
+ VM_REG_GUEST_X18,
+ VM_REG_GUEST_X19,
+ VM_REG_GUEST_X20,
+ VM_REG_GUEST_X21,
+ VM_REG_GUEST_X22,
+ VM_REG_GUEST_X23,
+ VM_REG_GUEST_X24,
+ VM_REG_GUEST_X25,
+ VM_REG_GUEST_X26,
+ VM_REG_GUEST_X27,
+ VM_REG_GUEST_X28,
+ VM_REG_GUEST_X29,
+ VM_REG_GUEST_LR,
+ VM_REG_GUEST_SP,
+ VM_REG_GUEST_PC,
+ VM_REG_GUEST_CPSR,
+
+ VM_REG_GUEST_SCTLR_EL1,
+ VM_REG_GUEST_TTBR0_EL1,
+ VM_REG_GUEST_TTBR1_EL1,
+ VM_REG_GUEST_TCR_EL1,
+ VM_REG_GUEST_TCR2_EL1,
+ VM_REG_LAST
+};
+
+#define VM_INTINFO_VECTOR(info) ((info) & 0xff)
+#define VM_INTINFO_DEL_ERRCODE 0x800
+#define VM_INTINFO_RSVD 0x7ffff000
+#define VM_INTINFO_VALID 0x80000000
+#define VM_INTINFO_TYPE 0x700
+#define VM_INTINFO_HWINTR (0 << 8)
+#define VM_INTINFO_NMI (2 << 8)
+#define VM_INTINFO_HWEXCEPTION (3 << 8)
+#define VM_INTINFO_SWINTR (4 << 8)
+
+#define VM_GUEST_BASE_IPA 0x80000000UL /* Guest kernel start ipa */
+
+/*
+ * The VM name has to fit into the pathname length constraints of devfs,
+ * governed primarily by SPECNAMELEN. The length is the total number of
+ * characters in the full path, relative to the mount point and not
+ * including any leading '/' characters.
+ * A prefix and a suffix are added to the name specified by the user.
+ * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
+ * longer for future use.
+ * The suffix is a string that identifies a bootrom image or some similar
+ * image that is attached to the VM. A separator character gets added to
+ * the suffix automatically when generating the full path, so it must be
+ * accounted for, reducing the effective length by 1.
+ * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
+ * bytes for FreeBSD 12. A minimum length is set for safety and supports
+ * a SPECNAMELEN as small as 32 on old systems.
+ */
+#define VM_MAX_PREFIXLEN 10
+#define VM_MAX_SUFFIXLEN 15
+#define VM_MAX_NAMELEN \
+ (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
+
+#ifdef _KERNEL
+struct vm;
+struct vm_exception;
+struct vm_exit;
+struct vm_run;
+struct vm_object;
+struct vm_guest_paging;
+struct vm_vgic_descr;
+struct pmap;
+
+struct vm_eventinfo {
+ void *rptr; /* rendezvous cookie */
+ int *sptr; /* suspend cookie */
+ int *iptr; /* reqidle cookie */
+};
+
+int vm_create(const char *name, struct vm **retvm);
+struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
+void vm_disable_vcpu_creation(struct vm *vm);
+void vm_slock_vcpus(struct vm *vm);
+void vm_unlock_vcpus(struct vm *vm);
+void vm_destroy(struct vm *vm);
+int vm_reinit(struct vm *vm);
+const char *vm_name(struct vm *vm);
+
+uint16_t vm_get_maxcpus(struct vm *vm);
+void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
+ uint16_t *threads, uint16_t *maxcpus);
+int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
+ uint16_t threads, uint16_t maxcpus);
+int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
+int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
+int vm_run(struct vcpu *vcpu);
+int vm_suspend(struct vm *vm, enum vm_suspend_how how);
+void* vm_get_cookie(struct vm *vm);
+int vcpu_vcpuid(struct vcpu *vcpu);
+void *vcpu_get_cookie(struct vcpu *vcpu);
+struct vm *vcpu_vm(struct vcpu *vcpu);
+struct vcpu *vm_vcpu(struct vm *vm, int cpu);
+int vm_get_capability(struct vcpu *vcpu, int type, int *val);
+int vm_set_capability(struct vcpu *vcpu, int type, int val);
+int vm_activate_cpu(struct vcpu *vcpu);
+int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
+int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
+int vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far);
+int vm_attach_vgic(struct vm *vm, struct vm_vgic_descr *descr);
+int vm_assert_irq(struct vm *vm, uint32_t irq);
+int vm_deassert_irq(struct vm *vm, uint32_t irq);
+int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
+ int func);
+struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
+void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc);
+void vm_exit_debug(struct vcpu *vcpu, uint64_t pc);
+void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc);
+void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc);
+
+cpuset_t vm_active_cpus(struct vm *vm);
+cpuset_t vm_debug_cpus(struct vm *vm);
+cpuset_t vm_suspended_cpus(struct vm *vm);
+
+static __inline int
+vcpu_rendezvous_pending(struct vm_eventinfo *info)
+{
+
+ return (*((uintptr_t *)(info->rptr)) != 0);
+}
+
+static __inline int
+vcpu_suspended(struct vm_eventinfo *info)
+{
+
+ return (*info->sptr);
+}
+
+int vcpu_debugged(struct vcpu *vcpu);
+
+enum vcpu_state {
+ VCPU_IDLE,
+ VCPU_FROZEN,
+ VCPU_RUNNING,
+ VCPU_SLEEPING,
+};
+
+int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
+enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
+
+static int __inline
+vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
+{
+ return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
+}
+
+#ifdef _SYS_PROC_H_
+static int __inline
+vcpu_should_yield(struct vcpu *vcpu)
+{
+ struct thread *td;
+
+ td = curthread;
+ return (td->td_ast != 0 || td->td_owepreempt != 0);
+}
+#endif
+
+void *vcpu_stats(struct vcpu *vcpu);
+void vcpu_notify_event(struct vcpu *vcpu);
+struct vmspace *vm_vmspace(struct vm *vm);
+struct vm_mem *vm_mem(struct vm *vm);
+
+enum vm_reg_name vm_segment_name(int seg_encoding);
+
+struct vm_copyinfo {
+ uint64_t gpa;
+ size_t len;
+ void *hva;
+ void *cookie;
+};
+
+#endif /* _KERNEL */
+
+#define VM_DIR_READ 0
+#define VM_DIR_WRITE 1
+
+#define VM_GP_M_MASK 0x1f
+#define VM_GP_MMU_ENABLED (1 << 5)
+
+struct vm_guest_paging {
+ uint64_t ttbr0_addr;
+ uint64_t ttbr1_addr;
+ uint64_t tcr_el1;
+ uint64_t tcr2_el1;
+ int flags;
+ int padding;
+};
+
+struct vie {
+ uint8_t access_size:4, sign_extend:1, dir:1, unused:2;
+ enum vm_reg_name reg;
+};
+
+struct vre {
+ uint32_t inst_syndrome;
+ uint8_t dir:1, unused:7;
+ enum vm_reg_name reg;
+};
+
+/*
+ * Identifiers for optional vmm capabilities
+ */
+enum vm_cap_type {
+ VM_CAP_HALT_EXIT,
+ VM_CAP_PAUSE_EXIT,
+ VM_CAP_UNRESTRICTED_GUEST,
+ VM_CAP_BRK_EXIT,
+ VM_CAP_SS_EXIT,
+ VM_CAP_MASK_HWINTR,
+ VM_CAP_MAX
+};
+
+enum vm_exitcode {
+ VM_EXITCODE_BOGUS,
+ VM_EXITCODE_INST_EMUL,
+ VM_EXITCODE_REG_EMUL,
+ VM_EXITCODE_HVC,
+ VM_EXITCODE_SUSPENDED,
+ VM_EXITCODE_HYP,
+ VM_EXITCODE_WFI,
+ VM_EXITCODE_PAGING,
+ VM_EXITCODE_SMCCC,
+ VM_EXITCODE_DEBUG,
+ VM_EXITCODE_BRK,
+ VM_EXITCODE_SS,
+ VM_EXITCODE_MAX
+};
+
+struct vm_exit {
+ enum vm_exitcode exitcode;
+ int inst_length;
+ uint64_t pc;
+ union {
+ /*
+ * ARM specific payload.
+ */
+ struct {
+ uint32_t exception_nr;
+ uint32_t pad;
+ uint64_t esr_el2; /* Exception Syndrome Register */
+ uint64_t far_el2; /* Fault Address Register */
+ uint64_t hpfar_el2; /* Hypervisor IPA Fault Address Register */
+ } hyp;
+ struct {
+ struct vre vre;
+ } reg_emul;
+ struct {
+ uint64_t gpa;
+ uint64_t esr;
+ } paging;
+ struct {
+ uint64_t gpa;
+ struct vm_guest_paging paging;
+ struct vie vie;
+ } inst_emul;
+
+ /*
+ * A SMCCC call, e.g. starting a core via PSCI.
+ * Further arguments can be read by asking the kernel for
+ * all register values.
+ */
+ struct {
+ uint64_t func_id;
+ uint64_t args[7];
+ } smccc_call;
+
+ struct {
+ enum vm_suspend_how how;
+ } suspended;
+ } u;
+};
+
+#endif /* _VMM_H_ */
diff --git a/sys/arm64/include/vmm_dev.h b/sys/arm64/include/vmm_dev.h
new file mode 100644
index 000000000000..219f1116c728
--- /dev/null
+++ b/sys/arm64/include/vmm_dev.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VMM_DEV_H_
+#define _VMM_DEV_H_
+
+#include <sys/domainset.h>
+
+#include <machine/vmm.h>
+
+struct vm_memmap {
+ vm_paddr_t gpa;
+ int segid; /* memory segment */
+ vm_ooffset_t segoff; /* offset into memory segment */
+ size_t len; /* mmap length */
+ int prot; /* RWX */
+ int flags;
+};
+#define VM_MEMMAP_F_WIRED 0x01
+
+struct vm_munmap {
+ vm_paddr_t gpa;
+ size_t len;
+};
+
+#define VM_MEMSEG_NAME(m) ((m)->name[0] != '\0' ? (m)->name : NULL)
+struct vm_memseg {
+ int segid;
+ size_t len;
+ char name[VM_MAX_SUFFIXLEN + 1];
+ domainset_t *ds_mask;
+ size_t ds_mask_size;
+ int ds_policy;
+};
+
+struct vm_register {
+ int cpuid;
+ int regnum; /* enum vm_reg_name */
+ uint64_t regval;
+};
+
+struct vm_register_set {
+ int cpuid;
+ unsigned int count;
+ const int *regnums; /* enum vm_reg_name */
+ uint64_t *regvals;
+};
+
+struct vm_run {
+ int cpuid;
+ cpuset_t *cpuset; /* CPU set storage */
+ size_t cpusetsize;
+ struct vm_exit *vm_exit;
+};
+
+struct vm_exception {
+ int cpuid;
+ uint64_t esr;
+ uint64_t far;
+};
+
+struct vm_msi {
+ uint64_t msg;
+ uint64_t addr;
+ int bus;
+ int slot;
+ int func;
+};
+
+struct vm_capability {
+ int cpuid;
+ enum vm_cap_type captype;
+ int capval;
+ int allcpus;
+};
+
+#define MAX_VM_STATS 64
+struct vm_stats {
+ int cpuid; /* in */
+ int index; /* in */
+ int num_entries; /* out */
+ struct timeval tv;
+ uint64_t statbuf[MAX_VM_STATS];
+};
+struct vm_stat_desc {
+ int index; /* in */
+ char desc[128]; /* out */
+};
+
+struct vm_suspend {
+ enum vm_suspend_how how;
+};
+
+struct vm_gla2gpa {
+ int vcpuid; /* inputs */
+ int prot; /* PROT_READ or PROT_WRITE */
+ uint64_t gla;
+ struct vm_guest_paging paging;
+ int fault; /* outputs */
+ uint64_t gpa;
+};
+
+struct vm_activate_cpu {
+ int vcpuid;
+};
+
+struct vm_cpuset {
+ int which;
+ int cpusetsize;
+ cpuset_t *cpus;
+};
+#define VM_ACTIVE_CPUS 0
+#define VM_SUSPENDED_CPUS 1
+#define VM_DEBUG_CPUS 2
+
+struct vm_vgic_version {
+ u_int version;
+ u_int flags;
+};
+
+struct vm_vgic_descr {
+ struct vm_vgic_version ver;
+ union {
+ struct {
+ uint64_t dist_start;
+ uint64_t dist_size;
+ uint64_t redist_start;
+ uint64_t redist_size;
+ } v3_regs;
+ };
+};
+
+struct vm_irq {
+ uint32_t irq;
+};
+
+struct vm_cpu_topology {
+ uint16_t sockets;
+ uint16_t cores;
+ uint16_t threads;
+ uint16_t maxcpus;
+};
+
+enum {
+ /* general routines */
+ IOCNUM_ABIVERS = 0,
+ IOCNUM_RUN = 1,
+ IOCNUM_SET_CAPABILITY = 2,
+ IOCNUM_GET_CAPABILITY = 3,
+ IOCNUM_SUSPEND = 4,
+ IOCNUM_REINIT = 5,
+
+ /* memory apis */
+ IOCNUM_GET_GPA_PMAP = 12,
+ IOCNUM_GLA2GPA_NOFAULT = 13,
+ IOCNUM_ALLOC_MEMSEG = 14,
+ IOCNUM_GET_MEMSEG = 15,
+ IOCNUM_MMAP_MEMSEG = 16,
+ IOCNUM_MMAP_GETNEXT = 17,
+ IOCNUM_MUNMAP_MEMSEG = 18,
+
+ /* register/state accessors */
+ IOCNUM_SET_REGISTER = 20,
+ IOCNUM_GET_REGISTER = 21,
+ IOCNUM_SET_REGISTER_SET = 24,
+ IOCNUM_GET_REGISTER_SET = 25,
+
+ /* statistics */
+ IOCNUM_VM_STATS = 50,
+ IOCNUM_VM_STAT_DESC = 51,
+
+ /* CPU Topology */
+ IOCNUM_SET_TOPOLOGY = 63,
+ IOCNUM_GET_TOPOLOGY = 64,
+
+ /* interrupt injection */
+ IOCNUM_ASSERT_IRQ = 80,
+ IOCNUM_DEASSERT_IRQ = 81,
+ IOCNUM_RAISE_MSI = 82,
+ IOCNUM_INJECT_EXCEPTION = 83,
+
+ /* vm_cpuset */
+ IOCNUM_ACTIVATE_CPU = 90,
+ IOCNUM_GET_CPUSET = 91,
+ IOCNUM_SUSPEND_CPU = 92,
+ IOCNUM_RESUME_CPU = 93,
+
+ /* vm_attach_vgic */
+ IOCNUM_GET_VGIC_VERSION = 110,
+ IOCNUM_ATTACH_VGIC = 111,
+};
+
+#define VM_RUN \
+ _IOWR('v', IOCNUM_RUN, struct vm_run)
+#define VM_SUSPEND \
+ _IOW('v', IOCNUM_SUSPEND, struct vm_suspend)
+#define VM_REINIT \
+ _IO('v', IOCNUM_REINIT)
+#define VM_ALLOC_MEMSEG \
+ _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg)
+#define VM_GET_MEMSEG \
+ _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg)
+#define VM_MMAP_MEMSEG \
+ _IOW('v', IOCNUM_MMAP_MEMSEG, struct vm_memmap)
+#define VM_MMAP_GETNEXT \
+ _IOWR('v', IOCNUM_MMAP_GETNEXT, struct vm_memmap)
+#define VM_MUNMAP_MEMSEG \
+ _IOW('v', IOCNUM_MUNMAP_MEMSEG, struct vm_munmap)
+#define VM_SET_REGISTER \
+ _IOW('v', IOCNUM_SET_REGISTER, struct vm_register)
+#define VM_GET_REGISTER \
+ _IOWR('v', IOCNUM_GET_REGISTER, struct vm_register)
+#define VM_SET_REGISTER_SET \
+ _IOW('v', IOCNUM_SET_REGISTER_SET, struct vm_register_set)
+#define VM_GET_REGISTER_SET \
+ _IOWR('v', IOCNUM_GET_REGISTER_SET, struct vm_register_set)
+#define VM_SET_CAPABILITY \
+ _IOW('v', IOCNUM_SET_CAPABILITY, struct vm_capability)
+#define VM_GET_CAPABILITY \
+ _IOWR('v', IOCNUM_GET_CAPABILITY, struct vm_capability)
+#define VM_STATS \
+ _IOWR('v', IOCNUM_VM_STATS, struct vm_stats)
+#define VM_STAT_DESC \
+ _IOWR('v', IOCNUM_VM_STAT_DESC, struct vm_stat_desc)
+#define VM_ASSERT_IRQ \
+ _IOW('v', IOCNUM_ASSERT_IRQ, struct vm_irq)
+#define VM_DEASSERT_IRQ \
+ _IOW('v', IOCNUM_DEASSERT_IRQ, struct vm_irq)
+#define VM_RAISE_MSI \
+ _IOW('v', IOCNUM_RAISE_MSI, struct vm_msi)
+#define VM_INJECT_EXCEPTION \
+ _IOW('v', IOCNUM_INJECT_EXCEPTION, struct vm_exception)
+#define VM_SET_TOPOLOGY \
+ _IOW('v', IOCNUM_SET_TOPOLOGY, struct vm_cpu_topology)
+#define VM_GET_TOPOLOGY \
+ _IOR('v', IOCNUM_GET_TOPOLOGY, struct vm_cpu_topology)
+#define VM_GLA2GPA_NOFAULT \
+ _IOWR('v', IOCNUM_GLA2GPA_NOFAULT, struct vm_gla2gpa)
+#define VM_ACTIVATE_CPU \
+ _IOW('v', IOCNUM_ACTIVATE_CPU, struct vm_activate_cpu)
+#define VM_GET_CPUS \
+ _IOW('v', IOCNUM_GET_CPUSET, struct vm_cpuset)
+#define VM_SUSPEND_CPU \
+ _IOW('v', IOCNUM_SUSPEND_CPU, struct vm_activate_cpu)
+#define VM_RESUME_CPU \
+ _IOW('v', IOCNUM_RESUME_CPU, struct vm_activate_cpu)
+#define VM_GET_VGIC_VERSION \
+ _IOR('v', IOCNUM_GET_VGIC_VERSION, struct vm_vgic_version)
+#define VM_ATTACH_VGIC \
+ _IOW('v', IOCNUM_ATTACH_VGIC, struct vm_vgic_descr)
+#endif
diff --git a/sys/arm64/include/vmm_instruction_emul.h b/sys/arm64/include/vmm_instruction_emul.h
new file mode 100644
index 000000000000..a295f7cce127
--- /dev/null
+++ b/sys/arm64/include/vmm_instruction_emul.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VMM_INSTRUCTION_EMUL_H_
+#define _VMM_INSTRUCTION_EMUL_H_
+
+/*
+ * Callback functions to read and write memory regions.
+ */
+typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
+ uint64_t *rval, int rsize, void *arg);
+typedef int (*mem_region_write_t)(struct vcpu *vcpu, uint64_t gpa,
+ uint64_t wval, int wsize, void *arg);
+
+/*
+ * Callback functions to read and write registers.
+ */
+typedef int (*reg_read_t)(struct vcpu *vcpu, uint64_t *rval, void *arg);
+typedef int (*reg_write_t)(struct vcpu *vcpu, uint64_t wval, void *arg);
+
+/*
+ * Emulate the decoded 'vie' instruction when it contains a memory operation.
+ *
+ * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
+ * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
+ * callback functions.
+ *
+ * NOTE(review): this paragraph looks stale — the function now takes a
+ * 'struct vcpu *' rather than a 'void *vm'; confirm and update it for
+ * the vcpu-based API.
+ */
+int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
+ struct vm_guest_paging *paging, mem_region_read_t mrr,
+ mem_region_write_t mrw, void *mrarg);
+
+/*
+ * Emulate the decoded 'vre' instruction when it contains a register access.
+ *
+ * The callbacks 'regread' and 'regwrite' emulate reads and writes to the
+ * register from 'vie'. 'regarg' is an opaque argument that is passed into the
+ * callback functions.
+ *
+ * NOTE(review): this paragraph looks stale — the function now takes a
+ * 'struct vcpu *' rather than a 'void *vm'; confirm and update it for
+ * the vcpu-based API.
+ */
+int vmm_emulate_register(struct vcpu *vcpu, struct vre *vre, reg_read_t regread,
+ reg_write_t regwrite, void *regarg);
+
+#ifdef _KERNEL
+void vm_register_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask,
+ reg_read_t reg_read, reg_write_t reg_write, void *arg);
+void vm_deregister_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask);
+
+void vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size,
+ mem_region_read_t mmio_read, mem_region_write_t mmio_write);
+void vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size);
+#endif
+
+#endif /* _VMM_INSTRUCTION_EMUL_H_ */
diff --git a/sys/arm64/include/vmm_snapshot.h b/sys/arm64/include/vmm_snapshot.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/vmm_snapshot.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
new file mode 100644
index 000000000000..349849845e73
--- /dev/null
+++ b/sys/arm64/include/vmparam.h
@@ -0,0 +1,335 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
+ */
+
+#ifdef __arm__
+#include <arm/vmparam.h>
+#else /* !__arm__ */
+
+#ifndef _MACHINE_VMPARAM_H_
+#define _MACHINE_VMPARAM_H_
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ (1*1024*1024*1024) /* max text size */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ (128*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (1*1024*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (128*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (1*1024*1024*1024) /* max stack size */
+#endif
+#ifndef SGROWSIZ
+#define SGROWSIZ (128*1024) /* amount to grow stack */
+#endif
+
+/*
+ * The physical address space is sparsely populated.
+ */
+#define VM_PHYSSEG_SPARSE
+
+/*
+ * The number of PHYSSEG entries.
+ */
+#define VM_PHYSSEG_MAX 64
+
+/*
+ * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool from
+ * which physical pages are allocated and VM_FREEPOOL_DIRECT is the pool from
+ * which physical pages for page tables and small UMA objects are allocated.
+ * VM_FREEPOOL_LAZYINIT is a special-purpose pool that is populated only during
+ * boot and is used to implement deferred initialization of page structures.
+ */
+#define VM_NFREEPOOL 3
+#define VM_FREEPOOL_LAZYINIT 0
+#define VM_FREEPOOL_DEFAULT 1
+#define VM_FREEPOOL_DIRECT 2
+
+/*
+ * Create two free page lists: VM_FREELIST_DMA32 is for physical pages that have
+ * physical addresses below 4G, and VM_FREELIST_DEFAULT is for all other
+ * physical pages.
+ */
+#define VM_NFREELIST 2
+#define VM_FREELIST_DEFAULT 0
+#define VM_FREELIST_DMA32 1
+
+/*
+ * When PAGE_SIZE is 4KB, an allocation size of 16MB is supported in order
+ * to optimize the use of the direct map by UMA. Specifically, a 64-byte
+ * cache line contains at most 8 L2 BLOCK entries, collectively mapping 16MB
+ * of physical memory. By reducing the number of distinct 16MB "pages" that
+ * are used by UMA, the physical memory allocator reduces the likelihood of
+ * both 2MB page TLB misses and cache misses during the page table walk when
+ * a 2MB page TLB miss does occur.
+ *
+ * When PAGE_SIZE is 16KB, an allocation size of 32MB is supported. This
+ * size is used by level 0 reservations and L2 BLOCK mappings.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define VM_NFREEORDER 13
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define VM_NFREEORDER 12
+#else
+#error Unsupported page size
+#endif
+
+/*
+ * Enable superpage reservations: 2 levels.
+ */
+#ifndef VM_NRESERVLEVEL
+#define VM_NRESERVLEVEL 2
+#endif
+
+/*
+ * Level 0 reservations consist of 16 pages when PAGE_SIZE is 4KB, and 128
+ * pages when PAGE_SIZE is 16KB. Level 1 reservations consist of 32 64KB
+ * pages when PAGE_SIZE is 4KB, and 16 2M pages when PAGE_SIZE is 16KB.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#ifndef VM_LEVEL_0_ORDER
+#define VM_LEVEL_0_ORDER 4
+#endif
+#ifndef VM_LEVEL_1_ORDER
+#define VM_LEVEL_1_ORDER 5
+#endif
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#ifndef VM_LEVEL_0_ORDER
+#define VM_LEVEL_0_ORDER 7
+#endif
+#ifndef VM_LEVEL_1_ORDER
+#define VM_LEVEL_1_ORDER 4
+#endif
+#else
+#error Unsupported page size
+#endif
+
+/**
+ * Address space layout.
+ *
+ * ARMv8 implements up to a 48 bit virtual address space. The address space is
+ * split into 2 regions at each end of the 64 bit address space, with an
+ * out of range "hole" in the middle.
+ *
+ * We use the full 48 bits for each region, however the kernel may only use
+ * a limited range within this space.
+ *
+ * Upper region: 0xffffffffffffffff Top of virtual memory
+ *
+ * 0xfffffeffffffffff End of DMAP
+ * 0xffffa00000000000 Start of DMAP
+ *
+ * 0xffff027fffffffff End of KMSAN origin map
+ * 0xffff020000000000 Start of KMSAN origin map
+ *
+ * 0xffff017fffffffff End of KMSAN shadow map
+ * 0xffff010000000000 Start of KMSAN shadow map
+ *
+ * 0xffff009fffffffff End of KASAN shadow map
+ * 0xffff008000000000 Start of KASAN shadow map
+ *
+ * 0xffff007fffffffff End of KVA
+ * 0xffff000000000000 Kernel base address & start of KVA
+ *
+ * Hole: 0xfffeffffffffffff
+ * 0x0001000000000000
+ *
+ * Lower region: 0x0000ffffffffffff End of user address space
+ * 0x0000000000000000 Start of user address space
+ *
+ * We use the upper region for the kernel, and the lower region for userland.
+ *
+ * We define some interesting address constants:
+ *
+ * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
+ * 64 bit address space, mostly just for convenience.
+ *
+ * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
+ * mappable kernel virtual address space.
+ *
+ * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
+ * user address space.
+ */
+#define VM_MIN_ADDRESS (0x0000000000000000UL)
+#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
+
+/* 512 GiB of kernel addresses */
+#define VM_MIN_KERNEL_ADDRESS (0xffff000000000000UL)
+#define VM_MAX_KERNEL_ADDRESS (0xffff008000000000UL)
+
+/* 128 GiB KASAN shadow map */
+#define KASAN_MIN_ADDRESS (0xffff008000000000UL)
+#define KASAN_MAX_ADDRESS (0xffff00a000000000UL)
+
+/* 512GiB KMSAN shadow map */
+#define KMSAN_SHAD_MIN_ADDRESS (0xffff010000000000UL)
+#define KMSAN_SHAD_MAX_ADDRESS (0xffff018000000000UL)
+
+/* 512GiB KMSAN origin map */
+#define KMSAN_ORIG_MIN_ADDRESS (0xffff020000000000UL)
+#define KMSAN_ORIG_MAX_ADDRESS (0xffff028000000000UL)
+
+/* The address bits that hold a pointer authentication code */
+#define PAC_ADDR_MASK (0xff7f000000000000UL)
+
+/* If true addr is in the kernel address space */
+#define ADDR_IS_KERNEL(addr) (((addr) & (1ul << 55)) == (1ul << 55))
+/* If true addr is in its canonical form (i.e. no TBI, PAC, etc.) */
+#define ADDR_IS_CANONICAL(addr) \
+ (((addr) & 0xffff000000000000UL) == 0 || \
+ ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
+#define ADDR_MAKE_CANONICAL(addr) ({ \
+ __typeof(addr) _tmp_addr = (addr); \
+ \
+ _tmp_addr &= ~0xffff000000000000UL; \
+ if (ADDR_IS_KERNEL(addr)) \
+ _tmp_addr |= 0xffff000000000000UL; \
+ \
+ _tmp_addr; \
+})
+
+/* 95 TiB maximum for the direct map region */
+#define DMAP_MIN_ADDRESS (0xffffa00000000000UL)
+#define DMAP_MAX_ADDRESS (0xffffff0000000000UL)
+
+#define DMAP_MIN_PHYSADDR (dmap_phys_base)
+#define DMAP_MAX_PHYSADDR (dmap_phys_max)
+
+/*
+ * Checks to see if a physical address is in the DMAP range.
+ * - PHYS_IN_DMAP_RANGE may return true for addresses that lie within the
+ *   DMAP range but are not accessible through the DMAP, e.g. device
+ *   memory between two DMAP physical address regions.
+ * - PHYS_IN_DMAP will check if DMAP address is mapped before returning true.
+ *
+ * PHYS_IN_DMAP_RANGE should only be used when a check on the address is
+ * performed, e.g. by checking the physical address is within phys_avail,
+ * or checking the virtual address is mapped.
+ */
+#define PHYS_IN_DMAP_RANGE(pa) ((pa) >= DMAP_MIN_PHYSADDR && \
+ (pa) < DMAP_MAX_PHYSADDR)
+#define PHYS_IN_DMAP(pa) (PHYS_IN_DMAP_RANGE(pa) && \
+ pmap_klookup(PHYS_TO_DMAP(pa), NULL))
+/* True if va is in the dmap range */
+#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \
+ (va) < (dmap_max_addr))
+
+#define PMAP_HAS_DMAP 1
+#define PHYS_TO_DMAP(pa) \
+({ \
+ KASSERT(PHYS_IN_DMAP_RANGE(pa), \
+ ("%s: PA out of range, PA: 0x%lx", __func__, \
+ (vm_paddr_t)(pa))); \
+ ((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS; \
+})
+
+#define DMAP_TO_PHYS(va) \
+({ \
+ KASSERT(VIRT_IN_DMAP(va), \
+ ("%s: VA out of range, VA: 0x%lx", __func__, \
+ (vm_offset_t)(va))); \
+ ((va) - DMAP_MIN_ADDRESS) + dmap_phys_base; \
+})
+
+#define VM_MIN_USER_ADDRESS (0x0000000000000000UL)
+#define VM_MAX_USER_ADDRESS (0x0001000000000000UL)
+
+#define VM_MINUSER_ADDRESS (VM_MIN_USER_ADDRESS)
+#define VM_MAXUSER_ADDRESS (VM_MAX_USER_ADDRESS)
+
+#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
+#define SHAREDPAGE (VM_MAXUSER_ADDRESS - PAGE_SIZE)
+#define USRSTACK SHAREDPAGE
+
+/*
+ * How many physical pages per kmem arena virtual page.
+ */
+#ifndef VM_KMEM_SIZE_SCALE
+#define VM_KMEM_SIZE_SCALE (1)
+#endif
+
+/*
+ * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
+ * kernel map.
+ */
+#ifndef VM_KMEM_SIZE_MAX
+#define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \
+ VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
+#endif
+
+/*
+ * Initial pagein size of beginning of executable file.
+ */
+#ifndef VM_INITIAL_PAGEIN
+#define VM_INITIAL_PAGEIN 16
+#endif
+
+#if !defined(KASAN) && !defined(KMSAN)
+#define UMA_USE_DMAP
+#endif
+
+#ifndef LOCORE
+
+extern vm_paddr_t dmap_phys_base;
+extern vm_paddr_t dmap_phys_max;
+extern vm_offset_t dmap_max_addr;
+
+#endif
+
+#define ZERO_REGION_SIZE (64 * 1024) /* 64KB */
+
+#define DEVMAP_MAX_VADDR VM_MAX_KERNEL_ADDRESS
+
+/*
+ * The pmap can create non-transparent large page mappings.
+ */
+#define PMAP_HAS_LARGEPAGES 1
+
+/*
+ * Need a page dump array for minidump.
+ */
+#define MINIDUMP_PAGE_TRACKING 1
+#define MINIDUMP_STARTUP_PAGE_TRACKING 1
+
+#endif /* !_MACHINE_VMPARAM_H_ */
+
+#endif /* !__arm__ */