Diffstat (limited to 'sys/arm64')
-rw-r--r--  sys/arm64/acpica/acpi_iort.c | 4
-rw-r--r--  sys/arm64/apple/apple_aic.c | 780
-rw-r--r--  sys/arm64/apple/apple_pinctrl.c | 469
-rw-r--r--  sys/arm64/apple/apple_wdog.c | 234
-rw-r--r--  sys/arm64/apple/exynos_uart.c | 568
-rw-r--r--  sys/arm64/apple/exynos_uart.h | 136
-rw-r--r--  sys/arm64/arm64/bus_space_asm.S | 4
-rw-r--r--  sys/arm64/arm64/busdma_bounce.c | 84
-rw-r--r--  sys/arm64/arm64/cmn600.c | 8
-rw-r--r--  sys/arm64/arm64/copyinout.S | 5
-rw-r--r--  sys/arm64/arm64/cpu_feat.c | 119
-rw-r--r--  sys/arm64/arm64/cpufunc_asm.S | 4
-rw-r--r--  sys/arm64/arm64/debug_monitor.c | 6
-rw-r--r--  sys/arm64/arm64/efirt_machdep.c | 16
-rw-r--r--  sys/arm64/arm64/efirt_support.S | 101
-rw-r--r--  sys/arm64/arm64/elf32_machdep.c | 2
-rw-r--r--  sys/arm64/arm64/elf_machdep.c | 6
-rw-r--r--  sys/arm64/arm64/exception.S | 40
-rw-r--r--  sys/arm64/arm64/exec_machdep.c | 112
-rw-r--r--  sys/arm64/arm64/genassym.c | 9
-rw-r--r--  sys/arm64/arm64/gic_v3.c | 10
-rw-r--r--  sys/arm64/arm64/gic_v3_acpi.c | 12
-rw-r--r--  sys/arm64/arm64/gic_v3_fdt.c | 10
-rw-r--r--  sys/arm64/arm64/gic_v3_var.h | 8
-rw-r--r--  sys/arm64/arm64/gicv3_its.c | 46
-rw-r--r--  sys/arm64/arm64/hyp_stub.S | 4
-rw-r--r--  sys/arm64/arm64/identcpu.c | 1350
-rw-r--r--  sys/arm64/arm64/locore.S | 460
-rw-r--r--  sys/arm64/arm64/machdep.c | 332
-rw-r--r--  sys/arm64/arm64/machdep_boot.c | 22
-rw-r--r--  sys/arm64/arm64/mem.c | 13
-rw-r--r--  sys/arm64/arm64/memcmp.S | 3
-rw-r--r--  sys/arm64/arm64/memcpy.S | 3
-rw-r--r--  sys/arm64/arm64/memset.S | 4
-rw-r--r--  sys/arm64/arm64/minidump_machdep.c | 6
-rw-r--r--  sys/arm64/arm64/mp_machdep.c | 131
-rw-r--r--  sys/arm64/arm64/nexus.c | 51
-rw-r--r--  sys/arm64/arm64/pmap.c | 1275
-rw-r--r--  sys/arm64/arm64/ptrauth.c | 100
-rw-r--r--  sys/arm64/arm64/sdt_machdep.c | 77
-rw-r--r--  sys/arm64/arm64/sigtramp.S | 3
-rw-r--r--  sys/arm64/arm64/stack_machdep.c | 2
-rw-r--r--  sys/arm64/arm64/strcmp.S | 3
-rw-r--r--  sys/arm64/arm64/strncmp.S | 3
-rw-r--r--  sys/arm64/arm64/support.S | 4
-rw-r--r--  sys/arm64/arm64/swtch.S | 38
-rw-r--r--  sys/arm64/arm64/sys_machdep.c | 18
-rw-r--r--  sys/arm64/arm64/trap.c | 43
-rw-r--r--  sys/arm64/arm64/uio_machdep.c | 16
-rw-r--r--  sys/arm64/arm64/undefined.c | 208
-rw-r--r--  sys/arm64/arm64/vfp.c | 762
-rw-r--r--  sys/arm64/arm64/vm_machdep.c | 20
-rw-r--r--  sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c | 5
-rw-r--r--  sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c | 5
-rw-r--r--  sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c | 3
-rw-r--r--  sys/arm64/broadcom/genet/if_genet.c | 9
-rw-r--r--  sys/arm64/cavium/thunder_pcie_fdt.c | 2
-rw-r--r--  sys/arm64/cavium/thunder_pcie_pem.c | 26
-rw-r--r--  sys/arm64/conf/DEFAULTS | 1
-rw-r--r--  sys/arm64/conf/GENERIC.hints | 2
-rw-r--r--  sys/arm64/conf/NOTES | 14
-rw-r--r--  sys/arm64/conf/std.allwinner | 2
-rw-r--r--  sys/arm64/conf/std.arm64 | 4
-rw-r--r--  sys/arm64/conf/std.dev | 9
-rw-r--r--  sys/arm64/conf/std.qcom | 3
-rw-r--r--  sys/arm64/conf/std.rockchip | 3
-rw-r--r--  sys/arm64/conf/std.xilinx | 1
-rw-r--r--  sys/arm64/freescale/imx/clk/imx_clk_composite.c | 5
-rw-r--r--  sys/arm64/freescale/imx/imx8mp_ccm.c | 693
-rw-r--r--  sys/arm64/freescale/imx/imx8mp_ccm.h | 456
-rw-r--r--  sys/arm64/freescale/imx/imx8mq_ccm.c | 197
-rw-r--r--  sys/arm64/freescale/imx/imx_ccm.c | 237
-rw-r--r--  sys/arm64/freescale/imx/imx_ccm.h (renamed from sys/arm64/freescale/imx/imx_ccm_clk.h) | 20
-rw-r--r--  sys/arm64/include/armreg.h | 1176
-rw-r--r--  sys/arm64/include/asm.h | 22
-rw-r--r--  sys/arm64/include/atomic.h | 9
-rw-r--r--  sys/arm64/include/bus.h | 1
-rw-r--r--  sys/arm64/include/bus_dma.h | 2
-rw-r--r--  sys/arm64/include/cpu.h | 39
-rw-r--r--  sys/arm64/include/cpu_feat.h | 88
-rw-r--r--  sys/arm64/include/cpufunc.h | 22
-rw-r--r--  sys/arm64/include/efi.h | 4
-rw-r--r--  sys/arm64/include/elf.h | 174
-rw-r--r--  sys/arm64/include/hypervisor.h | 102
-rw-r--r--  sys/arm64/include/intr.h | 16
-rw-r--r--  sys/arm64/include/machdep.h | 1
-rw-r--r--  sys/arm64/include/md_var.h | 4
-rw-r--r--  sys/arm64/include/metadata.h | 11
-rw-r--r--  sys/arm64/include/param.h | 12
-rw-r--r--  sys/arm64/include/pcb.h | 10
-rw-r--r--  sys/arm64/include/pmap.h | 18
-rw-r--r--  sys/arm64/include/proc.h | 6
-rw-r--r--  sys/arm64/include/pte.h | 62
-rw-r--r--  sys/arm64/include/reg.h | 13
-rw-r--r--  sys/arm64/include/resource.h | 2
-rw-r--r--  sys/arm64/include/sdt_machdep.h | 12
-rw-r--r--  sys/arm64/include/sysarch.h | 5
-rw-r--r--  sys/arm64/include/ucontext.h | 8
-rw-r--r--  sys/arm64/include/undefined.h | 30
-rw-r--r--  sys/arm64/include/vfp.h | 7
-rw-r--r--  sys/arm64/include/vmm.h | 74
-rw-r--r--  sys/arm64/include/vmm_dev.h | 5
-rw-r--r--  sys/arm64/include/vmparam.h | 45
-rw-r--r--  sys/arm64/iommu/iommu.c | 52
-rw-r--r--  sys/arm64/iommu/iommu_pmap.c | 10
-rw-r--r--  sys/arm64/iommu/smmu.c | 23
-rw-r--r--  sys/arm64/iommu/smmuvar.h | 2
-rw-r--r--  sys/arm64/linux/linux.h | 2
-rw-r--r--  sys/arm64/linux/linux_dummy_machdep.c | 6
-rw-r--r--  sys/arm64/linux/linux_machdep.c | 2
-rw-r--r--  sys/arm64/linux/linux_proto.h | 4
-rw-r--r--  sys/arm64/linux/linux_support.S | 4
-rw-r--r--  sys/arm64/linux/linux_sysent.c | 314
-rw-r--r--  sys/arm64/linux/linux_systrace_args.c | 13
-rw-r--r--  sys/arm64/linux/linux_sysvec.c | 4
-rw-r--r--  sys/arm64/linux/linux_vdso_gtod.c | 2
-rw-r--r--  sys/arm64/linux/syscalls.master | 4
-rw-r--r--  sys/arm64/nvidia/tegra210/max77620.c | 10
-rw-r--r--  sys/arm64/nvidia/tegra210/max77620_rtc.c | 12
-rw-r--r--  sys/arm64/nvidia/tegra210/tegra210_coretemp.c | 4
-rw-r--r--  sys/arm64/nvidia/tegra210/tegra210_cpufreq.c | 4
-rw-r--r--  sys/arm64/nvidia/tegra210/tegra210_pmc.c | 4
-rw-r--r--  sys/arm64/qoriq/qoriq_dw_pci.c | 4
-rw-r--r--  sys/arm64/qoriq/qoriq_gpio_pic.c | 2
-rw-r--r--  sys/arm64/qoriq/qoriq_therm.c | 3
-rw-r--r--  sys/arm64/rockchip/rk3328_codec.c | 7
-rw-r--r--  sys/arm64/rockchip/rk3568_pcie.c | 65
-rw-r--r--  sys/arm64/rockchip/rk_gpio.c | 2
-rw-r--r--  sys/arm64/rockchip/rk_grf_gpio.c | 236
-rw-r--r--  sys/arm64/rockchip/rk_i2s.c | 8
-rw-r--r--  sys/arm64/rockchip/rk_iodomain.c | 11
-rw-r--r--  sys/arm64/rockchip/rk_pcie.c | 5
-rw-r--r--  sys/arm64/rockchip/rk_pinctrl.c | 11
-rw-r--r--  sys/arm64/rockchip/rk_tsadc.c | 3
-rw-r--r--  sys/arm64/rockchip/rk_usbphy.c | 3
-rw-r--r--  sys/arm64/vmm/arm64.h | 10
-rw-r--r--  sys/arm64/vmm/io/vgic_v3.c | 3
-rw-r--r--  sys/arm64/vmm/io/vtimer.c | 38
-rw-r--r--  sys/arm64/vmm/vmm.c | 487
-rw-r--r--  sys/arm64/vmm/vmm_arm64.c | 369
-rw-r--r--  sys/arm64/vmm/vmm_call.S | 3
-rw-r--r--  sys/arm64/vmm/vmm_dev.c | 1054
-rw-r--r--  sys/arm64/vmm/vmm_dev_machdep.c | 138
-rw-r--r--  sys/arm64/vmm/vmm_handlers.c | 113
-rw-r--r--  sys/arm64/vmm/vmm_handlers.h (renamed from sys/arm64/arm64/uma_machdep.c) | 63
-rw-r--r--  sys/arm64/vmm/vmm_hyp.c | 253
-rw-r--r--  sys/arm64/vmm/vmm_hyp_el2.S | 7
-rw-r--r--  sys/arm64/vmm/vmm_hyp_exception.S | 118
-rw-r--r--  sys/arm64/vmm/vmm_ktr.h | 69
-rw-r--r--  sys/arm64/vmm/vmm_mmu.c | 2
-rw-r--r--  sys/arm64/vmm/vmm_nvhe.c | 118
-rw-r--r--  sys/arm64/vmm/vmm_nvhe_exception.S | 120
-rw-r--r--  sys/arm64/vmm/vmm_reset.c | 11
-rw-r--r--  sys/arm64/vmm/vmm_stat.c | 165
-rw-r--r--  sys/arm64/vmm/vmm_stat.h | 100
-rw-r--r--  sys/arm64/vmm/vmm_vhe.c (renamed from sys/arm64/include/runq.h) | 35
-rw-r--r--  sys/arm64/vmm/vmm_vhe_exception.S | 31
157 files changed, 10863 insertions, 4701 deletions
diff --git a/sys/arm64/acpica/acpi_iort.c b/sys/arm64/acpica/acpi_iort.c
index a0e24788b775..a180dc1a2a2c 100644
--- a/sys/arm64/acpica/acpi_iort.c
+++ b/sys/arm64/acpica/acpi_iort.c
@@ -630,7 +630,7 @@ acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
}
int
-acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
+acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, uint64_t *xref, u_int *sid)
{
ACPI_IORT_SMMU_V3 *smmu;
struct iort_node *node;
@@ -672,7 +672,7 @@ acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref,
}
int
-acpi_iort_map_named_smmuv3(const char *devname, u_int rid, u_int *xref,
+acpi_iort_map_named_smmuv3(const char *devname, u_int rid, uint64_t *xref,
u_int *devid)
{
ACPI_IORT_SMMU_V3 *smmu;
diff --git a/sys/arm64/apple/apple_aic.c b/sys/arm64/apple/apple_aic.c
new file mode 100644
index 000000000000..c9ce3b4d2165
--- /dev/null
+++ b/sys/arm64/apple/apple_aic.c
@@ -0,0 +1,780 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Andrew Turner
+ * Copyright (c) 2022 Michael J. Karels <karels@freebsd.org>
+ * Copyright (c) 2022 Kyle Evans <kevans@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+
+#include <machine/bus.h>
+#include <machine/machdep.h>
+#ifdef SMP
+#include <machine/intr.h>
+#include <machine/smp.h>
+#endif
+
+#include <dev/fdt/fdt_intr.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+
+#include "pic_if.h"
+
+#define AIC_INFO 0x0004
+#define AIC_INFO_NDIE(val) (((val) >> 24) & 0xf)
+#define AIC_INFO_NIRQS(val) ((val) & 0x0000ffff)
+
+#define AIC_WHOAMI 0x2000
+#define AIC_EVENT 0x2004
+#define AIC_EVENT_DIE(val) (((val) >> 24) & 0xff)
+#define AIC_EVENT_TYPE(val) (((val) >> 16) & 0xff)
+#define AIC_EVENT_TYPE_NONE 0
+#define AIC_EVENT_TYPE_IRQ 1
+#define AIC_EVENT_TYPE_IPI 4
+#define AIC_EVENT_IRQ(val) ((val) & 0xffff)
+#define AIC_EVENT_IPI_OTHER 1
+#define AIC_EVENT_IPI_SELF 2
+#define AIC_IPI_SEND 0x2008
+#define AIC_IPI_ACK 0x200c
+#define AIC_IPI_MASK_SET 0x2024
+#define AIC_IPI_MASK_CLR 0x2028
+#define AIC_IPI_OTHER 0x00000001
+#define AIC_IPI_SELF 0x80000000
+#define AIC_TARGET_CPU(irq) (0x3000 + (irq) * 4)
+#define AIC_SW_SET(irq) (0x4000 + (((irq) >> 5) * 4))
+#define AIC_SW_CLEAR(irq) (0x4080 + (((irq) >> 5) * 4))
+#define AIC_MASK_SET(irq) (0x4100 + (((irq) >> 5) * 4))
+#define AIC_MASK_CLEAR(irq) (0x4180 + (((irq) >> 5) * 4))
+#define AIC_IRQ_MASK(irq) (1u << ((irq) & 0x1f))
+
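+/*
+ * Implementation-defined Apple system registers used for fast IPIs and the
+ * guest timer FIQ mask; they have no architectural names, hence the raw
+ * s3_5_c15_* encodings.
+ */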
+#define AIC_IPI_LOCAL_RR_EL1 s3_5_c15_c0_0
+#define AIC_IPI_GLOBAL_RR_EL1 s3_5_c15_c0_1
+
+#define AIC_IPI_SR_EL1 s3_5_c15_c1_1
+#define AIC_IPI_SR_EL1_PENDING (1 << 0)
+
+#define AIC_FIQ_VM_TIMER s3_5_c15_c1_3
+#define AIC_FIQ_VM_TIMER_VEN (1 << 0)
+#define AIC_FIQ_VM_TIMER_PEN (1 << 1)
+#define AIC_FIQ_VM_TIMER_BITS (AIC_FIQ_VM_TIMER_VEN | AIC_FIQ_VM_TIMER_PEN)
+
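+/* Architected generic-timer CNTx_CTL bits, polled from the FIQ handler. */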
+#define CNTV_CTL_ENABLE (1 << 0)
+#define CNTV_CTL_IMASK (1 << 1)
+#define CNTV_CTL_ISTATUS (1 << 2)
+#define CNTV_CTL_BITS \
+ (CNTV_CTL_ENABLE | CNTV_CTL_IMASK | CNTV_CTL_ISTATUS)
+
+#define AIC_MAXCPUS 32
+#define AIC_MAXDIES 4
+
+static struct ofw_compat_data compat_data[] = {
+ { "apple,aic", 1 },
+ { NULL, 0 }
+};
+
+enum apple_aic_irq_type {
+ AIC_TYPE_INVAL,
+ AIC_TYPE_IRQ,
+ AIC_TYPE_FIQ,
+ AIC_TYPE_IPI,
+};
+
+struct apple_aic_irqsrc {
+ struct intr_irqsrc ai_isrc;
+ enum apple_aic_irq_type ai_type;
+ struct {
+ /* AIC_TYPE_IRQ */
+ enum intr_polarity ai_pol;
+ enum intr_trigger ai_trig;
+ u_int ai_irq;
+ };
+};
+
+#ifdef SMP
+#define AIC_NIPIS INTR_IPI_COUNT
+#endif
+
+struct apple_aic_softc {
+ device_t sc_dev;
+ struct resource *sc_mem;
+ struct apple_aic_irqsrc *sc_isrcs[AIC_MAXDIES];
+ u_int sc_nirqs;
+ u_int sc_ndie;
+#ifdef SMP
+ struct apple_aic_irqsrc sc_ipi_srcs[AIC_NIPIS];
+ uint32_t *sc_ipimasks;
+#endif
+ u_int *sc_cpuids; /* cpu index to AIC CPU ID */
+};
+
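+/* Next CPU to target when routing a new IRQ, advanced round-robin. */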
+static u_int aic_next_cpu;
+
+static device_probe_t apple_aic_probe;
+static device_attach_t apple_aic_attach;
+
+static pic_disable_intr_t apple_aic_disable_intr;
+static pic_enable_intr_t apple_aic_enable_intr;
+static pic_map_intr_t apple_aic_map_intr;
+static pic_setup_intr_t apple_aic_setup_intr;
+static pic_teardown_intr_t apple_aic_teardown_intr;
+static pic_post_filter_t apple_aic_post_filter;
+static pic_post_ithread_t apple_aic_post_ithread;
+static pic_pre_ithread_t apple_aic_pre_ithread;
+#ifdef SMP
+static pic_bind_intr_t apple_aic_bind_intr;
+static pic_init_secondary_t apple_aic_init_secondary;
+static pic_ipi_send_t apple_aic_ipi_send;
+static pic_ipi_setup_t apple_aic_ipi_setup;
+#endif
+
+static int apple_aic_irq(void *);
+static int apple_aic_fiq(void *);
+
+static int
+apple_aic_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Apple Interrupt Controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+apple_aic_attach(device_t dev)
+{
+ struct apple_aic_softc *sc;
+ struct intr_irqsrc *isrc;
+ const char *name;
+ intptr_t xref;
+ int error, rid;
+ u_int i, cpu, j, info;
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+
+ rid = 0;
+ sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->sc_mem == NULL) {
+ device_printf(dev, "Unable to allocate memory\n");
+ return (ENXIO);
+ }
+
+ info = bus_read_4(sc->sc_mem, AIC_INFO);
+ sc->sc_nirqs = AIC_INFO_NIRQS(info);
+ sc->sc_ndie = AIC_INFO_NDIE(info) + 1;
+ if (bootverbose)
+ device_printf(dev, "Found %d interrupts, %d die\n",
+ sc->sc_nirqs, sc->sc_ndie);
+
+ for (i = 0; i < sc->sc_ndie; i++) {
+ sc->sc_isrcs[i] = mallocarray(sc->sc_nirqs,
+ sizeof(**sc->sc_isrcs), M_DEVBUF, M_WAITOK | M_ZERO);
+ }
+
+#ifdef SMP
+	sc->sc_ipimasks = malloc(sizeof(*sc->sc_ipimasks) * (mp_maxid + 1),
+	    M_DEVBUF, M_WAITOK | M_ZERO);
+#endif
+	sc->sc_cpuids = malloc(sizeof(*sc->sc_cpuids) * (mp_maxid + 1),
+	    M_DEVBUF, M_WAITOK | M_ZERO);
+
+ cpu = PCPU_GET(cpuid);
+ sc->sc_cpuids[cpu] = bus_read_4(sc->sc_mem, AIC_WHOAMI);
+ if (bootverbose)
+ device_printf(dev, "BSP CPU %d: whoami %x\n", cpu,
+ sc->sc_cpuids[cpu]);
+
+ name = device_get_nameunit(dev);
+ for (i = 0; i < sc->sc_ndie; i++) {
+ struct apple_aic_irqsrc *die_isrcs;
+
+ die_isrcs = sc->sc_isrcs[i];
+ for (j = 0; j < sc->sc_nirqs; j++) {
+ isrc = &die_isrcs[j].ai_isrc;
+ die_isrcs[j].ai_pol = INTR_POLARITY_CONFORM;
+ die_isrcs[j].ai_trig = INTR_TRIGGER_CONFORM;
+ die_isrcs[j].ai_type = AIC_TYPE_INVAL;
+ die_isrcs[j].ai_irq = j;
+
+ error = intr_isrc_register(isrc, dev, 0, "%s,d%us%u", name,
+ i, j);
+ if (error != 0) {
+ device_printf(dev, "Unable to register irq %u:%u\n",
+ i, j);
+ return (error);
+ }
+ }
+ }
+
+ xref = OF_xref_from_node(ofw_bus_get_node(dev));
+ if (intr_pic_register(dev, xref) == NULL) {
+ device_printf(dev, "Unable to register interrupt handler\n");
+ return (ENXIO);
+ }
+
+ if (intr_pic_claim_root(dev, xref, apple_aic_irq, sc,
+ INTR_ROOT_IRQ) != 0) {
+ device_printf(dev,
+ "Unable to set root interrupt controller\n");
+ intr_pic_deregister(dev, xref);
+ return (ENXIO);
+ }
+
+ if (intr_pic_claim_root(dev, xref, apple_aic_fiq, sc,
+ INTR_ROOT_FIQ) != 0) {
+ device_printf(dev,
+ "Unable to set root fiq controller\n");
+ intr_pic_deregister(dev, xref);
+ return (ENXIO);
+ }
+
+#ifdef SMP
+ if (intr_ipi_pic_register(dev, 0) != 0) {
+ device_printf(dev, "could not register for IPIs\n");
+ return (ENXIO);
+ }
+#endif
+
+ OF_device_register_xref(xref, dev);
+
+ return (0);
+}
+
+static int
+apple_aic_map_intr_fdt(struct apple_aic_softc *sc,
+ struct intr_map_data_fdt *data, u_int *irq, enum apple_aic_irq_type *typep,
+ enum intr_polarity *polp, enum intr_trigger *trigp, u_int *die)
+{
+ if (data->ncells != 3)
+ return (EINVAL);
+
+ /* XXX AIC2 */
+ *die = 0;
+
+ /*
+ * The first cell is the interrupt type:
+ * 0 = IRQ
+ * 1 = FIQ
+ * The second cell is the interrupt number
+ * The third cell is the flags
+ */
+ switch(data->cells[0]) {
+ case 0:
+ if (typep != NULL)
+ *typep = AIC_TYPE_IRQ;
+ break;
+ case 1:
+ if (typep != NULL)
+ *typep = AIC_TYPE_FIQ;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ *irq = data->cells[1];
+	if (*irq >= sc->sc_nirqs)
+ return (EINVAL);
+
+ if (trigp != NULL) {
+ if ((data->cells[2] & FDT_INTR_EDGE_MASK) != 0)
+ *trigp = INTR_TRIGGER_EDGE;
+ else
+ *trigp = INTR_TRIGGER_LEVEL;
+ }
+ if (polp != NULL) {
+ if ((data->cells[2] & FDT_INTR_LEVEL_HIGH) != 0)
+ *polp = INTR_POLARITY_HIGH;
+ else
+ *polp = INTR_POLARITY_LOW;
+ }
+
+ return (0);
+}
+
+static int
+apple_aic_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+ struct apple_aic_softc *sc;
+ int error;
+ u_int irq;
+ u_int die;
+
+ sc = device_get_softc(dev);
+
+ error = 0;
+ switch(data->type) {
+ case INTR_MAP_DATA_FDT:
+ error = apple_aic_map_intr_fdt(sc,
+ (struct intr_map_data_fdt *)data, &irq, NULL, NULL, NULL,
+ &die);
+ if (error == 0)
+ *isrcp = &sc->sc_isrcs[0 /* XXX */][irq].ai_isrc;
+ break;
+ default:
+ return (ENOTSUP);
+ }
+
+ return (error);
+}
+
+static int
+apple_aic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ struct apple_aic_softc *sc;
+ enum apple_aic_irq_type type;
+ struct apple_aic_irqsrc *ai;
+ enum intr_trigger trig;
+ enum intr_polarity pol;
+ int error;
+ u_int die, irq;
+
+ sc = device_get_softc(dev);
+ ai = (struct apple_aic_irqsrc *)isrc;
+
+ if (data != NULL) {
+ KASSERT(data->type == INTR_MAP_DATA_FDT,
+ ("%s: Only FDT data is supported (got %#x)", __func__,
+ data->type));
+ error = apple_aic_map_intr_fdt(sc,
+ (struct intr_map_data_fdt *)data, &irq, &type, &pol, &trig,
+ &die);
+ if (error != 0)
+ return (error);
+ } else {
+ pol = INTR_POLARITY_CONFORM;
+ trig = INTR_TRIGGER_CONFORM;
+ }
+
+ if (isrc->isrc_handlers != 0) {
+ /* TODO */
+ return (0);
+ }
+
+ if (pol == INTR_POLARITY_CONFORM)
+ pol = INTR_POLARITY_LOW;
+ if (trig == INTR_TRIGGER_CONFORM)
+ trig = INTR_TRIGGER_EDGE;
+
+ ai->ai_pol = pol;
+ ai->ai_trig = trig;
+ ai->ai_type = type;
+
+	/*
+	 * Only the timers use FIQs, and those are delivered per-CPU, so they
+	 * need no routing; plain IRQs are targeted at a CPU round-robin.
+	 */
+ switch (type) {
+ case AIC_TYPE_IRQ:
+ /* XXX die sensitive? */
+ aic_next_cpu = intr_irq_next_cpu(aic_next_cpu, &all_cpus);
+ bus_write_4(sc->sc_mem, AIC_TARGET_CPU(irq),
+ 1 << sc->sc_cpuids[aic_next_cpu]);
+ break;
+ case AIC_TYPE_FIQ:
+ isrc->isrc_flags |= INTR_ISRCF_PPI;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static int
+apple_aic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
+ struct resource *res, struct intr_map_data *data)
+{
+ panic("%s\n", __func__);
+}
+
+static void
+apple_aic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct apple_aic_irqsrc *ai;
+ struct apple_aic_softc *sc;
+ u_int irq;
+
+ ai = (struct apple_aic_irqsrc *)isrc;
+ irq = ai->ai_irq;
+ switch(ai->ai_type) {
+ case AIC_TYPE_IRQ:
+ sc = device_get_softc(dev);
+ bus_write_4(sc->sc_mem, AIC_MASK_CLEAR(irq), AIC_IRQ_MASK(irq));
+ break;
+ case AIC_TYPE_IPI:
+ /* Nothing needed here. */
+ break;
+ case AIC_TYPE_FIQ:
+ /* TODO */
+ break;
+ default:
+ panic("%s: %x\n", __func__, ai->ai_type);
+ }
+}
+
+static void
+apple_aic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct apple_aic_irqsrc *ai;
+ struct apple_aic_softc *sc;
+ u_int irq;
+
+ ai = (struct apple_aic_irqsrc *)isrc;
+ irq = ai->ai_irq;
+ switch(ai->ai_type) {
+ case AIC_TYPE_IRQ:
+ sc = device_get_softc(dev);
+ bus_write_4(sc->sc_mem, AIC_MASK_SET(irq), AIC_IRQ_MASK(irq));
+ break;
+ case AIC_TYPE_IPI:
+ /* Nothing needed here. */
+ break;
+ case AIC_TYPE_FIQ:
+ /* TODO */
+ break;
+ default:
+ panic("%s: %x\n", __func__, ai->ai_type);
+ }
+}
+
+static void
+apple_aic_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct apple_aic_softc *sc;
+ struct apple_aic_irqsrc *ai;
+ int irq;
+
+ ai = (struct apple_aic_irqsrc *)isrc;
+ irq = ai->ai_irq;
+ switch(ai->ai_type) {
+ case AIC_TYPE_IRQ:
+ sc = device_get_softc(dev);
+ bus_write_4(sc->sc_mem, AIC_SW_CLEAR(irq), AIC_IRQ_MASK(irq));
+ bus_write_4(sc->sc_mem, AIC_MASK_CLEAR(irq), AIC_IRQ_MASK(irq));
+ break;
+ case AIC_TYPE_FIQ:
+ /* TODO */
+ break;
+ default:
+ panic("%s: %x\n", __func__, ai->ai_type);
+ }
+}
+
+static void
+apple_aic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct apple_aic_softc *sc;
+ struct apple_aic_irqsrc *ai;
+ int irq;
+
+ ai = (struct apple_aic_irqsrc *)isrc;
+ sc = device_get_softc(dev);
+ irq = ai->ai_irq;
+ bus_write_4(sc->sc_mem, AIC_SW_CLEAR(irq), AIC_IRQ_MASK(irq));
+ apple_aic_disable_intr(dev, isrc);
+	/* Reading AIC_EVENT in the filter already acked the interrupt. */
+}
+
+static void
+apple_aic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct apple_aic_softc *sc;
+ struct apple_aic_irqsrc *ai;
+ int irq;
+
+ ai = (struct apple_aic_irqsrc *)isrc;
+ sc = device_get_softc(dev);
+ irq = ai->ai_irq;
+
+ bus_write_4(sc->sc_mem, AIC_MASK_CLEAR(irq), AIC_IRQ_MASK(irq));
+ apple_aic_enable_intr(dev, isrc);
+}
+
+#ifdef SMP
+static void
+apple_aic_ipi_received(struct apple_aic_softc *sc, struct trapframe *tf)
+{
+ uint32_t mask;
+ uint32_t ipi;
+ int cpu;
+
+ cpu = PCPU_GET(cpuid);
+
+ mask = atomic_readandclear_32(&sc->sc_ipimasks[cpu]);
+
+ while (mask != 0) {
+ ipi = ffs(mask) - 1;
+ mask &= ~(1 << ipi);
+
+ intr_ipi_dispatch(ipi);
+ }
+}
+#endif
+
+static int
+apple_aic_irq(void *arg)
+{
+ struct apple_aic_softc *sc;
+ uint32_t die, event, irq, type;
+ struct apple_aic_irqsrc *aisrc;
+ struct trapframe *tf;
+
+ sc = arg;
+ tf = curthread->td_intr_frame;
+
+ event = bus_read_4(sc->sc_mem, AIC_EVENT);
+ type = AIC_EVENT_TYPE(event);
+
+	/* IPIs are delivered as FIQs, so we must never see one here. */
+	MPASS(type != AIC_EVENT_TYPE_IPI);
+
+ if (type != AIC_EVENT_TYPE_IRQ) {
+ if (type != AIC_EVENT_TYPE_NONE)
+ device_printf(sc->sc_dev, "unexpected event type %d\n",
+ type);
+ return (FILTER_STRAY);
+ }
+
+ die = AIC_EVENT_DIE(event);
+ irq = AIC_EVENT_IRQ(event);
+
+ if (die >= sc->sc_ndie)
+ panic("%s: unexpected die %d", __func__, die);
+ if (irq >= sc->sc_nirqs)
+ panic("%s: unexpected irq %d", __func__, irq);
+
+ aisrc = &sc->sc_isrcs[die][irq];
+ if (intr_isrc_dispatch(&aisrc->ai_isrc, tf) != 0) {
+ device_printf(sc->sc_dev, "Stray irq %u:%u disabled\n",
+ die, irq);
+ return (FILTER_STRAY);
+ }
+
+ return (FILTER_HANDLED);
+}
+
+static int
+apple_aic_fiq(void *arg)
+{
+ struct apple_aic_softc *sc;
+ struct apple_aic_irqsrc *isrcs;
+ struct trapframe *tf;
+
+ sc = arg;
+ tf = curthread->td_intr_frame;
+
+#ifdef SMP
+ /* Handle IPIs. */
+ if ((READ_SPECIALREG(AIC_IPI_SR_EL1) & AIC_IPI_SR_EL1_PENDING) != 0) {
+ WRITE_SPECIALREG(AIC_IPI_SR_EL1, AIC_IPI_SR_EL1_PENDING);
+ apple_aic_ipi_received(sc, tf);
+ }
+#endif
+
+	/*
+	 * Outside of IPI handling, the AIC keeps no state for FIQs, so we
+	 * have to inspect the timer registers directly to determine which
+	 * timer fired.
+	 */
+ isrcs = sc->sc_isrcs[0];
+ if ((READ_SPECIALREG(cntv_ctl_el0) & CNTV_CTL_BITS) ==
+ (CNTV_CTL_ENABLE | CNTV_CTL_ISTATUS)) {
+ intr_isrc_dispatch(&isrcs[AIC_TMR_GUEST_VIRT].ai_isrc, tf);
+ }
+
+ if (has_hyp()) {
+ uint64_t reg;
+
+ if ((READ_SPECIALREG(cntp_ctl_el0) & CNTV_CTL_ISTATUS) != 0) {
+ intr_isrc_dispatch(&isrcs[AIC_TMR_GUEST_PHYS].ai_isrc,
+ tf);
+ }
+
+ reg = READ_SPECIALREG(AIC_FIQ_VM_TIMER);
+ if ((reg & AIC_FIQ_VM_TIMER_PEN) != 0) {
+ intr_isrc_dispatch(&isrcs[AIC_TMR_HV_PHYS].ai_isrc, tf);
+ }
+
+ if ((reg & AIC_FIQ_VM_TIMER_VEN) != 0) {
+ intr_isrc_dispatch(&isrcs[AIC_TMR_HV_VIRT].ai_isrc, tf);
+ }
+ }
+
+ return (FILTER_HANDLED);
+}
+
+#ifdef SMP
+static int
+apple_aic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
+{
+ struct apple_aic_softc *sc = device_get_softc(dev);
+ static int aic_next_cpu;
+ uint32_t targets = 0;
+ u_int irq, cpu;
+
+ MPASS(((struct apple_aic_irqsrc *)isrc)->ai_type == AIC_TYPE_IRQ);
+ irq = ((struct apple_aic_irqsrc *)isrc)->ai_irq;
+ if (CPU_EMPTY(&isrc->isrc_cpu)) {
+ aic_next_cpu = intr_irq_next_cpu(aic_next_cpu, &all_cpus);
+ CPU_SETOF(aic_next_cpu, &isrc->isrc_cpu);
+ bus_write_4(sc->sc_mem, AIC_TARGET_CPU(irq),
+ sc->sc_cpuids[aic_next_cpu] << 1);
+ } else {
+ CPU_FOREACH_ISSET(cpu, &isrc->isrc_cpu) {
+ targets |= sc->sc_cpuids[cpu] << 1;
+ }
+ bus_write_4(sc->sc_mem, AIC_TARGET_CPU(irq), targets);
+ }
+ return (0);
+}
+
+static void
+apple_aic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
+ u_int ipi)
+{
+ struct apple_aic_softc *sc;
+ uint64_t aff, localgrp, sendmask;
+ u_int cpu;
+
+ sc = device_get_softc(dev);
+ sendmask = 0;
+ localgrp = CPU_AFF1(CPU_AFFINITY(PCPU_GET(cpuid)));
+
+ KASSERT(isrc == &sc->sc_ipi_srcs[ipi].ai_isrc,
+ ("%s: bad ISRC %p argument", __func__, isrc));
+ for (cpu = 0; cpu <= mp_maxid; cpu++) {
+ if (CPU_ISSET(cpu, &cpus)) {
+ aff = CPU_AFFINITY(cpu);
+ sendmask = CPU_AFF0(aff);
+ atomic_set_32(&sc->sc_ipimasks[cpu], 1 << ipi);
+
+			/*
+			 * The above write to sc_ipimasks needs to be visible
+			 * before we write to the ipi register to avoid the
+			 * targeted CPU missing the dispatch in
+			 * apple_aic_ipi_received(). Note that WRITE_SPECIALREG
+			 * isn't a memory operation, so we can't relax this to
+			 * a dmb.
+			 */
+ dsb(ishst);
+
+ if (CPU_AFF1(aff) == localgrp) {
+ WRITE_SPECIALREG(AIC_IPI_LOCAL_RR_EL1,
+ sendmask);
+ } else {
+ sendmask |= CPU_AFF1(aff) << 16;
+ WRITE_SPECIALREG(AIC_IPI_GLOBAL_RR_EL1,
+ sendmask);
+ }
+
+ isb();
+ }
+ }
+}
+
+static int
+apple_aic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
+{
+ struct apple_aic_softc *sc = device_get_softc(dev);
+ struct apple_aic_irqsrc *ai;
+
+ KASSERT(ipi < AIC_NIPIS, ("%s: ipi %u too high", __func__, ipi));
+
+ ai = &sc->sc_ipi_srcs[ipi];
+ ai->ai_type = AIC_TYPE_IPI;
+
+ *isrcp = &ai->ai_isrc;
+ return (0);
+}
+
+static void
+apple_aic_init_secondary(device_t dev, uint32_t rootnum)
+{
+ struct apple_aic_softc *sc = device_get_softc(dev);
+ u_int cpu = PCPU_GET(cpuid);
+
+ /* We don't need to re-initialize for the FIQ root. */
+ if (rootnum != INTR_ROOT_IRQ)
+ return;
+
+ sc->sc_cpuids[cpu] = bus_read_4(sc->sc_mem, AIC_WHOAMI);
+ if (bootverbose)
+ device_printf(dev, "CPU %d: whoami %x\n", cpu,
+ sc->sc_cpuids[cpu]);
+
+ bus_write_4(sc->sc_mem, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
+}
+#endif
+
+static device_method_t apple_aic_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, apple_aic_probe),
+ DEVMETHOD(device_attach, apple_aic_attach),
+
+ /* Interrupt controller interface */
+ DEVMETHOD(pic_disable_intr, apple_aic_disable_intr),
+ DEVMETHOD(pic_enable_intr, apple_aic_enable_intr),
+ DEVMETHOD(pic_map_intr, apple_aic_map_intr),
+ DEVMETHOD(pic_setup_intr, apple_aic_setup_intr),
+ DEVMETHOD(pic_teardown_intr, apple_aic_teardown_intr),
+ DEVMETHOD(pic_post_filter, apple_aic_post_filter),
+ DEVMETHOD(pic_post_ithread, apple_aic_post_ithread),
+ DEVMETHOD(pic_pre_ithread, apple_aic_pre_ithread),
+#ifdef SMP
+ DEVMETHOD(pic_bind_intr, apple_aic_bind_intr),
+ DEVMETHOD(pic_init_secondary, apple_aic_init_secondary),
+ DEVMETHOD(pic_ipi_send, apple_aic_ipi_send),
+ DEVMETHOD(pic_ipi_setup, apple_aic_ipi_setup),
+#endif
+
+ /* End */
+ DEVMETHOD_END
+};
+
+static DEFINE_CLASS_0(aic, apple_aic_driver, apple_aic_methods,
+ sizeof(struct apple_aic_softc));
+
+EARLY_DRIVER_MODULE(aic, simplebus, apple_aic_driver, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/arm64/apple/apple_pinctrl.c b/sys/arm64/apple/apple_pinctrl.c
new file mode 100644
index 000000000000..ec2dd5907024
--- /dev/null
+++ b/sys/arm64/apple/apple_pinctrl.c
@@ -0,0 +1,469 @@
+/* $OpenBSD: aplpinctrl.c,v 1.4 2022/04/06 18:59:26 naddy Exp $ */
+/*
+ * Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
+ * Copyright (c) 2022 Kyle Evans <kevans@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/fdt/fdt_pinctrl.h>
+
+#include "pic_if.h"
+#include "gpio_if.h"
+
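+/*
+ * Each cell of the "pinmux" FDT property packs the pin number into the low
+ * 16 bits and the function selector into the high 16 bits.
+ */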
+#define APPLE_PIN(pinmux) ((pinmux) & 0xffff)
+#define APPLE_FUNC(pinmux) ((pinmux) >> 16)
+
+#define GPIO_PIN(pin) ((pin) * 4)
+#define GPIO_PIN_GROUP_MASK (7 << 16)
+#define GPIO_PIN_INPUT_ENABLE (1 << 9)
+#define GPIO_PIN_FUNC_MASK (3 << 5)
+#define GPIO_PIN_FUNC_SHIFT 5
+#define GPIO_PIN_MODE_MASK (7 << 1)
+#define GPIO_PIN_MODE_INPUT (0 << 1)
+#define GPIO_PIN_MODE_OUTPUT (1 << 1)
+#define GPIO_PIN_MODE_IRQ_HI (2 << 1)
+#define GPIO_PIN_MODE_IRQ_LO (3 << 1)
+#define GPIO_PIN_MODE_IRQ_UP (4 << 1)
+#define GPIO_PIN_MODE_IRQ_DN (5 << 1)
+#define GPIO_PIN_MODE_IRQ_ANY (6 << 1)
+#define GPIO_PIN_MODE_IRQ_OFF (7 << 1)
+#define GPIO_PIN_DATA (1 << 0)
+#define GPIO_IRQ(grp, pin) (0x800 + (grp) * 64 + ((pin) >> 5) * 4)
+
+#define APPLE_PINCTRL_DEFAULT_CAPS \
+ (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)
+
+#define HREAD4(sc, reg) \
+ bus_read_4((sc)->sc_res[APPLE_PINCTRL_MEMRES], reg)
+#define HWRITE4(sc, reg, val) \
+ bus_write_4((sc)->sc_res[APPLE_PINCTRL_MEMRES], reg, val)
+#define HSET4(sc, reg, bits) \
+ HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
+#define HCLR4(sc, reg, bits) \
+ HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
+
+struct apple_pinctrl_irqsrc {
+ struct intr_irqsrc isrc;
+ int irq;
+ int type;
+};
+
+enum {
+ APPLE_PINCTRL_MEMRES = 0,
+ APPLE_PINCTRL_IRQRES,
+ APPLE_PINCTRL_NRES,
+};
+
+struct apple_pinctrl_softc {
+ device_t sc_dev;
+ device_t sc_busdev;
+ struct mtx sc_mtx;
+ int sc_ngpios;
+
+ void *sc_intrhand;
+ struct resource *sc_res[APPLE_PINCTRL_NRES];
+ struct apple_pinctrl_irqsrc *sc_irqs;
+};
+
+#define APPLE_PINCTRL_LOCK(sc) mtx_lock_spin(&(sc)->sc_mtx)
+#define APPLE_PINCTRL_UNLOCK(sc) mtx_unlock_spin(&(sc)->sc_mtx)
+#define APPLE_PINCTRL_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
+
+static struct ofw_compat_data compat_data[] = {
+ {"apple,pinctrl", 1},
+ {NULL, 0},
+};
+
+static struct resource_spec apple_pinctrl_res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { -1, 0, 0 },
+};
+
+static int apple_pinctrl_probe(device_t dev);
+static int apple_pinctrl_attach(device_t dev);
+static int apple_pinctrl_detach(device_t dev);
+
+static int apple_pinctrl_configure(device_t, phandle_t);
+static phandle_t apple_pinctrl_get_node(device_t, device_t);
+
+static int
+apple_pinctrl_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Apple Pinmux Controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+apple_pinctrl_attach(device_t dev)
+{
+ pcell_t gpio_ranges[4];
+ phandle_t node;
+ struct apple_pinctrl_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+
+ node = ofw_bus_get_node(dev);
+
+ if (bus_alloc_resources(dev, apple_pinctrl_res_spec, sc->sc_res) != 0) {
+ device_printf(dev, "cannot allocate device resources\n");
+ return (ENXIO);
+ }
+
+ mtx_init(&sc->sc_mtx, "aapl gpio", "gpio", MTX_SPIN);
+
+ error = OF_getencprop(node, "gpio-ranges", gpio_ranges,
+ sizeof(gpio_ranges));
+ if (error == -1) {
+ device_printf(dev, "failed to get gpio-ranges\n");
+ goto error;
+ }
+
+ sc->sc_ngpios = gpio_ranges[3];
+ if (sc->sc_ngpios == 0) {
+ device_printf(dev, "no GPIOs\n");
+ goto error;
+ }
+
+ sc->sc_busdev = gpiobus_attach_bus(dev);
+ if (sc->sc_busdev == NULL) {
+ device_printf(dev, "failed to attach gpiobus\n");
+ goto error;
+ }
+
+ fdt_pinctrl_register(dev, "pinmux");
+ fdt_pinctrl_configure_tree(dev);
+
+ if (!OF_hasprop(node, "interrupt-controller"))
+ return (0);
+
+ sc->sc_irqs = mallocarray(sc->sc_ngpios,
+ sizeof(*sc->sc_irqs), M_DEVBUF, M_ZERO | M_WAITOK);
+	intr_pic_register(dev, OF_xref_from_node(node));
+
+ return (0);
+error:
+ mtx_destroy(&sc->sc_mtx);
+ bus_release_resources(dev, apple_pinctrl_res_spec, sc->sc_res);
+ return (ENXIO);
+}
+
+static int
+apple_pinctrl_detach(device_t dev)
+{
+
+ return (EBUSY);
+}
+
+static void
+apple_pinctrl_pin_configure(struct apple_pinctrl_softc *sc, uint32_t pin,
+ uint32_t flags)
+{
+ uint32_t reg;
+
+ APPLE_PINCTRL_LOCK_ASSERT(sc);
+
+ MPASS(pin < sc->sc_ngpios);
+
+ reg = HREAD4(sc, GPIO_PIN(pin));
+ reg &= ~GPIO_PIN_FUNC_MASK;
+ reg &= ~GPIO_PIN_MODE_MASK;
+
+ if ((flags & GPIO_PIN_PRESET_LOW) != 0)
+ reg &= ~GPIO_PIN_DATA;
+ else if ((flags & GPIO_PIN_PRESET_HIGH) != 0)
+ reg |= GPIO_PIN_DATA;
+
+ if ((flags & GPIO_PIN_INPUT) != 0)
+ reg |= GPIO_PIN_MODE_INPUT;
+ else if ((flags & GPIO_PIN_OUTPUT) != 0)
+ reg |= GPIO_PIN_MODE_OUTPUT;
+
+ HWRITE4(sc, GPIO_PIN(pin), reg);
+}
+
+static device_t
+apple_pinctrl_get_bus(device_t dev)
+{
+ struct apple_pinctrl_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (sc->sc_busdev);
+}
+
+static int
+apple_pinctrl_pin_max(device_t dev, int *maxpin)
+{
+ struct apple_pinctrl_softc *sc;
+
+ sc = device_get_softc(dev);
+ *maxpin = sc->sc_ngpios - 1;
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_getname(device_t dev, uint32_t pin, char *name)
+{
+ struct apple_pinctrl_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+ snprintf(name, GPIOMAXNAME - 1, "gpio%c%d",
+ device_get_unit(dev) + 'a', pin);
+
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
+{
+ struct apple_pinctrl_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+ *flags = 0;
+
+ APPLE_PINCTRL_LOCK(sc);
+
+ reg = HREAD4(sc, GPIO_PIN(pin));
+ if ((reg & GPIO_PIN_MODE_INPUT) != 0)
+ *flags |= GPIO_PIN_INPUT;
+ else if ((reg & GPIO_PIN_MODE_OUTPUT) != 0)
+ *flags |= GPIO_PIN_OUTPUT;
+
+ APPLE_PINCTRL_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
+{
+
+ *caps = APPLE_PINCTRL_DEFAULT_CAPS;
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
+{
+ struct apple_pinctrl_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+ APPLE_PINCTRL_LOCK(sc);
+ apple_pinctrl_pin_configure(sc, pin, flags);
+ APPLE_PINCTRL_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_get(device_t dev, uint32_t pin, unsigned int *val)
+{
+ struct apple_pinctrl_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+ APPLE_PINCTRL_LOCK(sc);
+ reg = HREAD4(sc, GPIO_PIN(pin));
+ *val = !!(reg & GPIO_PIN_DATA);
+ APPLE_PINCTRL_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_set(device_t dev, uint32_t pin, unsigned int value)
+{
+ struct apple_pinctrl_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+ APPLE_PINCTRL_LOCK(sc);
+ if (value)
+ HSET4(sc, GPIO_PIN(pin), GPIO_PIN_DATA);
+ else
+ HCLR4(sc, GPIO_PIN(pin), GPIO_PIN_DATA);
+ device_printf(sc->sc_dev, "set pin %d to %x\n",
+ pin, HREAD4(sc, GPIO_PIN(pin)));
+ APPLE_PINCTRL_UNLOCK(sc);
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_toggle(device_t dev, uint32_t pin)
+{
+ struct apple_pinctrl_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+ APPLE_PINCTRL_LOCK(sc);
+ reg = HREAD4(sc, GPIO_PIN(pin));
+ if ((reg & GPIO_PIN_DATA) == 0)
+ reg |= GPIO_PIN_DATA;
+ else
+ reg &= ~GPIO_PIN_DATA;
+ HWRITE4(sc, GPIO_PIN(pin), reg);
+ APPLE_PINCTRL_UNLOCK(sc);
+ return (0);
+}
+
+static int
+apple_pinctrl_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins,
+ uint32_t *pin_flags)
+{
+ struct apple_pinctrl_softc *sc;
+ uint32_t pin;
+
+ sc = device_get_softc(dev);
+ if (first_pin >= sc->sc_ngpios)
+ return (EINVAL);
+
+	/*
+	 * The configuration for a bank of pins is scattered among several
+	 * registers; we cannot guarantee to change the state of all the pins
+	 * in the flags array simultaneously. So just loop through the array
+	 * configuring each pin for now. If there were a strong need, it might
+	 * be possible to support some limited simultaneous config, such as
+	 * adjacent groups of 8 pins that line up the same as the config regs.
+	 */
+ APPLE_PINCTRL_LOCK(sc);
+	for (pin = 0; pin < num_pins; ++pin) {
+		if (pin_flags[pin] & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT))
+			apple_pinctrl_pin_configure(sc, first_pin + pin,
+			    pin_flags[pin]);
+	}
+ APPLE_PINCTRL_UNLOCK(sc);
+
+ return (0);
+}
+
+static phandle_t
+apple_pinctrl_get_node(device_t dev, device_t bus)
+{
+
+ /* GPIO bus */
+ return (ofw_bus_get_node(dev));
+}
+
+static int
+apple_pinctrl_configure(device_t dev, phandle_t cfgxref)
+{
+ struct apple_pinctrl_softc *sc;
+ pcell_t *pinmux;
+ phandle_t node;
+ ssize_t len;
+ uint32_t reg;
+ uint16_t pin, func;
+ int i;
+
+ sc = device_get_softc(dev);
+ node = OF_node_from_xref(cfgxref);
+
+ len = OF_getencprop_alloc(node, "pinmux", (void **)&pinmux);
+ if (len <= 0)
+ return (-1);
+
+ APPLE_PINCTRL_LOCK(sc);
+ for (i = 0; i < len / sizeof(pcell_t); i++) {
+ pin = APPLE_PIN(pinmux[i]);
+ func = APPLE_FUNC(pinmux[i]);
+ reg = HREAD4(sc, GPIO_PIN(pin));
+ reg &= ~GPIO_PIN_FUNC_MASK;
+ reg |= (func << GPIO_PIN_FUNC_SHIFT) & GPIO_PIN_FUNC_MASK;
+ HWRITE4(sc, GPIO_PIN(pin), reg);
+ }
+ APPLE_PINCTRL_UNLOCK(sc);
+
+ OF_prop_free(pinmux);
+	return (0);
+}
+
+static device_method_t apple_pinctrl_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, apple_pinctrl_probe),
+ DEVMETHOD(device_attach, apple_pinctrl_attach),
+ DEVMETHOD(device_detach, apple_pinctrl_detach),
+
+ /* GPIO protocol */
+ DEVMETHOD(gpio_get_bus, apple_pinctrl_get_bus),
+ DEVMETHOD(gpio_pin_max, apple_pinctrl_pin_max),
+ DEVMETHOD(gpio_pin_getname, apple_pinctrl_pin_getname),
+ DEVMETHOD(gpio_pin_getflags, apple_pinctrl_pin_getflags),
+ DEVMETHOD(gpio_pin_getcaps, apple_pinctrl_pin_getcaps),
+ DEVMETHOD(gpio_pin_setflags, apple_pinctrl_pin_setflags),
+ DEVMETHOD(gpio_pin_get, apple_pinctrl_pin_get),
+ DEVMETHOD(gpio_pin_set, apple_pinctrl_pin_set),
+ DEVMETHOD(gpio_pin_toggle, apple_pinctrl_pin_toggle),
+ DEVMETHOD(gpio_pin_config_32, apple_pinctrl_pin_config_32),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_node, apple_pinctrl_get_node),
+
+ /* fdt_pinctrl interface */
+ DEVMETHOD(fdt_pinctrl_configure, apple_pinctrl_configure),
+
+ DEVMETHOD_END
+};
+
+static driver_t apple_pinctrl_driver = {
+ "gpio",
+ apple_pinctrl_methods,
+ sizeof(struct apple_pinctrl_softc),
+};
+
+EARLY_DRIVER_MODULE(apple_pinctrl, simplebus, apple_pinctrl_driver,
+ 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
diff --git a/sys/arm64/apple/apple_wdog.c b/sys/arm64/apple/apple_wdog.c
new file mode 100644
index 000000000000..aaa899298571
--- /dev/null
+++ b/sys/arm64/apple/apple_wdog.c
@@ -0,0 +1,234 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Michael J. Karels <karels@freebsd.org>
+ * Copyright (c) 2012 Alexander Rybalko <ray@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/reboot.h>
+#include <sys/rman.h>
+#include <sys/systm.h>
+#include <sys/watchdog.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/clk/clk.h>
+
+#include <machine/bus.h>
+#include <machine/machdep.h>
+
+#define APPLE_WDOG_WD0_TIMER 0x0000
+#define APPLE_WDOG_WD0_RESET 0x0004
+#define APPLE_WDOG_WD0_INTR 0x0008
+#define APPLE_WDOG_WD0_CNTL 0x000c
+
+#define APPLE_WDOG_WD1_TIMER 0x0010
+#define APPLE_WDOG_WD1_RESET 0x0014
+#define APPLE_WDOG_WD1_CNTL 0x001c
+
+#define APPLE_WDOG_WD2_TIMER 0x0020
+#define APPLE_WDOG_WD2_RESET 0x0024
+#define APPLE_WDOG_WD2_CNTL 0x002c
+
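+/*
+ * The block exposes several watchdog timers; this driver quiesces WD0 and
+ * drives WD1 for both watchdog(9) and system reset.
+ */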
+#define APPLE_WDOG_CNTL_INTENABLE 0x0001
+#define APPLE_WDOG_CNTL_INTSTAT 0x0002
+#define APPLE_WDOG_CNTL_RSTENABLE 0x0004
+
+#define READ(_sc, _r) bus_space_read_4((_sc)->bst, (_sc)->bsh, (_r))
+#define WRITE(_sc, _r, _v) bus_space_write_4((_sc)->bst, (_sc)->bsh, (_r), (_v))
+
+struct apple_wdog_softc {
+ device_t dev;
+ struct resource * res;
+ bus_space_tag_t bst;
+ bus_space_handle_t bsh;
+ clk_t clk;
+ uint64_t clk_freq;
+ struct mtx mtx;
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"apple,wdt", 1},
+ {NULL, 0}
+};
+
+static void apple_wdog_watchdog_fn(void *private, u_int cmd, int *error);
+static void apple_wdog_reboot_system(void *, int);
+
+static int
+apple_wdog_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Apple Watchdog");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+apple_wdog_attach(device_t dev)
+{
+ struct apple_wdog_softc *sc;
+ int error, rid;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ rid = 0;
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev, "could not allocate memory resource\n");
+ return (ENXIO);
+ }
+
+ sc->bst = rman_get_bustag(sc->res);
+ sc->bsh = rman_get_bushandle(sc->res);
+
+ error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk);
+ if (error != 0) {
+ device_printf(dev, "cannot get clock\n");
+ goto fail;
+ }
+ error = clk_enable(sc->clk);
+ if (error != 0) {
+ device_printf(dev, "cannot enable clock\n");
+ goto fail;
+ }
+ error = clk_get_freq(sc->clk, &sc->clk_freq);
+ if (error != 0) {
+ device_printf(dev, "cannot get base frequency\n");
+ goto fail_clk;
+ }
+
+ mtx_init(&sc->mtx, "Apple Watchdog", "apple_wdog", MTX_DEF);
+ EVENTHANDLER_REGISTER(watchdog_list, apple_wdog_watchdog_fn, sc, 0);
+ EVENTHANDLER_REGISTER(shutdown_final, apple_wdog_reboot_system, sc,
+ SHUTDOWN_PRI_LAST);
+
+ /* Reset the watchdog timers. */
+ WRITE(sc, APPLE_WDOG_WD0_CNTL, 0);
+ WRITE(sc, APPLE_WDOG_WD1_CNTL, 0);
+
+ return (0);
+
+fail_clk:
+ clk_disable(sc->clk);
+fail:
+ bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
+ return (error);
+}
+
+static void
+apple_wdog_watchdog_fn(void *private, u_int cmd, int *error)
+{
+ struct apple_wdog_softc *sc;
+ uint64_t sec;
+ uint32_t ticks, sec_max;
+
+ sc = private;
+ mtx_lock(&sc->mtx);
+
+ cmd &= WD_INTERVAL;
+
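+	/* watchdog(9) encodes the timeout as log2 of the interval in ns. */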
+ if (cmd > 0) {
+ sec = ((uint64_t)1 << (cmd & WD_INTERVAL)) / 1000000000;
+ sec_max = UINT_MAX / sc->clk_freq;
+ if (sec == 0 || sec > sec_max) {
+			/*
+			 * We can't arm the timer with this interval, so
+			 * disable the watchdog as watchdog(9) requires.
+			 */
+			device_printf(sc->dev,
+			    "Can't arm, timeout must be between 1 and %u seconds\n",
+			    sec_max);
+ WRITE(sc, APPLE_WDOG_WD1_CNTL, 0);
+ mtx_unlock(&sc->mtx);
+ *error = EINVAL;
+ return;
+ }
+
+ ticks = sec * sc->clk_freq;
+ WRITE(sc, APPLE_WDOG_WD1_TIMER, 0);
+ WRITE(sc, APPLE_WDOG_WD1_RESET, ticks);
+ WRITE(sc, APPLE_WDOG_WD1_CNTL, APPLE_WDOG_CNTL_RSTENABLE);
+
+ *error = 0;
+ } else
+ WRITE(sc, APPLE_WDOG_WD1_CNTL, 0);
+
+ mtx_unlock(&sc->mtx);
+}
+
+static void
+apple_wdog_reboot_system(void *private, int howto)
+{
+ struct apple_wdog_softc *sc = private;
+
+ /* Only handle reset. */
+ if ((howto & (RB_HALT | RB_POWEROFF)) != 0)
+ return;
+
+ printf("Resetting system ... ");
+
+ WRITE(sc, APPLE_WDOG_WD1_CNTL, APPLE_WDOG_CNTL_RSTENABLE);
+ WRITE(sc, APPLE_WDOG_WD1_RESET, 1);
+ WRITE(sc, APPLE_WDOG_WD1_TIMER, 0);
+
+ /* Wait for watchdog timeout; should take milliseconds. */
+ DELAY(2000000);
+
+ /* Not reached ... one hopes. */
+ printf("failed to reset.\n");
+}
+
+static device_method_t apple_wdog_methods[] = {
+ DEVMETHOD(device_probe, apple_wdog_probe),
+ DEVMETHOD(device_attach, apple_wdog_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t apple_wdog_driver = {
+ "apple_wdog",
+ apple_wdog_methods,
+ sizeof(struct apple_wdog_softc),
+};
+
+DRIVER_MODULE(apple_wdog, simplebus, apple_wdog_driver, 0, 0);
diff --git a/sys/arm64/apple/exynos_uart.c b/sys/arm64/apple/exynos_uart.c
new file mode 100644
index 000000000000..2767c338b918
--- /dev/null
+++ b/sys/arm64/apple/exynos_uart.c
@@ -0,0 +1,568 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2003 Marcel Moolenaar
+ * Copyright (c) 2007-2009 Andrew Turner
+ * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/cons.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <dev/uart/uart.h>
+#include <dev/uart/uart_cpu.h>
+#include <dev/uart/uart_cpu_fdt.h>
+#include <dev/uart/uart_bus.h>
+
+#include <arm64/apple/exynos_uart.h>
+
+#include "uart_if.h"
+
+struct exynos_uart_cfg;
+
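+/* Fallback UART reference clock (100 MHz) when the real rclk is unknown. */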
+#define DEF_CLK 100000000
+
+static int sscomspeed(long, long);
+static int exynos4210_uart_param(struct uart_bas *, int, int, int, int);
+
+/*
+ * Low-level UART interface.
+ */
+static int exynos4210_probe(struct uart_bas *bas);
+static void exynos4210_init_common(struct exynos_uart_cfg *cfg,
+ struct uart_bas *bas, int, int, int, int);
+static void exynos4210_init(struct uart_bas *bas, int, int, int, int);
+static void exynos4210_s5l_init(struct uart_bas *bas, int, int, int, int);
+static void exynos4210_term(struct uart_bas *bas);
+static void exynos4210_putc(struct uart_bas *bas, int);
+static int exynos4210_rxready(struct uart_bas *bas);
+static int exynos4210_getc(struct uart_bas *bas, struct mtx *mtx);
+
+extern SLIST_HEAD(uart_devinfo_list, uart_devinfo) uart_sysdevs;
+
+static struct uart_ops uart_exynos4210_ops;
+static struct uart_ops uart_s5l_ops;
+static kobj_method_t exynos4210_methods[];
+static kobj_method_t s5l_methods[];
+static struct ofw_compat_data compat_data[];
+
+enum exynos_uart_type {
+ EXUART_4210,
+ EXUART_S5L,
+};
+
+struct exynos_uart_cfg {
+ enum exynos_uart_type cfg_type;
+ uint64_t cfg_uart_full_mask;
+};
+
+struct exynos_uart_class {
+ struct uart_class base;
+ struct exynos_uart_cfg cfg;
+};
+
+static struct exynos_uart_class uart_ex4210_class = {
+ .base = {
+ "exynos4210 class",
+ exynos4210_methods,
+ 1,
+ .uc_ops = &uart_exynos4210_ops,
+ .uc_range = 8,
+ .uc_rclk = 0,
+ .uc_rshift = 0
+ },
+ .cfg = {
+ .cfg_type = EXUART_4210,
+ .cfg_uart_full_mask = UFSTAT_TXFULL,
+ },
+};
+
+static struct exynos_uart_class uart_s5l_class = {
+ .base = {
+ "s5l class",
+ s5l_methods,
+ 1,
+ .uc_ops = &uart_s5l_ops,
+ .uc_range = 8,
+ .uc_rclk = 0,
+ .uc_rshift = 0
+ },
+ .cfg = {
+ .cfg_type = EXUART_S5L,
+ .cfg_uart_full_mask = UFSTAT_S5L_TXFULL,
+ },
+};
+
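+/* Baud-rate divisor for the SSCOM UART: UBRDIV = rclk / (16 * baud) - 1. */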
+static int
+sscomspeed(long speed, long frequency)
+{
+ int x;
+
+ if (speed <= 0 || frequency <= 0)
+ return (-1);
+ x = (frequency / 16) / speed;
+ return (x-1);
+}
+
+static int
+exynos4210_uart_param(struct uart_bas *bas, int baudrate, int databits,
+ int stopbits, int parity)
+{
+ int brd, ulcon;
+
+ ulcon = 0;
+
+ switch(databits) {
+ case 5:
+ ulcon |= ULCON_LENGTH_5;
+ break;
+ case 6:
+ ulcon |= ULCON_LENGTH_6;
+ break;
+ case 7:
+ ulcon |= ULCON_LENGTH_7;
+ break;
+ case 8:
+ ulcon |= ULCON_LENGTH_8;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ switch (parity) {
+ case UART_PARITY_NONE:
+ ulcon |= ULCON_PARITY_NONE;
+ break;
+ case UART_PARITY_ODD:
+ ulcon |= ULCON_PARITY_ODD;
+ break;
+ case UART_PARITY_EVEN:
+ ulcon |= ULCON_PARITY_EVEN;
+ break;
+ case UART_PARITY_MARK:
+ case UART_PARITY_SPACE:
+ default:
+ return (EINVAL);
+ }
+
+ if (stopbits == 2)
+ ulcon |= ULCON_STOP;
+
+ uart_setreg(bas, SSCOM_ULCON, ulcon);
+
+ /* baudrate may be negative, in which case we just leave it alone. */
+ if (baudrate > 0) {
+ brd = sscomspeed(baudrate, bas->rclk);
+ uart_setreg(bas, SSCOM_UBRDIV, brd);
+ }
+
+ return (0);
+}
+
+static struct uart_ops uart_exynos4210_ops = {
+ .probe = exynos4210_probe,
+ .init = exynos4210_init,
+ .term = exynos4210_term,
+ .putc = exynos4210_putc,
+ .rxready = exynos4210_rxready,
+ .getc = exynos4210_getc,
+};
+
+static struct uart_ops uart_s5l_ops = {
+ .probe = exynos4210_probe,
+ .init = exynos4210_s5l_init,
+ .term = exynos4210_term,
+ .putc = exynos4210_putc,
+ .rxready = exynos4210_rxready,
+ .getc = exynos4210_getc,
+};
+
+static int
+exynos4210_probe(struct uart_bas *bas)
+{
+
+ return (0);
+}
+
+static void
+exynos4210_init_common(struct exynos_uart_cfg *cfg, struct uart_bas *bas,
+ int baudrate, int databits, int stopbits, int parity)
+{
+
+ if (bas->rclk == 0)
+ bas->rclk = DEF_CLK;
+
+ KASSERT(bas->rclk != 0, ("exynos4210_init: Invalid rclk"));
+
+ bas->driver1 = cfg;
+
+ /* Clear interrupts */
+ if (cfg->cfg_type == EXUART_S5L) {
+ uart_setreg(bas, SSCOM_UTRSTAT, 0);
+ } else {
+ uart_setreg(bas, SSCOM_UCON, 0);
+ uart_setreg(bas, SSCOM_UFCON,
+ UFCON_TXTRIGGER_8 | UFCON_RXTRIGGER_8 |
+ UFCON_TXFIFO_RESET | UFCON_RXFIFO_RESET |
+ UFCON_FIFO_ENABLE);
+ }
+
+ exynos4210_uart_param(bas, baudrate, databits, stopbits, parity);
+
+ /* Enable UART. */
+ if (cfg->cfg_type == EXUART_S5L) {
+ uart_setreg(bas, SSCOM_UCON, uart_getreg(bas, SSCOM_UCON) |
+ UCON_TOINT | UCON_S5L_RXTHRESH | UCON_S5L_RX_TIMEOUT |
+ UCON_S5L_TXTHRESH);
+ } else {
+ uart_setreg(bas, SSCOM_UCON, uart_getreg(bas, SSCOM_UCON) |
+ UCON_TXMODE_INT | UCON_RXMODE_INT | UCON_TOINT);
+ uart_setreg(bas, SSCOM_UMCON, UMCON_RTS);
+ }
+}
+
+static void
+exynos4210_init(struct uart_bas *bas, int baudrate, int databits, int stopbits,
+ int parity)
+{
+
+ return (exynos4210_init_common(&uart_ex4210_class.cfg, bas, baudrate,
+ databits, stopbits, parity));
+}
+
+static void
+exynos4210_s5l_init(struct uart_bas *bas, int baudrate, int databits, int stopbits,
+ int parity)
+{
+
+ return (exynos4210_init_common(&uart_s5l_class.cfg, bas, baudrate,
+ databits, stopbits, parity));
+}
+
+static void
+exynos4210_term(struct uart_bas *bas)
+{
+ /* XXX */
+}
+
+static void
+exynos4210_putc(struct uart_bas *bas, int c)
+{
+ struct exynos_uart_cfg *cfg;
+
+ cfg = bas->driver1;
+
+ while ((bus_space_read_4(bas->bst, bas->bsh, SSCOM_UFSTAT) &
+ cfg->cfg_uart_full_mask) != 0)
+ continue;
+
+ uart_setreg(bas, SSCOM_UTXH, c);
+ uart_barrier(bas);
+}
+
+static int
+exynos4210_rxready_impl(struct uart_bas *bas, bool intr)
+{
+ struct exynos_uart_cfg *cfg;
+ int ufstat, utrstat;
+
+ cfg = bas->driver1;
+ if (!intr || cfg->cfg_type != EXUART_S5L) {
+ utrstat = bus_space_read_4(bas->bst, bas->bsh, SSCOM_UTRSTAT);
+
+ if ((utrstat & UTRSTAT_RXREADY) != 0)
+ return (1);
+ if (cfg->cfg_type != EXUART_S5L)
+ return (0);
+ }
+
+ ufstat = bus_space_read_4(bas->bst, bas->bsh, SSCOM_UFSTAT);
+
+ return ((ufstat & (UFSTAT_RXCOUNT | UFSTAT_RXFULL)) != 0);
+}
+
+static int
+exynos4210_rxready(struct uart_bas *bas)
+{
+
+ return (exynos4210_rxready_impl(bas, false));
+}
+
+static int
+exynos4210_getc(struct uart_bas *bas, struct mtx *mtx)
+{
+
+ while (!exynos4210_rxready(bas)) {
+ continue;
+ }
+
+ return (uart_getreg(bas, SSCOM_URXH));
+}
+
+static int exynos4210_bus_probe(struct uart_softc *sc);
+static int exynos4210_bus_attach(struct uart_softc *sc);
+static int exynos4210_bus_flush(struct uart_softc *, int);
+static int exynos4210_bus_getsig(struct uart_softc *);
+static int exynos4210_bus_ioctl(struct uart_softc *, int, intptr_t);
+static int exynos4210_bus_ipend(struct uart_softc *);
+static int s5l_bus_ipend(struct uart_softc *);
+static int exynos4210_bus_param(struct uart_softc *, int, int, int, int);
+static int exynos4210_bus_receive(struct uart_softc *);
+static int exynos4210_bus_setsig(struct uart_softc *, int);
+static int exynos4210_bus_transmit(struct uart_softc *);
+
+static kobj_method_t exynos4210_methods[] = {
+ KOBJMETHOD(uart_probe, exynos4210_bus_probe),
+ KOBJMETHOD(uart_attach, exynos4210_bus_attach),
+ KOBJMETHOD(uart_flush, exynos4210_bus_flush),
+ KOBJMETHOD(uart_getsig, exynos4210_bus_getsig),
+ KOBJMETHOD(uart_ioctl, exynos4210_bus_ioctl),
+ KOBJMETHOD(uart_ipend, exynos4210_bus_ipend),
+ KOBJMETHOD(uart_param, exynos4210_bus_param),
+ KOBJMETHOD(uart_receive, exynos4210_bus_receive),
+ KOBJMETHOD(uart_setsig, exynos4210_bus_setsig),
+ KOBJMETHOD(uart_transmit, exynos4210_bus_transmit),
+ {0, 0 }
+};
+
+static kobj_method_t s5l_methods[] = {
+ KOBJMETHOD(uart_probe, exynos4210_bus_probe),
+ KOBJMETHOD(uart_attach, exynos4210_bus_attach),
+ KOBJMETHOD(uart_flush, exynos4210_bus_flush),
+ KOBJMETHOD(uart_getsig, exynos4210_bus_getsig),
+ KOBJMETHOD(uart_ioctl, exynos4210_bus_ioctl),
+ KOBJMETHOD(uart_ipend, s5l_bus_ipend),
+ KOBJMETHOD(uart_param, exynos4210_bus_param),
+ KOBJMETHOD(uart_receive, exynos4210_bus_receive),
+ KOBJMETHOD(uart_setsig, exynos4210_bus_setsig),
+ KOBJMETHOD(uart_transmit, exynos4210_bus_transmit),
+ {0, 0 }
+};
+
+static int
+exynos4210_bus_probe(struct uart_softc *sc)
+{
+
+ sc->sc_txfifosz = 16;
+ sc->sc_rxfifosz = 16;
+
+ return (0);
+}
+
+static int
+exynos4210_bus_attach(struct uart_softc *sc)
+{
+ struct exynos_uart_class *class;
+ struct exynos_uart_cfg *cfg;
+
+ sc->sc_hwiflow = 0;
+ sc->sc_hwoflow = 0;
+
+ class = (struct exynos_uart_class *)ofw_bus_search_compatible(sc->sc_dev,
+ compat_data)->ocd_data;
+ MPASS(class != NULL);
+
+ cfg = &class->cfg;
+ MPASS(sc->sc_sysdev == NULL || cfg == sc->sc_sysdev->bas.driver1);
+ sc->sc_bas.driver1 = cfg;
+
+ return (0);
+}
+
+static int
+exynos4210_bus_transmit(struct uart_softc *sc)
+{
+ struct exynos_uart_cfg *cfg;
+ int i;
+ int reg;
+
+ cfg = sc->sc_bas.driver1;
+ uart_lock(sc->sc_hwmtx);
+
+ /* tx fifo has room, fire away. */
+ for (i = 0; i < sc->sc_txdatasz; i++) {
+ uart_setreg(&sc->sc_bas, SSCOM_UTXH, sc->sc_txbuf[i]);
+ uart_barrier(&sc->sc_bas);
+ }
+
+ if (cfg->cfg_type == EXUART_S5L) {
+ sc->sc_txbusy = 1;
+ } else {
+ /* unmask TX interrupt */
+ reg = bus_space_read_4(sc->sc_bas.bst, sc->sc_bas.bsh,
+ SSCOM_UINTM);
+		reg &= ~UINTM_TXINTR;
+ bus_space_write_4(sc->sc_bas.bst, sc->sc_bas.bsh, SSCOM_UINTM,
+ reg);
+ }
+
+ uart_unlock(sc->sc_hwmtx);
+
+ return (0);
+}
+
+static int
+exynos4210_bus_setsig(struct uart_softc *sc, int sig)
+{
+
+ return (0);
+}
+
+static int
+exynos4210_bus_receive(struct uart_softc *sc)
+{
+ struct uart_bas *bas;
+
+ bas = &sc->sc_bas;
+ uart_lock(sc->sc_hwmtx);
+
+ while (exynos4210_rxready_impl(bas, true)) {
+ if (uart_rx_full(sc)) {
+ sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN;
+ break;
+ }
+
+		uart_rx_put(sc, uart_getreg(bas, SSCOM_URXH));
+ }
+
+ uart_unlock(sc->sc_hwmtx);
+
+ return (0);
+}
+
+static int
+exynos4210_bus_param(struct uart_softc *sc, int baudrate, int databits,
+ int stopbits, int parity)
+{
+ int error;
+
+ if (sc->sc_bas.rclk == 0)
+ sc->sc_bas.rclk = DEF_CLK;
+
+	KASSERT(sc->sc_bas.rclk != 0, ("exynos4210_bus_param: Invalid rclk"));
+
+ uart_lock(sc->sc_hwmtx);
+ error = exynos4210_uart_param(&sc->sc_bas, baudrate, databits, stopbits,
+ parity);
+ uart_unlock(sc->sc_hwmtx);
+
+ return (error);
+}
+
+static int
+s5l_bus_ipend(struct uart_softc *sc)
+{
+ int ipend;
+ uint32_t uerstat, utrstat;
+
+ ipend = 0;
+ uart_lock(sc->sc_hwmtx);
+ utrstat = bus_space_read_4(sc->sc_bas.bst, sc->sc_bas.bsh,
+ SSCOM_UTRSTAT);
+
+ if (utrstat & (UTRSTAT_S5L_RXTHRESH | UTRSTAT_S5L_RX_TIMEOUT))
+ ipend |= SER_INT_RXREADY;
+
+ if (utrstat & UTRSTAT_S5L_TXTHRESH)
+ ipend |= SER_INT_TXIDLE;
+
+ uerstat = bus_space_read_4(sc->sc_bas.bst, sc->sc_bas.bsh,
+ SSCOM_UERSTAT);
+ if ((uerstat & UERSTAT_BREAK) != 0)
+ ipend |= SER_INT_BREAK;
+
+ bus_space_write_4(sc->sc_bas.bst, sc->sc_bas.bsh, SSCOM_UTRSTAT,
+ utrstat);
+ uart_unlock(sc->sc_hwmtx);
+
+ return (ipend);
+}
+
+static int
+exynos4210_bus_ipend(struct uart_softc *sc)
+{
+ uint32_t ints;
+ int reg;
+ int ipend;
+
+ uart_lock(sc->sc_hwmtx);
+ ints = bus_space_read_4(sc->sc_bas.bst, sc->sc_bas.bsh, SSCOM_UINTP);
+ bus_space_write_4(sc->sc_bas.bst, sc->sc_bas.bsh, SSCOM_UINTP, ints);
+
+ ipend = 0;
+ if ((ints & UINTP_TXEMPTY) != 0) {
+ if (sc->sc_txbusy != 0)
+ ipend |= SER_INT_TXIDLE;
+
+ /* mask TX interrupt */
+ reg = bus_space_read_4(sc->sc_bas.bst, sc->sc_bas.bsh,
+ SSCOM_UINTM);
+ reg |= UINTM_TXINTR;
+ bus_space_write_4(sc->sc_bas.bst, sc->sc_bas.bsh,
+ SSCOM_UINTM, reg);
+ }
+
+ if ((ints & UINTP_RXREADY) != 0) {
+ ipend |= SER_INT_RXREADY;
+ }
+
+ uart_unlock(sc->sc_hwmtx);
+ return (ipend);
+}
+
+static int
+exynos4210_bus_flush(struct uart_softc *sc, int what)
+{
+
+ return (0);
+}
+
+static int
+exynos4210_bus_getsig(struct uart_softc *sc)
+{
+
+ return (0);
+}
+
+static int
+exynos4210_bus_ioctl(struct uart_softc *sc, int request, intptr_t data)
+{
+
+ return (EINVAL);
+}
+
+static struct ofw_compat_data compat_data[] = {
+ {"apple,s5l-uart", (uintptr_t)&uart_s5l_class.base},
+ {"samsung,exynos4210-uart", (uintptr_t)&uart_ex4210_class.base},
+ {NULL, (uintptr_t)NULL},
+};
+UART_FDT_CLASS_AND_DEVICE(compat_data);
diff --git a/sys/arm64/apple/exynos_uart.h b/sys/arm64/apple/exynos_uart.h
new file mode 100644
index 000000000000..6c817252a69a
--- /dev/null
+++ b/sys/arm64/apple/exynos_uart.h
@@ -0,0 +1,136 @@
+/* $NetBSD: s3c2xx0reg.h,v 1.4 2004/02/12 03:47:29 bsh Exp $ */
+
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2002, 2003 Fujitsu Component Limited
+ * Copyright (c) 2002, 2003 Genetec Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of The Fujitsu Component Limited nor the name of
+ * Genetec corporation may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY FUJITSU COMPONENT LIMITED AND GENETEC
+ * CORPORATION ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL FUJITSU COMPONENT LIMITED OR GENETEC
+ * CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* s3c2410-specific registers */
+#define UMCON_AFC (1 << 4) /* auto flow control */
+#define UMSTAT_DCTS (1 << 2) /* CTS change */
+#define ULCON_IR (1 << 6)
+#define ULCON_PARITY_SHIFT 3
+
+/*
+ * Exynos-specific
+ *
+ * The position of the UFSTAT_TXFULL bit differs between Exynos and
+ * other parts: Exynos uses bit 24, while others (e.g. the Apple S5L)
+ * use bit 9.
+ */
+#define UFSTAT_TXFULL (1 << 24)
+#define UFSTAT_S5L_TXFULL (1 << 9)
+
+#define SSCOM_UINTM 0x038
+#define UINTM_TXINTR (1 << 2)
+#define SSCOM_UINTP 0x030
+#define UINTP_RXREADY (1 << 0)
+#define UINTP_TXEMPTY (1 << 2)
+
+/* common for s3c2800 and s3c24x0 */
+#define SSCOM_ULCON 0x00 /* UART line control */
+#define ULCON_PARITY_NONE (0 << ULCON_PARITY_SHIFT)
+#define ULCON_PARITY_ODD (4 << ULCON_PARITY_SHIFT)
+#define ULCON_PARITY_EVEN (5 << ULCON_PARITY_SHIFT)
+#define ULCON_PARITY_ONE (6 << ULCON_PARITY_SHIFT)
+#define ULCON_PARITY_ZERO (7 << ULCON_PARITY_SHIFT)
+#define ULCON_STOP (1 << 2)
+#define ULCON_LENGTH_5 0
+#define ULCON_LENGTH_6 1
+#define ULCON_LENGTH_7 2
+#define ULCON_LENGTH_8 3
+#define SSCOM_UCON 0x04 /* UART control */
+#define UCON_TXINT_TYPE (1 << 9) /* Tx interrupt. 0=pulse,1=level */
+#define UCON_TXINT_TYPE_LEVEL UCON_TXINT_TYPE
+#define UCON_TXINT_TYPE_PULSE 0
+#define UCON_RXINT_TYPE (1 << 8) /* Rx interrupt */
+#define UCON_RXINT_TYPE_LEVEL UCON_RXINT_TYPE
+#define UCON_RXINT_TYPE_PULSE 0
+#define UCON_TOINT (1 << 7) /* Rx timeout interrupt */
+#define UCON_ERRINT (1 << 6) /* receive error interrupt */
+#define UCON_LOOP (1 << 5) /* loopback */
+#define UCON_SBREAK (1 << 4) /* send break */
+#define UCON_TXMODE_DISABLE (0 << 2)
+#define UCON_TXMODE_INT (1 << 2)
+#define UCON_TXMODE_DMA (2 << 2)
+#define UCON_TXMODE_MASK (3 << 2)
+#define UCON_RXMODE_DISABLE (0 << 0)
+#define UCON_RXMODE_INT (1 << 0)
+#define UCON_RXMODE_DMA (2 << 0)
+#define UCON_RXMODE_MASK (3 << 0)
+#define UCON_S5L_RX_TIMEOUT (0x1 << 9)
+#define UCON_S5L_RXTHRESH (0x1 << 12)
+#define UCON_S5L_TXTHRESH (0x1 << 13)
+#define SSCOM_UFCON 0x08 /* FIFO control */
+#define UFCON_TXTRIGGER_0 (0 << 6)
+#define UFCON_TXTRIGGER_4 (1 << 6)
+#define UFCON_TXTRIGGER_8 (2 << 6)
+#define UFCON_TXTRIGGER_16 (3 << 6)
+#define UFCON_RXTRIGGER_4 (0 << 4)
+#define UFCON_RXTRIGGER_8 (1 << 4)
+#define UFCON_RXTRIGGER_12 (2 << 4)
+#define UFCON_RXTRIGGER_16 (3 << 4)
+#define UFCON_TXFIFO_RESET (1 << 2)
+#define UFCON_RXFIFO_RESET (1 << 1)
+#define UFCON_FIFO_ENABLE (1 << 0)
+#define SSCOM_UMCON 0x0c /* MODEM control */
+#define UMCON_RTS (1 << 0) /* Request to send */
+#define SSCOM_UTRSTAT 0x10 /* Status register */
+#define UTRSTAT_TXSHIFTER_EMPTY (1 << 2)
+#define UTRSTAT_TXEMPTY (1 << 1) /* TX fifo or buffer empty */
+#define UTRSTAT_RXREADY (1 << 0) /* RX fifo or buffer is not empty */
+#define UTRSTAT_S5L_RXTHRESH (0x1 << 4)
+#define UTRSTAT_S5L_TXTHRESH (0x1 << 5)
+#define UTRSTAT_S5L_RX_TIMEOUT (0x1 << 9)
+#define SSCOM_UERSTAT 0x14 /* Error status register */
+#define UERSTAT_BREAK (1 << 3) /* Break signal, not 2410 */
+#define UERSTAT_FRAME (1 << 2) /* Frame error */
+#define UERSTAT_PARITY (1 << 1) /* Parity error, not 2410 */
+#define UERSTAT_OVERRUN (1 << 0) /* Overrun */
+#define UERSTAT_ALL_ERRORS \
+ (UERSTAT_OVERRUN|UERSTAT_BREAK|UERSTAT_FRAME|UERSTAT_PARITY)
+#define SSCOM_UFSTAT 0x18 /* Fifo status register */
+#define UFSTAT_RXFULL (1 << 8) /* Rx fifo full */
+#define UFSTAT_TXCOUNT_SHIFT 4 /* TX FIFO count */
+#define UFSTAT_TXCOUNT (0x0f << UFSTAT_TXCOUNT_SHIFT)
+#define UFSTAT_RXCOUNT_SHIFT 0 /* RX FIFO count */
+#define UFSTAT_RXCOUNT (0x0f << UFSTAT_RXCOUNT_SHIFT)
+#define SSCOM_UMSTAT 0x1c /* Modem status register */
+#define UMSTAT_CTS (1 << 0) /* Clear to send */
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define SSCOM_UTXH 0x20 /* Transmit data register */
+#define SSCOM_URXH 0x24 /* Receive data register */
+#else
+#define SSCOM_UTXH 0x23 /* Transmit data register */
+#define SSCOM_URXH 0x27 /* Receive data register */
+#endif
+#define SSCOM_UBRDIV 0x28 /* baud-rate divisor */
+#define SSCOM_SIZE 0x2c
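
The cfg_uart_full_mask test in exynos4210_putc() earlier in this diff relies on the two TX-full definitions in this header. A minimal sketch of how a per-variant config could pick the right bit at attach time, assuming the field names used by the driver code above:

	/*
	 * Sketch only: choose the TX-full bit for the detected variant.
	 * EXUART_S5L is the Apple S5L variant; everything else uses the
	 * Exynos bit layout.
	 */
	if (cfg->cfg_type == EXUART_S5L)
		cfg->cfg_uart_full_mask = UFSTAT_S5L_TXFULL;	/* bit 9 */
	else
		cfg->cfg_uart_full_mask = UFSTAT_TXFULL;	/* bit 24 */
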
diff --git a/sys/arm64/arm64/bus_space_asm.S b/sys/arm64/arm64/bus_space_asm.S
index bc9b41f96952..699a27bedab4 100644
--- a/sys/arm64/arm64/bus_space_asm.S
+++ b/sys/arm64/arm64/bus_space_asm.S
@@ -25,7 +25,9 @@
*
*/
+#include <sys/elf_common.h>
#include <machine/asm.h>
+
ENTRY(generic_bs_r_1)
ldrb w0, [x1, x2]
ret
@@ -475,3 +477,5 @@ generic_bs_poke_8f:
mov x0, #0
ret
END(generic_bs_poke_8)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index e62794da2753..abfd5c195857 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -63,7 +63,9 @@
enum {
BF_COULD_BOUNCE = 0x01,
BF_MIN_ALLOC_COMP = 0x02,
- BF_KMEM_ALLOC = 0x04,
+ BF_KMEM_ALLOC_PAGES = 0x04,
+ BF_KMEM_ALLOC_CONTIG = 0x08,
+ BF_KMEM_ALLOC = BF_KMEM_ALLOC_PAGES | BF_KMEM_ALLOC_CONTIG,
BF_COHERENT = 0x10,
};
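
Because BF_KMEM_ALLOC is now the union of the two allocator-specific bits, existing "was this buffer kmem-allocated at all?" tests keep working unchanged, while new code can single out the physically contiguous case (used below to let segments cross page boundaries). A sketch of both checks; the helper names are hypothetical:

	/* Matches either allocation path, as the pre-existing checks did. */
	static bool
	map_from_kmem(bus_dma_tag_t dmat)
	{
		return ((dmat->bounce_flags & BF_KMEM_ALLOC) != 0);
	}

	/*
	 * Only contiguous allocations are physically contiguous, so only
	 * they may form segments spanning a page boundary (see the
	 * load_buffer hunk later in this file).
	 */
	static bool
	map_from_kmem_contig(bus_dma_tag_t dmat)
	{
		return ((dmat->bounce_flags & BF_KMEM_ALLOC_CONTIG) != 0);
	}
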
@@ -121,12 +123,16 @@ static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->common.alignment)
+#define dmat_bounce_flags(dmat) ((dmat)->bounce_flags)
+#define dmat_boundary(dmat) ((dmat)->common.boundary)
#define dmat_domain(dmat) ((dmat)->common.domain)
#define dmat_flags(dmat) ((dmat)->common.flags)
#define dmat_highaddr(dmat) ((dmat)->common.highaddr)
#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg)
+#define dmat_maxsegsz(dmat) ((dmat)->common.maxsegsz)
+#define dmat_nsegments(dmat) ((dmat)->common.nsegments)
#include "../../kern/subr_busdma_bounce.c"
@@ -576,14 +582,14 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*vaddr = kmem_alloc_attr_domainset(
DOMAINSET_PREF(dmat->common.domain), dmat->alloc_size,
mflags, 0ul, dmat->common.lowaddr, attr);
- dmat->bounce_flags |= BF_KMEM_ALLOC;
+ dmat->bounce_flags |= BF_KMEM_ALLOC_PAGES;
} else {
*vaddr = kmem_alloc_contig_domainset(
DOMAINSET_PREF(dmat->common.domain), dmat->alloc_size,
mflags, 0ul, dmat->common.lowaddr,
dmat->alloc_alignment != 0 ? dmat->alloc_alignment : 1ul,
dmat->common.boundary, attr);
- dmat->bounce_flags |= BF_KMEM_ALLOC;
+ dmat->bounce_flags |= BF_KMEM_ALLOC_CONTIG;
}
if (*vaddr == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -601,7 +607,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
/*
 * Free a piece of memory and its associated dmamap, that was allocated
- * via bus_dmamem_alloc. Make the same choice for free/contigfree.
+ * via bus_dmamem_alloc.
*/
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
@@ -639,7 +645,7 @@ _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
count = 0;
curaddr = buf;
while (buflen != 0) {
- sgsize = MIN(buflen, dmat->common.maxsegsz);
+ sgsize = buflen;
if (must_bounce(dmat, map, curaddr, sgsize)) {
sgsize = MIN(sgsize,
PAGE_SIZE - (curaddr & PAGE_MASK));
@@ -692,15 +698,13 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
- sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
- sg_len = MIN(sg_len, dmat->common.maxsegsz);
+ sg_len = MIN(vendaddr - vaddr,
+ PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
if (pmap == kernel_pmap)
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(pmap, vaddr);
- if (must_bounce(dmat, map, paddr,
- min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
- PAGE_MASK)))) != 0) {
+ if (must_bounce(dmat, map, paddr, sg_len) != 0) {
sg_len = roundup2(sg_len,
dmat->common.alignment);
map->pagesneeded++;
@@ -712,47 +716,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
/*
- * Add a single contiguous physical range to the segment list.
- */
-static bus_size_t
-_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
- bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
-{
- int seg;
-
- /*
- * Make sure we don't cross any boundaries.
- */
- if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
- sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
-
- /*
- * Insert chunk into a segment, coalescing with
- * previous segment if possible.
- */
- seg = *segp;
- if (seg == -1) {
- seg = 0;
- segs[seg].ds_addr = curaddr;
- segs[seg].ds_len = sgsize;
- } else {
- if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
- (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- vm_addr_bound_ok(segs[seg].ds_addr,
- segs[seg].ds_len + sgsize, dmat->common.boundary))
- segs[seg].ds_len += sgsize;
- else {
- if (++seg >= dmat->common.nsegments)
- return (0);
- segs[seg].ds_addr = curaddr;
- segs[seg].ds_len = sgsize;
- }
- }
- *segp = seg;
- return (sgsize);
-}
-
-/*
* Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
@@ -783,7 +746,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
while (buflen > 0) {
curaddr = buf;
- sgsize = MIN(buflen, dmat->common.maxsegsz);
+ sgsize = buflen;
if (map->pagesneeded != 0 &&
must_bounce(dmat, map, curaddr, sgsize)) {
/*
@@ -817,9 +780,8 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
} else
sl->datacount += sgsize;
}
- sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
- segp);
- if (sgsize == 0)
+ if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
+ segp))
break;
buf += sgsize;
buflen -= sgsize;
@@ -895,8 +857,9 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
/*
* Compute the segment size, and adjust counts.
*/
- sgsize = MIN(buflen, dmat->common.maxsegsz);
- if ((map->flags & DMAMAP_FROM_DMAMEM) == 0)
+ sgsize = buflen;
+ if ((map->flags & DMAMAP_FROM_DMAMEM) == 0 ||
+ (dmat->bounce_flags & BF_KMEM_ALLOC_CONTIG) == 0)
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
if (map->pagesneeded != 0 &&
@@ -934,12 +897,11 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
} else
sl->datacount += sgsize;
}
- sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
- segp);
- if (sgsize == 0)
+ if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
+ segp))
break;
vaddr += sgsize;
- buflen -= sgsize;
+ buflen -= MIN(sgsize, buflen); /* avoid underflow */
}
/*
diff --git a/sys/arm64/arm64/cmn600.c b/sys/arm64/arm64/cmn600.c
index 4e3be8fee40e..530cdcdc3d06 100644
--- a/sys/arm64/arm64/cmn600.c
+++ b/sys/arm64/arm64/cmn600.c
@@ -332,9 +332,6 @@ cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
int i;
node = malloc(sizeof(struct cmn600_node), M_DEVBUF, M_WAITOK);
- if (node == NULL)
- return (NULL);
-
node->sc = sc;
node->nd_offset = node_offset;
node->nd_parent = parent;
@@ -399,8 +396,6 @@ cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
node->nd_children = (struct cmn600_node **)mallocarray(
node->nd_child_count, sizeof(struct cmn600_node *), M_DEVBUF,
M_WAITOK);
- if (node->nd_children == NULL)
- goto FAIL;
for (i = 0; i < node->nd_child_count; i++) {
val = node->nd_read8(node, child_offset + (i * 8));
node->nd_children[i] = cmn600_create_node(sc, val &
@@ -420,9 +415,6 @@ cmn600_create_node(struct cmn600_softc *sc, off_t node_offset,
break;
}
return (node);
-FAIL:
- free(node, M_DEVBUF);
- return (NULL);
}
static void
diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S
index 23f56ae85daa..26dd0b4cf14f 100644
--- a/sys/arm64/arm64/copyinout.S
+++ b/sys/arm64/arm64/copyinout.S
@@ -27,9 +27,10 @@
*
*/
-#include <machine/asm.h>
+#include <sys/elf_common.h>
#include <sys/errno.h>
+#include <machine/asm.h>
#include <machine/param.h>
#include <machine/vmparam.h>
@@ -220,3 +221,5 @@ ending:
mov x0, xzr /* return 0 */
ret
.size copycommon, . - copycommon
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/cpu_feat.c b/sys/arm64/arm64/cpu_feat.c
new file mode 100644
index 000000000000..cc262394913d
--- /dev/null
+++ b/sys/arm64/arm64/cpu_feat.c
@@ -0,0 +1,119 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+
+#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
+
+/* TODO: Make this a list if we ever grow a callback other than smccc_errata */
+static cpu_feat_errata_check_fn cpu_feat_check_cb = NULL;
+
+void
+enable_cpu_feat(uint32_t stage)
+{
+ struct cpu_feat **featp, *feat;
+ uint32_t midr;
+ u_int errata_count, *errata_list;
+ cpu_feat_errata errata_status;
+
+ MPASS((stage & ~CPU_FEAT_STAGE_MASK) == 0);
+
+ midr = get_midr();
+ SET_FOREACH(featp, cpu_feat_set) {
+ feat = *featp;
+
+ /* Run the enablement code at the correct stage of boot */
+ if ((feat->feat_flags & CPU_FEAT_STAGE_MASK) != stage)
+ continue;
+
+ /* If the feature is system wide run on a single CPU */
+		if ((feat->feat_flags & CPU_FEAT_SCOPE_MASK) == CPU_FEAT_SYSTEM &&
+ PCPU_GET(cpuid) != 0)
+ continue;
+
+ if (feat->feat_check != NULL && !feat->feat_check(feat, midr))
+ continue;
+
+ /*
+ * Check if the feature has any errata that may need a
+		 * workaround applied (or whether the feature exists to install
+		 * the workaround for known errata).
+ */
+ errata_status = ERRATA_NONE;
+ errata_list = NULL;
+ errata_count = 0;
+ if (feat->feat_has_errata != NULL) {
+ if (feat->feat_has_errata(feat, midr, &errata_list,
+ &errata_count)) {
+ /* Assume we are affected */
+ errata_status = ERRATA_AFFECTED;
+ }
+ }
+
+ if (errata_status == ERRATA_AFFECTED &&
+ cpu_feat_check_cb != NULL) {
+ for (int i = 0; i < errata_count; i++) {
+ cpu_feat_errata new_status;
+
+ /* Check if affected by this erratum */
+ new_status = cpu_feat_check_cb(feat,
+ errata_list[i]);
+ if (new_status != ERRATA_UNKNOWN) {
+ errata_status = new_status;
+ errata_list = &errata_list[i];
+ errata_count = 1;
+ break;
+ }
+ }
+ }
+
+ /* Shouldn't be possible */
+ MPASS(errata_status != ERRATA_UNKNOWN);
+
+ feat->feat_enable(feat, errata_status, errata_list,
+ errata_count);
+ }
+}
+
+static void
+enable_cpu_feat_after_dev(void *dummy __unused)
+{
+ MPASS(PCPU_GET(cpuid) == 0);
+ enable_cpu_feat(CPU_FEAT_AFTER_DEV);
+}
+SYSINIT(enable_cpu_feat_after_dev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
+ enable_cpu_feat_after_dev, NULL);
+
+void
+cpu_feat_register_errata_check(cpu_feat_errata_check_fn cb)
+{
+ MPASS(cpu_feat_check_cb == NULL);
+ cpu_feat_check_cb = cb;
+}
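
The only expected consumer of cpu_feat_register_errata_check() is the SMCCC errata code (per the TODO above), but the shape of a callback follows from the loop in enable_cpu_feat(): it is asked about one erratum number at a time and answers ERRATA_UNKNOWN for errata it cannot classify, leaving the caller's ERRATA_AFFECTED assumption in place. A minimal sketch under those assumptions; the erratum number and parameter qualifiers are hypothetical:

	static cpu_feat_errata
	example_errata_check(const struct cpu_feat *feat __unused,
	    u_int errata_id)
	{
		/* Hypothetical: firmware reports erratum 1234567 as fixed. */
		if (errata_id == 1234567)
			return (ERRATA_NONE);
		/* Anything we can't classify stays with the caller. */
		return (ERRATA_UNKNOWN);
	}

	/* Registered once during boot, e.g. from the SMCCC driver. */
	cpu_feat_register_errata_check(example_errata_check);
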
diff --git a/sys/arm64/arm64/cpufunc_asm.S b/sys/arm64/arm64/cpufunc_asm.S
index 5a668aeb542e..a13b97c2cdee 100644
--- a/sys/arm64/arm64/cpufunc_asm.S
+++ b/sys/arm64/arm64/cpufunc_asm.S
@@ -29,7 +29,9 @@
*
*/
+#include <sys/elf_common.h>
#include <sys/errno.h>
+
#include <machine/asm.h>
#include <machine/param.h>
@@ -190,3 +192,5 @@ ENTRY(cache_maint_fault)
mov x0, #EFAULT
ret
END(cache_maint_fault)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/debug_monitor.c b/sys/arm64/arm64/debug_monitor.c
index 2e1a956ad75c..d92d3fb37ce4 100644
--- a/sys/arm64/arm64/debug_monitor.c
+++ b/sys/arm64/arm64/debug_monitor.c
@@ -40,6 +40,7 @@
#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
+#include <machine/machdep.h>
#include <machine/kdb.h>
#include <machine/pcb.h>
@@ -86,6 +87,7 @@ void dbg_monitor_exit(struct thread *, struct trapframe *);
#define DBG_WATCH_CTRL_ACCESS_MASK(x) ((x) & (0x3 << 3))
/* Common for breakpoint and watchpoint */
+#define DBG_WB_CTRL_HMC (0x1 << 13)
#define DBG_WB_CTRL_EL1 (0x1 << 1)
#define DBG_WB_CTRL_EL0 (0x2 << 1)
#define DBG_WB_CTRL_ELX_MASK(x) ((x) & (0x3 << 1))
@@ -457,6 +459,8 @@ dbg_setup_breakpoint(struct debug_monitor_state *monitor, vm_offset_t addr)
if ((monitor->dbg_flags & DBGMON_KERNEL) == 0)
bcr_priv = DBG_WB_CTRL_EL0;
+ else if (in_vhe())
+ bcr_priv = DBG_WB_CTRL_EL1 | DBG_WB_CTRL_HMC;
else
bcr_priv = DBG_WB_CTRL_EL1;
@@ -530,6 +534,8 @@ dbg_setup_watchpoint(struct debug_monitor_state *monitor, vm_offset_t addr,
if ((monitor->dbg_flags & DBGMON_KERNEL) == 0)
wcr_priv = DBG_WB_CTRL_EL0;
+ else if (in_vhe())
+ wcr_priv = DBG_WB_CTRL_EL1 | DBG_WB_CTRL_HMC;
else
wcr_priv = DBG_WB_CTRL_EL1;
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 0c46d2e6dcc6..0f46e44f5d6a 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -50,10 +50,12 @@
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
+#include <vm/vm_radix.h>
static vm_object_t obj_1t1_pt;
static vm_pindex_t efi_1t1_idx;
@@ -63,11 +65,13 @@ static uint64_t efi_ttbr0;
void
efi_destroy_1t1_map(void)
{
+ struct pctrie_iter pages;
vm_page_t m;
if (obj_1t1_pt != NULL) {
+ vm_page_iter_init(&pages, obj_1t1_pt);
VM_OBJECT_RLOCK(obj_1t1_pt);
- TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
+ VM_RADIX_FOREACH(m, &pages)
m->ref_count = VPRC_OBJREF;
vm_wire_sub(obj_1t1_pt->resident_page_count);
VM_OBJECT_RUNLOCK(obj_1t1_pt);
@@ -214,7 +218,7 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
p->md_phys, mode, p->md_pages);
}
- l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+ l3_attr = ATTR_AF | pmap_sh_attr | ATTR_S1_IDX(mode) |
ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
l3_attr |= ATTR_S1_XN;
@@ -239,6 +243,7 @@ efi_arch_enter(void)
{
CRITICAL_ASSERT(curthread);
+ curthread->td_md.md_efirt_dis_pf = vm_fault_disable_pagefaults();
/*
* Temporarily switch to EFI's page table. However, we leave curpmap
@@ -269,11 +274,6 @@ efi_arch_leave(void)
set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
if (PCPU_GET(bcast_tlbi_workaround) != 0)
invalidate_local_icache();
+ vm_fault_enable_pagefaults(curthread->td_md.md_efirt_dis_pf);
}
-int
-efi_rt_arch_call(struct efirt_callinfo *ec)
-{
-
- panic("not implemented");
-}
diff --git a/sys/arm64/arm64/efirt_support.S b/sys/arm64/arm64/efirt_support.S
new file mode 100644
index 000000000000..994380f1691e
--- /dev/null
+++ b/sys/arm64/arm64/efirt_support.S
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/elf_common.h>
+#include <sys/errno.h>
+
+#include <machine/asm.h>
+
+#include "assym.inc"
+
+/*
+ * int efi_rt_arch_call(struct efirt_callinfo *);
+ */
+ENTRY(efi_rt_arch_call)
+ sub sp, sp, #(14 * 8)
+ stp x19, x20, [sp, #(2 * 8)]
+ stp x21, x22, [sp, #(4 * 8)]
+ stp x23, x24, [sp, #(6 * 8)]
+ stp x25, x26, [sp, #(8 * 8)]
+ stp x27, x28, [sp, #(10 * 8)]
+ stp x29, x30, [sp, #(12 * 8)]
+ add x29, sp, #(12 * 8)
+
+ /* Save the stack pointer so we can find it later */
+ ldr x23, [x18, #PC_CURTHREAD]
+ mov x24, sp
+ str x24, [x23, #TD_MD_EFIRT_TMP]
+
+ mov x22, x0
+
+ /* Load the function to branch to */
+ ldr x9, [x22, #(EC_FPTR)]
+
+ /* Load the arguments */
+ ldr x4, [x22, #(EC_ARG1 + (4 * 8))]
+ ldr x3, [x22, #(EC_ARG1 + (3 * 8))]
+ ldr x2, [x22, #(EC_ARG1 + (2 * 8))]
+ ldr x1, [x22, #(EC_ARG1 + (1 * 8))]
+ ldr x0, [x22, #(EC_ARG1 + (0 * 8))]
+
+ /* Set the fault handler */
+ adr x10, efi_rt_fault
+ SET_FAULT_HANDLER(x10, x11)
+
+ blr x9
+
+ /* Clear the fault handler */
+ SET_FAULT_HANDLER(xzr, x11)
+
+ /* Store the result */
+ str x0, [x22, #(EC_EFI_STATUS)]
+ mov x0, #0
+
+.Lefi_rt_arch_call_exit:
+ ldp x19, x20, [sp, #(2 * 8)]
+ ldp x21, x22, [sp, #(4 * 8)]
+ ldp x23, x24, [sp, #(6 * 8)]
+ ldp x25, x26, [sp, #(8 * 8)]
+ ldp x27, x28, [sp, #(10 * 8)]
+ ldp x29, x30, [sp, #(12 * 8)]
+ add sp, sp, #(14 * 8)
+
+ ret
+END(efi_rt_arch_call)
+
+LENTRY(efi_rt_fault)
+ /* Clear pcb_onfault */
+ SET_FAULT_HANDLER(xzr, x11)
+ /* Load curthread */
+ ldr x1, [x18, #PC_CURTHREAD]
+ /* Restore the stack pointer */
+ ldr x2, [x1, #TD_MD_EFIRT_TMP]
+ mov sp, x2
+ /* Normal exit returning an error */
+ ldr x0, =EFAULT
+ b .Lefi_rt_arch_call_exit
+LEND(efi_rt_fault)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
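
For context, the call protocol the stub implements follows from the assym offsets used above: the caller fills in ec_fptr and up to five argument slots starting at ec_arg1, efi_rt_arch_call() returns 0 (or EFAULT after a fault), and the firmware's own status lands in ec_efi_status. A hedged sketch of a caller, where fptr and arg stand in for a runtime-service pointer and its argument, and any members beyond the three referenced in the assembly are assumed:

	struct efirt_callinfo ec;
	int error;

	bzero(&ec, sizeof(ec));
	ec.ec_fptr = (uintptr_t)fptr;	/* runtime service entry point */
	ec.ec_arg1 = (uintptr_t)arg;	/* first of five argument slots */
	error = efi_rt_arch_call(&ec);	/* EFAULT if the service faulted */
	if (error == 0 && ec.ec_efi_status != 0)
		error = EIO;		/* map the EFI status to an errno */
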
diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c
index fd2a2690b7a1..7cd5327b9f1b 100644
--- a/sys/arm64/arm64/elf32_machdep.c
+++ b/sys/arm64/arm64/elf32_machdep.c
@@ -134,6 +134,8 @@ static struct sysentvec elf32_freebsd_sysvec = {
.sv_trap = NULL,
.sv_hwcap = &elf32_hwcap,
.sv_hwcap2 = &elf32_hwcap2,
+ .sv_hwcap3 = NULL,
+ .sv_hwcap4 = NULL,
.sv_onexec_old = exec_onexec_old,
.sv_onexit = exit_onexit,
.sv_regset_begin = SET_BEGIN(__elfN(regset)),
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
index d5b420a8b519..970dba0ca7d9 100644
--- a/sys/arm64/arm64/elf_machdep.c
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -57,9 +57,13 @@
u_long __read_frequently elf_hwcap;
u_long __read_frequently elf_hwcap2;
+u_long __read_frequently elf_hwcap3;
+u_long __read_frequently elf_hwcap4;
/* TODO: Move to a better location */
u_long __read_frequently linux_elf_hwcap;
u_long __read_frequently linux_elf_hwcap2;
+u_long __read_frequently linux_elf_hwcap3;
+u_long __read_frequently linux_elf_hwcap4;
struct arm64_addr_mask elf64_addr_mask;
@@ -101,6 +105,8 @@ static struct sysentvec elf64_freebsd_sysvec = {
.sv_trap = NULL,
.sv_hwcap = &elf_hwcap,
.sv_hwcap2 = &elf_hwcap2,
+ .sv_hwcap3 = &elf_hwcap3,
+ .sv_hwcap4 = &elf_hwcap4,
.sv_onexec_old = exec_onexec_old,
.sv_protect = arm64_exec_protect,
.sv_onexit = exit_onexit,
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
index 41d7e7f7ae1f..13095def8b00 100644
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -25,9 +25,13 @@
*
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#include <machine/armreg.h>
+
#include "assym.inc"
+#include <sys/intr.h>
.text
@@ -167,7 +171,7 @@
.macro do_ast
mrs x19, daif
/* Make sure the IRQs are enabled before calling ast() */
- bic x19, x19, #PSR_I
+ bic x19, x19, #(PSR_I | PSR_F)
1:
/*
* Mask interrupts while checking the ast pending flag
@@ -230,12 +234,24 @@ ENTRY(handle_el1h_irq)
save_registers 1
KMSAN_ENTER
mov x0, sp
+ mov x1, #INTR_ROOT_IRQ
bl intr_irq_handler
KMSAN_LEAVE
restore_registers 1
ERET
END(handle_el1h_irq)
+ENTRY(handle_el1h_fiq)
+ save_registers 1
+ KMSAN_ENTER
+ mov x0, sp
+ mov x1, #INTR_ROOT_FIQ
+ bl intr_irq_handler
+ KMSAN_LEAVE
+ restore_registers 1
+ ERET
+END(handle_el1h_fiq)
+
ENTRY(handle_el1h_serror)
save_registers 1
KMSAN_ENTER
@@ -250,9 +266,11 @@ ENTRY(handle_el0_sync)
KMSAN_ENTER
ldr x0, [x18, #PC_CURTHREAD]
mov x1, sp
+ mov x22, x0
str x1, [x0, #TD_FRAME]
bl do_el0_sync
do_ast
+ str xzr, [x22, #TD_FRAME]
KMSAN_LEAVE
restore_registers 0
ERET
@@ -262,6 +280,7 @@ ENTRY(handle_el0_irq)
save_registers 0
KMSAN_ENTER
mov x0, sp
+ mov x1, #INTR_ROOT_IRQ
bl intr_irq_handler
do_ast
KMSAN_LEAVE
@@ -269,6 +288,18 @@ ENTRY(handle_el0_irq)
ERET
END(handle_el0_irq)
+ENTRY(handle_el0_fiq)
+ save_registers 0
+ KMSAN_ENTER
+ mov x0, sp
+ mov x1, #INTR_ROOT_FIQ
+ bl intr_irq_handler
+ do_ast
+ KMSAN_LEAVE
+ restore_registers 0
+ ERET
+END(handle_el0_fiq)
+
ENTRY(handle_el0_serror)
save_registers 0
KMSAN_ENTER
@@ -311,16 +342,17 @@ exception_vectors:
vector el1h_sync 1 /* Synchronous EL1h */
vector el1h_irq 1 /* IRQ EL1h */
- vempty 1 /* FIQ EL1h */
+ vector el1h_fiq 1 /* FIQ EL1h */
vector el1h_serror 1 /* Error EL1h */
vector el0_sync 0 /* Synchronous 64-bit EL0 */
vector el0_irq 0 /* IRQ 64-bit EL0 */
- vempty 0 /* FIQ 64-bit EL0 */
+ vector el0_fiq 0 /* FIQ 64-bit EL0 */
vector el0_serror 0 /* Error 64-bit EL0 */
vector el0_sync 0 /* Synchronous 32-bit EL0 */
vector el0_irq 0 /* IRQ 32-bit EL0 */
- vempty 0 /* FIQ 32-bit EL0 */
+ vector el0_fiq 0 /* FIQ 32-bit EL0 */
vector el0_serror 0 /* Error 32-bit EL0 */
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
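
With a second interrupt root defined, a controller that delivers its events as FIQs (on this platform, the Apple AIC from the diffstat) claims INTR_ROOT_FIQ the same way the GICv3 claims INTR_ROOT_IRQ later in this commit. A sketch only, with the filter name hypothetical:

	/* In the FIQ controller's attach routine. */
	error = intr_pic_claim_root(dev, xref, aic_fiq_filter, sc,
	    INTR_ROOT_FIQ);
	if (error != 0)
		return (ENXIO);
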
diff --git a/sys/arm64/arm64/exec_machdep.c b/sys/arm64/arm64/exec_machdep.c
index 1bff70450b69..751329affd91 100644
--- a/sys/arm64/arm64/exec_machdep.c
+++ b/sys/arm64/arm64/exec_machdep.c
@@ -191,17 +191,27 @@ int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
struct debug_monitor_state *monitor;
+ uint64_t dfr0;
int i;
uint8_t debug_ver, nbkpts, nwtpts;
memset(regs, 0, sizeof(*regs));
- extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
- &debug_ver);
- extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
- &nbkpts);
- extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_WRPs_SHIFT,
- &nwtpts);
+ /*
+	 * Read the Debug Feature Register 0 to get the info we need.
+	 * It is identical on FreeBSD and Linux, so there is no need
+	 * to check what the target is.
+ */
+ if (!get_user_reg(ID_AA64DFR0_EL1, &dfr0, true)) {
+ debug_ver = ID_AA64DFR0_DebugVer_8;
+ nbkpts = 0;
+ nwtpts = 0;
+ } else {
+ debug_ver = ID_AA64DFR0_DebugVer_VAL(dfr0) >>
+ ID_AA64DFR0_DebugVer_SHIFT;
+ nbkpts = ID_AA64DFR0_BRPs_VAL(dfr0) >> ID_AA64DFR0_BRPs_SHIFT;
+ nwtpts = ID_AA64DFR0_WRPs_VAL(dfr0) >> ID_AA64DFR0_WRPs_SHIFT;
+ }
/*
* The BRPs field contains the number of breakpoints - 1. Armv8-A
@@ -463,9 +473,10 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
#define PSR_13_MASK 0xfffffffful
struct arm64_reg_context ctx;
struct trapframe *tf = td->td_frame;
+ struct pcb *pcb;
uint64_t spsr;
vm_offset_t addr;
- int error;
+ int error, seen_types;
bool done;
spsr = mcp->mc_gpregs.gp_spsr;
@@ -511,7 +522,11 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
/* Read any register contexts we find */
if (mcp->mc_ptr != 0) {
addr = mcp->mc_ptr;
+ pcb = td->td_pcb;
+
+#define CTX_TYPE_FLAG_SVE (1 << 0)
+ seen_types = 0;
done = false;
do {
if (!__is_aligned(addr,
@@ -523,6 +538,38 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
return (error);
switch (ctx.ctx_id) {
+#ifdef VFP
+ case ARM64_CTX_SVE: {
+ struct sve_context sve_ctx;
+ size_t buf_size;
+
+ if ((seen_types & CTX_TYPE_FLAG_SVE) != 0)
+ return (EINVAL);
+ seen_types |= CTX_TYPE_FLAG_SVE;
+
+ if (pcb->pcb_svesaved == NULL)
+ return (EINVAL);
+
+ /* XXX: Check pcb_svesaved is valid */
+
+ buf_size = sve_buf_size(td);
+ /* Check the size is valid */
+ if (ctx.ctx_size !=
+ (sizeof(sve_ctx) + buf_size))
+ return (EINVAL);
+
+ memset(pcb->pcb_svesaved, 0,
+ sve_max_buf_size());
+
+ /* Copy the SVE registers from userspace */
+ if (copyin((void *)(addr + sizeof(sve_ctx)),
+ pcb->pcb_svesaved, buf_size) != 0)
+ return (EINVAL);
+
+ pcb->pcb_fpflags |= PCB_FP_SVEVALID;
+ break;
+ }
+#endif
case ARM64_CTX_END:
done = true;
break;
@@ -532,6 +579,8 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
addr += ctx.ctx_size;
} while (!done);
+
+#undef CTX_TYPE_FLAG_SVE
}
return (0);
@@ -592,7 +641,7 @@ set_fpcontext(struct thread *td, mcontext_t *mcp)
sizeof(mcp->mc_fpregs.fp_q));
curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
- curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
+ curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_STARTED;
}
#endif
}
@@ -606,10 +655,19 @@ sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
return (EFAULT);
+	/* Stop an interrupt from causing the SVE state to be dropped */
+ td->td_sa.code = -1;
error = set_mcontext(td, &uc.uc_mcontext);
if (error != 0)
return (error);
+ /*
+ * Sync the VFP and SVE registers. To be backwards compatible we
+ * use the VFP registers to restore the lower bits of the SVE
+	 * registers they alias.
+ */
+ vfp_to_sve_sync(td);
+
/* Restore signal mask. */
kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
@@ -635,9 +693,47 @@ sendsig_ctx_end(struct thread *td, vm_offset_t *addrp)
return (true);
}
+static bool
+sendsig_ctx_sve(struct thread *td, vm_offset_t *addrp)
+{
+ struct sve_context ctx;
+ struct pcb *pcb;
+ size_t buf_size;
+ vm_offset_t ctx_addr;
+
+ pcb = td->td_pcb;
+	/* Do nothing if SVE hasn't started */
+ if (pcb->pcb_svesaved == NULL)
+ return (true);
+
+ MPASS(pcb->pcb_svesaved != NULL);
+
+ buf_size = sve_buf_size(td);
+
+ /* Address for the full context */
+ *addrp -= sizeof(ctx) + buf_size;
+ ctx_addr = *addrp;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.sve_ctx.ctx_id = ARM64_CTX_SVE;
+ ctx.sve_ctx.ctx_size = sizeof(ctx) + buf_size;
+ ctx.sve_vector_len = pcb->pcb_sve_len;
+ ctx.sve_flags = 0;
+
+ /* Copy out the header and data */
+ if (copyout(&ctx, (void *)ctx_addr, sizeof(ctx)) != 0)
+ return (false);
+ if (copyout(pcb->pcb_svesaved, (void *)(ctx_addr + sizeof(ctx)),
+ buf_size) != 0)
+ return (false);
+
+ return (true);
+}
+
typedef bool(*ctx_func)(struct thread *, vm_offset_t *);
static const ctx_func ctx_funcs[] = {
sendsig_ctx_end, /* Must be first to end the linked list */
+ sendsig_ctx_sve,
NULL,
};
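
The record list that the sendsig_ctx_* functions build (and that set_mcontext() parses above) is a simple sequence of (ctx_id, ctx_size) headers, terminated by ARM64_CTX_END. A hypothetical userspace-side walk, assuming the context header layout the kernel code above uses; handle_sve_record() is a stand-in:

	/* Sketch only: iterate the register contexts in a signal frame. */
	uintptr_t addr = (uintptr_t)uc->uc_mcontext.mc_ptr;

	if (addr != 0) {
		for (;;) {
			struct arm64_reg_context *ctx;

			ctx = (struct arm64_reg_context *)addr;
			if (ctx->ctx_id == ARM64_CTX_END)
				break;
			if (ctx->ctx_id == ARM64_CTX_SVE)
				handle_sve_record(ctx);	/* hypothetical */
			addr += ctx->ctx_size;
		}
	}
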
diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c
index c4f52ae61a77..e3977798b046 100644
--- a/sys/arm64/arm64/genassym.c
+++ b/sys/arm64/arm64/genassym.c
@@ -31,6 +31,7 @@
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <machine/efi.h>
#include <machine/frame.h>
#include <machine/machdep.h>
#include <machine/pcb.h>
@@ -42,7 +43,10 @@ ASSYM(BP_MODULEP, offsetof(struct arm64_bootparams, modulep));
ASSYM(BP_KERN_STACK, offsetof(struct arm64_bootparams, kern_stack));
ASSYM(BP_KERN_TTBR0, offsetof(struct arm64_bootparams, kern_ttbr0));
ASSYM(BP_BOOT_EL, offsetof(struct arm64_bootparams, boot_el));
-ASSYM(BP_HCR_EL2, offsetof(struct arm64_bootparams, hcr_el2));
+
+ASSYM(EC_EFI_STATUS, offsetof(struct efirt_callinfo, ec_efi_status));
+ASSYM(EC_FPTR, offsetof(struct efirt_callinfo, ec_fptr));
+ASSYM(EC_ARG1, offsetof(struct efirt_callinfo, ec_arg1));
ASSYM(PCPU_SIZE, sizeof(struct pcpu));
ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
@@ -59,6 +63,8 @@ ASSYM(PCB_TPIDRRO, offsetof(struct pcb, pcb_tpidrro_el0));
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
+ASSYM(P_PID, offsetof(struct proc, p_pid));
+
ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
@@ -68,6 +74,7 @@ ASSYM(TD_AST, offsetof(struct thread, td_ast));
ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
ASSYM(TD_MD_CANARY, offsetof(struct thread, td_md.md_canary));
+ASSYM(TD_MD_EFIRT_TMP, offsetof(struct thread, td_md.md_efirt_tmp));
ASSYM(TF_SIZE, sizeof(struct trapframe));
ASSYM(TF_SP, offsetof(struct trapframe, tf_sp));
diff --git a/sys/arm64/arm64/gic_v3.c b/sys/arm64/arm64/gic_v3.c
index b57dd9be48aa..201cdae6de09 100644
--- a/sys/arm64/arm64/gic_v3.c
+++ b/sys/arm64/arm64/gic_v3.c
@@ -494,6 +494,10 @@ gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
case GICV3_IVAR_REDIST:
*result = (uintptr_t)&sc->gic_redists.pcpu[PCPU_GET(cpuid)];
return (0);
+ case GICV3_IVAR_SUPPORT_LPIS:
+ *result =
+ (gic_d_read(sc, 4, GICD_TYPER) & GICD_TYPER_LPIS) != 0;
+ return (0);
case GIC_IVAR_HW_REV:
KASSERT(
GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
@@ -1089,7 +1093,7 @@ gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
#ifdef SMP
static void
-gic_v3_init_secondary(device_t dev)
+gic_v3_init_secondary(device_t dev, uint32_t rootnum)
{
struct gic_v3_setup_periph_args pargs;
device_t child;
@@ -1136,7 +1140,7 @@ gic_v3_init_secondary(device_t dev)
for (i = 0; i < sc->gic_nchildren; i++) {
child = sc->gic_children[i];
- PIC_INIT_SECONDARY(child);
+ PIC_INIT_SECONDARY(child, rootnum);
}
}
@@ -1430,7 +1434,7 @@ gic_v3_redist_find(struct gic_v3_softc *sc)
(GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
}
} while (offset < rman_get_size(r_res) &&
- (typer & GICR_TYPER_LAST) == 0);
+ !sc->gic_redists.single && (typer & GICR_TYPER_LAST) == 0);
}
device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
diff --git a/sys/arm64/arm64/gic_v3_acpi.c b/sys/arm64/arm64/gic_v3_acpi.c
index f91a8f6078d9..88fd0394c548 100644
--- a/sys/arm64/arm64/gic_v3_acpi.c
+++ b/sys/arm64/arm64/gic_v3_acpi.c
@@ -300,6 +300,7 @@ gic_v3_acpi_count_regions(device_t dev)
acpi_walk_subtables(madt + 1,
(char *)madt + madt->Header.Length,
madt_count_gicc_redistrib, sc);
+ sc->gic_redists.single = true;
}
acpi_unmap_table(madt);
@@ -345,8 +346,9 @@ gic_v3_acpi_attach(device_t dev)
}
}
- if (intr_pic_claim_root(dev, ACPI_INTR_XREF, arm_gic_v3_intr, sc)
- != 0) {
+ err = intr_pic_claim_root(dev, ACPI_INTR_XREF, arm_gic_v3_intr, sc,
+ INTR_ROOT_IRQ);
+ if (err != 0) {
err = ENXIO;
goto error;
}
@@ -406,7 +408,7 @@ gic_v3_add_children(ACPI_SUBTABLE_HEADER *entry, void *arg)
return;
}
- child = device_add_child(dev, "its", -1);
+ child = device_add_child(dev, "its", DEVICE_UNIT_ANY);
if (child == NULL) {
free(di, M_GIC_V3);
return;
@@ -448,7 +450,7 @@ gic_v3_acpi_bus_attach(device_t dev)
gic_v3_add_children, dev);
/* Add the vgic child if needed */
if (((uintptr_t)acpi_get_private(dev) & GICV3_PRIV_FLAGS) != 0) {
- child = device_add_child(dev, "vgic", -1);
+ child = device_add_child(dev, "vgic", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "Could not add vgic child\n");
} else {
@@ -463,7 +465,7 @@ gic_v3_acpi_bus_attach(device_t dev)
acpi_unmap_table(madt);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
}
static struct resource_list *
diff --git a/sys/arm64/arm64/gic_v3_fdt.c b/sys/arm64/arm64/gic_v3_fdt.c
index 3a3647fc89a5..4bea4040c0ba 100644
--- a/sys/arm64/arm64/gic_v3_fdt.c
+++ b/sys/arm64/arm64/gic_v3_fdt.c
@@ -161,7 +161,8 @@ gic_v3_fdt_attach(device_t dev)
/* Register xref */
OF_device_register_xref(xref, dev);
- if (intr_pic_claim_root(dev, xref, arm_gic_v3_intr, sc) != 0) {
+ err = intr_pic_claim_root(dev, xref, arm_gic_v3_intr, sc, INTR_ROOT_IRQ);
+ if (err != 0) {
err = ENXIO;
goto error;
}
@@ -339,7 +340,7 @@ gic_v3_ofw_bus_attach(device_t dev)
/* Should not have any interrupts, so don't add any */
/* Add newbus device for this FDT node */
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (!child) {
if (bootverbose) {
device_printf(dev,
@@ -362,7 +363,7 @@ gic_v3_ofw_bus_attach(device_t dev)
* child so we can use this in the vmm module for bhyve.
*/
if (OF_hasprop(parent, "interrupts")) {
- child = device_add_child(dev, "vgic", -1);
+ child = device_add_child(dev, "vgic", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "Could not add vgic child\n");
} else {
@@ -375,7 +376,8 @@ gic_v3_ofw_bus_attach(device_t dev)
}
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static struct resource_list *
diff --git a/sys/arm64/arm64/gic_v3_var.h b/sys/arm64/arm64/gic_v3_var.h
index 81526e7cc15e..8bc0f456d91e 100644
--- a/sys/arm64/arm64/gic_v3_var.h
+++ b/sys/arm64/arm64/gic_v3_var.h
@@ -53,6 +53,12 @@ struct gic_redists {
struct resource ** regions;
/* Number of Re-Distributor regions */
u_int nregions;
+ /*
+ * Whether to treat each region as a single Re-Distributor page or a
+ * series of contiguous pages (i.e. from each ACPI MADT GICC's GICR
+ * Base Address field)
+ */
+ bool single;
/* Per-CPU Re-Distributor data */
struct redist_pcpu *pcpu;
};
@@ -102,9 +108,11 @@ MALLOC_DECLARE(M_GIC_V3);
#define GICV3_IVAR_NIRQS 1000
/* 1001 was GICV3_IVAR_REDIST_VADDR */
#define GICV3_IVAR_REDIST 1002
+#define GICV3_IVAR_SUPPORT_LPIS 1003
__BUS_ACCESSOR(gicv3, nirqs, GICV3, NIRQS, u_int);
__BUS_ACCESSOR(gicv3, redist, GICV3, REDIST, void *);
+__BUS_ACCESSOR(gicv3, support_lpis, GICV3, SUPPORT_LPIS, bool);
/* Device methods */
int gic_v3_attach(device_t dev);
diff --git a/sys/arm64/arm64/gicv3_its.c b/sys/arm64/arm64/gicv3_its.c
index 31a0ded6c95d..546a225abf09 100644
--- a/sys/arm64/arm64/gicv3_its.c
+++ b/sys/arm64/arm64/gicv3_its.c
@@ -158,7 +158,6 @@ struct its_dev {
struct lpi_chunk lpis;
/* Virtual address of ITT */
void *itt;
- size_t itt_size;
};
/*
@@ -445,7 +444,7 @@ gicv3_its_table_page_size(struct gicv3_its_softc *sc, int table)
reg = gic_its_read_8(sc, GITS_BASER(table));
while (1) {
- reg &= GITS_BASER_PSZ_MASK;
+ reg &= ~GITS_BASER_PSZ_MASK;
switch (page_size) {
case PAGE_SIZE_4K: /* 4KB */
reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
@@ -533,7 +532,7 @@ gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
cache = 0;
} else {
devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
- cache = GITS_BASER_CACHE_WAWB;
+ cache = GITS_BASER_CACHE_RAWAWB;
}
sc->sc_devbits = devbits;
share = GITS_BASER_SHARE_IS;
@@ -587,11 +586,20 @@ gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
its_tbl_size = l1_esize * l1_nidents;
its_tbl_size = roundup2(its_tbl_size, page_size);
break;
- case GITS_BASER_TYPE_VP:
case GITS_BASER_TYPE_PP: /* Undocumented? */
case GITS_BASER_TYPE_IC:
its_tbl_size = page_size;
break;
+ case GITS_BASER_TYPE_VP:
+ /*
+ * If GITS_TYPER.SVPET != 0, the pending table is
+ * shared amongst the redistibutors and ther other
+ * ITSes. Requiring sharing across the ITSes when none
+ * of the redistributors have GICR_VPROPBASER.Valid==1
+ * isn't specified in the architecture, but that's how
+ * the GIC-700 behaves. We don't handle vPE tables at
+ * all yet, so just skip this base register.
+ */
default:
if (bootverbose)
device_printf(dev, "Unhandled table type %lx\n",
@@ -793,7 +801,7 @@ its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
 /* Make sure changes are observable by the GIC */
dsb(sy);
- size = (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
+	size = ilog2_long(LPI_CONFTAB_SIZE | GIC_FIRST_LPI);
xbaser = vtophys(sc->sc_conf_base) |
(GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
@@ -1285,7 +1293,7 @@ gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
#ifdef SMP
static void
-gicv3_its_init_secondary(device_t dev)
+gicv3_its_init_secondary(device_t dev, uint32_t rootnum)
{
struct gicv3_its_softc *sc;
@@ -1412,7 +1420,7 @@ its_device_get(device_t dev, device_t child, u_int nvecs)
struct gicv3_its_softc *sc;
struct its_dev *its_dev;
vmem_addr_t irq_base;
- size_t esize;
+ size_t esize, itt_size;
sc = device_get_softc(dev);
@@ -1450,8 +1458,8 @@ its_device_get(device_t dev, device_t child, u_int nvecs)
* Allocate ITT for this device.
* PA has to be 256 B aligned. At least two entries for device.
*/
- its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
- its_dev->itt = contigmalloc_domainset(its_dev->itt_size,
+ itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
+ its_dev->itt = contigmalloc_domainset(itt_size,
M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0,
LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0);
if (its_dev->itt == NULL) {
@@ -1462,7 +1470,7 @@ its_device_get(device_t dev, device_t child, u_int nvecs)
/* Make sure device sees zeroed ITT. */
if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
- cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size);
+ cpu_dcache_wb_range(its_dev->itt, itt_size);
mtx_lock_spin(&sc->sc_its_dev_lock);
TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
@@ -1494,7 +1502,7 @@ its_device_release(device_t dev, struct its_dev *its_dev)
/* Free ITT */
KASSERT(its_dev->itt != NULL, ("Invalid ITT in valid ITS device"));
- contigfree(its_dev->itt, its_dev->itt_size, M_GICV3_ITS);
+ free(its_dev->itt, M_GICV3_ITS);
/* Free the IRQ allocation */
vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
@@ -1736,9 +1744,15 @@ gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
int error;
sc = device_get_softc(dev);
+ /*
+ * Get the context. If no context is found then the device isn't
+ * behind an IOMMU so no setup is needed.
+ */
ctx = iommu_get_dev_ctx(child);
- if (ctx == NULL)
- return (ENXIO);
+ if (ctx == NULL) {
+ *domain = NULL;
+ return (0);
+ }
/* Map the page containing the GITS_TRANSLATER register. */
error = iommu_map_msi(ctx, PAGE_SIZE, 0,
IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
@@ -2208,6 +2222,9 @@ gicv3_its_fdt_probe(device_t dev)
if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
return (ENXIO);
+ if (!gicv3_get_support_lpis(dev))
+ return (ENXIO);
+
device_set_desc(dev, "ARM GIC Interrupt Translation Service");
return (BUS_PROBE_DEFAULT);
}
@@ -2277,6 +2294,9 @@ gicv3_its_acpi_probe(device_t dev)
if (gic_get_hw_rev(dev) < 3)
return (EINVAL);
+ if (!gicv3_get_support_lpis(dev))
+ return (ENXIO);
+
device_set_desc(dev, "ARM GIC Interrupt Translation Service");
return (BUS_PROBE_DEFAULT);
}
diff --git a/sys/arm64/arm64/hyp_stub.S b/sys/arm64/arm64/hyp_stub.S
index 42f76da95062..ee486edf67a0 100644
--- a/sys/arm64/arm64/hyp_stub.S
+++ b/sys/arm64/arm64/hyp_stub.S
@@ -24,6 +24,8 @@
* SUCH DAMAGE.
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
.macro vempty
@@ -63,3 +65,5 @@ hyp_stub_vectors:
vempty /* IRQ 32-bit EL1 */
vempty /* FIQ 32-bit EL1 */
vempty /* SError 32-bit EL1 */
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index 01b434fd4f47..123aeeb090dd 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -42,6 +42,7 @@
#include <machine/atomic.h>
#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
#include <machine/cpufunc.h>
#include <machine/elf.h>
#include <machine/md_var.h>
@@ -50,6 +51,9 @@
static MALLOC_DEFINE(M_IDENTCPU, "CPU ID", "arm64 CPU identification memory");
struct cpu_desc;
+#ifdef INVARIANTS
+static bool hwcaps_set = false;
+#endif
static void print_cpu_midr(struct sbuf *sb, u_int cpu);
static void print_cpu_features(u_int cpu, struct cpu_desc *desc,
@@ -59,7 +63,7 @@ static void print_cpu_caches(struct sbuf *sb, struct cpu_desc *desc);
static u_long parse_cpu_features_hwcap32(void);
#endif
-char machine[] = "arm64";
+const char machine[] = "arm64";
#ifdef SCTL_MASK32
extern int adaptive_machine_arch;
@@ -76,8 +80,12 @@ static int allow_idc = 1;
SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0,
"Allow optimizations based on the IDC cache bit");
+static bool emulate_ctr = false;
+
static void check_cpu_regs(u_int cpu, struct cpu_desc *desc,
struct cpu_desc *prev_desc);
+static uint64_t update_special_reg_field(uint64_t user_reg, u_int type,
+ uint64_t value, u_int width, u_int shift, bool sign);
/*
* The default implementation of I-cache sync assumes we have an
@@ -138,15 +146,11 @@ struct cpu_desc {
uint64_t id_aa64mmfr0;
uint64_t id_aa64mmfr1;
uint64_t id_aa64mmfr2;
-#ifdef NOTYET
uint64_t id_aa64mmfr3;
uint64_t id_aa64mmfr4;
-#endif
uint64_t id_aa64pfr0;
uint64_t id_aa64pfr1;
-#ifdef NOTYET
uint64_t id_aa64pfr2;
-#endif
uint64_t id_aa64zfr0;
uint64_t ctr;
#ifdef COMPAT_FREEBSD32
@@ -214,19 +218,28 @@ static const struct cpu_parts cpu_parts_arm[] = {
{ CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
{ CPU_PART_CORTEX_A77, "Cortex-A77" },
{ CPU_PART_CORTEX_A78, "Cortex-A78" },
+ { CPU_PART_CORTEX_A78AE, "Cortex-A78AE" },
{ CPU_PART_CORTEX_A78C, "Cortex-A78C" },
{ CPU_PART_CORTEX_A510, "Cortex-A510" },
+ { CPU_PART_CORTEX_A520, "Cortex-A520" },
{ CPU_PART_CORTEX_A710, "Cortex-A710" },
{ CPU_PART_CORTEX_A715, "Cortex-A715" },
+ { CPU_PART_CORTEX_A720, "Cortex-A720" },
+ { CPU_PART_CORTEX_A725, "Cortex-A725" },
+	{ CPU_PART_CORTEX_X925, "Cortex-X925" },
{ CPU_PART_CORTEX_X1, "Cortex-X1" },
{ CPU_PART_CORTEX_X1C, "Cortex-X1C" },
{ CPU_PART_CORTEX_X2, "Cortex-X2" },
{ CPU_PART_CORTEX_X3, "Cortex-X3" },
+ { CPU_PART_CORTEX_X4, "Cortex-X4" },
{ CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
{ CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
+ { CPU_PART_NEOVERSE_N3, "Neoverse-N3" },
{ CPU_PART_NEOVERSE_V1, "Neoverse-V1" },
{ CPU_PART_NEOVERSE_V2, "Neoverse-V2" },
+ { CPU_PART_NEOVERSE_V3, "Neoverse-V3" },
+ { CPU_PART_NEOVERSE_V3AE, "Neoverse-V3AE" },
CPU_PART_NONE,
};
@@ -237,12 +250,25 @@ static const struct cpu_parts cpu_parts_cavium[] = {
CPU_PART_NONE,
};
-/* APM / Ampere */
+/* APM (now Ampere) */
static const struct cpu_parts cpu_parts_apm[] = {
{ CPU_PART_EMAG8180, "eMAG 8180" },
CPU_PART_NONE,
};
+/* Ampere */
+static const struct cpu_parts cpu_parts_ampere[] = {
+ { CPU_PART_AMPERE1, "AmpereOne AC03" },
+ { CPU_PART_AMPERE1A, "AmpereOne AC04" },
+ CPU_PART_NONE,
+};
+
+/* Microsoft */
+static const struct cpu_parts cpu_parts_microsoft[] = {
+ { CPU_PART_AZURE_COBALT_100, "Azure Cobalt 100" },
+ CPU_PART_NONE,
+};
+
/* Qualcomm */
static const struct cpu_parts cpu_parts_qcom[] = {
{ CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" },
@@ -276,7 +302,7 @@ static const struct cpu_parts cpu_parts_none[] = {
* Implementers table.
*/
const struct cpu_implementers cpu_implementers[] = {
- { CPU_IMPL_AMPERE, "Ampere", cpu_parts_none },
+ { CPU_IMPL_AMPERE, "Ampere", cpu_parts_ampere },
{ CPU_IMPL_APPLE, "Apple", cpu_parts_apple },
{ CPU_IMPL_APM, "APM", cpu_parts_apm },
{ CPU_IMPL_ARM, "ARM", cpu_parts_arm },
@@ -285,22 +311,30 @@ const struct cpu_implementers cpu_implementers[] = {
{ CPU_IMPL_DEC, "DEC", cpu_parts_none },
{ CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
{ CPU_IMPL_FUJITSU, "Fujitsu", cpu_parts_none },
+ { CPU_IMPL_HISILICON, "HiSilicon", cpu_parts_none },
{ CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
{ CPU_IMPL_INTEL, "Intel", cpu_parts_none },
{ CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
+ { CPU_IMPL_MICROSOFT, "Microsoft", cpu_parts_microsoft },
{ CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
{ CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_qcom },
CPU_IMPLEMENTER_NONE,
};
#define MRS_TYPE_MASK 0xf
-#define MRS_TYPE_FBSD_SHIFT 0
-#define MRS_TYPE_LNX_SHIFT 8
#define MRS_INVALID 0
#define MRS_EXACT 1
-#define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4))
-#define MRS_EXACT_FIELD(x) (((x) >> 4) & 0xf)
-#define MRS_LOWER 2
+#define MRS_EXACT_IF_DIFFERENT 2
+#define MRS_LOWER 3
+#define MRS_HIGHER_OR_ZERO 4
+#define MRS_HIGHER 5
+#define MRS_SAFE_SHIFT 4
+#define MRS_SAFE_MASK (0xfu << MRS_SAFE_SHIFT)
+#define MRS_SAFE(x) (((x) << MRS_SAFE_SHIFT) & MRS_SAFE_MASK)
+#define MRS_SAFE_VAL(x) (((x) & MRS_SAFE_MASK) >> MRS_SAFE_SHIFT)
+#define MRS_FREEBSD (1u << 8)
+#define MRS_LINUX (1u << 9)
+#define MRS_USERSPACE (MRS_FREEBSD | MRS_LINUX)
struct mrs_field_value {
uint64_t value;
@@ -335,6 +369,28 @@ struct mrs_field_value {
MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \
MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s")
+/*
+ * Used for printing I/D cache line sizes and CWG/ERG.  As 0 is a special
+ * case, in some cases the decoded string needs to be passed in.
+ */
+#define MRS_FIELD_VALUE_CACHE(_reg, _field, _0desc, _desc) \
+ MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, _0desc), \
+ MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc), \
+ MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "16 " _desc), \
+ MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "32 " _desc), \
+ MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "64 " _desc), \
+ MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "128 " _desc), \
+ MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "256 " _desc), \
+ MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "512 " _desc), \
+ MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "1k " _desc), \
+ MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "2k " _desc), \
+ MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "4k " _desc), \
+ MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "8k " _desc), \
+ MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "16k " _desc), \
+ MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "32k " _desc), \
+ MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "64k " _desc), \
+ MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "128k "_desc)
+
#define MRS_FIELD_VALUE_END { .desc = NULL }
struct mrs_field_hwcap {
@@ -359,31 +415,95 @@ struct mrs_field {
uint64_t mask;
bool sign;
u_int type;
+ u_int width;
u_int shift;
};
-#define MRS_FIELD_HWCAP_SPLIT(_register, _name, _sign, _fbsd_type, \
- _lnx_type, _values, _hwcap) \
+#define MRS_FIELD_RES1(_width, _shift) \
+ { \
+ .sign = false, \
+ .type = MRS_EXACT | MRS_SAFE((1u << (_width)) - 1) | \
+ MRS_USERSPACE, \
+ .width = (_width), \
+ .shift = (_shift), \
+ }
+
+#define MRS_FIELD_HWCAP(_register, _name, _sign, _type, _visibility, \
+ _values, _hwcap) \
{ \
.name = #_name, \
.sign = (_sign), \
- .type = ((_fbsd_type) << MRS_TYPE_FBSD_SHIFT) | \
- ((_lnx_type) << MRS_TYPE_LNX_SHIFT), \
+ .type = ((_type) | (_visibility)), \
+ .width = _register ## _ ## _name ## _WIDTH, \
.shift = _register ## _ ## _name ## _SHIFT, \
.mask = _register ## _ ## _name ## _MASK, \
.values = (_values), \
.hwcaps = (_hwcap), \
}
-#define MRS_FIELD_HWCAP(_register, _name, _sign, _type, _values, _hwcap) \
- MRS_FIELD_HWCAP_SPLIT(_register, _name, _sign, _type, _type, \
- _values, _hwcap)
-
-#define MRS_FIELD(_register, _name, _sign, _type, _values) \
- MRS_FIELD_HWCAP(_register, _name, _sign, _type, _values, NULL)
+#define MRS_FIELD(_register, _name, _sign, _type, _visibility, _values) \
+ MRS_FIELD_HWCAP(_register, _name, _sign, _type, _visibility, \
+ _values, NULL)
#define MRS_FIELD_END { .type = MRS_INVALID, }
+/* CTR_EL0 */
+static const struct mrs_field_value ctr_dic[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(CTR, DIC, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value ctr_idc[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(CTR, IDC, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value ctr_cwg[] = {
+ MRS_FIELD_VALUE_CACHE(CTR, CWG, "Unknown CWG",
+ "byte CWG"),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value ctr_erg[] = {
+ MRS_FIELD_VALUE_CACHE(CTR, ERG, "Unknown ERG",
+ "byte ERG"),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value ctr_dline[] = {
+ MRS_FIELD_VALUE_CACHE(CTR, DLINE, "4 byte D-cacheline",
+ "byte D-cacheline"),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value ctr_l1ip[] = {
+ MRS_FIELD_VALUE(CTR_L1IP_VIPT, "VIPT I-cache"),
+ MRS_FIELD_VALUE(CTR_L1IP_PIPT, "PIPT I-cache"),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value ctr_iline[] = {
+ MRS_FIELD_VALUE_CACHE(CTR, ILINE, "4 byte I-cacheline",
+ "byte I-cacheline"),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field ctr_fields[] = {
+ /* Bit 31 is RES1 */
+ MRS_FIELD_RES1(1, 31),
+ MRS_FIELD(CTR, DIC, false, MRS_LOWER, MRS_USERSPACE, ctr_dic),
+ MRS_FIELD(CTR, IDC, false, MRS_LOWER, MRS_USERSPACE, ctr_idc),
+ MRS_FIELD(CTR, CWG, false, MRS_HIGHER_OR_ZERO, MRS_USERSPACE, ctr_cwg),
+ MRS_FIELD(CTR, ERG, false, MRS_HIGHER_OR_ZERO, MRS_USERSPACE, ctr_erg),
+ MRS_FIELD(CTR, DLINE, false, MRS_LOWER, MRS_USERSPACE, ctr_dline),
+	/* If the ICache types are different, report the safe option */
+ MRS_FIELD(CTR, L1IP, false, MRS_EXACT_IF_DIFFERENT |
+ MRS_SAFE(CTR_L1IP_VIPT >> CTR_L1IP_SHIFT), MRS_USERSPACE,
+ ctr_l1ip),
+ MRS_FIELD(CTR, ILINE, false, MRS_LOWER, MRS_USERSPACE, ctr_iline),
+ MRS_FIELD_END,
+};
+
/* ID_AA64AFR0_EL1 */
static const struct mrs_field id_aa64afr0_fields[] = {
MRS_FIELD_END,
@@ -437,6 +557,7 @@ static const struct mrs_field_value id_aa64dfr0_pmsver[] = {
MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_1, "SPEv1p1"),
MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_2, "SPEv1p2"),
MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_3, "SPEv1p3"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_4, "SPEv1p4"),
MRS_FIELD_VALUE_END,
};
@@ -450,6 +571,11 @@ static const struct mrs_field_value id_aa64dfr0_wrps[] = {
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_value id_aa64dfr0_pmss[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, PMSS, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field_value id_aa64dfr0_brps[] = {
MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"),
MRS_FIELD_VALUE_END,
@@ -463,6 +589,7 @@ static const struct mrs_field_value id_aa64dfr0_pmuver[] = {
MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_5, "PMUv3p5"),
MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_7, "PMUv3p7"),
MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_8, "PMUv3p8"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_9, "PMUv3p9"),
MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"),
MRS_FIELD_VALUE_END,
};
@@ -479,35 +606,57 @@ static const struct mrs_field_value id_aa64dfr0_debugver[] = {
MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8p2"),
MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_4, "Debugv8p4"),
MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_8, "Debugv8p8"),
+ MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_9, "Debugv8p9"),
MRS_FIELD_VALUE_END,
};
static const struct mrs_field id_aa64dfr0_fields[] = {
- MRS_FIELD(ID_AA64DFR0, HPMN0, false, MRS_EXACT, id_aa64dfr0_hpmn0),
- MRS_FIELD(ID_AA64DFR0, BRBE, false, MRS_EXACT, id_aa64dfr0_brbe),
- MRS_FIELD(ID_AA64DFR0, MTPMU, true, MRS_EXACT, id_aa64dfr0_mtpmu),
- MRS_FIELD(ID_AA64DFR0, TraceBuffer, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64DFR0, HPMN0, false, MRS_LOWER, 0, id_aa64dfr0_hpmn0),
+ MRS_FIELD(ID_AA64DFR0, BRBE, false, MRS_LOWER, 0, id_aa64dfr0_brbe),
+ MRS_FIELD(ID_AA64DFR0, MTPMU, true, MRS_LOWER, 0, id_aa64dfr0_mtpmu),
+ MRS_FIELD(ID_AA64DFR0, TraceBuffer, false, MRS_LOWER, 0,
id_aa64dfr0_tracebuffer),
- MRS_FIELD(ID_AA64DFR0, TraceFilt, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64DFR0, TraceFilt, false, MRS_LOWER, 0,
id_aa64dfr0_tracefilt),
- MRS_FIELD(ID_AA64DFR0, DoubleLock, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64DFR0, DoubleLock, false, MRS_LOWER, 0,
id_aa64dfr0_doublelock),
- MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver),
- MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_LOWER, 0, id_aa64dfr0_pmsver),
+ MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_LOWER, 0,
id_aa64dfr0_ctx_cmps),
- MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_LOWER, id_aa64dfr0_wrps),
- MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps),
- MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver),
- MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64dfr0_wrps),
+ MRS_FIELD(ID_AA64DFR0, PMSS, false, MRS_LOWER, 0, id_aa64dfr0_pmss),
+ MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64dfr0_brps),
+ MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_LOWER, 0, id_aa64dfr0_pmuver),
+ MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_LOWER, 0,
id_aa64dfr0_tracever),
- MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6),
+ MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_LOWER | MRS_SAFE(0x6), 0,
id_aa64dfr0_debugver),
MRS_FIELD_END,
};
/* ID_AA64DFR1_EL1 */
+static const struct mrs_field_value id_aa64dfr1_dpfzs[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR1, DPFZS, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64dfr1_pmicntr[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR1, PMICNTR, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64dfr1_spmu[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR1, SPMU, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field id_aa64dfr1_fields[] = {
+ MRS_FIELD(ID_AA64DFR1, DPFZS, false, MRS_LOWER, 0, id_aa64dfr1_dpfzs),
+	MRS_FIELD(ID_AA64DFR1, PMICNTR, false, MRS_LOWER, 0,
+	    id_aa64dfr1_pmicntr),
+ MRS_FIELD(ID_AA64DFR1, SPMU, false, MRS_LOWER, 0, id_aa64dfr1_spmu),
MRS_FIELD_END,
};
@@ -664,34 +813,34 @@ static const struct mrs_field_hwcap id_aa64isar0_aes_caps[] = {
};
static const struct mrs_field id_aa64isar0_fields[] = {
- MRS_FIELD_HWCAP(ID_AA64ISAR0, RNDR, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, RNDR, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar0_rndr, id_aa64isar0_rndr_caps),
- MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_EXACT, id_aa64isar0_tlb),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, TS, false, MRS_LOWER, id_aa64isar0_ts,
- id_aa64isar0_ts_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, FHM, false, MRS_LOWER, id_aa64isar0_fhm,
- id_aa64isar0_fhm_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp,
- id_aa64isar0_dp_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4,
- id_aa64isar0_sm4_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3,
- id_aa64isar0_sm3_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3,
- id_aa64isar0_sha3_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm,
- id_aa64isar0_rdm_caps),
- MRS_FIELD(ID_AA64ISAR0, TME, false, MRS_EXACT, id_aa64isar0_tme),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, Atomic, false, MRS_LOWER,
+ MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_LOWER, 0, id_aa64isar0_tlb),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, TS, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_ts, id_aa64isar0_ts_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, FHM, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_fhm, id_aa64isar0_fhm_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, DP, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_dp, id_aa64isar0_dp_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, SM4, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_sm4, id_aa64isar0_sm4_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, SM3, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_sm3, id_aa64isar0_sm3_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA3, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_sha3, id_aa64isar0_sha3_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, RDM, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_rdm, id_aa64isar0_rdm_caps),
+ MRS_FIELD(ID_AA64ISAR0, TME, false, MRS_LOWER, 0, id_aa64isar0_tme),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, Atomic, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar0_atomic, id_aa64isar0_atomic_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, CRC32, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, CRC32, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar0_crc32, id_aa64isar0_crc32_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2,
- id_aa64isar0_sha2_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA1, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA2, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_sha2, id_aa64isar0_sha2_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA1, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar0_sha1, id_aa64isar0_sha1_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes,
- id_aa64isar0_aes_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR0, AES, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar0_aes, id_aa64isar0_aes_caps),
MRS_FIELD_END,
};
@@ -742,7 +891,8 @@ static const struct mrs_field_hwcap id_aa64isar1_bf16_caps[] = {
static const struct mrs_field_value id_aa64isar1_specres[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_NONE, ""),
- MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_IMPL, "PredInv"),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_8_5, "PredInv v8.5"),
+ MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_8_9, "PredInv v8.9"),
MRS_FIELD_VALUE_END,
};
@@ -865,41 +1015,66 @@ static const struct mrs_field_hwcap id_aa64isar1_dpb_caps[] = {
};
static const struct mrs_field id_aa64isar1_fields[] = {
- MRS_FIELD(ID_AA64ISAR1, LS64, false, MRS_EXACT, id_aa64isar1_ls64),
- MRS_FIELD(ID_AA64ISAR1, XS, false, MRS_EXACT, id_aa64isar1_xs),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, I8MM, false, MRS_LOWER,
+ MRS_FIELD(ID_AA64ISAR1, LS64, false, MRS_LOWER, 0, id_aa64isar1_ls64),
+ MRS_FIELD(ID_AA64ISAR1, XS, false, MRS_LOWER, 0, id_aa64isar1_xs),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, I8MM, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar1_i8mm, id_aa64isar1_i8mm_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, DGH, false, MRS_LOWER, id_aa64isar1_dgh,
- id_aa64isar1_dgh_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, BF16, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, DGH, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar1_dgh, id_aa64isar1_dgh_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, BF16, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar1_bf16, id_aa64isar1_bf16_caps),
- MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_LOWER, 0,
id_aa64isar1_specres),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, SB, false, MRS_LOWER, id_aa64isar1_sb,
- id_aa64isar1_sb_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, SB, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar1_sb, id_aa64isar1_sb_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar1_frintts, id_aa64isar1_frintts_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi,
- id_aa64isar1_gpi_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa,
- id_aa64isar1_gpa_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_LOWER, 0,
+ id_aa64isar1_gpi, id_aa64isar1_gpi_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_LOWER, 0,
+ id_aa64isar1_gpa, id_aa64isar1_gpa_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar1_lrcpc, id_aa64isar1_lrcpc_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar1_fcma, id_aa64isar1_fcma_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER,
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, MRS_USERSPACE,
id_aa64isar1_jscvt, id_aa64isar1_jscvt_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api,
- id_aa64isar1_api_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa,
- id_aa64isar1_apa_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb,
- id_aa64isar1_dpb_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_LOWER, 0,
+ id_aa64isar1_api, id_aa64isar1_api_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_LOWER, 0,
+ id_aa64isar1_apa, id_aa64isar1_apa_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar1_dpb, id_aa64isar1_dpb_caps),
MRS_FIELD_END,
};
/* ID_AA64ISAR2_EL1 */
+static const struct mrs_field_value id_aa64isar2_ats1a[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, ATS1A, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64isar2_cssc[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, CSSC, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64isar2_rprfm[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, RPRFM, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64isar2_prfmslc[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, PRFMSLC, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64isar2_clrbhb[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, CLRBHB, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field_value id_aa64isar2_pac_frac[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, PAC_frac, NONE, IMPL),
MRS_FIELD_VALUE_END,
@@ -946,22 +1121,39 @@ static const struct mrs_field_value id_aa64isar2_rpres[] = {
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64isar2_rpres_caps[] = {
+ MRS_HWCAP(2, HWCAP2_RPRES, ID_AA64ISAR2_RPRES_IMPL),
+ MRS_HWCAP_END
+};
+
static const struct mrs_field_value id_aa64isar2_wfxt[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, WFxT, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64isar2_wfxt_caps[] = {
+ MRS_HWCAP(2, HWCAP2_WFXT, ID_AA64ISAR2_WFxT_IMPL),
+ MRS_HWCAP_END
+};
+
static const struct mrs_field id_aa64isar2_fields[] = {
- MRS_FIELD(ID_AA64ISAR2, PAC_frac, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64ISAR2, ATS1A, false, MRS_LOWER, 0, id_aa64isar2_ats1a),
+ MRS_FIELD(ID_AA64ISAR2, CSSC, false, MRS_LOWER, 0, id_aa64isar2_cssc),
+ MRS_FIELD(ID_AA64ISAR2, RPRFM, false, MRS_LOWER, 0, id_aa64isar2_rprfm),
+	MRS_FIELD(ID_AA64ISAR2, PRFMSLC, false, MRS_LOWER, 0,
+	    id_aa64isar2_prfmslc),
+	MRS_FIELD(ID_AA64ISAR2, CLRBHB, false, MRS_LOWER, 0,
+	    id_aa64isar2_clrbhb),
+ MRS_FIELD(ID_AA64ISAR2, PAC_frac, false, MRS_LOWER, 0,
id_aa64isar2_pac_frac),
- MRS_FIELD(ID_AA64ISAR2, BC, false, MRS_EXACT, id_aa64isar2_bc),
- MRS_FIELD(ID_AA64ISAR2, MOPS, false, MRS_EXACT, id_aa64isar2_mops),
- MRS_FIELD_HWCAP(ID_AA64ISAR2, APA3, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64ISAR2, BC, false, MRS_LOWER, 0, id_aa64isar2_bc),
+ MRS_FIELD(ID_AA64ISAR2, MOPS, false, MRS_LOWER, 0, id_aa64isar2_mops),
+ MRS_FIELD_HWCAP(ID_AA64ISAR2, APA3, false, MRS_LOWER, 0,
id_aa64isar2_apa3, id_aa64isar2_apa3_caps),
- MRS_FIELD_HWCAP(ID_AA64ISAR2, GPA3, false, MRS_EXACT,
+ MRS_FIELD_HWCAP(ID_AA64ISAR2, GPA3, false, MRS_LOWER, 0,
id_aa64isar2_gpa3, id_aa64isar2_gpa3_caps),
- MRS_FIELD(ID_AA64ISAR2, RPRES, false, MRS_EXACT, id_aa64isar2_rpres),
- MRS_FIELD(ID_AA64ISAR2, WFxT, false, MRS_EXACT, id_aa64isar2_wfxt),
+ MRS_FIELD_HWCAP(ID_AA64ISAR2, RPRES, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64isar2_rpres, id_aa64isar2_rpres_caps),
+ MRS_FIELD_HWCAP(ID_AA64ISAR2, WFxT, false, MRS_LOWER, 0,
+ id_aa64isar2_wfxt, id_aa64isar2_wfxt_caps),
MRS_FIELD_END,
};
@@ -974,7 +1166,9 @@ static const struct mrs_field_value id_aa64mmfr0_ecv[] = {
};
static const struct mrs_field_value id_aa64mmfr0_fgt[] = {
- MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, FGT, NONE, IMPL),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_FGT_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_FGT_8_6, "FGT v8.6"),
+ MRS_FIELD_VALUE(ID_AA64MMFR0_FGT_8_9, "FGT v8.9"),
MRS_FIELD_VALUE_END,
};
@@ -1056,33 +1250,41 @@ static const struct mrs_field_value id_aa64mmfr0_parange[] = {
};
static const struct mrs_field id_aa64mmfr0_fields[] = {
- MRS_FIELD(ID_AA64MMFR0, ECV, false, MRS_EXACT, id_aa64mmfr0_ecv),
- MRS_FIELD(ID_AA64MMFR0, FGT, false, MRS_EXACT, id_aa64mmfr0_fgt),
- MRS_FIELD(ID_AA64MMFR0, ExS, false, MRS_EXACT, id_aa64mmfr0_exs),
- MRS_FIELD(ID_AA64MMFR0, TGran4_2, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, ECV, false, MRS_LOWER, 0, id_aa64mmfr0_ecv),
+ MRS_FIELD(ID_AA64MMFR0, FGT, false, MRS_LOWER, 0, id_aa64mmfr0_fgt),
+ MRS_FIELD(ID_AA64MMFR0, ExS, false, MRS_LOWER, 0, id_aa64mmfr0_exs),
+ MRS_FIELD(ID_AA64MMFR0, TGran4_2, false, MRS_LOWER, 0,
id_aa64mmfr0_tgran4_2),
- MRS_FIELD(ID_AA64MMFR0, TGran64_2, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, TGran64_2, false, MRS_LOWER, 0,
id_aa64mmfr0_tgran64_2),
- MRS_FIELD(ID_AA64MMFR0, TGran16_2, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, TGran16_2, false, MRS_LOWER, 0,
id_aa64mmfr0_tgran16_2),
- MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4),
- MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_LOWER, 0,
+ id_aa64mmfr0_tgran4),
+ MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_LOWER, 0,
id_aa64mmfr0_tgran64),
- MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_LOWER, 0,
id_aa64mmfr0_tgran16),
- MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_LOWER, 0,
id_aa64mmfr0_bigendel0),
- MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
- MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
- MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_LOWER, 0,
+ id_aa64mmfr0_snsmem),
+ MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_LOWER, 0,
+ id_aa64mmfr0_bigend),
+ MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_LOWER, 0,
id_aa64mmfr0_asidbits),
- MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_LOWER, 0,
id_aa64mmfr0_parange),
MRS_FIELD_END,
};
/* ID_AA64MMFR1_EL1 */
+static const struct mrs_field_value id_aa64mmfr1_ecbhb[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, ECBHB, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field_value id_aa64mmfr1_cmovw[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, CMOVW, NONE, IMPL),
MRS_FIELD_VALUE_END,
@@ -1103,13 +1305,20 @@ static const struct mrs_field_value id_aa64mmfr1_afp[] = {
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64mmfr1_afp_caps[] = {
+ MRS_HWCAP(2, HWCAP2_AFP, ID_AA64MMFR1_AFP_IMPL),
+ MRS_HWCAP_END
+};
+
static const struct mrs_field_value id_aa64mmfr1_hcx[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, HCX, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
static const struct mrs_field_value id_aa64mmfr1_ets[] = {
- MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, ETS, NONE, IMPL),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_ETS_NONE, ""),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_ETS_NONE2, ""),
+ MRS_FIELD_VALUE(ID_AA64MMFR1_ETS_IMPL, "ETS2"),
MRS_FIELD_VALUE_END,
};
@@ -1166,23 +1375,34 @@ static const struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
};
static const struct mrs_field id_aa64mmfr1_fields[] = {
- MRS_FIELD(ID_AA64MMFR1, CMOVW, false, MRS_EXACT, id_aa64mmfr1_cmovw),
- MRS_FIELD(ID_AA64MMFR1, TIDCP1, false, MRS_EXACT, id_aa64mmfr1_tidcp1),
- MRS_FIELD(ID_AA64MMFR1, nTLBPA, false, MRS_EXACT, id_aa64mmfr1_ntlbpa),
- MRS_FIELD(ID_AA64MMFR1, AFP, false, MRS_EXACT, id_aa64mmfr1_afp),
- MRS_FIELD(ID_AA64MMFR1, HCX, false, MRS_EXACT, id_aa64mmfr1_hcx),
- MRS_FIELD(ID_AA64MMFR1, ETS, false, MRS_EXACT, id_aa64mmfr1_ets),
- MRS_FIELD(ID_AA64MMFR1, TWED, false, MRS_EXACT, id_aa64mmfr1_twed),
- MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
- MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR1, ECBHB, false, MRS_LOWER, 0, id_aa64mmfr1_ecbhb),
+ MRS_FIELD(ID_AA64MMFR1, CMOVW, false, MRS_LOWER, 0, id_aa64mmfr1_cmovw),
+ MRS_FIELD(ID_AA64MMFR1, TIDCP1, false, MRS_LOWER, 0,
+ id_aa64mmfr1_tidcp1),
+ MRS_FIELD(ID_AA64MMFR1, nTLBPA, false, MRS_LOWER, 0,
+ id_aa64mmfr1_ntlbpa),
+ MRS_FIELD_HWCAP(ID_AA64MMFR1, AFP, false, MRS_LOWER, 0,
+ id_aa64mmfr1_afp, id_aa64mmfr1_afp_caps),
+ MRS_FIELD(ID_AA64MMFR1, HCX, false, MRS_LOWER, 0, id_aa64mmfr1_hcx),
+ MRS_FIELD(ID_AA64MMFR1, ETS, false, MRS_LOWER, 0, id_aa64mmfr1_ets),
+ MRS_FIELD(ID_AA64MMFR1, TWED, false, MRS_LOWER, 0, id_aa64mmfr1_twed),
+ MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_LOWER, 0, id_aa64mmfr1_xnx),
+ /*
+ * SpecSEI != 0 indicates the CPU might generate an external abort
+	 * under speculation, while 0 indicates it can't happen. It's safer
+	 * to incorrectly indicate it might happen when it can't than to
+	 * say it can't happen when it could. As such, use the largest
+	 * value found in the system.
+ */
+ MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_HIGHER, 0,
id_aa64mmfr1_specsei),
- MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
- MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
- MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
- MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
- MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_LOWER, 0, id_aa64mmfr1_pan),
+ MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_LOWER, 0, id_aa64mmfr1_lo),
+ MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_LOWER, 0, id_aa64mmfr1_hpds),
+ MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_LOWER, 0, id_aa64mmfr1_vh),
+ MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_LOWER, 0,
id_aa64mmfr1_vmidbits),
- MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
+	MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_LOWER, 0,
+	    id_aa64mmfr1_hafdbs),
MRS_FIELD_END,
};
@@ -1276,39 +1496,83 @@ static const struct mrs_field_value id_aa64mmfr2_cnp[] = {
};
static const struct mrs_field id_aa64mmfr2_fields[] = {
- MRS_FIELD(ID_AA64MMFR2, E0PD, false, MRS_EXACT, id_aa64mmfr2_e0pd),
- MRS_FIELD(ID_AA64MMFR2, EVT, false, MRS_EXACT, id_aa64mmfr2_evt),
- MRS_FIELD(ID_AA64MMFR2, BBM, false, MRS_EXACT, id_aa64mmfr2_bbm),
- MRS_FIELD(ID_AA64MMFR2, TTL, false, MRS_EXACT, id_aa64mmfr2_ttl),
- MRS_FIELD(ID_AA64MMFR2, FWB, false, MRS_EXACT, id_aa64mmfr2_fwb),
- MRS_FIELD(ID_AA64MMFR2, IDS, false, MRS_EXACT, id_aa64mmfr2_ids),
- MRS_FIELD_HWCAP(ID_AA64MMFR2, AT, false, MRS_LOWER, id_aa64mmfr2_at,
- id_aa64mmfr2_at_caps),
- MRS_FIELD(ID_AA64MMFR2, ST, false, MRS_EXACT, id_aa64mmfr2_st),
- MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv),
- MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx),
- MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR2, E0PD, false, MRS_LOWER, 0, id_aa64mmfr2_e0pd),
+ MRS_FIELD(ID_AA64MMFR2, EVT, false, MRS_LOWER, 0, id_aa64mmfr2_evt),
+ MRS_FIELD(ID_AA64MMFR2, BBM, false, MRS_LOWER, 0, id_aa64mmfr2_bbm),
+ MRS_FIELD(ID_AA64MMFR2, TTL, false, MRS_LOWER, 0, id_aa64mmfr2_ttl),
+ MRS_FIELD(ID_AA64MMFR2, FWB, false, MRS_LOWER, 0, id_aa64mmfr2_fwb),
+ MRS_FIELD(ID_AA64MMFR2, IDS, false, MRS_LOWER, 0, id_aa64mmfr2_ids),
+ MRS_FIELD_HWCAP(ID_AA64MMFR2, AT, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64mmfr2_at, id_aa64mmfr2_at_caps),
+ MRS_FIELD(ID_AA64MMFR2, ST, false, MRS_LOWER, 0, id_aa64mmfr2_st),
+ MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_LOWER, 0, id_aa64mmfr2_nv),
+ MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_LOWER, 0, id_aa64mmfr2_ccidx),
+ MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_LOWER, 0,
id_aa64mmfr2_varange),
- MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb),
- MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm),
- MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao),
- MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp),
+ MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_LOWER, 0, id_aa64mmfr2_iesb),
+ MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_LOWER, 0, id_aa64mmfr2_lsm),
+ MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_LOWER, 0, id_aa64mmfr2_uao),
+ MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_LOWER, 0, id_aa64mmfr2_cnp),
MRS_FIELD_END,
};
-#ifdef NOTYET
/* ID_AA64MMFR3_EL1 */
static const struct mrs_field_value id_aa64mmfr3_spec_fpacc[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, Spec_FPACC, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_value id_aa64mmfr3_aderr[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, ADERR, NONE, SOME),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_sderr[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, SDERR, NONE, ALL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_anerr[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, ANERR, NONE, SOME),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_snerr[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, SNERR, NONE, ALL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field_value id_aa64mmfr3_mec[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, MEC, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_value id_aa64mmfr3_aie[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, AIE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_s2poe[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, S2POE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_s1poe[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, S1POE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_s2pie[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, S2PIE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64mmfr3_s1pie[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, S1PIE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field_value id_aa64mmfr3_sctlrx[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, SCTLRX, NONE, IMPL),
MRS_FIELD_VALUE_END,
@@ -1320,11 +1584,21 @@ static const struct mrs_field_value id_aa64mmfr3_tcrx[] = {
};
static const struct mrs_field id_aa64mmfr3_fields[] = {
- MRS_FIELD(ID_AA64MMFR3, Spec_FPACC, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64MMFR3, Spec_FPACC, false, MRS_LOWER, 0,
id_aa64mmfr3_spec_fpacc),
- MRS_FIELD(ID_AA64MMFR3, MEC, false, MRS_EXACT, id_aa64mmfr3_mec),
- MRS_FIELD(ID_AA64MMFR3, SCTLRX, false, MRS_EXACT, id_aa64mmfr3_sctlrx),
- MRS_FIELD(ID_AA64MMFR3, TCRX, false, MRS_EXACT, id_aa64mmfr3_tcrx),
+ MRS_FIELD(ID_AA64MMFR3, ADERR, false, MRS_LOWER, 0, id_aa64mmfr3_aderr),
+ MRS_FIELD(ID_AA64MMFR3, SDERR, false, MRS_LOWER, 0, id_aa64mmfr3_sderr),
+ MRS_FIELD(ID_AA64MMFR3, ANERR, false, MRS_LOWER, 0, id_aa64mmfr3_anerr),
+ MRS_FIELD(ID_AA64MMFR3, SNERR, false, MRS_LOWER, 0, id_aa64mmfr3_snerr),
+ MRS_FIELD(ID_AA64MMFR3, MEC, false, MRS_LOWER, 0, id_aa64mmfr3_mec),
+ MRS_FIELD(ID_AA64MMFR3, AIE, false, MRS_LOWER, 0, id_aa64mmfr3_aie),
+ MRS_FIELD(ID_AA64MMFR3, S2POE, false, MRS_LOWER, 0, id_aa64mmfr3_s2poe),
+ MRS_FIELD(ID_AA64MMFR3, S1POE, false, MRS_LOWER, 0, id_aa64mmfr3_s1poe),
+ MRS_FIELD(ID_AA64MMFR3, S2PIE, false, MRS_LOWER, 0, id_aa64mmfr3_s2pie),
+ MRS_FIELD(ID_AA64MMFR3, S1PIE, false, MRS_LOWER, 0, id_aa64mmfr3_s1pie),
+ MRS_FIELD(ID_AA64MMFR3, SCTLRX, false, MRS_LOWER, 0,
+ id_aa64mmfr3_sctlrx),
+ MRS_FIELD(ID_AA64MMFR3, TCRX, false, MRS_LOWER, 0, id_aa64mmfr3_tcrx),
MRS_FIELD_END,
};
@@ -1333,7 +1607,6 @@ static const struct mrs_field id_aa64mmfr3_fields[] = {
static const struct mrs_field id_aa64mmfr4_fields[] = {
MRS_FIELD_END,
};
-#endif
/* ID_AA64PFR0_EL1 */
@@ -1389,18 +1662,16 @@ static const struct mrs_field_value id_aa64pfr0_sve[] = {
MRS_FIELD_VALUE_END,
};
-#if 0
-/* Enable when we add SVE support */
static const struct mrs_field_hwcap id_aa64pfr0_sve_caps[] = {
MRS_HWCAP(1, HWCAP_SVE, ID_AA64PFR0_SVE_IMPL),
MRS_HWCAP_END
};
-#endif
static const struct mrs_field_value id_aa64pfr0_ras[] = {
MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""),
MRS_FIELD_VALUE(ID_AA64PFR0_RAS_IMPL, "RAS"),
MRS_FIELD_VALUE(ID_AA64PFR0_RAS_8_4, "RAS v8.4"),
+ MRS_FIELD_VALUE(ID_AA64PFR0_RAS_8_9, "RAS v8.9"),
MRS_FIELD_VALUE_END,
};
@@ -1461,30 +1732,58 @@ static const struct mrs_field_value id_aa64pfr0_el0[] = {
};
static const struct mrs_field id_aa64pfr0_fields[] = {
- MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3),
- MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2),
- MRS_FIELD(ID_AA64PFR0, RME, false, MRS_EXACT, id_aa64pfr0_rme),
- MRS_FIELD_HWCAP(ID_AA64PFR0, DIT, false, MRS_LOWER, id_aa64pfr0_dit,
- id_aa64pfr0_dit_caps),
- MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu),
- MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam),
- MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2),
- MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve),
- MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras),
- MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic),
- MRS_FIELD_HWCAP(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER,
+ MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_LOWER, 0, id_aa64pfr0_csv3),
+ MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_LOWER, 0, id_aa64pfr0_csv2),
+ MRS_FIELD(ID_AA64PFR0, RME, false, MRS_LOWER, 0, id_aa64pfr0_rme),
+ MRS_FIELD_HWCAP(ID_AA64PFR0, DIT, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64pfr0_dit, id_aa64pfr0_dit_caps),
+ MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_LOWER, 0, id_aa64pfr0_amu),
+ MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_LOWER, 0, id_aa64pfr0_mpam),
+ MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_LOWER, 0, id_aa64pfr0_sel2),
+ MRS_FIELD_HWCAP(ID_AA64PFR0, SVE, false, MRS_LOWER,
+ MRS_FREEBSD, id_aa64pfr0_sve, id_aa64pfr0_sve_caps),
+ MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_LOWER, 0, id_aa64pfr0_ras),
+ MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_LOWER, 0, id_aa64pfr0_gic),
+ MRS_FIELD_HWCAP(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, MRS_USERSPACE,
id_aa64pfr0_advsimd, id_aa64pfr0_advsimd_caps),
- MRS_FIELD_HWCAP(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp,
- id_aa64pfr0_fp_caps),
- MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3),
- MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2),
- MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1),
- MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0),
+ MRS_FIELD_HWCAP(ID_AA64PFR0, FP, true, MRS_LOWER, MRS_USERSPACE,
+ id_aa64pfr0_fp, id_aa64pfr0_fp_caps),
+ MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_LOWER, 0, id_aa64pfr0_el3),
+ MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_LOWER, 0, id_aa64pfr0_el2),
+ MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64pfr0_el1),
+ MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64pfr0_el0),
MRS_FIELD_END,
};
/* ID_AA64PFR1_EL1 */
+static const struct mrs_field_value id_aa64pfr1_pfar[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, PFAR, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64pfr1_df2[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, DF2, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64pfr1_mtex[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, MTEX, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64pfr1_the[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, THE, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
+static const struct mrs_field_value id_aa64pfr1_mtefrac[] = {
+ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, MTE_frac, NONE, IMPL),
+ MRS_FIELD_VALUE_END,
+};
+
static const struct mrs_field_value id_aa64pfr1_nmi[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, NMI, NONE, IMPL),
MRS_FIELD_VALUE_END,
@@ -1553,31 +1852,34 @@ static const struct mrs_field_hwcap id_aa64pfr1_bt_caps[] = {
};
static const struct mrs_field id_aa64pfr1_fields[] = {
- MRS_FIELD(ID_AA64PFR1, NMI, false, MRS_EXACT, id_aa64pfr1_nmi),
- MRS_FIELD(ID_AA64PFR1, CSV2_frac, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64PFR1, PFAR, false, MRS_LOWER, 0, id_aa64pfr1_pfar),
+ MRS_FIELD(ID_AA64PFR1, DF2, false, MRS_LOWER, 0, id_aa64pfr1_df2),
+ MRS_FIELD(ID_AA64PFR1, MTEX, false, MRS_LOWER, 0, id_aa64pfr1_mtex),
+ MRS_FIELD(ID_AA64PFR1, THE, false, MRS_LOWER, 0, id_aa64pfr1_the),
+	MRS_FIELD(ID_AA64PFR1, MTE_frac, false, MRS_LOWER, 0,
+	    id_aa64pfr1_mtefrac),
+ MRS_FIELD(ID_AA64PFR1, NMI, false, MRS_LOWER, 0, id_aa64pfr1_nmi),
+ MRS_FIELD(ID_AA64PFR1, CSV2_frac, false, MRS_LOWER, 0,
id_aa64pfr1_csv2_frac),
- MRS_FIELD(ID_AA64PFR1, RNDR_trap, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64PFR1, RNDR_trap, false, MRS_LOWER, 0,
id_aa64pfr1_rndr_trap),
- MRS_FIELD(ID_AA64PFR1, SME, false, MRS_EXACT, id_aa64pfr1_sme),
- MRS_FIELD(ID_AA64PFR1, MPAM_frac, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64PFR1, SME, false, MRS_LOWER, 0, id_aa64pfr1_sme),
+ MRS_FIELD(ID_AA64PFR1, MPAM_frac, false, MRS_LOWER, 0,
id_aa64pfr1_mpam_frac),
- MRS_FIELD(ID_AA64PFR1, RAS_frac, false, MRS_EXACT,
+ MRS_FIELD(ID_AA64PFR1, RAS_frac, false, MRS_LOWER, 0,
id_aa64pfr1_ras_frac),
- MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte),
- MRS_FIELD_HWCAP(ID_AA64PFR1, SSBS, false, MRS_LOWER, id_aa64pfr1_ssbs,
- id_aa64pfr1_ssbs_caps),
- MRS_FIELD_HWCAP_SPLIT(ID_AA64PFR1, BT, false, MRS_LOWER, MRS_EXACT,
- id_aa64pfr1_bt, id_aa64pfr1_bt_caps),
+ MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_LOWER, 0, id_aa64pfr1_mte),
+ MRS_FIELD_HWCAP(ID_AA64PFR1, SSBS, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64pfr1_ssbs, id_aa64pfr1_ssbs_caps),
+ MRS_FIELD_HWCAP(ID_AA64PFR1, BT, false, MRS_LOWER,
+ MRS_FREEBSD, id_aa64pfr1_bt, id_aa64pfr1_bt_caps),
MRS_FIELD_END,
};
-#ifdef NOTYET
/* ID_AA64PFR2_EL1 */
static const struct mrs_field id_aa64pfr2_fields[] = {
MRS_FIELD_END,
};
-#endif
/* ID_AA64ZFR0_EL1 */
@@ -1586,59 +1888,117 @@ static const struct mrs_field_value id_aa64zfr0_f64mm[] = {
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_f64mm_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVEF64MM, ID_AA64ZFR0_F64MM_IMPL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_f32mm[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, F32MM, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_f32mm_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVEF32MM, ID_AA64ZFR0_F32MM_IMPL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_i8mm[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, I8MM, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_i8mm_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVEI8MM, ID_AA64ZFR0_I8MM_IMPL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_sm4[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, SM4, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_sm4_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVESM4, ID_AA64ZFR0_SM4_IMPL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_sha3[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, SHA3, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_sha3_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVESHA3, ID_AA64ZFR0_SHA3_IMPL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_bf16[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, BF16, NONE, BASE),
MRS_FIELD_VALUE(ID_AA64ZFR0_BF16_EBF, "BF16+EBF"),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_bf16_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVEBF16, ID_AA64ZFR0_BF16_BASE),
+ MRS_HWCAP(2, HWCAP2_SVE_EBF16, ID_AA64ZFR0_BF16_EBF),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_bitperm[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, BitPerm, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_bitperm_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVEBITPERM, ID_AA64ZFR0_BitPerm_IMPL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_aes[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, AES, NONE, BASE),
MRS_FIELD_VALUE(ID_AA64ZFR0_AES_PMULL, "AES+PMULL"),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_aes_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVEAES, ID_AA64ZFR0_AES_BASE),
+ MRS_HWCAP(2, HWCAP2_SVEPMULL, ID_AA64ZFR0_AES_PMULL),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field_value id_aa64zfr0_svever[] = {
MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE1, "SVE1"),
MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE2, "SVE2"),
+ MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE2P1, "SVE2P1"),
MRS_FIELD_VALUE_END,
};
+static const struct mrs_field_hwcap id_aa64zfr0_svever_caps[] = {
+ MRS_HWCAP(2, HWCAP2_SVE2, ID_AA64ZFR0_SVEver_SVE2),
+ MRS_HWCAP(2, HWCAP2_SVE2P1, ID_AA64ZFR0_SVEver_SVE2P1),
+ MRS_HWCAP_END,
+};
+
static const struct mrs_field id_aa64zfr0_fields[] = {
- MRS_FIELD(ID_AA64ZFR0, F64MM, false, MRS_EXACT, id_aa64zfr0_f64mm),
- MRS_FIELD(ID_AA64ZFR0, F32MM, false, MRS_EXACT, id_aa64zfr0_f32mm),
- MRS_FIELD(ID_AA64ZFR0, I8MM, false, MRS_EXACT, id_aa64zfr0_i8mm),
- MRS_FIELD(ID_AA64ZFR0, SM4, false, MRS_EXACT, id_aa64zfr0_sm4),
- MRS_FIELD(ID_AA64ZFR0, SHA3, false, MRS_EXACT, id_aa64zfr0_sha3),
- MRS_FIELD(ID_AA64ZFR0, BF16, false, MRS_EXACT, id_aa64zfr0_bf16),
- MRS_FIELD(ID_AA64ZFR0, BitPerm, false, MRS_EXACT, id_aa64zfr0_bitperm),
- MRS_FIELD(ID_AA64ZFR0, AES, false, MRS_EXACT, id_aa64zfr0_aes),
- MRS_FIELD(ID_AA64ZFR0, SVEver, false, MRS_EXACT, id_aa64zfr0_svever),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, F64MM, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_f64mm, id_aa64zfr0_f64mm_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, F32MM, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_f32mm, id_aa64zfr0_f32mm_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, I8MM, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_i8mm, id_aa64zfr0_i8mm_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, SM4, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_sm4, id_aa64zfr0_sm4_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, SHA3, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_sha3, id_aa64zfr0_sha3_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, BF16, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_bf16, id_aa64zfr0_bf16_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, BitPerm, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_bitperm, id_aa64zfr0_bitperm_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, AES, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_aes, id_aa64zfr0_aes_caps),
+ MRS_FIELD_HWCAP(ID_AA64ZFR0, SVEver, false, MRS_LOWER, MRS_USERSPACE,
+ id_aa64zfr0_svever, id_aa64zfr0_svever_caps),
MRS_FIELD_END,
};
@@ -1703,17 +2063,19 @@ static const struct mrs_field_value id_isar5_sevl[] = {
};
static const struct mrs_field id_isar5_fields[] = {
- MRS_FIELD(ID_ISAR5, VCMA, false, MRS_LOWER, id_isar5_vcma),
- MRS_FIELD(ID_ISAR5, RDM, false, MRS_LOWER, id_isar5_rdm),
- MRS_FIELD_HWCAP(ID_ISAR5, CRC32, false, MRS_LOWER, id_isar5_crc32,
- id_isar5_crc32_caps),
- MRS_FIELD_HWCAP(ID_ISAR5, SHA2, false, MRS_LOWER, id_isar5_sha2,
- id_isar5_sha2_caps),
- MRS_FIELD_HWCAP(ID_ISAR5, SHA1, false, MRS_LOWER, id_isar5_sha1,
- id_isar5_sha1_caps),
- MRS_FIELD_HWCAP(ID_ISAR5, AES, false, MRS_LOWER, id_isar5_aes,
- id_isar5_aes_caps),
- MRS_FIELD(ID_ISAR5, SEVL, false, MRS_LOWER, id_isar5_sevl),
+	MRS_FIELD(ID_ISAR5, VCMA, false, MRS_LOWER, MRS_USERSPACE,
+ id_isar5_vcma),
+ MRS_FIELD(ID_ISAR5, RDM, false, MRS_LOWER, MRS_USERSPACE, id_isar5_rdm),
+ MRS_FIELD_HWCAP(ID_ISAR5, CRC32, false, MRS_LOWER, MRS_USERSPACE,
+ id_isar5_crc32, id_isar5_crc32_caps),
+ MRS_FIELD_HWCAP(ID_ISAR5, SHA2, false, MRS_LOWER, MRS_USERSPACE,
+ id_isar5_sha2, id_isar5_sha2_caps),
+ MRS_FIELD_HWCAP(ID_ISAR5, SHA1, false, MRS_LOWER, MRS_USERSPACE,
+ id_isar5_sha1, id_isar5_sha1_caps),
+ MRS_FIELD_HWCAP(ID_ISAR5, AES, false, MRS_LOWER, MRS_USERSPACE,
+ id_isar5_aes, id_isar5_aes_caps),
+ MRS_FIELD(ID_ISAR5, SEVL, false, MRS_LOWER, MRS_USERSPACE,
+ id_isar5_sevl),
MRS_FIELD_END,
};
@@ -1766,14 +2128,19 @@ static const struct mrs_field_value mvfr0_simdreg[] = {
};
static const struct mrs_field mvfr0_fields[] = {
- MRS_FIELD(MVFR0, FPRound, false, MRS_LOWER, mvfr0_fpround),
- MRS_FIELD(MVFR0, FPSqrt, false, MRS_LOWER, mvfr0_fpsqrt),
- MRS_FIELD(MVFR0, FPDivide, false, MRS_LOWER, mvfr0_fpdivide),
- MRS_FIELD(MVFR0, FPTrap, false, MRS_LOWER, mvfr0_fptrap),
- MRS_FIELD_HWCAP(MVFR0, FPDP, false, MRS_LOWER, mvfr0_fpdp,
- mvfr0_fpdp_caps),
- MRS_FIELD(MVFR0, FPSP, false, MRS_LOWER, mvfr0_fpsp),
- MRS_FIELD(MVFR0, SIMDReg, false, MRS_LOWER, mvfr0_simdreg),
+ MRS_FIELD(MVFR0, FPRound, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr0_fpround),
+ MRS_FIELD(MVFR0, FPSqrt, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr0_fpsqrt),
+ MRS_FIELD(MVFR0, FPDivide, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr0_fpdivide),
+ MRS_FIELD(MVFR0, FPTrap, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr0_fptrap),
+ MRS_FIELD_HWCAP(MVFR0, FPDP, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr0_fpdp, mvfr0_fpdp_caps),
+ MRS_FIELD(MVFR0, FPSP, false, MRS_LOWER, MRS_USERSPACE, mvfr0_fpsp),
+ MRS_FIELD(MVFR0, SIMDReg, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr0_simdreg),
MRS_FIELD_END,
};
@@ -1834,24 +2201,25 @@ static const struct mrs_field_value mvfr1_fpftz[] = {
};
static const struct mrs_field mvfr1_fields[] = {
- MRS_FIELD_HWCAP(MVFR1, SIMDFMAC, false, MRS_LOWER, mvfr1_simdfmac,
- mvfr1_simdfmac_caps),
- MRS_FIELD(MVFR1, FPHP, false, MRS_LOWER, mvfr1_fphp),
- MRS_FIELD(MVFR1, SIMDHP, false, MRS_LOWER, mvfr1_simdhp),
- MRS_FIELD(MVFR1, SIMDSP, false, MRS_LOWER, mvfr1_simdsp),
- MRS_FIELD(MVFR1, SIMDInt, false, MRS_LOWER, mvfr1_simdint),
- MRS_FIELD_HWCAP(MVFR1, SIMDLS, false, MRS_LOWER, mvfr1_simdls,
- mvfr1_simdls_caps),
- MRS_FIELD(MVFR1, FPDNaN, false, MRS_LOWER, mvfr1_fpdnan),
- MRS_FIELD(MVFR1, FPFtZ, false, MRS_LOWER, mvfr1_fpftz),
+ MRS_FIELD_HWCAP(MVFR1, SIMDFMAC, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr1_simdfmac, mvfr1_simdfmac_caps),
+ MRS_FIELD(MVFR1, FPHP, false, MRS_LOWER, MRS_USERSPACE, mvfr1_fphp),
+ MRS_FIELD(MVFR1, SIMDHP, false, MRS_LOWER, MRS_USERSPACE, mvfr1_simdhp),
+ MRS_FIELD(MVFR1, SIMDSP, false, MRS_LOWER, MRS_USERSPACE, mvfr1_simdsp),
+ MRS_FIELD(MVFR1, SIMDInt, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr1_simdint),
+ MRS_FIELD_HWCAP(MVFR1, SIMDLS, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr1_simdls, mvfr1_simdls_caps),
+ MRS_FIELD(MVFR1, FPDNaN, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr1_fpdnan),
+ MRS_FIELD(MVFR1, FPFtZ, false, MRS_LOWER, MRS_USERSPACE,
+ mvfr1_fpftz),
MRS_FIELD_END,
};
#endif /* COMPAT_FREEBSD32 */
struct mrs_user_reg {
- u_int reg;
- u_int CRm;
- u_int Op2;
+ u_int iss;
bool is64bit;
size_t offset;
const struct mrs_field *fields;
@@ -1859,9 +2227,7 @@ struct mrs_user_reg {
#define USER_REG(name, field_name, _is64bit) \
{ \
- .reg = name, \
- .CRm = name##_CRm, \
- .Op2 = name##_op2, \
+ .iss = name##_ISS, \
.offset = __offsetof(struct cpu_desc, field_name), \
.fields = field_name##_fields, \
.is64bit = _is64bit, \
@@ -1880,19 +2246,17 @@ static const struct mrs_user_reg user_regs[] = {
USER_REG(ID_AA64MMFR0_EL1, id_aa64mmfr0, true),
USER_REG(ID_AA64MMFR1_EL1, id_aa64mmfr1, true),
USER_REG(ID_AA64MMFR2_EL1, id_aa64mmfr2, true),
-#ifdef NOTYET
USER_REG(ID_AA64MMFR3_EL1, id_aa64mmfr3, true),
USER_REG(ID_AA64MMFR4_EL1, id_aa64mmfr4, true),
-#endif
USER_REG(ID_AA64PFR0_EL1, id_aa64pfr0, true),
USER_REG(ID_AA64PFR1_EL1, id_aa64pfr1, true),
-#ifdef NOTYET
USER_REG(ID_AA64PFR2_EL1, id_aa64pfr2, true),
-#endif
USER_REG(ID_AA64ZFR0_EL1, id_aa64zfr0, true),
+ USER_REG(CTR_EL0, ctr, true),
+
#ifdef COMPAT_FREEBSD32
USER_REG(ID_ISAR5_EL1, id_isar5, false),
@@ -1904,48 +2268,187 @@ static const struct mrs_user_reg user_regs[] = {
#define CPU_DESC_FIELD(desc, idx) \
*(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
-static int
-user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
- uint32_t esr)
+static bool
+user_ctr_has_neoverse_n1_1542419(uint32_t midr, uint64_t ctr)
+{
+ /* Skip non-Neoverse-N1 */
+ if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
+ CPU_PART_NEOVERSE_N1, 0, 0))
+ return (false);
+
+ switch (CPU_VAR(midr)) {
+ default:
+ break;
+ case 4:
+ /* Fixed in r4p1 */
+ if (CPU_REV(midr) > 0)
+ break;
+ /* FALLTHROUGH */
+ case 3:
+ /* If DIC is enabled (coherent icache) then we are affected */
+ return (CTR_DIC_VAL(ctr) != 0);
+ }
+
+ return (false);
+}
+
+static bool
+user_ctr_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+{
+ if (emulate_ctr)
+ return (true);
+
+ if (user_ctr_has_neoverse_n1_1542419(midr, READ_SPECIALREG(ctr_el0)))
+ return (true);
+
+ return (false);
+}
+
+static bool
+user_ctr_has_errata(const struct cpu_feat *feat __unused, u_int midr,
+ u_int **errata_list, u_int *errata_count)
+{
+ if (user_ctr_has_neoverse_n1_1542419(midr, READ_SPECIALREG(ctr_el0))) {
+ static u_int errata_id = 1542419;
+
+ *errata_list = &errata_id;
+ *errata_count = 1;
+ return (true);
+ }
+
+ return (false);
+}
+
+static void
+user_ctr_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status, u_int *errata_list, u_int errata_count)
+{
+ MPASS(emulate_ctr || errata_status != ERRATA_NONE);
+
+ /*
+ * The Errata Management Firmware Interface may incorrectly mark
+ * this as firmware mitigated. We should ignore that as there is
+ * a kernel component to the mitigation.
+ */
+ if (errata_status != ERRATA_NONE && PCPU_GET(cpuid) == 0 &&
+ cpu_feat_has_erratum(errata_list, errata_count, 1542419)) {
+ /* Clear fields we will change */
+ user_cpu_desc.ctr &= ~(CTR_DIC_MASK | CTR_ILINE_WIDTH);
+
+ /*
+ * Set DIC to none so userspace will execute an 'ic ivau'
+ * instruction that can be trapped by EL3.
+ */
+ user_cpu_desc.ctr |= CTR_DIC_NONE;
+ /*
+	 * Set the i-cache line size to the page size to reduce the
+	 * number of times userspace needs to execute the 'ic ivau'
+	 * instruction. ctr_el0.IminLine is log2 of the number of
+	 * 4-byte words the instruction covers. As PAGE_SHIFT is log2
+	 * of the number of bytes in a page, we need to subtract 2.
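+	 *
+	 * e.g. with 4k pages PAGE_SHIFT is 12, so IminLine becomes 10:
+	 * 2^10 4-byte words == 4096 bytes, one full page per 'ic ivau'.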
+ */
+ user_cpu_desc.ctr |= (PAGE_SHIFT - 2) << CTR_ILINE_SHIFT;
+
+ l_user_cpu_desc.ctr = user_cpu_desc.ctr;
+ }
+
+ WRITE_SPECIALREG(sctlr_el1,
+ READ_SPECIALREG(sctlr_el1) & ~SCTLR_UCT);
+ isb();
+}
+
+static struct cpu_feat user_ctr = {
+ .feat_name = "Trap CTR_EL0",
+ .feat_check = user_ctr_check,
+ .feat_has_errata = user_ctr_has_errata,
+ .feat_enable = user_ctr_enable,
+ .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
+};
+DATA_SET(cpu_feat_set, user_ctr);
+
+static bool
+user_ctr_handler(uint64_t esr, struct trapframe *frame)
{
uint64_t value;
- int CRm, Op2, i, reg;
+ int reg;
- if ((insn & MRS_MASK) != MRS_VALUE)
- return (0);
+ if (ESR_ELx_EXCEPTION(esr) != EXCP_MSR)
+ return (false);
+ /* Only support reading from ctr_el0 */
+ if ((esr & ISS_MSR_DIR) == 0)
+ return (false);
+
+ /* Check if this is the ctr_el0 register */
+ if ((esr & ISS_MSR_REG_MASK) != CTR_EL0_ISS)
+ return (false);
+
+ if (SV_CURPROC_ABI() == SV_ABI_FREEBSD)
+ value = user_cpu_desc.ctr;
+ else
+ value = l_user_cpu_desc.ctr;
/*
- * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
- * These are in the EL1 CPU identification space.
- * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1.
- * CRm == {4-7} holds the ID_AA64 registers.
- *
- * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
- * Table D9-2 System instruction encodings for non-Debug System
- * register accesses.
+	 * We will handle this instruction; move to the next one so we
+	 * don't trap here again.
*/
- if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
- return (0);
+ frame->tf_elr += INSN_SIZE;
- CRm = mrs_CRm(insn);
- if (CRm > 7 || (CRm < 4 && CRm != 0))
- return (0);
+ reg = ISS_MSR_Rt(esr);
+ /* If reg is 31 then write to xzr, i.e. do nothing */
+ if (reg == 31)
+ return (true);
- Op2 = mrs_Op2(insn);
- value = 0;
+ if (reg < nitems(frame->tf_x))
+ frame->tf_x[reg] = value;
+ else if (reg == 30)
+ frame->tf_lr = value;
- for (i = 0; i < nitems(user_regs); i++) {
- if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
- if (SV_CURPROC_ABI() == SV_ABI_FREEBSD)
- value = CPU_DESC_FIELD(user_cpu_desc, i);
- else
- value = CPU_DESC_FIELD(l_user_cpu_desc, i);
- break;
- }
- }
+ return (true);
+}
- if (CRm == 0) {
- switch (Op2) {
+static bool
+user_idreg_handler(uint64_t esr, struct trapframe *frame)
+{
+ uint64_t value;
+ int reg;
+
+ if (ESR_ELx_EXCEPTION(esr) != EXCP_MSR)
+ return (false);
+
+ /* Only support reading from ID registers */
+ if ((esr & ISS_MSR_DIR) == 0)
+ return (false);
+
+ /*
+ * This only handles the ID register space and a few registers that
+ * are safe to pass through to userspace.
+ *
+ * These registers are all in the space op0 == 3, op1 == 0,
+ * CRn == 0. We support the following CRm:
+ * - CRm == 0: midr_el1, mpidr_el1, and revidr_el1.
+ * - CRm in {4-7}: sanitized ID registers.
+ *
+ * Registers in the ID register space (CRm in {4-7}) are all
+	 * read-only and either have defined fields or are read as
+	 * zero (RAZ). For these we return 0 for any unknown register.
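+	 *
+	 * For example, a trapped "mrs x0, id_aa64pfr0_el1" arrives here
+	 * with op0 == 3, op1 == 0, CRn == 0, CRm == 4, op2 == 0 and
+	 * Rt == 0 encoded in the ESR ISS.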
+ */
+ if (ISS_MSR_OP0(esr) != 3 || ISS_MSR_OP1(esr) != 0 ||
+ ISS_MSR_CRn(esr) != 0)
+ return (false);
+
+ value = 0;
+ if (ISS_MSR_CRm(esr) >= 4 && ISS_MSR_CRm(esr) <= 7) {
+ for (int i = 0; i < nitems(user_regs); i++) {
+ if (user_regs[i].iss == (esr & ISS_MSR_REG_MASK)) {
+ if (SV_CURPROC_ABI() == SV_ABI_FREEBSD)
+ value = CPU_DESC_FIELD(user_cpu_desc, i);
+ else
+ value = CPU_DESC_FIELD(l_user_cpu_desc, i);
+ break;
+ }
+ }
+ } else if (ISS_MSR_CRm(esr) == 0) {
+ switch (ISS_MSR_OP2(esr)) {
case 0:
value = READ_SPECIALREG(midr_el1);
break;
@@ -1956,8 +2459,10 @@ user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
value = READ_SPECIALREG(revidr_el1);
break;
default:
- return (0);
+ return (false);
}
+ } else {
+ return (false);
}
/*
@@ -1966,7 +2471,7 @@ user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
*/
frame->tf_elr += INSN_SIZE;
- reg = MRS_REGISTER(insn);
+ reg = ISS_MSR_Rt(esr);
/* If reg is 31 then write to xzr, i.e. do nothing */
if (reg == 31)
return (1);
@@ -1976,7 +2481,7 @@ user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
else if (reg == 30)
frame->tf_lr = value;
- return (1);
+ return (true);
}
/*
@@ -2013,51 +2518,13 @@ mrs_field_cmp(uint64_t a, uint64_t b, u_int shift, int width, bool sign)
return (a - b);
}
-static uint64_t
-update_lower_register(uint64_t val, uint64_t new_val, u_int shift,
- int width, bool sign)
-{
- uint64_t mask;
-
- KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__,
- width));
-
- /*
- * If the new value is less than the existing value update it.
- */
- if (mrs_field_cmp(new_val, val, shift, width, sign) < 0) {
- mask = (1ul << width) - 1;
- val &= ~(mask << shift);
- val |= new_val & (mask << shift);
- }
-
- return (val);
-}
-
bool
-extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val)
+get_kernel_reg_iss(u_int iss, uint64_t *val)
{
- uint64_t value;
int i;
for (i = 0; i < nitems(user_regs); i++) {
- if (user_regs[i].reg == reg) {
- value = CPU_DESC_FIELD(user_cpu_desc, i);
- *val = value >> field_shift;
- return (true);
- }
- }
-
- return (false);
-}
-
-bool
-get_kernel_reg(u_int reg, uint64_t *val)
-{
- int i;
-
- for (i = 0; i < nitems(user_regs); i++) {
- if (user_regs[i].reg == reg) {
+ if (user_regs[i].iss == iss) {
*val = CPU_DESC_FIELD(kern_cpu_desc, i);
return (true);
}
@@ -2071,18 +2538,19 @@ get_kernel_reg(u_int reg, uint64_t *val)
* do not exceed those in the mask.
*/
bool
-get_kernel_reg_masked(u_int reg, uint64_t *valp, uint64_t mask)
+get_kernel_reg_iss_masked(u_int iss, uint64_t *valp, uint64_t mask)
{
const struct mrs_field *fields;
uint64_t val;
for (int i = 0; i < nitems(user_regs); i++) {
- if (user_regs[i].reg == reg) {
+ if (user_regs[i].iss == iss) {
val = CPU_DESC_FIELD(kern_cpu_desc, i);
fields = user_regs[i].fields;
for (int j = 0; fields[j].type != 0; j++) {
- mask = update_lower_register(mask, val,
- fields[j].shift, 4, fields[j].sign);
+ mask = update_special_reg_field(mask,
+ fields[j].type, val, fields[j].width,
+ fields[j].shift, fields[j].sign);
}
*valp = mask;
return (true);
@@ -2092,33 +2560,122 @@ get_kernel_reg_masked(u_int reg, uint64_t *valp, uint64_t mask)
return (false);
}
+bool
+get_user_reg_iss(u_int iss, uint64_t *val, bool fbsd)
+{
+ int i;
+
+ for (i = 0; i < nitems(user_regs); i++) {
+ if (user_regs[i].iss == iss) {
+ if (fbsd)
+ *val = CPU_DESC_FIELD(user_cpu_desc, i);
+ else
+ *val = CPU_DESC_FIELD(l_user_cpu_desc, i);
+ return (true);
+ }
+ }
+
+ return (false);
+}
+
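+/*
+ * Merge a single ID register field from one CPU into the accumulated
+ * view. e.g. MRS_LOWER keeps the lowest value seen (CPUs reporting 2
+ * and 1 sanitize to 1), while MRS_HIGHER_OR_ZERO treats 0 as unknown
+ * and zeroes the field if any CPU reports 0.
+ */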
static uint64_t
update_special_reg_field(uint64_t user_reg, u_int type, uint64_t value,
- u_int shift, bool sign)
+ u_int width, u_int shift, bool sign)
{
+ uint64_t cur, mask, new_val;
+
+ mask = ((1ul << width) - 1) << shift;
+ cur = user_reg & mask;
+ new_val = value & mask;
+
switch (type & MRS_TYPE_MASK) {
+ case MRS_EXACT_IF_DIFFERENT:
+ if (mrs_field_cmp(new_val, cur, shift, width, sign) == 0)
+ break;
+ /* FALLTHROUGH */
case MRS_EXACT:
- user_reg &= ~(0xful << shift);
- user_reg |= (uint64_t)MRS_EXACT_FIELD(type) << shift;
+ cur = (uint64_t)MRS_SAFE_VAL(type) << shift;
break;
case MRS_LOWER:
- user_reg = update_lower_register(user_reg, value, shift, 4,
- sign);
+ if (mrs_field_cmp(new_val, cur, shift, width, sign) < 0)
+ cur = new_val;
+ break;
+ case MRS_HIGHER_OR_ZERO:
+ if (cur == 0 || new_val == 0) {
+ cur = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ case MRS_HIGHER:
+ if (mrs_field_cmp(new_val, cur, shift, width, sign) > 0)
+ cur = new_val;
break;
default:
panic("Invalid field type: %d", type);
}
+ user_reg &= ~mask;
+ user_reg |= cur;
+
return (user_reg);
}
+static void
+clear_set_special_reg_idx(int idx, uint64_t clear, uint64_t set)
+{
+ const struct mrs_field *fields;
+ uint64_t k_old, k_new;
+ uint64_t f_old, f_new;
+ uint64_t l_old, l_new;
+
+ MPASS(idx < nitems(user_regs));
+
+ k_old = CPU_DESC_FIELD(kern_cpu_desc, idx);
+ k_new = (k_old & ~clear) | set;
+
+ f_old = CPU_DESC_FIELD(user_cpu_desc, idx);
+ f_new = (f_old & ~clear) | set;
+
+ l_old = CPU_DESC_FIELD(l_user_cpu_desc, idx);
+ l_new = (l_old & ~clear) | set;
+
+ fields = user_regs[idx].fields;
+ for (int j = 0; fields[j].type != 0; j++) {
+ u_int type;
+
+ /* Update the FreeBSD userspace ID register view */
+ type = ((fields[j].type & MRS_FREEBSD) != 0) ?
+ fields[j].type :
+ (MRS_EXACT | (fields[j].type & MRS_SAFE_MASK));
+ f_new = update_special_reg_field(f_new,
+ type, f_old, fields[j].width, fields[j].shift,
+ fields[j].sign);
+
+ /* Update the Linux userspace ID register view */
+ type = ((fields[j].type & MRS_LINUX) != 0) ?
+ fields[j].type :
+ (MRS_EXACT | (fields[j].type & MRS_SAFE_MASK));
+ l_new = update_special_reg_field(l_new,
+ type, l_old, fields[j].width, fields[j].shift,
+ fields[j].sign);
+
+ /* Update the kernel ID register view */
+ k_new = update_special_reg_field(k_new,
+ fields[j].type, k_old, fields[j].width,
+ fields[j].shift, fields[j].sign);
+ }
+
+ CPU_DESC_FIELD(kern_cpu_desc, idx) = k_new;
+ CPU_DESC_FIELD(user_cpu_desc, idx) = f_new;
+ CPU_DESC_FIELD(l_user_cpu_desc, idx) = l_new;
+}
+
void
update_special_regs(u_int cpu)
{
struct cpu_desc *desc;
- const struct mrs_field *fields;
- uint64_t l_user_reg, user_reg, kern_reg, value;
- int i, j;
+ uint64_t value;
+ int i;
if (cpu == 0) {
/* Create a user visible cpu description with safe values */
@@ -2136,35 +2693,42 @@ update_special_regs(u_int cpu)
for (i = 0; i < nitems(user_regs); i++) {
value = CPU_DESC_FIELD(*desc, i);
if (cpu == 0) {
- kern_reg = value;
- user_reg = value;
- l_user_reg = value;
- } else {
- kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i);
- user_reg = CPU_DESC_FIELD(user_cpu_desc, i);
- l_user_reg = CPU_DESC_FIELD(l_user_cpu_desc, i);
+ CPU_DESC_FIELD(kern_cpu_desc, i) = value;
+ CPU_DESC_FIELD(user_cpu_desc, i) = value;
+ CPU_DESC_FIELD(l_user_cpu_desc, i) = value;
}
- fields = user_regs[i].fields;
- for (j = 0; fields[j].type != 0; j++) {
- /* Update the FreeBSD userspace ID register view */
- user_reg = update_special_reg_field(user_reg,
- fields[j].type >> MRS_TYPE_FBSD_SHIFT, value,
- fields[j].shift, fields[j].sign);
-
- /* Update the Linux userspace ID register view */
- l_user_reg = update_special_reg_field(l_user_reg,
- fields[j].type >> MRS_TYPE_LNX_SHIFT, value,
- fields[j].shift, fields[j].sign);
-
- /* Update the kernel ID register view */
- kern_reg = update_lower_register(kern_reg, value,
- fields[j].shift, 4, fields[j].sign);
- }
+ clear_set_special_reg_idx(i, UINT64_MAX, value);
+ }
+}
+
+/*
+ * Updates a special register in all views. This creates a copy of the
+ * register, then clears and sets the given bits and compares the result
+ * with the old version as if it were the ID register of a new CPU.
+ *
+ * It is intended to let code that disables features, e.g. due to errata,
+ * clear the user visible field.
+ *
+ * This needs to be called before the HWCAPs are set. If called from a CPU
+ * feature handler it is safe to call from CPU_FEAT_EARLY_BOOT. It also
+ * needs to run before link_elf_late_ireloc is called; as that happens
+ * after the HWCAPs are set, the check for the HWCAPs is enough.
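+ *
+ * For example, a feature handler could hide SVE from userspace with
+ * (illustrative call, not taken from this change):
+ *	update_special_reg_iss(ID_AA64PFR0_EL1_ISS, ID_AA64PFR0_SVE_MASK, 0);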
+ */
+void
+update_special_reg_iss(u_int iss, uint64_t clear, uint64_t set)
+{
+ MPASS(hwcaps_set == false);
+ /* There is no locking here, so we only support changing this on CPU0 */
+ /* TODO: Add said locking */
+ MPASS(PCPU_GET(cpuid) == 0);
- CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg;
- CPU_DESC_FIELD(user_cpu_desc, i) = user_reg;
- CPU_DESC_FIELD(l_user_cpu_desc, i) = l_user_reg;
+ for (int i = 0; i < nitems(user_regs); i++) {
+ if (user_regs[i].iss != iss)
+ continue;
+
+ clear_set_special_reg_idx(i, clear, set);
+ return;
}
}
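
As a usage sketch (illustrative only; the ISS constant is an assumption,
real callers pass the ISS encoding matching the relevant user_regs[]
entry), an errata handler hiding the LSE atomics field from every view
could look like the following. Passing the field mask as "clear" and 0
as "set" zeroes the field in all three register views:

	/*
	 * Hypothetical erratum workaround: clear ID_AA64ISAR0_EL1.Atomic
	 * in the kernel, FreeBSD, and Linux userspace views so the
	 * feature is not advertised. Must run before the HWCAPs are
	 * parsed, e.g. from a CPU_FEAT_EARLY_BOOT feature handler.
	 */
	static void
	example_errata_enable(const struct cpu_feat *feat __unused,
	    cpu_feat_errata errata_status __unused,
	    u_int *errata_list __unused, u_int errata_count __unused)
	{
		update_special_reg_iss(EXAMPLE_ISAR0_ISS, /* assumed name */
		    ID_AA64ISAR0_Atomic_MASK, 0);
	}
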
@@ -2266,6 +2830,11 @@ identify_cpu_sysinit(void *dummy __unused)
prev_desc = desc;
}
+#ifdef INVARIANTS
+	/* Check we don't update the special registers after this point */
+ hwcaps_set = true;
+#endif
+
/* Find the values to export to userspace as AT_HWCAP and AT_HWCAP2 */
parse_cpu_features(true, &user_cpu_desc, &elf_hwcap, &elf_hwcap2);
parse_cpu_features(true, &l_user_cpu_desc, &linux_elf_hwcap,
@@ -2303,9 +2872,15 @@ identify_cpu_sysinit(void *dummy __unused)
panic("CPU does not support LSE atomic instructions");
#endif
- install_undef_handler(true, user_mrs_handler);
+ install_sys_handler(user_ctr_handler);
+ install_sys_handler(user_idreg_handler);
}
-SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_MIDDLE, identify_cpu_sysinit, NULL);
+/*
+ * This needs to run after the APs have started, as they may have errata
+ * that require masking out ID register fields, which could affect hwcaps,
+ * etc.
+ */
+SYSINIT(identify_cpu, SI_SUB_CONFIGURE, SI_ORDER_ANY, identify_cpu_sysinit,
+ NULL);
static void
cpu_features_sysinit(void *dummy __unused)
@@ -2377,46 +2952,6 @@ parse_cpu_features_hwcap32(void)
#endif /* COMPAT_FREEBSD32 */
static void
-print_ctr_fields(struct sbuf *sb, uint64_t reg, const void *arg __unused)
-{
-
- sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg));
- sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg));
- reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK);
-
- switch(CTR_L1IP_VAL(reg)) {
- case CTR_L1IP_VPIPT:
- sbuf_printf(sb, "VPIPT");
- break;
- case CTR_L1IP_AIVIVT:
- sbuf_printf(sb, "AIVIVT");
- break;
- case CTR_L1IP_VIPT:
- sbuf_printf(sb, "VIPT");
- break;
- case CTR_L1IP_PIPT:
- sbuf_printf(sb, "PIPT");
- break;
- }
- sbuf_printf(sb, " ICache,");
- reg &= ~CTR_L1IP_MASK;
-
- sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg));
- sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg));
- reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK);
-
- if (CTR_IDC_VAL(reg) != 0)
- sbuf_printf(sb, ",IDC");
- if (CTR_DIC_VAL(reg) != 0)
- sbuf_printf(sb, ",DIC");
- reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK);
- reg &= ~CTR_RES1;
-
- if (reg != 0)
- sbuf_printf(sb, ",%lx", reg);
-}
-
-static void
print_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
void (*print_fields)(struct sbuf *, uint64_t, const void *),
const void *arg)
@@ -2443,9 +2978,8 @@ print_id_fields(struct sbuf *sb, uint64_t reg, const void *arg)
for (i = 0; fields[i].type != 0; i++) {
fv = fields[i].values;
- /* TODO: Handle with an unknown message */
if (fv == NULL)
- continue;
+ goto next;
field = (reg & fields[i].mask) >> fields[i].shift;
for (j = 0; fv[j].desc != NULL; j++) {
@@ -2460,7 +2994,8 @@ print_id_fields(struct sbuf *sb, uint64_t reg, const void *arg)
sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR,
fields[i].name, field);
- reg &= ~(0xful << fields[i].shift);
+next:
+ reg &= ~(((1ul << fields[i].width) - 1) << fields[i].shift);
}
if (reg != 0)
@@ -2639,10 +3174,8 @@ print_cpu_features(u_int cpu, struct cpu_desc *desc,
(prev_desc == NULL || desc->_reg != prev_desc->_reg)
/* Cache Type Register */
- if (SHOULD_PRINT_REG(ctr)) {
- print_register(sb, "Cache Type",
- desc->ctr, print_ctr_fields, NULL);
- }
+ if (SHOULD_PRINT_REG(ctr))
+ print_id_register(sb, "Cache Type", desc->ctr, ctr_fields);
/* AArch64 Instruction Set Attribute Register 0 */
if (SHOULD_PRINT_REG(id_aa64isar0))
@@ -2669,12 +3202,10 @@ print_cpu_features(u_int cpu, struct cpu_desc *desc,
print_id_register(sb, "Processor Features 1",
desc->id_aa64pfr1, id_aa64pfr1_fields);
-#ifdef NOTYET
/* AArch64 Processor Feature Register 2 */
if (SHOULD_PRINT_REG(id_aa64pfr2))
print_id_register(sb, "Processor Features 2",
desc->id_aa64pfr2, id_aa64pfr2_fields);
-#endif
/* AArch64 Memory Model Feature Register 0 */
if (SHOULD_PRINT_REG(id_aa64mmfr0))
@@ -2691,7 +3222,6 @@ print_cpu_features(u_int cpu, struct cpu_desc *desc,
print_id_register(sb, "Memory Model Features 2",
desc->id_aa64mmfr2, id_aa64mmfr2_fields);
-#ifdef NOTYET
/* AArch64 Memory Model Feature Register 3 */
if (SHOULD_PRINT_REG(id_aa64mmfr3))
print_id_register(sb, "Memory Model Features 3",
@@ -2701,7 +3231,6 @@ print_cpu_features(u_int cpu, struct cpu_desc *desc,
if (SHOULD_PRINT_REG(id_aa64mmfr4))
print_id_register(sb, "Memory Model Features 4",
desc->id_aa64mmfr4, id_aa64mmfr4_fields);
-#endif
/* AArch64 Debug Feature Register 0 */
if (SHOULD_PRINT_REG(id_aa64dfr0))
@@ -2765,9 +3294,6 @@ identify_cache(uint64_t ctr)
switch (CTR_L1IP_VAL(ctr)) {
case CTR_L1IP_PIPT:
break;
- case CTR_L1IP_VPIPT:
- icache_vmid = true;
- break;
default:
case CTR_L1IP_VIPT:
icache_aliasing = true;
@@ -2809,23 +3335,19 @@ identify_cpu(u_int cpu)
CPU_AFFINITY(cpu) = desc->mpidr & CPU_AFF_MASK;
desc->ctr = READ_SPECIALREG(ctr_el0);
- desc->id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
- desc->id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
- desc->id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
- desc->id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
- desc->id_aa64isar2 = READ_SPECIALREG(id_aa64isar2_el1);
- desc->id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
- desc->id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- desc->id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
-#ifdef NOTYET
- desc->id_aa64mmfr3 = READ_SPECIALREG(id_aa64mmfr3_el1);
- desc->id_aa64mmfr4 = READ_SPECIALREG(id_aa64mmfr4_el1);
-#endif
- desc->id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
- desc->id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
-#ifdef NOTYET
- desc->id_aa64pfr2 = READ_SPECIALREG(id_aa64pfr2_el1);
-#endif
+ desc->id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1_REG);
+ desc->id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1_REG);
+ desc->id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1_REG);
+ desc->id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1_REG);
+ desc->id_aa64isar2 = READ_SPECIALREG(ID_AA64ISAR2_EL1_REG);
+ desc->id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1_REG);
+ desc->id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1_REG);
+ desc->id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1_REG);
+ desc->id_aa64mmfr3 = READ_SPECIALREG(ID_AA64MMFR3_EL1_REG);
+ desc->id_aa64mmfr4 = READ_SPECIALREG(ID_AA64MMFR4_EL1_REG);
+ desc->id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1_REG);
+ desc->id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1_REG);
+ desc->id_aa64pfr2 = READ_SPECIALREG(ID_AA64PFR2_EL1_REG);
/*
* ID_AA64ZFR0_EL1 is only valid when at least one of:
@@ -2889,6 +3411,12 @@ check_cpu_regs(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc)
if (desc->ctr != prev_desc->ctr) {
/*
+		 * If the cache type differs between cores, emulate the
+		 * register for userspace to provide a uniform value.
+ */
+ emulate_ctr = true;
+
+ /*
* If the cache type register is different we may
* have a different l1 cache type.
*/
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index fd77938edae9..2f549a527f43 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -26,9 +26,11 @@
#include "assym.inc"
#include "opt_kstack_pages.h"
+#include <sys/elf_common.h>
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
+#include <machine/cpu.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
@@ -43,6 +45,12 @@
* space, the same as a single level 2 page with 4k pages.
*/
#define L3_PAGE_COUNT 32
+#elif PAGE_SIZE == PAGE_SIZE_4K
+/*
+ * Space for a level 3 table holding the end of the executable memory and
+ * the start of the non-executable data.
+ */
+#define L3_PAGE_COUNT 1
#endif
/*
@@ -65,19 +73,6 @@ ENTRY(_start)
/* Enter the kernel exception level */
bl enter_kernel_el
- /*
- * Disable the MMU. We may have entered the kernel with it on and
- * will need to update the tables later. If this has been set up
- * with anything other than a VA == PA map then this will fail,
- * but in this case the code to find where we are running from
- * would have also failed.
- */
- dsb sy
- mrs x2, sctlr_el1
- bic x2, x2, SCTLR_M
- msr sctlr_el1, x2
- isb
-
/* Set the context id */
msr contextidr_el1, xzr
@@ -95,8 +90,9 @@ ENTRY(_start)
/*
* At this point:
* x27 = TTBR0 table
- * x26 = Kernel L1 table
* x24 = TTBR1 table
+ * x22 = PTE shareability attributes
+ * x21 = BTI guarded page attribute if supported
*/
/* Enable the mmu */
@@ -122,7 +118,7 @@ virtdone:
ldr x15, .Lbss
ldr x14, .Lend
1:
- str xzr, [x15], #8
+ stp xzr, xzr, [x15], #16
cmp x15, x14
b.lo 1b
@@ -145,7 +141,14 @@ virtdone:
str x25, [x0, #BP_KERN_STACK]
str x27, [x0, #BP_KERN_TTBR0]
str x23, [x0, #BP_BOOT_EL]
- str x4, [x0, #BP_HCR_EL2]
+
+ /* Set these before they are used in kasan_init_early */
+ adrp x1, pmap_sh_attr
+ str x22, [x1, :lo12:pmap_sh_attr]
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+ adrp x1, pmap_gp_attr
+ str x21, [x1, :lo12:pmap_gp_attr]
+#endif
#ifdef KASAN
/* Save bootparams */
@@ -192,12 +195,50 @@ END(_start)
#ifdef SMP
/*
- * mpentry(unsigned long)
+ * void
+ * mpentry_psci(unsigned long)
*
- * Called by a core when it is being brought online.
+ * Called by a core when it is being brought online with psci.
* The data in x0 is passed straight to init_secondary.
*/
-ENTRY(mpentry)
+ENTRY(mpentry_psci)
+ mov x26, xzr
+ b mpentry_common
+END(mpentry_psci)
+
+/*
+ * void
+ * mpentry_spintable(void)
+ *
+ * Called by a core when it is being brought online with a spin-table.
+ * Reads the new CPU ID and passes this to init_secondary.
+ */
+ENTRY(mpentry_spintable)
+ ldr x26, =spintable_wait
+ b mpentry_common
+END(mpentry_spintable)
+
+/* Wait for the current CPU to be released */
+LENTRY(spintable_wait)
+ /* Read the affinity bits from mpidr_el1 */
+ mrs x1, mpidr_el1
+ ldr x2, =CPU_AFF_MASK
+ and x1, x1, x2
+
+ adrp x2, ap_cpuid
+1:
+ ldr x0, [x2, :lo12:ap_cpuid]
+ cmp x0, x1
+ b.ne 1b
+
+ str xzr, [x2, :lo12:ap_cpuid]
+ dsb sy
+ sev
+
+ ret
+LEND(spintable_wait)
+
+LENTRY(mpentry_common)
/* Disable interrupts */
msr daifset, #DAIF_INTR
@@ -228,6 +269,14 @@ ENTRY(mpentry)
mp_virtdone:
BTI_J
+ /*
+ * Allow this CPU to wait until the kernel is ready for it,
+ * e.g. with spin-table but each CPU uses the same release address
+ */
+ cbz x26, 1f
+ blr x26
+1:
+
/* Start using the AP boot stack */
adrp x4, bootstack
ldr x4, [x4, :lo12:bootstack]
@@ -258,32 +307,54 @@ mp_virtdone:
msr tpidr_el1, x18
b init_secondary
-END(mpentry)
+LEND(mpentry_common)
#endif
/*
- * If we are started in EL2, configure the required hypervisor
- * registers and drop to EL1.
+ * Enter the exception level the kernel will use:
+ *
+ * - If in EL1 continue in EL1
+ * - If the CPU supports FEAT_VHE then set HCR_E2H and HCR_TGE and continue
+ * in EL2
+ * - Configure EL2 to support running the kernel at EL1 and exit to that
*/
LENTRY(enter_kernel_el)
+#define INIT_SCTLR_EL1 (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | \
+ SCTLR_TSCXT | SCTLR_EOS)
mrs x23, CurrentEL
and x23, x23, #(CURRENTEL_EL_MASK)
cmp x23, #(CURRENTEL_EL_EL2)
b.eq 1f
- ret
+
+ ldr x2, =INIT_SCTLR_EL1
+ msr sctlr_el1, x2
+	/*
+	 * SCTLR_EOS is set, so eret is a context synchronizing event; we
+	 * need an isb here to ensure it's observed by later instructions,
+	 * but don't need one before the eret below.
+	 */
+ isb
+
+	/*
+	 * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
+	 * latter is to set the former and return from an exception with
+	 * eret.
+	 */
+ mov x2, #(PSR_DAIF | PSR_M_EL1h)
+ msr spsr_el1, x2
+ msr elr_el1, lr
+ eret
+
1:
+ dsb sy
/*
- * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it
- * which may break address translation.
+ * Set just the reserved bits in sctlr_el2. This will disable the
+ * MMU which may have broken the kernel if we enter the kernel in
+ * EL2, e.g. when using VHE.
*/
- dsb sy
- mrs x2, sctlr_el2
- bic x2, x2, SCTLR_M
+ ldr x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
msr sctlr_el2, x2
isb
/* Configure the Hypervisor */
- ldr x2, =(HCR_RW | HCR_APK | HCR_API)
+ ldr x2, =(HCR_RW | HCR_APK | HCR_API | HCR_E2H)
msr hcr_el2, x2
/* Stash value of HCR_EL2 for later */
@@ -298,66 +369,81 @@ LENTRY(enter_kernel_el)
mrs x2, mpidr_el1
msr vmpidr_el2, x2
- /* Set the bits that need to be 1 in sctlr_el1 */
- ldr x2, .Lsctlr_res1
+ /* Set the initial sctlr_el1 */
+ ldr x2, =INIT_SCTLR_EL1
msr sctlr_el1, x2
+ /* Check if the E2H flag is set */
+ tst x4, #HCR_E2H
+ b.eq .Lno_vhe
+
/*
- * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we
- * don't trap to EL2 for SIMD register usage to have at least a
- * minimally usable system.
+ * The kernel will be running in EL2, route exceptions here rather
+ * than EL1.
*/
- tst x4, #HCR_E2H
- mov x3, #CPTR_RES1 /* HCR_E2H == 0 */
- mov x5, #CPTR_FPEN /* HCR_E2H == 1 */
- csel x2, x3, x5, eq
+ orr x4, x4, #(HCR_TGE)
+ msr hcr_el2, x4
+ isb
+
+ msr SCTLR_EL12_REG, x2
+ mov x2, xzr /* CPTR_EL2 is managed by vfp.c */
+ ldr x3, =(CNTHCTL_E2H_EL1PCTEN | CNTHCTL_E2H_EL1PTEN)
+ ldr x5, =(PSR_DAIF | PSR_M_EL2h)
+ b .Ldone_vhe
+
+.Lno_vhe:
+ /* Hypervisor trap functions */
+ adrp x2, hyp_stub_vectors
+ add x2, x2, :lo12:hyp_stub_vectors
+ msr vbar_el2, x2
+
+ ldr x2, =(CPTR_RES1)
+ ldr x3, =(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
+ ldr x5, =(PSR_DAIF | PSR_M_EL1h)
+
+.Ldone_vhe:
+
msr cptr_el2, x2
+ /* Enable access to the physical timers at EL1 */
+ msr cnthctl_el2, x3
+ /* Set the return PSTATE */
+ msr spsr_el2, x5
+
+ /*
+ * Configure the Extended Hypervisor register. This is only valid if
+ * FEAT_HCX is enabled.
+ */
+ CHECK_CPU_FEAT(x2, ID_AA64MMFR1, HCX, 2f)
+ /* Extended Hypervisor Configuration */
+ mov x2, xzr
+ msr HCRX_EL2_REG, x2
+ isb
+2:
/* Don't trap to EL2 for CP15 traps */
msr hstr_el2, xzr
- /* Enable access to the physical timers at EL1 */
- mrs x2, cnthctl_el2
- orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
- msr cnthctl_el2, x2
-
/* Set the counter offset to a known value */
msr cntvoff_el2, xzr
- /* Hypervisor trap functions */
- adrp x2, hyp_stub_vectors
- add x2, x2, :lo12:hyp_stub_vectors
- msr vbar_el2, x2
-
/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
msr vttbr_el2, xzr
- mov x2, #(PSR_DAIF | PSR_M_EL1h)
- msr spsr_el2, x2
-
- /* Configure GICv3 CPU interface */
- mrs x2, id_aa64pfr0_el1
- /* Extract GIC bits from the register */
- ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
- /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
- cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
- b.ne 2f
+ /* Check the CPU supports GIC, and configure the CPU interface */
+ CHECK_CPU_FEAT(x2, ID_AA64PFR0, GIC, 3f)
mrs x2, icc_sre_el2
orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
msr icc_sre_el2, x2
-2:
+3:
/* Set the address to return to our return address */
msr elr_el2, x30
isb
eret
-
- .align 3
-.Lsctlr_res1:
- .quad SCTLR_RES1
+#undef INIT_SCTLR_EL1
LEND(enter_kernel_el)
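
To summarise the control flow above, a C-flavoured sketch (illustrative
only; the authoritative logic is the assembly): the kernel stays in EL1
if entered there, runs in EL2 when the E2H write sticks (FEAT_VHE), and
otherwise configures EL2 for later guest use and erets to EL1.

	/*
	 * Sketch of the enter_kernel_el decision (not kernel code).
	 * current_el is CurrentEL (EL in bits [3:2]); hcr is HCR_EL2
	 * read back after attempting to set E2H (bit 34), which stays
	 * clear on CPUs without FEAT_VHE.
	 */
	static int
	chosen_kernel_el(uint64_t current_el, uint64_t hcr)
	{
		if (((current_el >> 2) & 0x3) == 1)
			return (1);	/* booted in EL1: stay there */
		if ((hcr & (1ull << 34)) != 0)
			return (2);	/* FEAT_VHE: set HCR_TGE, run in EL2 */
		return (1);		/* set up EL2, then eret to EL1 */
	}
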
/*
@@ -382,14 +468,16 @@ LEND(get_load_phys_addr)
 * All the memory must not cross a 1GiB boundary
* x28 contains the physical address we were loaded from
*
- * TODO: This is out of date.
- * There are at least 5 pages before that address for the page tables
+ * There are 7 or 8 pages before that address for the page tables
* The pages used are:
+ * - The Kernel L3 tables (only for 16k kernel)
* - The Kernel L2 table
* - The Kernel L1 table
* - The Kernel L0 table (TTBR1)
+ * - The identity (PA = VA) L2 table
* - The identity (PA = VA) L1 table
- * - The identity (PA = VA) L0 table (TTBR0)
+ * - The identity (PA = VA) L0 table (Early TTBR0)
+ * - The Kernel empty L0 table (Late TTBR0)
*/
LENTRY(create_pagetables)
/* Save the Link register */
@@ -398,7 +486,6 @@ LENTRY(create_pagetables)
/* Clean the page table */
adrp x6, pagetable
add x6, x6, :lo12:pagetable
- mov x26, x6
adrp x27, pagetable_end
add x27, x27, :lo12:pagetable_end
1:
@@ -409,6 +496,41 @@ LENTRY(create_pagetables)
cmp x6, x27
b.lo 1b
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+ /*
+ * Check if the CPU supports BTI
+ */
+ mrs x6, id_aa64pfr1_el1 /* Read the ID register */
+ and x6, x6, ID_AA64PFR1_BT_MASK /* Mask the field we need */
+ cmp x6, xzr /* Check it's non-zero */
+ cset x6, ne /* x6 is set if non-zero */
+ lsl x21, x6, ATTR_S1_GP_SHIFT /* Shift to the correct bit */
+#endif
+
+ /*
+ * Find the shareability attribute we should use. If FEAT_LPA2 is
+ * enabled then the shareability field is moved from the page table
+ * to tcr_el1 and the bits in the page table are reused by the
+ * address field.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define LPA2_MASK ID_AA64MMFR0_TGran4_MASK
+#define LPA2_VAL ID_AA64MMFR0_TGran4_LPA2
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define LPA2_MASK ID_AA64MMFR0_TGran16_MASK
+#define LPA2_VAL ID_AA64MMFR0_TGran16_LPA2
+#else
+#error Unsupported page size
+#endif
+ mrs x6, id_aa64mmfr0_el1
+ mov x7, LPA2_VAL
+ and x6, x6, LPA2_MASK
+ cmp x6, x7
+ ldr x22, =(ATTR_SH(ATTR_SH_IS))
+ csel x22, xzr, x22, eq
+#undef LPA2_MASK
+#undef LPA2_VAL
+
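Expressed in C (a sketch under the patch's own macro names; not part of
the change), the shareability selection above is:

	/*
	 * Sketch: choose the PTE shareability bits. With FEAT_LPA2 the
	 * SH field moves to TCR_EL1 and the PTE bits are reused for
	 * address bits, so they must be left clear. 4k-page field
	 * names shown; the 16k variants are analogous.
	 */
	static inline uint64_t
	pte_sh_attr(uint64_t mmfr0)
	{
		if ((mmfr0 & ID_AA64MMFR0_TGran4_MASK) ==
		    ID_AA64MMFR0_TGran4_LPA2)
			return (0);
		return (ATTR_SH(ATTR_SH_IS));
	}
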
/*
* Build the TTBR1 maps.
*/
@@ -467,67 +589,125 @@ booti_no_fdt:
common:
#if PAGE_SIZE != PAGE_SIZE_4K
/*
- * Create L3 pages. The kernel will be loaded at a 2M aligned
- * address, however L2 blocks are too large when the page size is
- * not 4k to map the kernel with such an aligned address. However,
- * when the page size is larger than 4k, L2 blocks are too large to
- * map the kernel with such an alignment.
+ * Create L3 and L3C pages. The kernel will be loaded at a 2M aligned
+ * address, enabling the creation of L3C pages. However, when the page
+ * size is larger than 4k, L2 blocks are too large to map the kernel
+ * with 2M alignment.
*/
+#define PTE_SHIFT L3_SHIFT
+#define LL_PAGE_TABLE pagetable_l3_ttbr1
+#define BUILD_PTE_FUNC build_l3_page_pagetable
+#else
+#define PTE_SHIFT L2_SHIFT
+#define LL_PAGE_TABLE pagetable_l2_ttbr1
+#define BUILD_PTE_FUNC build_l2_block_pagetable
+#endif
- /* Get the number of l3 pages to allocate, rounded down */
- lsr x10, x8, #(L3_SHIFT)
+ /* Get the number of blocks/pages to allocate, rounded down */
+ lsr x14, x8, #(PTE_SHIFT)
- /* Create the kernel space L2 table */
- mov x6, x26
- mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
+ ldr x26, =etext
+#if PAGE_SIZE != PAGE_SIZE_4K
+ ldr x8, =((1 << PTE_SHIFT) - 1)
+ add x26, x26, x8
+#endif
mov x8, #(KERNBASE)
- mov x9, x28
+ sub x25, x26, x8
+ lsr x25, x25, #(PTE_SHIFT)
+
+#if PAGE_SIZE == PAGE_SIZE_4K
+ /* Calculate the number of executable level 3 pages to create */
+ lsr x26, x26, #(L3_SHIFT)
+ bfc x26, #(Ln_ENTRIES_SHIFT), #(64 - Ln_ENTRIES_SHIFT)
+
+	/* Build the L3 table holding the end of the executable code */
+ lsl x15, x25, #(PTE_SHIFT)
+ adrp x6, pagetable_l3_ttbr1
+ add x6, x6, :lo12:pagetable_l3_ttbr1
+ ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | \
+ ATTR_S1_AP(ATTR_S1_AP_RO))
+ ldr x8, =(KERNBASE)
+ add x8, x8, x15
+ add x9, x28, x15
+ mov x10, x26
bl build_l3_page_pagetable
- /* Move to the l2 table */
- ldr x9, =(PAGE_SIZE * L3_PAGE_COUNT)
- add x26, x26, x9
+ /* Build the remaining level 3 pages */
+ ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_XN)
+ lsl x27, x26, #(L3_SHIFT)
+ add x8, x8, x27
+ add x9, x28, x15
+ add x9, x9, x27
+ ldr x10, =(Ln_ENTRIES)
+ sub x10, x10, x26
+ bl build_l3_page_pagetable
/* Link the l2 -> l3 table */
mov x9, x6
- mov x6, x26
+ adrp x6, pagetable_l2_ttbr1
+ add x6, x6, :lo12:pagetable_l2_ttbr1
bl link_l2_pagetable
-#else
- /* Get the number of l2 pages to allocate, rounded down */
- lsr x10, x8, #(L2_SHIFT)
+#endif
- /* Create the kernel space L2 table */
- mov x6, x26
- mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
+ /* Create the kernel space PTE table */
+ adrp x6, LL_PAGE_TABLE
+ add x6, x6, :lo12:LL_PAGE_TABLE
+ ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | \
+ ATTR_S1_AP(ATTR_S1_AP_RO))
mov x8, #(KERNBASE)
mov x9, x28
- bl build_l2_block_pagetable
+ mov x10, x25
+ bl BUILD_PTE_FUNC
+
+#if PAGE_SIZE == PAGE_SIZE_4K
+ /* Skip memory mapped through the L2 table */
+ add x25, x25, #1
#endif
- /* Move to the l1 table */
- add x26, x26, #PAGE_SIZE
+ /* Create the kernel space XN PTE table */
+ lsl x10, x25, #(PTE_SHIFT)
+ ldr x7, =(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_XN)
+ ldr x8, =(KERNBASE)
+ add x8, x8, x10
+ add x9, x28, x10
+ sub x10, x14, x25
+ bl BUILD_PTE_FUNC
+
+#undef PTE_SHIFT
+#undef LL_PAGE_TABLE
+#undef BUILD_PTE_FUNC
+
+#if PAGE_SIZE != PAGE_SIZE_4K
+ /* Link the l2 -> l3 table */
+ mov x9, x6
+ adrp x6, pagetable_l2_ttbr1
+ add x6, x6, :lo12:pagetable_l2_ttbr1
+ bl link_l2_pagetable
+#endif
/* Link the l1 -> l2 table */
mov x9, x6
- mov x6, x26
+ adrp x6, pagetable_l1_ttbr1
+ add x6, x6, :lo12:pagetable_l1_ttbr1
bl link_l1_pagetable
- /* Move to the l0 table */
- add x24, x26, #PAGE_SIZE
-
/* Link the l0 -> l1 table */
mov x9, x6
- mov x6, x24
+ adrp x6, pagetable_l0_ttbr1
+ add x6, x6, :lo12:pagetable_l0_ttbr1
mov x10, #1
bl link_l0_pagetable
+ /* Save the TTBR1 table physical address */
+ mov x24, x6
+
/*
* Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
* They are only needed early on, so the VA = PA map is uncached.
*/
- add x27, x24, #PAGE_SIZE
- mov x6, x27 /* The initial page table */
+ adrp x6, pagetable_l2_ttbr0_bootstrap
+ add x6, x6, :lo12:pagetable_l2_ttbr0_bootstrap
/* Create the VA = PA map */
mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
@@ -575,23 +755,22 @@ common:
1:
#endif
- /* Move to the l1 table */
- add x27, x27, #PAGE_SIZE
-
/* Link the l1 -> l2 table */
mov x9, x6
- mov x6, x27
+ adrp x6, pagetable_l1_ttbr0_bootstrap
+ add x6, x6, :lo12:pagetable_l1_ttbr0_bootstrap
bl link_l1_pagetable
- /* Move to the l0 table */
- add x27, x27, #PAGE_SIZE
-
/* Link the l0 -> l1 table */
mov x9, x6
- mov x6, x27
+ adrp x6, pagetable_l0_ttbr0_bootstrap
+ add x6, x6, :lo12:pagetable_l0_ttbr0_bootstrap
mov x10, #1
bl link_l0_pagetable
+ /* Save the TTBR0 table physical address */
+ mov x27, x6
+
/* Restore the Link register */
mov x30, x5
ret
@@ -681,11 +860,13 @@ LENTRY(build_l2_block_pagetable)
/* Build the L2 block entry */
orr x12, x7, #L2_BLOCK
- orr x12, x12, #(ATTR_DEFAULT)
+ orr x12, x12, #(ATTR_AF)
orr x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
- orr x12, x12, #(ATTR_S1_GP)
+ orr x12, x12, x21
#endif
+ /* Set the shareability attribute */
+ orr x12, x12, x22
/* Only use the output address bits */
lsr x9, x9, #L2_SHIFT
@@ -704,7 +885,6 @@ LENTRY(build_l2_block_pagetable)
ret
LEND(build_l2_block_pagetable)
-#if PAGE_SIZE != PAGE_SIZE_4K
/*
* Builds an L2 -> L3 table descriptor
*
@@ -735,15 +915,20 @@ LENTRY(link_l2_pagetable)
LEND(link_l2_pagetable)
/*
- * Builds count level 3 page table entries
+ * Builds count level 3 page table entries. Uses ATTR_CONTIGUOUS to create
+ * large page (L3C) mappings when the current VA and remaining count allow
+ * it.
* x6 = L3 table
* x7 = Block attributes
* x8 = VA start
* x9 = PA start (trashed)
* x10 = Entry count (trashed)
* x11, x12 and x13 are trashed
+ *
+ * VA start (x8) modulo L3C_SIZE must equal PA start (x9) modulo L3C_SIZE.
*/
LENTRY(build_l3_page_pagetable)
+ cbz x10, 2f
/*
* Build the L3 table entry.
*/
@@ -753,17 +938,28 @@ LENTRY(build_l3_page_pagetable)
/* Build the L3 page entry */
orr x12, x7, #L3_PAGE
- orr x12, x12, #(ATTR_DEFAULT)
+ orr x12, x12, #(ATTR_AF)
orr x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
- orr x12, x12, #(ATTR_S1_GP)
+ orr x12, x12, x21
#endif
+ /* Set the shareability attribute */
+ orr x12, x12, x22
/* Only use the output address bits */
lsr x9, x9, #L3_SHIFT
+ /* Check if an ATTR_CONTIGUOUS mapping is possible */
+1: tst x11, #(L3C_ENTRIES - 1)
+ b.ne 2f
+ cmp x10, #L3C_ENTRIES
+ b.lo 3f
+ orr x12, x12, #(ATTR_CONTIGUOUS)
+ b 2f
+3: and x12, x12, #(~ATTR_CONTIGUOUS)
+
/* Set the physical address for this virtual address */
-1: orr x13, x12, x9, lsl #L3_SHIFT
+2: orr x13, x12, x9, lsl #L3_SHIFT
/* Store the entry */
str x13, [x6, x11, lsl #3]
@@ -772,10 +968,10 @@ LENTRY(build_l3_page_pagetable)
add x11, x11, #1
add x9, x9, #1
cbnz x10, 1b
+2:
ret
LEND(build_l3_page_pagetable)
-#endif
LENTRY(start_mmu)
dsb sy
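
The ATTR_CONTIGUOUS test in build_l3_page_pagetable only starts an L3C
run when the current table index is aligned to the run size and enough
entries remain to fill it; otherwise the bit is cleared again. As a
standalone sketch (L3C_ENTRIES assumed from pte.h):

	/*
	 * Sketch: may an ATTR_CONTIGUOUS (L3C) run begin at L3 table
	 * index "idx" with "count" entries still to write?
	 */
	static inline bool
	l3c_run_ok(u_int idx, u_int count)
	{
		return ((idx & (L3C_ENTRIES - 1)) == 0 &&
		    count >= L3C_ENTRIES);
	}
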
@@ -807,6 +1003,13 @@ LENTRY(start_mmu)
* to 1 only if the ASIDBits field equals 0b0010.
*/
ldr x2, tcr
+
+ /* If x22 contains a non-zero value then LPA2 is not implemented */
+ cbnz x22, .Lno_lpa2
+ ldr x3, =(TCR_DS)
+ orr x2, x2, x3
+.Lno_lpa2:
+
mrs x3, id_aa64mmfr0_el1
/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
@@ -821,20 +1024,15 @@ LENTRY(start_mmu)
bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)
/*
- * Check if the HW supports access flag and dirty state updates,
- * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
+ * Check if the HW supports access flag updates, and set
+ * TCR_EL1.HA accordingly. The TCR_EL1.HD flag to enable
+ * HW management of dirty state is set in C code as it may
+ * need to be disabled because of CPU errata.
*/
- mrs x3, id_aa64mmfr1_el1
- and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
- cmp x3, #1
- b.ne 1f
- orr x2, x2, #(TCR_HA)
- b 2f
+ CHECK_CPU_FEAT(x3, ID_AA64MMFR1, HAFDBS, 1f)
+ orr x2, x2, #(TCR_HA)
1:
- cmp x3, #2
- b.ne 2f
- orr x2, x2, #(TCR_HA | TCR_HD)
-2:
+
msr tcr_el1, x2
/*
@@ -866,8 +1064,9 @@ tcr:
#error Unsupported page size
#endif
- .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
- TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
+ .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
+ TCR_SH1_IS | TCR_ORGN1_WBWA | TCR_IRGN1_WBWA | \
+ TCR_SH0_IS | TCR_ORGN0_WBWA | TCR_IRGN0_WBWA)
sctlr_set:
/* Bits to set */
.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
@@ -903,10 +1102,9 @@ initstack_end:
*/
.globl pagetable_l0_ttbr1
pagetable:
-#if PAGE_SIZE != PAGE_SIZE_4K
+pagetable_l3_ttbr1:
.space (PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
-#endif
.space PAGE_SIZE
pagetable_l1_ttbr1:
.space PAGE_SIZE
@@ -945,3 +1143,5 @@ aarch32_esigcode:
.global sz_aarch32_sigcode
sz_aarch32_sigcode:
.quad aarch32_esigcode - aarch32_sigcode
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 517f4e7c2e23..53856dd90cae 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -38,8 +38,8 @@
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/csan.h>
-#include <sys/devmap.h>
#include <sys/efi.h>
+#include <sys/efi_map.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
@@ -77,6 +77,7 @@
#include <machine/armreg.h>
#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
#include <machine/debug_monitor.h>
#include <machine/hypervisor.h>
#include <machine/kdb.h>
@@ -131,7 +132,6 @@ static struct trapframe proc0_tf;
int early_boot = 1;
int cold = 1;
static int boot_el;
-static uint64_t hcr_el2;
struct kva_md_info kmi;
@@ -173,46 +173,47 @@ SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
#endif
-static void
-pan_setup(void)
+static bool
+pan_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64mfr1;
id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
- has_pan = 1;
+ return (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE);
}
-void
-pan_enable(void)
+static void
+pan_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
{
+ has_pan = 1;
/*
- * The LLVM integrated assembler doesn't understand the PAN
- * PSTATE field. Because of this we need to manually create
- * the instruction in an asm block. This is equivalent to:
- * msr pan, #1
- *
* This sets the PAN bit, stopping the kernel from accessing
* memory when userspace can also access it unless the kernel
* uses the userspace load/store instructions.
*/
- if (has_pan) {
- WRITE_SPECIALREG(sctlr_el1,
- READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
- __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
- }
+ WRITE_SPECIALREG(sctlr_el1,
+ READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
+ __asm __volatile(
+ ".arch_extension pan \n"
+ "msr pan, #1 \n"
+ ".arch_extension nopan \n");
}
+static struct cpu_feat feat_pan = {
+ .feat_name = "FEAT_PAN",
+ .feat_check = pan_check,
+ .feat_enable = pan_enable,
+ .feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU,
+};
+DATA_SET(cpu_feat_set, feat_pan);
+
bool
has_hyp(void)
{
-
- /*
- * XXX The E2H check is wrong, but it's close enough for now. Needs to
- * be re-evaluated once we're running regularly in EL2.
- */
- return (boot_el == CURRENTEL_EL_EL2 && (hcr_el2 & HCR_E2H) == 0);
+ return (boot_el == CURRENTEL_EL_EL2);
}
bool
@@ -262,7 +263,9 @@ late_ifunc_resolve(void *dummy __unused)
{
link_elf_late_ireloc();
}
-SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
+/* Late enough for cpu_feat to have completed */
+SYSINIT(late_ifunc_resolve, SI_SUB_CONFIGURE, SI_ORDER_ANY,
+ late_ifunc_resolve, NULL);
int
cpu_idle_wakeup(int cpu)
@@ -456,172 +459,6 @@ arm64_get_writable_addr(void *addr, void **out)
return (false);
}
-typedef void (*efi_map_entry_cb)(struct efi_md *, void *argp);
-
-static void
-foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb, void *argp)
-{
- struct efi_md *map, *p;
- size_t efisz;
- int ndesc, i;
-
- /*
- * Memory map data provided by UEFI via the GetMemoryMap
- * Boot Services API.
- */
- efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
- map = (struct efi_md *)((uint8_t *)efihdr + efisz);
-
- if (efihdr->descriptor_size == 0)
- return;
- ndesc = efihdr->memory_size / efihdr->descriptor_size;
-
- for (i = 0, p = map; i < ndesc; i++,
- p = efi_next_descriptor(p, efihdr->descriptor_size)) {
- cb(p, argp);
- }
-}
-
-/*
- * Handle the EFI memory map list.
- *
- * We will make two passes at this, the first (exclude == false) to populate
- * physmem with valid physical memory ranges from recognized map entry types.
- * In the second pass we will exclude memory ranges from physmem which must not
- * be used for general allocations, either because they are used by runtime
- * firmware or otherwise reserved.
- *
- * Adding the runtime-reserved memory ranges to physmem and excluding them
- * later ensures that they are included in the DMAP, but excluded from
- * phys_avail[].
- *
- * Entry types not explicitly listed here are ignored and not mapped.
- */
-static void
-handle_efi_map_entry(struct efi_md *p, void *argp)
-{
- bool exclude = *(bool *)argp;
-
- switch (p->md_type) {
- case EFI_MD_TYPE_RECLAIM:
- /*
- * The recomended location for ACPI tables. Map into the
- * DMAP so we can access them from userspace via /dev/mem.
- */
- case EFI_MD_TYPE_RT_CODE:
- /*
- * Some UEFI implementations put the system table in the
- * runtime code section. Include it in the DMAP, but will
- * be excluded from phys_avail.
- */
- case EFI_MD_TYPE_RT_DATA:
- /*
- * Runtime data will be excluded after the DMAP
- * region is created to stop it from being added
- * to phys_avail.
- */
- if (exclude) {
- physmem_exclude_region(p->md_phys,
- p->md_pages * EFI_PAGE_SIZE, EXFLAG_NOALLOC);
- break;
- }
- /* FALLTHROUGH */
- case EFI_MD_TYPE_CODE:
- case EFI_MD_TYPE_DATA:
- case EFI_MD_TYPE_BS_CODE:
- case EFI_MD_TYPE_BS_DATA:
- case EFI_MD_TYPE_FREE:
- /*
- * We're allowed to use any entry with these types.
- */
- if (!exclude)
- physmem_hardware_region(p->md_phys,
- p->md_pages * EFI_PAGE_SIZE);
- break;
- default:
- /* Other types shall not be handled by physmem. */
- break;
- }
-}
-
-static void
-add_efi_map_entries(struct efi_map_header *efihdr)
-{
- bool exclude = false;
- foreach_efi_map_entry(efihdr, handle_efi_map_entry, &exclude);
-}
-
-static void
-exclude_efi_map_entries(struct efi_map_header *efihdr)
-{
- bool exclude = true;
- foreach_efi_map_entry(efihdr, handle_efi_map_entry, &exclude);
-}
-
-static void
-print_efi_map_entry(struct efi_md *p, void *argp __unused)
-{
- const char *type;
- static const char *types[] = {
- "Reserved",
- "LoaderCode",
- "LoaderData",
- "BootServicesCode",
- "BootServicesData",
- "RuntimeServicesCode",
- "RuntimeServicesData",
- "ConventionalMemory",
- "UnusableMemory",
- "ACPIReclaimMemory",
- "ACPIMemoryNVS",
- "MemoryMappedIO",
- "MemoryMappedIOPortSpace",
- "PalCode",
- "PersistentMemory"
- };
-
- if (p->md_type < nitems(types))
- type = types[p->md_type];
- else
- type = "<INVALID>";
- printf("%23s %012lx %012lx %08lx ", type, p->md_phys,
- p->md_virt, p->md_pages);
- if (p->md_attr & EFI_MD_ATTR_UC)
- printf("UC ");
- if (p->md_attr & EFI_MD_ATTR_WC)
- printf("WC ");
- if (p->md_attr & EFI_MD_ATTR_WT)
- printf("WT ");
- if (p->md_attr & EFI_MD_ATTR_WB)
- printf("WB ");
- if (p->md_attr & EFI_MD_ATTR_UCE)
- printf("UCE ");
- if (p->md_attr & EFI_MD_ATTR_WP)
- printf("WP ");
- if (p->md_attr & EFI_MD_ATTR_RP)
- printf("RP ");
- if (p->md_attr & EFI_MD_ATTR_XP)
- printf("XP ");
- if (p->md_attr & EFI_MD_ATTR_NV)
- printf("NV ");
- if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
- printf("MORE_RELIABLE ");
- if (p->md_attr & EFI_MD_ATTR_RO)
- printf("RO ");
- if (p->md_attr & EFI_MD_ATTR_RT)
- printf("RUNTIME");
- printf("\n");
-}
-
-static void
-print_efi_map_entries(struct efi_map_header *efihdr)
-{
-
- printf("%23s %12s %12s %8s %4s\n",
- "Type", "Physical", "Virtual", "#Pages", "Attr");
- foreach_efi_map_entry(efihdr, print_efi_map_entry, NULL);
-}
-
/*
* Map the passed in VA in EFI space to a void * using the efi memory table to
* find the PA and return it in the DMAP, if it exists. We're used between the
@@ -658,7 +495,7 @@ efi_early_map(vm_offset_t va)
{
struct early_map_data emd = { .va = va };
- foreach_efi_map_entry(efihdr, efi_early_map_entry, &emd);
+ efi_map_foreach_entry(efihdr, efi_early_map_entry, &emd);
if (emd.pa == 0)
return NULL;
return (void *)PHYS_TO_DMAP(emd.pa);
@@ -666,16 +503,18 @@ efi_early_map(vm_offset_t va)
/*
- * When booted via kboot, the prior kernel will pass in reserved memory areas in
- * a EFI config table. We need to find that table and walk through it excluding
- * the memory ranges in it. btw, this is called too early for the printf to do
- * anything since msgbufp isn't initialized, let alone a console...
+ * When booted via kexec from Linux, the prior kernel will pass in reserved
+ * memory areas in an EFI config table. We need to find that table and walk
+ * through it, excluding the memory ranges in it. Note that this is called
+ * too early for printf to do anything (unless EARLY_PRINTF is defined),
+ * since msgbufp isn't initialized, let alone a console, but breakpoints
+ * in printf help diagnose rare failures.
*/
static void
-exclude_efi_memreserve(vm_offset_t efi_systbl_phys)
+exclude_efi_memreserve(vm_paddr_t efi_systbl_phys)
{
struct efi_systbl *systbl;
- struct uuid efi_memreserve = LINUX_EFI_MEMRESERVE_TABLE;
+ efi_guid_t efi_memreserve = LINUX_EFI_MEMRESERVE_TABLE;
systbl = (struct efi_systbl *)PHYS_TO_DMAP(efi_systbl_phys);
if (systbl == NULL) {
@@ -704,7 +543,7 @@ exclude_efi_memreserve(vm_offset_t efi_systbl_phys)
cfgtbl = efi_early_map(systbl->st_cfgtbl + i * sizeof(*cfgtbl));
if (cfgtbl == NULL)
panic("Can't map the config table entry %d\n", i);
- if (memcmp(&cfgtbl->ct_uuid, &efi_memreserve, sizeof(struct uuid)) != 0)
+ if (memcmp(&cfgtbl->ct_guid, &efi_memreserve, sizeof(efi_guid_t)) != 0)
continue;
/*
@@ -733,11 +572,11 @@ exclude_efi_memreserve(vm_offset_t efi_systbl_phys)
#ifdef FDT
static void
-try_load_dtb(caddr_t kmdp)
+try_load_dtb(void)
{
vm_offset_t dtbp;
- dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
+ dtbp = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
/*
* In case the device tree blob was not retrieved (from metadata) try
@@ -754,7 +593,7 @@ try_load_dtb(caddr_t kmdp)
return;
}
- if (OF_install(OFW_FDT, 0) == FALSE)
+ if (!OF_install(OFW_FDT, 0))
panic("Cannot install FDT");
if (OF_init((void *)dtbp) != 0)
@@ -807,10 +646,10 @@ bus_probe(void)
}
/* If no order or an invalid order was set use the default */
if (arm64_bus_method == ARM64_BUS_NONE) {
- if (has_fdt)
- arm64_bus_method = ARM64_BUS_FDT;
- else if (has_acpi)
+ if (has_acpi)
arm64_bus_method = ARM64_BUS_ACPI;
+ else if (has_fdt)
+ arm64_bus_method = ARM64_BUS_FDT;
}
/*
@@ -886,6 +725,21 @@ memory_mapping_mode(vm_paddr_t pa)
return (VM_MEMATTR_DEVICE);
}
+#ifdef FDT
+static void
+fdt_physmem_hardware_region_cb(const struct mem_region *mr, void *arg __unused)
+{
+ physmem_hardware_region(mr->mr_start, mr->mr_size);
+}
+
+static void
+fdt_physmem_exclude_region_cb(const struct mem_region *mr, void *arg __unused)
+{
+ physmem_exclude_region(mr->mr_start, mr->mr_size,
+ EXFLAG_NODUMP | EXFLAG_NOALLOC);
+}
+#endif
+
void
initarm(struct arm64_bootparams *abp)
{
@@ -893,28 +747,19 @@ initarm(struct arm64_bootparams *abp)
struct pcpu *pcpup;
char *env;
#ifdef FDT
- struct mem_region mem_regions[FDT_MEM_REGIONS];
- int mem_regions_sz;
phandle_t root;
char dts_version[255];
#endif
vm_offset_t lastaddr;
- caddr_t kmdp;
bool valid;
TSRAW(&thread0, TS_ENTER, __func__, NULL);
boot_el = abp->boot_el;
- hcr_el2 = abp->hcr_el2;
- /* Parse loader or FDT boot parametes. Determine last used address. */
+ /* Parse loader or FDT boot parameters. Determine last used address. */
lastaddr = parse_boot_param(abp);
- /* Find the kernel address */
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
-
identify_cpu(0);
identify_hypervisor_smbios();
@@ -936,33 +781,31 @@ initarm(struct arm64_bootparams *abp)
PCPU_SET(curthread, &thread0);
PCPU_SET(midr, get_midr());
- link_elf_ireloc(kmdp);
+ link_elf_ireloc();
#ifdef FDT
- try_load_dtb(kmdp);
+ try_load_dtb();
#endif
- efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
+ efi_systbl_phys = MD_FETCH(preload_kmdp, MODINFOMD_FW_HANDLE,
+ vm_paddr_t);
/* Load the physical memory ranges */
- efihdr = (struct efi_map_header *)preload_search_info(kmdp,
+ efihdr = (struct efi_map_header *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_EFI_MAP);
if (efihdr != NULL)
- add_efi_map_entries(efihdr);
+ efi_map_add_entries(efihdr);
#ifdef FDT
else {
/* Grab physical memory regions information from device tree. */
- if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
+ if (fdt_foreach_mem_region(fdt_physmem_hardware_region_cb,
NULL) != 0)
panic("Cannot get physical memory regions");
- physmem_hardware_regions(mem_regions, mem_regions_sz);
}
- if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
- physmem_exclude_regions(mem_regions, mem_regions_sz,
- EXFLAG_NODUMP | EXFLAG_NOALLOC);
+ fdt_foreach_reserved_mem(fdt_physmem_exclude_region_cb, NULL);
#endif
/* Exclude the EFI framebuffer from our view of physical memory. */
- efifb = (struct efi_fb *)preload_search_info(kmdp,
+ efifb = (struct efi_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_EFI_FB);
if (efifb != NULL)
physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
@@ -972,16 +815,30 @@ initarm(struct arm64_bootparams *abp)
init_param1();
cache_setup();
- pan_setup();
- /* Bootstrap enough of pmap to enter the kernel proper */
- pmap_bootstrap(lastaddr - KERNBASE);
- /* Exclude entries needed in the DMAP region, but not phys_avail */
+ /*
+ * Perform a staged bootstrap of virtual memory.
+ *
+ * - First we create the DMAP region. This allows it to be used in
+ * later bootstrapping.
+ * - Next exclude memory that is needed in the DMAP region, but must
+ * not be used by FreeBSD.
+ * - Lastly complete the bootstrapping. It may use the physical
+	 *   memory map, so any excluded memory must be marked as such before
+ * pmap_bootstrap() is called.
+ */
+ pmap_bootstrap_dmap(lastaddr - KERNBASE);
+ /*
+ * Exclude EFI entries needed in the DMAP, e.g. EFI_MD_TYPE_RECLAIM
+	 * may contain the ACPI tables but shouldn't be used by the kernel.
+ */
if (efihdr != NULL)
- exclude_efi_map_entries(efihdr);
+ efi_map_exclude_entries(efihdr);
/* Do the same for reserve entries in the EFI MEMRESERVE table */
if (efi_systbl_phys != 0)
exclude_efi_memreserve(efi_systbl_phys);
+ /* Continue bootstrapping pmap */
+ pmap_bootstrap();
/*
* We carefully bootstrap the sanitizer map after we've excluded
@@ -996,8 +853,6 @@ initarm(struct arm64_bootparams *abp)
physmem_init_kernel_globals();
- devmap_bootstrap(0, NULL);
-
valid = bus_probe();
cninit();
@@ -1008,12 +863,8 @@ initarm(struct arm64_bootparams *abp)
panic("Invalid bus configuration: %s",
kern_getenv("kern.cfg.order"));
- /*
- * Check if pointer authentication is available on this system, and
- * if so enable its use. This needs to be called before init_proc0
- * as that will configure the thread0 pointer authentication keys.
- */
- ptrauth_init();
+ /* Detect early CPU feature support */
+ enable_cpu_feat(CPU_FEAT_EARLY_BOOT);
/*
* Dump the boot metadata. We have to wait for cninit() since console
@@ -1034,7 +885,6 @@ initarm(struct arm64_bootparams *abp)
if ((boothowto & RB_KDB) != 0)
kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
- pan_enable();
kcsan_cpu_init(0);
kasan_init();
@@ -1062,7 +912,7 @@ initarm(struct arm64_bootparams *abp)
if (boothowto & RB_VERBOSE) {
if (efihdr != NULL)
- print_efi_map_entries(efihdr);
+ efi_map_print_entries(efihdr);
physmem_print_tables();
}
diff --git a/sys/arm64/arm64/machdep_boot.c b/sys/arm64/arm64/machdep_boot.c
index b8e0c734bbb4..83bd74ea7317 100644
--- a/sys/arm64/arm64/machdep_boot.c
+++ b/sys/arm64/arm64/machdep_boot.c
@@ -98,7 +98,7 @@ fake_preload_metadata(void *dtb_ptr, size_t dtb_size)
PRELOAD_PUSH_STRING("kernel");
PRELOAD_PUSH_VALUE(uint32_t, MODINFO_TYPE);
- PRELOAD_PUSH_STRING("elf kernel");
+ PRELOAD_PUSH_STRING(preload_kerntype);
PRELOAD_PUSH_VALUE(uint32_t, MODINFO_ADDR);
PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
@@ -124,6 +124,9 @@ fake_preload_metadata(void *dtb_ptr, size_t dtb_size)
preload_metadata = (caddr_t)(uintptr_t)fake_preload;
+ /* Initialize preload_kmdp */
+ preload_initkmdp(true);
+
init_static_kenv(NULL, 0);
return (lastaddr);
@@ -188,7 +191,6 @@ static vm_offset_t
freebsd_parse_boot_param(struct arm64_bootparams *abp)
{
vm_offset_t lastaddr = 0;
- void *kmdp;
#ifdef DDB
vm_offset_t ksym_start;
vm_offset_t ksym_end;
@@ -198,17 +200,19 @@ freebsd_parse_boot_param(struct arm64_bootparams *abp)
return (0);
preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
+
+ /* Initialize preload_kmdp */
+ preload_initkmdp(false);
+ if (preload_kmdp == NULL)
return (0);
- boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
- loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
+ boothowto = MD_FETCH(preload_kmdp, MODINFOMD_HOWTO, int);
+ loader_envp = MD_FETCH(preload_kmdp, MODINFOMD_ENVP, char *);
init_static_kenv(loader_envp, 0);
- lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
+ lastaddr = MD_FETCH(preload_kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
- ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
- ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
+ ksym_start = MD_FETCH(preload_kmdp, MODINFOMD_SSYM, uintptr_t);
+ ksym_end = MD_FETCH(preload_kmdp, MODINFOMD_ESYM, uintptr_t);
db_fetch_ksymtab(ksym_start, ksym_end, 0);
#endif
return (lastaddr);
diff --git a/sys/arm64/arm64/mem.c b/sys/arm64/arm64/mem.c
index 1f44d547204e..2a998ca1f845 100644
--- a/sys/arm64/arm64/mem.c
+++ b/sys/arm64/arm64/mem.c
@@ -49,6 +49,7 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
struct vm_page m;
vm_page_t marr;
vm_offset_t off, v;
+ vm_prot_t prot;
u_int cnt;
int error;
@@ -78,8 +79,16 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
break;
}
- if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE)) {
+ switch (uio->uio_rw) {
+ case UIO_READ:
+ prot = VM_PROT_READ;
+ break;
+ case UIO_WRITE:
+ prot = VM_PROT_WRITE;
+ break;
+ }
+
+ if (!kernacc((void *)v, cnt, prot)) {
error = EFAULT;
break;
}
diff --git a/sys/arm64/arm64/memcmp.S b/sys/arm64/arm64/memcmp.S
index 8517a181f3f3..19f577f218e0 100644
--- a/sys/arm64/arm64/memcmp.S
+++ b/sys/arm64/arm64/memcmp.S
@@ -9,6 +9,8 @@
* ARMv8-a, AArch64, unaligned accesses.
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#define L(l) .L ## l
@@ -134,3 +136,4 @@ L(byte_loop):
END (memcmp)
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/memcpy.S b/sys/arm64/arm64/memcpy.S
index b394d6c1d30a..01daa8e1c228 100644
--- a/sys/arm64/arm64/memcpy.S
+++ b/sys/arm64/arm64/memcpy.S
@@ -11,6 +11,8 @@
*
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#define L(l) .L ## l
@@ -240,3 +242,4 @@ L(copy64_from_start):
END(memcpy)
EEND(memmove)
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/memset.S b/sys/arm64/arm64/memset.S
index ec434493ce13..f52bfd62cc54 100644
--- a/sys/arm64/arm64/memset.S
+++ b/sys/arm64/arm64/memset.S
@@ -31,6 +31,8 @@
*
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#define dstin x0
@@ -195,3 +197,5 @@ ENTRY(memset)
b.ne .Ltail_maybe_long
ret
END(memset)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c
index 8ee626953aef..749c96545506 100644
--- a/sys/arm64/arm64/minidump_machdep.c
+++ b/sys/arm64/arm64/minidump_machdep.c
@@ -310,8 +310,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
for (i = 0; i < Ln_ENTRIES; i++) {
for (j = 0; j < Ln_ENTRIES; j++) {
tmpbuffer[j] = (pa + i * L2_SIZE +
- j * PAGE_SIZE) | ATTR_DEFAULT |
- L3_PAGE;
+ j * PAGE_SIZE) | ATTR_AF |
+ pmap_sh_attr | L3_PAGE;
}
error = blk_write(di, (char *)&tmpbuffer, 0,
PAGE_SIZE);
@@ -330,7 +330,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
/* Generate fake l3 entries based upon the l1 entry */
for (i = 0; i < Ln_ENTRIES; i++) {
tmpbuffer[i] = (pa + i * PAGE_SIZE) |
- ATTR_DEFAULT | L3_PAGE;
+ ATTR_AF | pmap_sh_attr | L3_PAGE;
}
error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
if (error)
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
index 9c6175445572..e4d011df3a06 100644
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -56,6 +56,7 @@
#include <machine/machdep.h>
#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
@@ -90,6 +91,7 @@ static struct {
} fdt_quirks[] = {
{ "arm,foundation-aarch64", MP_QUIRK_CPULIST },
{ "arm,fvp-base", MP_QUIRK_CPULIST },
+ { "arm,fvp-base-revc", MP_QUIRK_CPULIST },
/* This is incorrect in some DTS files */
{ "arm,vfp-base", MP_QUIRK_CPULIST },
{ NULL, 0 },
@@ -106,14 +108,13 @@ static void ipi_stop(void *);
static u_int fdt_cpuid;
#endif
-void mpentry(unsigned long cpuid);
+void mpentry_psci(unsigned long cpuid);
+void mpentry_spintable(void);
void init_secondary(uint64_t);
-/* Synchronize AP startup. */
-static struct mtx ap_boot_mtx;
-
/* Used to initialize the PCPU ahead of calling init_secondary(). */
void *bootpcpu;
+uint64_t ap_cpuid;
/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
@@ -123,7 +124,7 @@ static void *bootstacks[MAXCPU];
static volatile int aps_started;
/* Set to 1 once we're ready to let the APs out of the pen. */
-static volatile int aps_ready;
+static volatile int aps_after_dev, aps_ready;
/* Temporary variables for init_secondary() */
static void *dpcpu[MAXCPU - 1];
@@ -135,11 +136,53 @@ is_boot_cpu(uint64_t target_cpu)
return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}
+static bool
+wait_for_aps(void)
+{
+ for (int i = 0, started = 0; i < 2000; i++) {
+ int32_t nstarted;
+
+ nstarted = atomic_load_32(&aps_started);
+ if (nstarted == mp_ncpus - 1)
+ return (true);
+
+ /*
+ * Don't time out while we are making progress. Some large
+ * systems can take a while to start all CPUs.
+ */
+ if (nstarted > started) {
+ i = 0;
+ started = nstarted;
+ }
+ DELAY(1000);
+ }
+
+ return (false);
+}
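
The loop above implements a progress-resetting timeout: the 2000-tick
budget restarts whenever another AP checks in, so large systems that are
slow but still making progress don't trip it. The same pattern in
isolation (a sketch; DELAY as in the kernel):

	/*
	 * Sketch: wait up to "ticks" milliseconds for *count to reach
	 * "target", restarting the budget whenever the count advances.
	 */
	static bool
	wait_with_progress(volatile int *count, int target, int ticks)
	{
		for (int i = 0, seen = 0; i < ticks; i++) {
			int n = *count;

			if (n >= target)
				return (true);
			if (n > seen) {
				i = 0;	/* progress: restart the timeout */
				seen = n;
			}
			DELAY(1000);
		}
		return (false);
	}
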
+
static void
-release_aps(void *dummy __unused)
+release_aps_after_dev(void *dummy __unused)
{
- int i, started;
+ /* Only release CPUs if they exist */
+ if (mp_ncpus == 1)
+ return;
+ atomic_store_int(&aps_started, 0);
+ atomic_store_rel_int(&aps_after_dev, 1);
+ /* Wake up the other CPUs */
+ __asm __volatile(
+ "dsb ishst \n"
+ "sev \n"
+ ::: "memory");
+
+ wait_for_aps();
+}
+SYSINIT(aps_after_dev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE + 1,
+ release_aps_after_dev, NULL);
+
+static void
+release_aps(void *dummy __unused)
+{
/* Only release CPUs if they exist */
if (mp_ncpus == 1)
return;
@@ -151,6 +194,7 @@ release_aps(void *dummy __unused)
intr_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
+ atomic_store_int(&aps_started, 0);
atomic_store_rel_int(&aps_ready, 1);
/* Wake up the other CPUs */
__asm __volatile(
@@ -160,24 +204,13 @@ release_aps(void *dummy __unused)
printf("Release APs...");
- started = 0;
- for (i = 0; i < 2000; i++) {
- if (atomic_load_acq_int(&smp_started) != 0) {
- printf("done\n");
- return;
- }
- /*
- * Don't time out while we are making progress. Some large
- * systems can take a while to start all CPUs.
- */
- if (smp_cpus > started) {
- i = 0;
- started = smp_cpus;
- }
- DELAY(1000);
- }
+ if (wait_for_aps())
+ printf("done\n");
+ else
+ printf("APs not started\n");
- printf("APs not started\n");
+ smp_cpus = atomic_load_int(&aps_started) + 1;
+ atomic_store_rel_int(&smp_started, 1);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
@@ -221,8 +254,23 @@ init_secondary(uint64_t cpu)
/* Ensure the stores in identify_cpu have completed */
atomic_thread_fence_acq_rel();
- /* Signal the BSP and spin until it has released all APs. */
+ /* Detect early CPU feature support */
+ enable_cpu_feat(CPU_FEAT_EARLY_BOOT);
+
+ /* Signal we are waiting for aps_after_dev */
atomic_add_int(&aps_started, 1);
+
+ /* Wait for devices to be ready */
+ while (!atomic_load_int(&aps_after_dev))
+ __asm __volatile("wfe");
+
+ install_cpu_errata();
+ enable_cpu_feat(CPU_FEAT_AFTER_DEV);
+
+ /* Signal we are done */
+ atomic_add_int(&aps_started, 1);
+
+ /* Wait until we can run the scheduler */
while (!atomic_load_int(&aps_ready))
__asm __volatile("wfe");
@@ -237,8 +285,6 @@ init_secondary(uint64_t cpu)
("pmap0 doesn't match cpu %ld's ttbr0", cpu));
pcpup->pc_curpmap = pmap0;
- install_cpu_errata();
-
intr_pic_init_secondary();
/* Start per-CPU event timers. */
@@ -249,15 +295,9 @@ init_secondary(uint64_t cpu)
#endif
dbg_init();
- pan_enable();
- mtx_lock_spin(&ap_boot_mtx);
- atomic_add_rel_32(&smp_cpus, 1);
- if (smp_cpus == mp_ncpus) {
- /* enable IPI's, tlb shootdown, freezes etc */
- atomic_store_rel_int(&smp_started, 1);
- }
- mtx_unlock_spin(&ap_boot_mtx);
+ /* Signal the CPU is ready */
+ atomic_add_int(&aps_started, 1);
kcsan_cpu_init(cpu);
@@ -420,18 +460,25 @@ enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
vm_paddr_t *release_addr;
- release_addr = pmap_mapdev(release_paddr, sizeof(*release_addr));
+ ap_cpuid = cpu & CPU_AFF_MASK;
+
+ release_addr = pmap_mapdev_attr(release_paddr, sizeof(*release_addr),
+ VM_MEMATTR_DEFAULT);
if (release_addr == NULL)
return (ENOMEM);
*release_addr = entry;
+ cpu_dcache_wbinv_range(release_addr, sizeof(*release_addr));
pmap_unmapdev(release_addr, sizeof(*release_addr));
__asm __volatile(
- "dsb sy \n"
"sev \n"
::: "memory");
+ /* Wait for the target CPU to start */
+ while (atomic_load_64(&ap_cpuid) != 0)
+ __asm __volatile("wfe");
+
return (0);
}
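
Together with spintable_wait in locore.S this forms a two-sided
handshake: the BSP publishes the target affinity in ap_cpuid and the
entry point at the release address, and the woken AP acknowledges by
clearing ap_cpuid before continuing. A C11 model of just the handshake
(illustrative, not kernel code; barriers and sev/wfe reduced to spins):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t ap_cpuid_model;

	static void
	bsp_release(uint64_t target_aff)	/* enable_cpu_spin() side */
	{
		atomic_store(&ap_cpuid_model, target_aff);
		/* ... write entry PA to release_addr, clean dcache, sev ... */
		while (atomic_load(&ap_cpuid_model) != 0)
			;			/* wfe in the real code */
	}

	static void
	ap_spin(uint64_t my_aff)		/* spintable_wait side */
	{
		while (atomic_load(&ap_cpuid_model) != my_aff)
			;			/* spin until released */
		atomic_store(&ap_cpuid_model, 0);	/* ack, then dsb; sev */
	}
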
@@ -475,7 +522,6 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
- pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
/*
* A limited set of hardware we support can only do spintables and
@@ -483,10 +529,13 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
* PSCI branch here.
*/
MPASS(release_addr == 0 || !psci_present);
- if (release_addr != 0)
+ if (release_addr != 0) {
+ pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_spintable);
err = enable_cpu_spin(target_cpu, pa, release_addr);
- else
+ } else {
+ pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_psci);
err = enable_cpu_psci(target_cpu, pa, cpuid);
+ }
if (err != 0) {
pcpu_destroy(pcpup);
@@ -679,8 +728,6 @@ cpu_mp_start(void)
{
uint64_t mpidr;
- mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
-
/* CPU 0 is always boot CPU. */
CPU_SET(0, &all_cpus);
mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
diff --git a/sys/arm64/arm64/nexus.c b/sys/arm64/arm64/nexus.c
index 3e9399384855..26b3389db172 100644
--- a/sys/arm64/arm64/nexus.c
+++ b/sys/arm64/arm64/nexus.c
@@ -106,7 +106,6 @@ static bus_print_child_t nexus_print_child;
static bus_activate_resource_t nexus_activate_resource;
static bus_alloc_resource_t nexus_alloc_resource;
-static bus_deactivate_resource_t nexus_deactivate_resource;
static bus_get_resource_list_t nexus_get_reslist;
static bus_get_rman_t nexus_get_rman;
static bus_map_resource_t nexus_map_resource;
@@ -127,13 +126,16 @@ static ofw_bus_map_intr_t nexus_ofw_map_intr;
#endif
static device_method_t nexus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
/* Bus interface */
DEVMETHOD(bus_add_child, nexus_add_child),
DEVMETHOD(bus_print_child, nexus_print_child),
DEVMETHOD(bus_activate_resource, nexus_activate_resource),
DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource),
DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
- DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource),
DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource),
DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
DEVMETHOD(bus_get_resource_list, nexus_get_reslist),
@@ -178,8 +180,8 @@ nexus_attach(device_t dev)
if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0))
panic("nexus_attach irq_rman");
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -222,7 +224,6 @@ nexus_get_rman(device_t bus, int type, u_int flags)
case SYS_RES_IRQ:
return (&irq_rman);
case SYS_RES_MEMORY:
- case SYS_RES_IOPORT:
return (&mem_rman);
default:
return (NULL);
@@ -331,15 +332,14 @@ nexus_activate_resource_flags(device_t bus, device_t child, struct resource *r,
struct resource_map map;
int err, use_np;
- if ((err = rman_activate_resource(r)) != 0)
- return (err);
-
/*
* If this is a memory resource, map it into the kernel.
*/
switch (rman_get_type(r)) {
- case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
+ if ((err = rman_activate_resource(r)) != 0)
+ return (err);
+
if ((rman_get_flags(r) & RF_UNMAPPED) == 0) {
resource_init_map_request(&args);
use_np = (flags & BUS_SPACE_MAP_NONPOSTED) != 0 ||
@@ -359,12 +359,8 @@ nexus_activate_resource_flags(device_t bus, device_t child, struct resource *r,
rman_set_mapping(r, &map);
}
break;
- case SYS_RES_IRQ:
- err = intr_activate_irq(child, r);
- if (err != 0) {
- rman_deactivate_resource(r);
- return (err);
- }
+ default:
+ return (bus_generic_rman_activate_resource(bus, child, r));
}
return (0);
}
@@ -384,26 +380,6 @@ nexus_get_reslist(device_t dev, device_t child)
}
static int
-nexus_deactivate_resource(device_t bus, device_t child, struct resource *r)
-{
- int error;
-
- switch (rman_get_type(r)) {
- case SYS_RES_MEMORY:
- case SYS_RES_IOPORT:
- return (bus_generic_rman_deactivate_resource(bus, child, r));
- case SYS_RES_IRQ:
- error = rman_deactivate_resource(r);
- if (error)
- return (error);
- intr_deactivate_irq(child, r);
- return (0);
- default:
- return (EINVAL);
- }
-}
-
-static int
nexus_map_resource(device_t bus, device_t child, struct resource *r,
struct resource_map_request *argsp, struct resource_map *map)
{
@@ -415,9 +391,8 @@ nexus_map_resource(device_t bus, device_t child, struct resource *r,
if ((rman_get_flags(r) & RF_ACTIVE) == 0)
return (ENXIO);
- /* Mappings are only supported on I/O and memory resources. */
+ /* Mappings are only supported on memory resources. */
switch (rman_get_type(r)) {
- case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
break;
default:
@@ -447,7 +422,6 @@ nexus_unmap_resource(device_t bus, device_t child, struct resource *r,
switch (rman_get_type(r)) {
case SYS_RES_MEMORY:
- case SYS_RES_IOPORT:
pmap_unmapdev(map->r_vaddr, map->r_size);
return (0);
default:
@@ -505,7 +479,6 @@ nexus_fdt_activate_resource(device_t bus, device_t child, struct resource *r)
flags = 0;
switch (rman_get_type(r)) {
case SYS_RES_MEMORY:
- case SYS_RES_IOPORT:
/*
* If the fdt parent has the nonposted-mmio property we
* need to use non-posted IO to access the device. When
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index ea7ff18971e4..d2e56a270f54 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -146,6 +146,7 @@
#include <vm/uma.h>
#include <machine/asan.h>
+#include <machine/cpu_feat.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
@@ -181,12 +182,13 @@
#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
#ifdef __ARM_FEATURE_BTI_DEFAULT
-#define ATTR_KERN_GP ATTR_S1_GP
+pt_entry_t __read_mostly pmap_gp_attr;
+#define ATTR_KERN_GP pmap_gp_attr
#else
#define ATTR_KERN_GP 0
#endif
-#define PMAP_SAN_PTE_BITS (ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \
- ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+#define PMAP_SAN_PTE_BITS (ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
+ ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
struct pmap_large_md_page {
struct rwlock pv_lock;
@@ -281,6 +283,9 @@ VM_PAGE_TO_PV_LIST_LOCK(vm_page_t m)
} \
} while (0)
+#define PTE_TO_VM_PAGE(pte) PHYS_TO_VM_PAGE(PTE_TO_PHYS(pte))
+#define VM_PAGE_TO_PTE(m) PHYS_TO_PTE(VM_PAGE_TO_PHYS(m))
+
/*
* The presence of this flag indicates that the mapping is writeable.
* If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
@@ -352,15 +357,24 @@ static u_int physmap_idx;
static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"VM/pmap parameters");
+static int pmap_growkernel_panic = 0;
+SYSCTL_INT(_vm_pmap, OID_AUTO, growkernel_panic, CTLFLAG_RDTUN,
+ &pmap_growkernel_panic, 0,
+ "panic on failure to allocate kernel page table page");
+
+bool pmap_lpa_enabled __read_mostly = false;
+pt_entry_t pmap_sh_attr __read_mostly = ATTR_SH(ATTR_SH_IS);
+
#if PAGE_SIZE == PAGE_SIZE_4K
#define L1_BLOCKS_SUPPORTED 1
#else
-/* TODO: Make this dynamic when we support FEAT_LPA2 (TCR_EL1.DS == 1) */
-#define L1_BLOCKS_SUPPORTED 0
+#define L1_BLOCKS_SUPPORTED (pmap_lpa_enabled)
#endif
#define PMAP_ASSERT_L1_BLOCKS_SUPPORTED MPASS(L1_BLOCKS_SUPPORTED)
+static bool pmap_l1_supported __read_mostly = false;
+
/*
* This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
* that it has currently allocated to a pmap, a cursor ("asid_next") to
@@ -402,7 +416,6 @@ SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
"The current epoch number");
void (*pmap_clean_stage2_tlbi)(void);
-void (*pmap_invalidate_vpipt_icache)(void);
void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t, bool);
void (*pmap_stage2_invalidate_all)(uint64_t);
@@ -426,7 +439,6 @@ void (*pmap_stage2_invalidate_all)(uint64_t);
#define TLBI_VA_SHIFT 12
#define TLBI_VA_MASK ((1ul << 44) - 1)
#define TLBI_VA(addr) (((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
-#define TLBI_VA_L3_INCR (L3_SIZE >> TLBI_VA_SHIFT)
static int __read_frequently superpages_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
@@ -467,6 +479,7 @@ static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
vm_offset_t va, struct rwlock **lockp);
static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
+static bool pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va);
static bool pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
@@ -505,7 +518,8 @@ static void pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
static uma_zone_t pmap_bti_ranges_zone;
-static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
+static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+ pt_entry_t *pte);
static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va);
static void pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
static void *bti_dup_range(void *ctx, void *data);
@@ -1105,6 +1119,7 @@ pmap_bootstrap_l2_table(struct pmap_bootstrap_state *state)
static void
pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
{
+ pt_entry_t contig;
u_int l2_slot;
bool first;
@@ -1115,7 +1130,7 @@ pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
pmap_bootstrap_l1_table(state);
MPASS((state->va & L2_OFFSET) == 0);
- for (first = true;
+ for (first = true, contig = 0;
state->va < DMAP_MAX_ADDRESS &&
(physmap[i + 1] - state->pa) >= L2_SIZE;
state->va += L2_SIZE, state->pa += L2_SIZE) {
@@ -1126,13 +1141,27 @@ pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
if (!first && (state->pa & L1_OFFSET) == 0)
break;
+ /*
+ * If we have an aligned, contiguous chunk of L2C_ENTRIES
+ * L2 blocks, set the contiguous bit within each PTE so that
+ * the chunk can be cached using only one TLB entry.
+ */
+ if ((state->pa & L2C_OFFSET) == 0) {
+ if (state->va + L2C_SIZE < DMAP_MAX_ADDRESS &&
+ physmap[i + 1] - state->pa >= L2C_SIZE) {
+ contig = ATTR_CONTIGUOUS;
+ } else {
+ contig = 0;
+ }
+ }
+
first = false;
l2_slot = pmap_l2_index(state->va);
MPASS((state->pa & L2_OFFSET) == 0);
MPASS(state->l2[l2_slot] == 0);
pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
- ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
- ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+ ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L2_BLOCK);
}
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
}
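
The hint test above is a pure alignment-and-coverage check for starting a new hinted chunk; entries in the middle of a chunk simply keep the previously computed value of contig. A standalone restatement with hypothetical names, assuming the 4K-granule constants (L2_SIZE = 2MB, 16 contiguous L2 entries, hence L2C_SIZE = 32MB and L2C_OFFSET = L2C_SIZE - 1):

	/* Hypothetical helper, equivalent to the test above. */
	static inline bool
	l2c_hint_ok(vm_offset_t va, vm_paddr_t pa, vm_paddr_t phys_end)
	{
		return ((pa & L2C_OFFSET) == 0 &&	/* PA starts a 32MB chunk */
		    va + L2C_SIZE < DMAP_MAX_ADDRESS &&	/* VA room for the chunk */
		    phys_end - pa >= L2C_SIZE);		/* PA run covers the chunk */
	}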
@@ -1181,22 +1210,42 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
MPASS((state->pa & L3_OFFSET) == 0);
MPASS(state->l3[l3_slot] == 0);
pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
- ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+ ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L3_PAGE);
}
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
}
-static void
-pmap_bootstrap_dmap(vm_paddr_t min_pa)
+void
+pmap_bootstrap_dmap(vm_size_t kernlen)
{
+ vm_paddr_t start_pa, pa;
+ uint64_t tcr;
int i;
- dmap_phys_base = min_pa & ~L1_OFFSET;
+ tcr = READ_SPECIALREG(tcr_el1);
+
+ /* Verify that the ASID is set through TTBR0. */
+ KASSERT((tcr & TCR_A1) == 0, ("pmap_bootstrap: TCR_EL1.A1 != 0"));
+
+ if ((tcr & TCR_DS) != 0)
+ pmap_lpa_enabled = true;
+
+ pmap_l1_supported = L1_BLOCKS_SUPPORTED;
+
+ start_pa = pmap_early_vtophys(KERNBASE);
+
+ bs_state.freemempos = KERNBASE + kernlen;
+ bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
+
+ /* Fill in physmap array. */
+ physmap_idx = physmem_avail(physmap, nitems(physmap));
+
+ dmap_phys_base = physmap[0] & ~L1_OFFSET;
dmap_phys_max = 0;
dmap_max_addr = 0;
- for (i = 0; i < (physmap_idx * 2); i += 2) {
+ for (i = 0; i < physmap_idx; i += 2) {
bs_state.pa = physmap[i] & ~L3_OFFSET;
bs_state.va = bs_state.pa - dmap_phys_base + DMAP_MIN_ADDRESS;
@@ -1220,7 +1269,8 @@ pmap_bootstrap_dmap(vm_paddr_t min_pa)
MPASS((bs_state.pa & L1_OFFSET) == 0);
pmap_store(
&bs_state.l1[pmap_l1_index(bs_state.va)],
- PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
+ PHYS_TO_PTE(bs_state.pa) | ATTR_AF |
+ pmap_sh_attr |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
}
@@ -1247,6 +1297,12 @@ pmap_bootstrap_dmap(vm_paddr_t min_pa)
}
cpu_tlb_flushID();
+
+ bs_state.dmap_valid = true;
+
+ /* Exclude the kernel and DMAP region */
+ pa = pmap_early_vtophys(bs_state.freemempos);
+ physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
}
static void
@@ -1277,15 +1333,11 @@ pmap_bootstrap_l3(vm_offset_t va)
* Bootstrap the system enough to run with virtual memory.
*/
void
-pmap_bootstrap(vm_size_t kernlen)
+pmap_bootstrap(void)
{
vm_offset_t dpcpu, msgbufpv;
- vm_paddr_t start_pa, pa, min_pa;
- int i;
-
- /* Verify that the ASID is set through TTBR0. */
- KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0,
- ("pmap_bootstrap: TCR_EL1.A1 != 0"));
+ vm_paddr_t start_pa, pa;
+ size_t largest_phys_size;
/* Set this early so we can use the pagetable walking functions */
kernel_pmap_store.pm_l0 = pagetable_l0_ttbr1;
@@ -1300,36 +1352,42 @@ pmap_bootstrap(vm_size_t kernlen)
kernel_pmap->pm_ttbr = kernel_pmap->pm_l0_paddr;
kernel_pmap->pm_asid_set = &asids;
- /* Assume the address we were loaded to is a valid physical address */
- min_pa = pmap_early_vtophys(KERNBASE);
+ /* Reserve some VA space for early BIOS/ACPI mapping */
+ preinit_map_va = roundup2(bs_state.freemempos, L2_SIZE);
- physmap_idx = physmem_avail(physmap, nitems(physmap));
- physmap_idx /= 2;
+ virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
+ virtual_avail = roundup2(virtual_avail, L1_SIZE);
+ virtual_end = VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE;
+ kernel_vm_end = virtual_avail;
/*
- * Find the minimum physical address. physmap is sorted,
- * but may contain empty ranges.
+ * We only use PXN when we know nothing will be executed from it, e.g.
+ * the DMAP region.
*/
- for (i = 0; i < physmap_idx * 2; i += 2) {
- if (physmap[i] == physmap[i + 1])
- continue;
- if (physmap[i] <= min_pa)
- min_pa = physmap[i];
- }
+ bs_state.table_attrs &= ~TATTR_PXN_TABLE;
- bs_state.freemempos = KERNBASE + kernlen;
- bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
+ /*
+ * Find the physical memory we could use. This needs to be after we
+ * exclude any memory that is mapped into the DMAP region but should
+ * not be used by the kernel, e.g. some UEFI memory types.
+ */
+ physmap_idx = physmem_avail(physmap, nitems(physmap));
- /* Create a direct map region early so we can use it for pa -> va */
- pmap_bootstrap_dmap(min_pa);
- bs_state.dmap_valid = true;
/*
- * We only use PXN when we know nothing will be executed from it, e.g.
- * the DMAP region.
+ * Find space for early allocations. We search for the largest
+ * region. This is because the user may choose a large msgbuf.
+ * This could be smarter, e.g. to allow multiple regions to be
+ * used & switch to the next when one is full.
*/
- bs_state.table_attrs &= ~TATTR_PXN_TABLE;
+ largest_phys_size = 0;
+ for (int i = 0; i < physmap_idx; i += 2) {
+ if ((physmap[i + 1] - physmap[i]) > largest_phys_size) {
+ largest_phys_size = physmap[i + 1] - physmap[i];
+ bs_state.freemempos = PHYS_TO_DMAP(physmap[i]);
+ }
+ }
- start_pa = pa = pmap_early_vtophys(KERNBASE);
+ start_pa = pmap_early_vtophys(bs_state.freemempos);
/*
* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS. We assume that the
@@ -1355,19 +1413,9 @@ pmap_bootstrap(vm_size_t kernlen)
alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
msgbufp = (void *)msgbufpv;
- /* Reserve some VA space for early BIOS/ACPI mapping */
- preinit_map_va = roundup2(bs_state.freemempos, L2_SIZE);
-
- virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
- virtual_avail = roundup2(virtual_avail, L1_SIZE);
- virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
- kernel_vm_end = virtual_avail;
-
pa = pmap_early_vtophys(bs_state.freemempos);
physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
-
- cpu_tlb_flushID();
}
#if defined(KASAN) || defined(KMSAN)
@@ -1419,9 +1467,7 @@ pmap_bootstrap_san1(vm_offset_t va, int scale)
* Rebuild physmap one more time, we may have excluded more regions from
* allocation since pmap_bootstrap().
*/
- bzero(physmap, sizeof(physmap));
physmap_idx = physmem_avail(physmap, nitems(physmap));
- physmap_idx /= 2;
eva = va + (virtual_avail - VM_MIN_KERNEL_ADDRESS) / scale;
@@ -1430,7 +1476,7 @@ pmap_bootstrap_san1(vm_offset_t va, int scale)
* the shadow map as high up as we can to avoid depleting the lower 4GB in case
* it's needed for, e.g., an xhci controller that can only do 32-bit DMA.
*/
- for (i = (physmap_idx * 2) - 2; i >= 0; i -= 2) {
+ for (i = physmap_idx - 2; i >= 0; i -= 2) {
vm_paddr_t plow, phigh;
/* L2 mappings must be backed by memory that is L2-aligned */
@@ -1531,11 +1577,11 @@ pmap_init_pv_table(void)
int domain, i, j, pages;
/*
- * We strongly depend on the size being a power of two, so the assert
- * is overzealous. However, should the struct be resized to a
- * different power of two, the code below needs to be revisited.
+ * We depend on the size being evenly divisible into a page so
+ * that the pv_table array can be indexed directly while
+ * safely spanning multiple pages from different domains.
*/
- CTASSERT((sizeof(*pvd) == 64));
+ CTASSERT(PAGE_SIZE % sizeof(*pvd) == 0);
/*
* Calculate the size of the array.
@@ -1609,9 +1655,79 @@ pmap_init_pv_table(void)
}
}
+static bool
+pmap_dbm_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+{
+ uint64_t id_aa64mmfr1;
+
+ id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+ return (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
+ ID_AA64MMFR1_HAFDBS_AF_DBS);
+}
+
+static bool
+pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
+ u_int **errata_list, u_int *errata_count)
+{
+ /* Disable on Cortex-A55 for erratum 1024718 - all revisions */
+ if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
+ CPU_PART_CORTEX_A55, 0, 0)) {
+ static u_int errata_id = 1024718;
+
+ *errata_list = &errata_id;
+ *errata_count = 1;
+ return (true);
+ }
+
+ /* Disable on Cortex-A510 for erratum 2051678 - r0p0 to r0p2 */
+ if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_VAR_MASK,
+ CPU_IMPL_ARM, CPU_PART_CORTEX_A510, 0, 0)) {
+ if (CPU_REV(PCPU_GET(midr)) < 3) {
+ static u_int errata_id = 2051678;
+
+ *errata_list = &errata_id;
+ *errata_count = 1;
+ return (true);
+ }
+ }
+
+ return (false);
+}
+
+static void
+pmap_dbm_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status, u_int *errata_list __unused,
+ u_int errata_count)
+{
+ uint64_t tcr;
+
+ /* Skip if there is an erratum affecting DBM */
+ if (errata_status != ERRATA_NONE)
+ return;
+
+ tcr = READ_SPECIALREG(tcr_el1) | TCR_HD;
+ WRITE_SPECIALREG(tcr_el1, tcr);
+ isb();
+ /* Flush the local TLB for the TCR_HD flag change */
+ dsb(nshst);
+ __asm __volatile("tlbi vmalle1");
+ dsb(nsh);
+ isb();
+}
+
+static struct cpu_feat feat_dbm = {
+ .feat_name = "FEAT_HAFDBS (DBM)",
+ .feat_check = pmap_dbm_check,
+ .feat_has_errata = pmap_dbm_has_errata,
+ .feat_enable = pmap_dbm_enable,
+ .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
+};
+DATA_SET(cpu_feat_set, feat_dbm);
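
feat_dbm registers hardware dirty-bit management through the new cpu_feat framework (cpu_feat.c/cpu_feat.h in the diffstat): feat_check gates on ID_AA64MMFR1_EL1.HAFDBS, feat_has_errata vetoes Cortex-A55 and early Cortex-A510 cores, and the CPU_FEAT_PER_CPU | CPU_FEAT_AFTER_DEV flags indicate that feat_enable runs once on every CPU after device attach, so each core sets TCR_EL1.HD for itself.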
+
/*
* Initialize the pmap module.
- * Called by vm_init, to initialize any structures that the pmap
+ *
+ * Called by vm_mem_init(), to initialize any structures that the pmap
* system needs to map virtual memory.
*/
void
@@ -1627,11 +1743,14 @@ pmap_init(void)
if (superpages_enabled) {
KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
("pmap_init: can't assign to pagesizes[1]"));
- pagesizes[1] = L2_SIZE;
+ pagesizes[1] = L3C_SIZE;
+ KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
+ ("pmap_init: can't assign to pagesizes[2]"));
+ pagesizes[2] = L2_SIZE;
if (L1_BLOCKS_SUPPORTED) {
- KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
- ("pmap_init: can't assign to pagesizes[2]"));
- pagesizes[2] = L1_SIZE;
+ KASSERT(MAXPAGESIZES > 3 && pagesizes[3] == 0,
+ ("pmap_init: can't assign to pagesizes[3]"));
+ pagesizes[3] = L1_SIZE;
}
}
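
pagesizes[] now carries a four-level ladder. For the 4K granule the correspondence works out as follows (the second size in each sysctl description below is the 16K-granule equivalent):

	/* Illustration of the new psind ladder (4K granule assumed):
	 *   psind 0 -> L3 page,      4KB
	 *   psind 1 -> L3C run,     64KB (16 PTEs sharing ATTR_CONTIGUOUS)
	 *   psind 2 -> L2 block,     2MB
	 *   psind 3 -> L1 block,     1GB (only when L1_BLOCKS_SUPPORTED)
	 */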
@@ -1664,35 +1783,60 @@ pmap_init(void)
vm_initialized = 1;
}
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, l1, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "L1 (1GB/64GB) page mapping counters");
+
+static COUNTER_U64_DEFINE_EARLY(pmap_l1_demotions);
+SYSCTL_COUNTER_U64(_vm_pmap_l1, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_l1_demotions, "L1 (1GB/64GB) page demotions");
+
+SYSCTL_BOOL(_vm_pmap_l1, OID_AUTO, supported, CTLFLAG_RD, &pmap_l1_supported,
+ 0, "L1 blocks are supported");
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2c, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "L2C (32MB/1GB) page mapping counters");
+
+static COUNTER_U64_DEFINE_EARLY(pmap_l2c_demotions);
+SYSCTL_COUNTER_U64(_vm_pmap_l2c, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_l2c_demotions, "L2C (32MB/1GB) page demotions");
+
static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"2MB page mapping counters");
-static u_long pmap_l2_demotions;
-SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
- &pmap_l2_demotions, 0, "2MB page demotions");
+static COUNTER_U64_DEFINE_EARLY(pmap_l2_demotions);
+SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_l2_demotions, "L2 (2MB/32MB) page demotions");
-static u_long pmap_l2_mappings;
-SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
- &pmap_l2_mappings, 0, "2MB page mappings");
+static COUNTER_U64_DEFINE_EARLY(pmap_l2_mappings);
+SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
+ &pmap_l2_mappings, "L2 (2MB/32MB) page mappings");
-static u_long pmap_l2_p_failures;
-SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
- &pmap_l2_p_failures, 0, "2MB page promotion failures");
+static COUNTER_U64_DEFINE_EARLY(pmap_l2_p_failures);
+SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
+ &pmap_l2_p_failures, "L2 (2MB/32MB) page promotion failures");
-static u_long pmap_l2_promotions;
-SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
- &pmap_l2_promotions, 0, "2MB page promotions");
+static COUNTER_U64_DEFINE_EARLY(pmap_l2_promotions);
+SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
+ &pmap_l2_promotions, "L2 (2MB/32MB) page promotions");
static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3c, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "64KB page mapping counters");
+ "L3C (64KB/2MB) page mapping counters");
-static u_long pmap_l3c_demotions;
-SYSCTL_ULONG(_vm_pmap_l3c, OID_AUTO, demotions, CTLFLAG_RD,
- &pmap_l3c_demotions, 0, "64KB page demotions");
+static COUNTER_U64_DEFINE_EARLY(pmap_l3c_demotions);
+SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_l3c_demotions, "L3C (64KB/2MB) page demotions");
-static u_long pmap_l3c_mappings;
-SYSCTL_ULONG(_vm_pmap_l3c, OID_AUTO, mappings, CTLFLAG_RD,
- &pmap_l3c_mappings, 0, "64KB page mappings");
+static COUNTER_U64_DEFINE_EARLY(pmap_l3c_mappings);
+SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, mappings, CTLFLAG_RD,
+ &pmap_l3c_mappings, "L3C (64KB/2MB) page mappings");
+
+static COUNTER_U64_DEFINE_EARLY(pmap_l3c_p_failures);
+SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, p_failures, CTLFLAG_RD,
+ &pmap_l3c_p_failures, "L3C (64KB/2MB) page promotion failures");
+
+static COUNTER_U64_DEFINE_EARLY(pmap_l3c_promotions);
+SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, promotions, CTLFLAG_RD,
+ &pmap_l3c_promotions, "L3C (64KB/2MB) page promotions");
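
All of these statistics move from plain atomically-updated longs to per-CPU counter(9) counters, which avoids cache-line bouncing on promotion-heavy workloads; they remain readable at run time, e.g. via sysctl vm.pmap.l2 and sysctl vm.pmap.l3c.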
/*
* If the given value for "final_only" is false, then any cached intermediate-
@@ -1760,12 +1904,12 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
}
/*
- * Invalidates any cached final- and optionally intermediate-level TLB entries
- * for the specified virtual address range in the given virtual address space.
+ * Use stride L{1,2}_SIZE when invalidating the TLB entries for L{1,2}_BLOCK
+ * mappings. Otherwise, use stride L3_SIZE.
*/
static __inline void
-pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
- bool final_only)
+pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+ vm_offset_t stride, bool final_only)
{
uint64_t end, r, start;
@@ -1775,19 +1919,30 @@ pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
if (pmap == kernel_pmap) {
start = TLBI_VA(sva);
end = TLBI_VA(eva);
- for (r = start; r < end; r += TLBI_VA_L3_INCR)
+ for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_kernel(r, final_only);
} else {
start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
start |= TLBI_VA(sva);
end |= TLBI_VA(eva);
- for (r = start; r < end; r += TLBI_VA_L3_INCR)
+ for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_user(r, final_only);
}
dsb(ish);
isb();
}
+/*
+ * Invalidates any cached final- and optionally intermediate-level TLB entries
+ * for the specified virtual address range in the given virtual address space.
+ */
+static __inline void
+pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+ bool final_only)
+{
+ pmap_s1_invalidate_strided(pmap, sva, eva, L3_SIZE, final_only);
+}
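
The stride matters because each loop iteration issues one TLBI instruction. Illustrative arithmetic for a single 2MB range, assuming the 4K-granule sizes:

	/* stride L3_SIZE (4KB):  2MB / 4KB = 512 TLBI operations
	 * stride L2_SIZE (2MB):  2MB / 2MB =   1 TLBI operation
	 * Callers pass the largest stride matching the mappings being
	 * replaced, e.g. pmap_update_strided() below.
	 */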
+
static __inline void
pmap_s2_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
bool final_only)
@@ -1961,6 +2116,56 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
}
/*
+ * Returns true if the entire kernel virtual address range [sva, sva + size) is mapped.
+ */
+static bool
+pmap_kmapped_range(vm_offset_t sva, vm_size_t size)
+{
+ pt_entry_t *pte, tpte;
+ vm_offset_t eva;
+
+ KASSERT(sva >= VM_MIN_KERNEL_ADDRESS,
+ ("%s: Invalid virtual address: %lx", __func__, sva));
+ MPASS(size != 0);
+ eva = sva + size - 1;
+ KASSERT(eva > sva, ("%s: Size too large: sva %lx, size %lx", __func__,
+ sva, size));
+
+ while (sva <= eva) {
+ pte = pmap_l1(kernel_pmap, sva);
+ if (pte == NULL)
+ return (false);
+ tpte = pmap_load(pte);
+ if (tpte == 0)
+ return (false);
+ if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
+ sva = (sva & ~L1_OFFSET) + L1_SIZE;
+ continue;
+ }
+
+ pte = pmap_l1_to_l2(&tpte, sva);
+ tpte = pmap_load(pte);
+ if (tpte == 0)
+ return (false);
+ if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
+ sva = (sva & ~L2_OFFSET) + L2_SIZE;
+ continue;
+ }
+ pte = pmap_l2_to_l3(&tpte, sva);
+ tpte = pmap_load(pte);
+ if (tpte == 0)
+ return (false);
+ MPASS((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_PAGE);
+ if ((tpte & ATTR_CONTIGUOUS) == ATTR_CONTIGUOUS)
+ sva = (sva & ~L3C_OFFSET) + L3C_SIZE;
+ else
+ sva = (sva & ~L3_OFFSET) + L3_SIZE;
+ }
+
+ return (true);
+}
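
Note that the walk advances by whatever each mapping covers (1GB L1 block, 2MB L2 block, 64KB L3C run, or a single 4KB page), so validating even a large KVA range touches only a handful of PTEs; a plausible consumer is the new SDT probe-patching code (sdt_machdep.c in the diffstat), which would want this check before writing kernel text.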
+
+/*
* Walks the page tables to translate a kernel virtual address to a
* physical address. Returns true if the kva is valid and stores the
* physical address in pa if it is not NULL.
@@ -2071,8 +2276,8 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
KASSERT((size & PAGE_MASK) == 0,
("pmap_kenter: Mapping is not page-sized"));
- attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
- ATTR_KERN_GP | ATTR_S1_IDX(mode);
+ attr = ATTR_AF | pmap_sh_attr | ATTR_S1_AP(ATTR_S1_AP_RW) |
+ ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
old_l3e = 0;
va = sva;
while (size != 0) {
@@ -2087,7 +2292,7 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
*/
if ((va & L2_OFFSET) == 0 && size >= L2_SIZE &&
(pa & L2_OFFSET) == 0 && vm_initialized) {
- mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(pde)));
+ mpte = PTE_TO_VM_PAGE(pmap_load(pde));
KASSERT(pmap_every_pte_zero(VM_PAGE_TO_PHYS(mpte)),
("pmap_kenter: Unexpected mapping"));
PMAP_LOCK(kernel_pmap);
@@ -2271,7 +2476,7 @@ void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
pd_entry_t *pde;
- pt_entry_t attr, old_l3e, pa, *pte;
+ pt_entry_t attr, old_l3e, *pte;
vm_offset_t va;
vm_page_t m;
int i, lvl;
@@ -2286,11 +2491,11 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
("pmap_qenter: Invalid level %d", lvl));
m = ma[i];
- pa = VM_PAGE_TO_PHYS(m);
- attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+ attr = ATTR_AF | pmap_sh_attr |
+ ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
pte = pmap_l2_to_l3(pde, va);
- old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr);
+ old_l3e |= pmap_load_store(pte, VM_PAGE_TO_PTE(m) | attr);
va += L3_SIZE;
}
@@ -2403,7 +2608,7 @@ _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
l1 = pmap_l1(pmap, va);
tl1 = pmap_load(l1);
- l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl1));
+ l2pg = PTE_TO_VM_PAGE(tl1);
pmap_unwire_l3(pmap, va, l2pg, free);
} else if (m->pindex < (NUL2E + NUL1E)) {
/* We just released an l2, unhold the matching l1 */
@@ -2412,7 +2617,7 @@ _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
l0 = pmap_l0(pmap, va);
tl0 = pmap_load(l0);
- l1pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl0));
+ l1pg = PTE_TO_VM_PAGE(tl0);
pmap_unwire_l3(pmap, va, l1pg, free);
}
pmap_invalidate_page(pmap, va, false);
@@ -2439,7 +2644,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
if (ADDR_IS_KERNEL(va))
return (0);
KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
- mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(ptepde));
+ mpte = PTE_TO_VM_PAGE(ptepde);
return (pmap_unwire_l3(pmap, va, mpte, free));
}
@@ -2602,7 +2807,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
l0p = &pmap->pm_l0[l0index];
KASSERT((pmap_load(l0p) & ATTR_DESCR_VALID) == 0,
("%s: L0 entry %#lx is valid", __func__, pmap_load(l0p)));
- l0e = PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L0_TABLE;
+ l0e = VM_PAGE_TO_PTE(m) | L0_TABLE;
/*
* Mark all kernel memory as not accessible from userspace
@@ -2634,7 +2839,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
return (NULL);
}
} else {
- l1pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl0));
+ l1pg = PTE_TO_VM_PAGE(tl0);
l1pg->ref_count++;
}
@@ -2642,7 +2847,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
l1 = &l1[ptepindex & Ln_ADDR_MASK];
KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
- pmap_store(l1, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L1_TABLE);
+ pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
} else {
vm_pindex_t l0index, l1index;
pd_entry_t *l0, *l1, *l2;
@@ -2677,7 +2882,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
return (NULL);
}
} else {
- l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl1));
+ l2pg = PTE_TO_VM_PAGE(tl1);
l2pg->ref_count++;
}
}
@@ -2686,7 +2891,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
l2 = &l2[ptepindex & Ln_ADDR_MASK];
KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
- pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L2_TABLE);
+ pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
}
pmap_resident_count_inc(pmap, 1);
@@ -2711,7 +2916,7 @@ retry:
l2 = pmap_l1_to_l2(l1, va);
if (!ADDR_IS_KERNEL(va)) {
/* Add a reference to the L2 page. */
- l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l1)));
+ l2pg = PTE_TO_VM_PAGE(pmap_load(l1));
l2pg->ref_count++;
} else
l2pg = NULL;
@@ -2780,7 +2985,7 @@ retry:
case 2:
tpde = pmap_load(pde);
if (tpde != 0) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpde));
+ m = PTE_TO_VM_PAGE(tpde);
m->ref_count++;
return (m);
}
@@ -2896,10 +3101,9 @@ SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
/*
* grow the number of kernel page table entries, if needed
*/
-void
-pmap_growkernel(vm_offset_t addr)
+static int
+pmap_growkernel_nopanic(vm_offset_t addr)
{
- vm_paddr_t paddr;
vm_page_t nkpg;
pd_entry_t *l0, *l1, *l2;
@@ -2921,14 +3125,13 @@ pmap_growkernel(vm_offset_t addr)
if (pmap_load(l1) == 0) {
/* We need a new PDP entry */
nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
- VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (nkpg == NULL)
- panic("pmap_growkernel: no memory to grow kernel");
- nkpg->pindex = kernel_vm_end >> L1_SHIFT;
+ return (KERN_RESOURCE_SHORTAGE);
+ nkpg->pindex = pmap_l1_pindex(kernel_vm_end);
/* See the dmb() in _pmap_alloc_l3(). */
dmb(ishst);
- paddr = VM_PAGE_TO_PHYS(nkpg);
- pmap_store(l1, PHYS_TO_PTE(paddr) | L1_TABLE);
+ pmap_store(l1, VM_PAGE_TO_PTE(nkpg) | L1_TABLE);
continue; /* try again */
}
l2 = pmap_l1_to_l2(l1, kernel_vm_end);
@@ -2941,15 +3144,14 @@ pmap_growkernel(vm_offset_t addr)
continue;
}
- nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
- VM_ALLOC_ZERO);
+ nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
+ VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (nkpg == NULL)
- panic("pmap_growkernel: no memory to grow kernel");
- nkpg->pindex = kernel_vm_end >> L2_SHIFT;
+ return (KERN_RESOURCE_SHORTAGE);
+ nkpg->pindex = pmap_l2_pindex(kernel_vm_end);
/* See the dmb() in _pmap_alloc_l3(). */
dmb(ishst);
- paddr = VM_PAGE_TO_PHYS(nkpg);
- pmap_store(l2, PHYS_TO_PTE(paddr) | L2_TABLE);
+ pmap_store(l2, VM_PAGE_TO_PTE(nkpg) | L2_TABLE);
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
@@ -2957,6 +3159,18 @@ pmap_growkernel(vm_offset_t addr)
break;
}
}
+ return (KERN_SUCCESS);
+}
+
+int
+pmap_growkernel(vm_offset_t addr)
+{
+ int rv;
+
+ rv = pmap_growkernel_nopanic(addr);
+ if (rv != KERN_SUCCESS && pmap_growkernel_panic)
+ panic("pmap_growkernel: no memory to grow kernel");
+ return (rv);
}
/***************************************************
@@ -3102,7 +3316,7 @@ reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
if ((tpte & ATTR_CONTIGUOUS) != 0)
(void)pmap_demote_l3c(pmap, pte, va);
tpte = pmap_load_clear(pte);
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte));
+ m = PTE_TO_VM_PAGE(tpte);
if (pmap_pte_dirty(pmap, tpte))
vm_page_dirty(m);
if ((tpte & ATTR_AF) != 0) {
@@ -3682,7 +3896,7 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
if (old_l2 & ATTR_SW_MANAGED) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l2));
+ m = PTE_TO_VM_PAGE(old_l2);
pvh = page_to_pvh(m);
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
pmap_pvh_free(pvh, pmap, sva);
@@ -3734,7 +3948,7 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
pmap->pm_stats.wired_count -= 1;
pmap_resident_count_dec(pmap, 1);
if (old_l3 & ATTR_SW_MANAGED) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l3));
+ m = PTE_TO_VM_PAGE(old_l3);
if (pmap_pte_dirty(pmap, old_l3))
vm_page_dirty(m);
if (old_l3 & ATTR_AF)
@@ -3793,7 +4007,7 @@ pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, vm_offset_t *vap,
pmap->pm_stats.wired_count -= L3C_ENTRIES;
pmap_resident_count_dec(pmap, L3C_ENTRIES);
if ((first_l3e & ATTR_SW_MANAGED) != 0) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(first_l3e));
+ m = PTE_TO_VM_PAGE(first_l3e);
new_lock = VM_PAGE_TO_PV_LIST_LOCK(m);
if (new_lock != *lockp) {
if (*lockp != NULL) {
@@ -3862,7 +4076,7 @@ pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
("pmap_remove_l3_range: range crosses an L3 page table boundary"));
- l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(PTE_TO_PHYS(l2e)) : NULL;
+ l3pg = !ADDR_IS_KERNEL(sva) ? PTE_TO_VM_PAGE(l2e) : NULL;
va = eva;
for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
old_l3 = pmap_load(l3);
@@ -3899,7 +4113,7 @@ pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
pmap->pm_stats.wired_count--;
pmap_resident_count_dec(pmap, 1);
if ((old_l3 & ATTR_SW_MANAGED) != 0) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l3));
+ m = PTE_TO_VM_PAGE(old_l3);
if (pmap_pte_dirty(pmap, old_l3))
vm_page_dirty(m);
if ((old_l3 & ATTR_AF) != 0)
@@ -4013,9 +4227,6 @@ pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
va_next = eva;
l2 = pmap_l1_to_l2(l1, sva);
- if (l2 == NULL)
- continue;
-
l3_paddr = pmap_load(l2);
if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
@@ -4212,7 +4423,7 @@ pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
pmap_pte_dirty(pmap, old_l2)) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l2));
+ m = PTE_TO_VM_PAGE(old_l2);
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
vm_page_dirty(mt);
}
@@ -4263,7 +4474,7 @@ pmap_mask_set_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
if ((l3e & ATTR_SW_MANAGED) != 0 &&
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
dirty) {
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l3p)));
+ m = PTE_TO_VM_PAGE(pmap_load(l3p));
for (mt = m; mt < &m[L3C_ENTRIES]; mt++)
vm_page_dirty(mt);
}
@@ -4327,7 +4538,8 @@ pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t m
if (sva + L2_SIZE == va_next && eva >= va_next) {
pmap_protect_l2(pmap, l2, sva, mask, nbits);
continue;
- } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
+ } else if ((pmap_load(l2) & mask) == nbits ||
+ pmap_demote_l2(pmap, l2, sva) == NULL)
continue;
}
KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
@@ -4357,8 +4569,22 @@ pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t m
va = va_next;
}
if ((l3 & ATTR_CONTIGUOUS) != 0) {
- l3p += L3C_ENTRIES - 1;
- sva += L3C_SIZE - L3_SIZE;
+ /*
+ * Does this L3C page extend beyond
+ * the requested range? Handle the
+ * possibility that "va_next" is zero.
+ */
+ if ((sva | L3C_OFFSET) > va_next - 1)
+ break;
+
+ /*
+ * Skip ahead to the last L3_PAGE
+ * within this L3C page.
+ */
+ l3p = (pt_entry_t *)((uintptr_t)l3p |
+ ((L3C_ENTRIES - 1) *
+ sizeof(pt_entry_t)));
+ sva |= L3C_SIZE - L3_SIZE;
}
continue;
}
@@ -4397,7 +4623,7 @@ pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t m
if ((l3 & ATTR_SW_MANAGED) != 0 &&
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
pmap_pte_dirty(pmap, l3))
- vm_page_dirty(PHYS_TO_VM_PAGE(PTE_TO_PHYS(l3)));
+ vm_page_dirty(PTE_TO_VM_PAGE(l3));
if (va == va_next)
va = sva;
@@ -4514,18 +4740,11 @@ static void
pmap_update_entry(pmap_t pmap, pd_entry_t *ptep, pd_entry_t newpte,
vm_offset_t va, vm_size_t size)
{
- pd_entry_t *lip, *ptep_end;
register_t intr;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-
- if ((newpte & ATTR_SW_NO_PROMOTE) != 0)
- panic("%s: Updating non-promote pte", __func__);
-
- if (size == L3C_SIZE)
- ptep_end = ptep + L3C_ENTRIES;
- else
- ptep_end = ptep + 1;
+ KASSERT((newpte & ATTR_SW_NO_PROMOTE) == 0,
+ ("%s: Updating non-promote pte", __func__));
/*
* Ensure we don't get switched out with the page table in an
@@ -4539,8 +4758,7 @@ pmap_update_entry(pmap_t pmap, pd_entry_t *ptep, pd_entry_t newpte,
* unchanged, so that a lockless, concurrent pmap_kextract() can still
* lookup the physical address.
*/
- for (lip = ptep; lip < ptep_end; lip++)
- pmap_clear_bits(lip, ATTR_DESCR_VALID);
+ pmap_clear_bits(ptep, ATTR_DESCR_VALID);
/*
* When promoting, the L{1,2}_TABLE entry that is being replaced might
@@ -4550,9 +4768,48 @@ pmap_update_entry(pmap_t pmap, pd_entry_t *ptep, pd_entry_t newpte,
pmap_s1_invalidate_range(pmap, va, va + size, false);
/* Create the new mapping */
+ pmap_store(ptep, newpte);
+ dsb(ishst);
+
+ intr_restore(intr);
+}
+
+/*
+ * Performs a break-before-make update of an ATTR_CONTIGUOUS mapping.
+ */
+static void __nosanitizecoverage
+pmap_update_strided(pmap_t pmap, pd_entry_t *ptep, pd_entry_t *ptep_end,
+ pd_entry_t newpte, vm_offset_t va, vm_offset_t stride, vm_size_t size)
+{
+ pd_entry_t *lip;
+ register_t intr;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((newpte & ATTR_SW_NO_PROMOTE) == 0,
+ ("%s: Updating non-promote pte", __func__));
+
+ /*
+ * Ensure we don't get switched out with the page table in an
+ * inconsistent state. We also need to ensure no interrupts fire
+ * as they may make use of an address we are about to invalidate.
+ */
+ intr = intr_disable();
+
+ /*
+ * Clear the old mapping's valid bits, but leave the rest of each
+ * entry unchanged, so that a lockless, concurrent pmap_kextract() can
+ * still lookup the physical address.
+ */
+ for (lip = ptep; lip < ptep_end; lip++)
+ pmap_clear_bits(lip, ATTR_DESCR_VALID);
+
+ /* Only final entries are changing. */
+ pmap_s1_invalidate_strided(pmap, va, va + size, stride, true);
+
+ /* Create the new mapping. */
for (lip = ptep; lip < ptep_end; lip++) {
pmap_store(lip, newpte);
- newpte += PAGE_SIZE;
+ newpte += stride;
}
dsb(ishst);
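
For reference, the break-before-make ordering that pmap_update_entry() and pmap_update_strided() share, written out step by step:

	/* 1. intr_disable()          -- nothing on this CPU may fault on va
	 * 2. clear ATTR_DESCR_VALID  -- "break": translation off, PA bits
	 *                               kept for lockless pmap_kextract()
	 * 3. TLB invalidation        -- flush any stale cached entries
	 * 4. pmap_store() new PTEs   -- "make": publish the replacement
	 * 5. dsb(ishst), intr_restore()
	 */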
@@ -4635,7 +4892,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t mpte,
/* ... is not the first physical page within an L2 block */
if ((PTE_TO_PHYS(newl2) & L2_OFFSET) != 0 ||
((newl2 & ATTR_DESCR_MASK) != L3_PAGE)) { /* ... or is invalid */
- atomic_add_long(&pmap_l2_p_failures, 1);
+ counter_u64_add(pmap_l2_p_failures, 1);
CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
" in pmap %p", va, pmap);
return (false);
@@ -4683,7 +4940,7 @@ setl2:
for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
oldl3 = pmap_load(l3);
if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) {
- atomic_add_long(&pmap_l2_p_failures, 1);
+ counter_u64_add(pmap_l2_p_failures, 1);
CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
" in pmap %p", va, pmap);
return (false);
@@ -4702,7 +4959,7 @@ setl3:
oldl3 &= ~ATTR_SW_DBM;
}
if ((oldl3 & ATTR_PROMOTE) != (newl2 & ATTR_PROMOTE)) {
- atomic_add_long(&pmap_l2_p_failures, 1);
+ counter_u64_add(pmap_l2_p_failures, 1);
CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
" in pmap %p", va, pmap);
return (false);
@@ -4725,14 +4982,14 @@ setl3:
* destroyed by pmap_remove_l3().
*/
if (mpte == NULL)
- mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
+ mpte = PTE_TO_VM_PAGE(pmap_load(l2));
KASSERT(mpte >= vm_page_array &&
mpte < &vm_page_array[vm_page_array_size],
("pmap_promote_l2: page table page is out of range"));
KASSERT(mpte->pindex == pmap_l2_pindex(va),
("pmap_promote_l2: page table page's pindex is wrong"));
if (pmap_insert_pt_page(pmap, mpte, true, all_l3e_AF != 0)) {
- atomic_add_long(&pmap_l2_p_failures, 1);
+ counter_u64_add(pmap_l2_p_failures, 1);
CTR2(KTR_PMAP,
"pmap_promote_l2: failure for va %#lx in pmap %p", va,
pmap);
@@ -4744,33 +5001,161 @@ setl3:
pmap_update_entry(pmap, l2, newl2 | L2_BLOCK, va & ~L2_OFFSET, L2_SIZE);
- atomic_add_long(&pmap_l2_promotions, 1);
+ counter_u64_add(pmap_l2_promotions, 1);
CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
pmap);
return (true);
}
+
+/*
+ * Tries to promote an aligned, contiguous set of base page mappings to a
+ * single L3C page mapping. For promotion to occur, two conditions must be
+ * met: (1) the base page mappings must map aligned, contiguous physical
+ * memory and (2) the base page mappings must have identical characteristics
+ * except for the accessed flag.
+ */
+static bool
+pmap_promote_l3c(pmap_t pmap, pd_entry_t *l3p, vm_offset_t va)
+{
+ pd_entry_t all_l3e_AF, firstl3c, *l3, oldl3, pa;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * Currently, this function only supports promotion on stage 1 pmaps
+ * because it tests stage 1 specific fields and performs a break-
+ * before-make sequence that is incorrect for stage 2 pmaps.
+ */
+ if (pmap->pm_stage != PM_STAGE1 || !pmap_ps_enabled(pmap))
+ return (false);
+
+ /*
+ * Compute the address of the first L3 entry in the superpage
+ * candidate.
+ */
+ l3p = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
+ sizeof(pt_entry_t)) - 1));
+
+ firstl3c = pmap_load(l3p);
+
+ /*
+ * Examine the first L3 entry. Abort if this L3E is ineligible for
+ * promotion...
+ */
+ if ((firstl3c & ATTR_SW_NO_PROMOTE) != 0)
+ return (false);
+ /* ...is not properly aligned... */
+ if ((PTE_TO_PHYS(firstl3c) & L3C_OFFSET) != 0 ||
+ (firstl3c & ATTR_DESCR_MASK) != L3_PAGE) { /* ...or is invalid. */
+ counter_u64_add(pmap_l3c_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (false);
+ }
+
+ /*
+ * If the first L3 entry is a clean read-write mapping, convert it
+ * to a read-only mapping. See pmap_promote_l2() for the rationale.
+ */
+set_first:
+ if ((firstl3c & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+ (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
+ /*
+ * When the mapping is clean, i.e., ATTR_S1_AP_RO is set,
+ * ATTR_SW_DBM can be cleared without a TLB invalidation.
+ */
+ if (!atomic_fcmpset_64(l3p, &firstl3c, firstl3c & ~ATTR_SW_DBM))
+ goto set_first;
+ firstl3c &= ~ATTR_SW_DBM;
+ CTR2(KTR_PMAP, "pmap_promote_l3c: protect for va %#lx"
+ " in pmap %p", va & ~L3C_OFFSET, pmap);
+ }
+
+ /*
+ * Check that the rest of the L3 entries are compatible with the first,
+ * and convert clean read-write mappings to read-only mappings.
+ */
+ all_l3e_AF = firstl3c & ATTR_AF;
+ pa = (PTE_TO_PHYS(firstl3c) | (firstl3c & ATTR_DESCR_MASK)) +
+ L3C_SIZE - PAGE_SIZE;
+ for (l3 = l3p + L3C_ENTRIES - 1; l3 > l3p; l3--) {
+ oldl3 = pmap_load(l3);
+ if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) {
+ counter_u64_add(pmap_l3c_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (false);
+ }
+set_l3:
+ if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+ (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
+ /*
+ * When the mapping is clean, i.e., ATTR_S1_AP_RO is
+ * set, ATTR_SW_DBM can be cleared without a TLB
+ * invalidation.
+ */
+ if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
+ ~ATTR_SW_DBM))
+ goto set_l3;
+ oldl3 &= ~ATTR_SW_DBM;
+ CTR2(KTR_PMAP, "pmap_promote_l3c: protect for va %#lx"
+ " in pmap %p", (oldl3 & ~ATTR_MASK & L3C_OFFSET) |
+ (va & ~L3C_OFFSET), pmap);
+ }
+ if ((oldl3 & ATTR_PROMOTE) != (firstl3c & ATTR_PROMOTE)) {
+ counter_u64_add(pmap_l3c_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (false);
+ }
+ all_l3e_AF &= oldl3;
+ pa -= PAGE_SIZE;
+ }
+
+ /*
+ * Unless all PTEs have ATTR_AF set, clear it from the superpage
+ * mapping, so that promotions triggered by speculative mappings,
+ * such as pmap_enter_quick(), don't automatically mark the
+ * underlying pages as referenced.
+ */
+ firstl3c &= ~ATTR_AF | all_l3e_AF;
+
+ /*
+ * Remake the mappings with the contiguous bit set.
+ */
+ pmap_update_strided(pmap, l3p, l3p + L3C_ENTRIES, firstl3c |
+ ATTR_CONTIGUOUS, va & ~L3C_OFFSET, L3_SIZE, L3C_SIZE);
+
+ counter_u64_add(pmap_l3c_promotions, 1);
+ CTR2(KTR_PMAP, "pmap_promote_l3c: success for va %#lx in pmap %p", va,
+ pmap);
+ return (true);
+}
#endif /* VM_NRESERVLEVEL > 0 */
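
One subtlety above: "firstl3c &= ~ATTR_AF | all_l3e_AF" keeps the accessed flag in the promoted mapping only if every constituent PTE had it set. Spelled out:

	/* all_l3e_AF is either ATTR_AF or 0 after the loop:
	 *   all PTEs accessed: firstl3c &= ~0UL      -> ATTR_AF preserved
	 *   any PTE clear:     firstl3c &= ~ATTR_AF  -> ATTR_AF dropped
	 */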
static int
-pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
+pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t pte, int flags,
int psind)
{
- pd_entry_t *l0p, *l1p, *l2p, origpte;
+ pd_entry_t *l0p, *l1p, *l2p, *l3p, newpte, origpte, *tl3p;
vm_page_t mp;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT(psind > 0 && psind < MAXPAGESIZES,
("psind %d unexpected", psind));
- KASSERT((PTE_TO_PHYS(newpte) & (pagesizes[psind] - 1)) == 0,
- ("unaligned phys address %#lx newpte %#lx psind %d",
- PTE_TO_PHYS(newpte), newpte, psind));
+ KASSERT((PTE_TO_PHYS(pte) & (pagesizes[psind] - 1)) == 0,
+ ("unaligned phys address %#lx pte %#lx psind %d",
+ PTE_TO_PHYS(pte), pte, psind));
restart:
- if (!pmap_bti_same(pmap, va, va + pagesizes[psind]))
+ newpte = pte;
+ if (!pmap_bti_same(pmap, va, va + pagesizes[psind], &newpte))
return (KERN_PROTECTION_FAILURE);
- if (psind == 2) {
+ if (psind == 3) {
PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
+ KASSERT(pagesizes[psind] == L1_SIZE,
+ ("pagesizes[%d] != L1_SIZE", psind));
l0p = pmap_l0(pmap, va);
if ((pmap_load(l0p) & ATTR_DESCR_VALID) == 0) {
mp = _pmap_alloc_l3(pmap, pmap_l0_pindex(va), NULL);
@@ -4790,8 +5175,7 @@ restart:
KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
origpte = pmap_load(l1p);
if ((origpte & ATTR_DESCR_VALID) == 0) {
- mp = PHYS_TO_VM_PAGE(
- PTE_TO_PHYS(pmap_load(l0p)));
+ mp = PTE_TO_VM_PAGE(pmap_load(l0p));
mp->ref_count++;
}
}
@@ -4801,7 +5185,9 @@ restart:
("va %#lx changing 1G phys page l1 %#lx newpte %#lx",
va, origpte, newpte));
pmap_store(l1p, newpte);
- } else /* (psind == 1) */ {
+ } else if (psind == 2) {
+ KASSERT(pagesizes[psind] == L2_SIZE,
+ ("pagesizes[%d] != L2_SIZE", psind));
l2p = pmap_l2(pmap, va);
if (l2p == NULL) {
mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL);
@@ -4820,8 +5206,7 @@ restart:
l1p = pmap_l1(pmap, va);
origpte = pmap_load(l2p);
if ((origpte & ATTR_DESCR_VALID) == 0) {
- mp = PHYS_TO_VM_PAGE(
- PTE_TO_PHYS(pmap_load(l1p)));
+ mp = PTE_TO_VM_PAGE(pmap_load(l1p));
mp->ref_count++;
}
}
@@ -4831,6 +5216,40 @@ restart:
("va %#lx changing 2M phys page l2 %#lx newpte %#lx",
va, origpte, newpte));
pmap_store(l2p, newpte);
+ } else /* (psind == 1) */ {
+ KASSERT(pagesizes[psind] == L3C_SIZE,
+ ("pagesizes[%d] != L3C_SIZE", psind));
+ l2p = pmap_l2(pmap, va);
+ if (l2p == NULL || (pmap_load(l2p) & ATTR_DESCR_VALID) == 0) {
+ mp = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL);
+ if (mp == NULL) {
+ if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+ return (KERN_RESOURCE_SHORTAGE);
+ PMAP_UNLOCK(pmap);
+ vm_wait(NULL);
+ PMAP_LOCK(pmap);
+ goto restart;
+ }
+ mp->ref_count += L3C_ENTRIES - 1;
+ l3p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
+ l3p = &l3p[pmap_l3_index(va)];
+ } else {
+ l3p = pmap_l2_to_l3(l2p, va);
+ if ((pmap_load(l3p) & ATTR_DESCR_VALID) == 0) {
+ mp = PTE_TO_VM_PAGE(pmap_load(l2p));
+ mp->ref_count += L3C_ENTRIES;
+ }
+ }
+ for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
+ origpte = pmap_load(tl3p);
+ KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
+ ((origpte & ATTR_CONTIGUOUS) != 0 &&
+ PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte)),
+ ("va %#lx changing 64K phys page l3 %#lx newpte %#lx",
+ va, origpte, newpte));
+ pmap_store(tl3p, newpte);
+ newpte += L3_SIZE;
+ }
}
dsb(ishst);
@@ -4869,7 +5288,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_paddr_t opa, pa;
vm_page_t mpte, om;
bool nosleep;
- int lvl, rv;
+ int full_lvl, lvl, rv;
KASSERT(ADDR_IS_CANONICAL(va),
("%s: Address not in canonical form: %lx", __func__, va));
@@ -4878,7 +5297,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((m->oflags & VPO_UNMANAGED) == 0)
VM_PAGE_OBJECT_BUSY_ASSERT(m);
pa = VM_PAGE_TO_PHYS(m);
- new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
+ new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
+ L3_PAGE);
new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
new_l3 |= pmap_pte_prot(pmap, prot);
if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -4922,30 +5342,50 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
lock = NULL;
PMAP_LOCK(pmap);
- /* Wait until we lock the pmap to protect the bti rangeset */
- new_l3 |= pmap_pte_bti(pmap, va);
-
if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("managed largepage va %#lx flags %#x", va, flags));
- new_l3 &= ~L3_PAGE;
- if (psind == 2) {
+ if (psind == 3) {
PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
+ new_l3 &= ~L3_PAGE;
new_l3 |= L1_BLOCK;
- } else /* (psind == 1) */
+ } else if (psind == 2) {
+ new_l3 &= ~L3_PAGE;
new_l3 |= L2_BLOCK;
+ } else /* (psind == 1) */
+ new_l3 |= ATTR_CONTIGUOUS;
rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind);
goto out;
}
- if (psind == 1) {
+ if (psind == 2) {
/* Assert the required virtual and physical alignment. */
KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
- KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
+ KASSERT(m->psind > 1, ("pmap_enter: m->psind < psind"));
rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
flags, m, &lock);
goto out;
}
mpte = NULL;
+ if (psind == 1) {
+ KASSERT((va & L3C_OFFSET) == 0, ("pmap_enter: va unaligned"));
+ KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
+ rv = pmap_enter_l3c(pmap, va, new_l3 | ATTR_CONTIGUOUS, flags,
+ m, &mpte, &lock);
+#if VM_NRESERVLEVEL > 0
+ /*
+ * Attempt L2 promotion, if both the PTP and a level 1
+ * reservation are fully populated.
+ */
+ if (rv == KERN_SUCCESS &&
+ (mpte == NULL || mpte->ref_count == NL3PG) &&
+ (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 1) {
+ pde = pmap_l2(pmap, va);
+ (void)pmap_promote_l2(pmap, pde, va, mpte, &lock);
+ }
+#endif
+ goto out;
+ }
/*
* In the case that a page table page is not
@@ -4956,7 +5396,7 @@ retry:
if (pde != NULL && lvl == 2) {
l3 = pmap_l2_to_l3(pde, va);
if (!ADDR_IS_KERNEL(va) && mpte == NULL) {
- mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(pde)));
+ mpte = PTE_TO_VM_PAGE(pmap_load(pde));
mpte->ref_count++;
}
goto havel3;
@@ -4966,8 +5406,7 @@ retry:
(l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
l3 = &l3[pmap_l3_index(va)];
if (!ADDR_IS_KERNEL(va)) {
- mpte = PHYS_TO_VM_PAGE(
- PTE_TO_PHYS(pmap_load(l2)));
+ mpte = PTE_TO_VM_PAGE(pmap_load(l2));
mpte->ref_count++;
}
goto havel3;
@@ -4997,6 +5436,7 @@ havel3:
orig_l3 = pmap_load(l3);
opa = PTE_TO_PHYS(orig_l3);
pv = NULL;
+ new_l3 |= pmap_pte_bti(pmap, va);
/*
* Is the specified virtual address already mapped?
@@ -5161,12 +5601,18 @@ validate:
#if VM_NRESERVLEVEL > 0
/*
- * If both the page table page and the reservation are fully
- * populated, then attempt promotion.
+ * First, attempt L3C promotion, if the virtual and physical addresses
+ * are aligned with each other and an underlying reservation has the
+ * neighboring L3 pages allocated. The first condition is simply an
+ * optimization that recognizes some eventual promotion failures early
+ * at a lower run-time cost. Then, if both a level 1 reservation and
+ * the PTP are fully populated, attempt L2 promotion.
*/
- if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+ if ((va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
(m->flags & PG_FICTITIOUS) == 0 &&
- vm_reserv_level_iffullpop(m) == 0)
+ (full_lvl = vm_reserv_level_iffullpop(m)) >= 0 &&
+ pmap_promote_l3c(pmap, l3, va) &&
+ full_lvl == 1 && (mpte == NULL || mpte->ref_count == NL3PG))
(void)pmap_promote_l2(pmap, pde, va, mpte, &lock);
#endif
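
The new leading test is a cheap congruence check: an L3C promotion can only ever succeed when va and pa are offset identically within a 64KB chunk, so misaligned mappings skip the attempt outright. With hypothetical addresses (L3C_OFFSET = 0xffff assumed):

	/* va 0x10000, pa 0x90000: 0x0000 == 0x0000 -> worth attempting
	 * va 0x11000, pa 0x90000: 0x1000 != 0x0000 -> can never promote
	 */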
@@ -5195,14 +5641,13 @@ pmap_enter_l2_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
KASSERT(ADDR_IS_CANONICAL(va),
("%s: Address not in canonical form: %lx", __func__, va));
- new_l2 = (pd_entry_t)(PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT |
+ new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | pmap_sh_attr |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
L2_BLOCK);
- new_l2 |= pmap_pte_bti(pmap, va);
- if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0)
new_l2 |= ATTR_SW_MANAGED;
- new_l2 &= ~ATTR_AF;
- }
+ else
+ new_l2 |= ATTR_AF;
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
new_l2 |= ATTR_S1_XN;
@@ -5271,7 +5716,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
* and let vm_fault() cope. Check after l2 allocation, since
* it could sleep.
*/
- if (!pmap_bti_same(pmap, va, va + L2_SIZE)) {
+ if (!pmap_bti_same(pmap, va, va + L2_SIZE, &new_l2)) {
KASSERT(l2pg != NULL, ("pmap_enter_l2: missing L2 PTP"));
pmap_abort_ptp(pmap, va, l2pg);
return (KERN_PROTECTION_FAILURE);
@@ -5324,7 +5769,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
* an invalidation at all levels after clearing
* the L2_TABLE entry.
*/
- mt = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
+ mt = PTE_TO_VM_PAGE(pmap_load(l2));
if (pmap_insert_pt_page(pmap, mt, false, false))
panic("pmap_enter_l2: trie insert failed");
pmap_clear(l2);
@@ -5339,12 +5784,14 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
if ((new_l2 & ATTR_SW_WIRED) != 0 && pmap != kernel_pmap) {
uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (uwptpg == NULL) {
+ pmap_abort_ptp(pmap, va, l2pg);
return (KERN_RESOURCE_SHORTAGE);
}
uwptpg->pindex = pmap_l2_pindex(va);
if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
vm_page_unwire_noq(uwptpg);
vm_page_free(uwptpg);
+ pmap_abort_ptp(pmap, va, l2pg);
return (KERN_RESOURCE_SHORTAGE);
}
pmap_resident_count_inc(pmap, 1);
@@ -5400,7 +5847,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
pmap_store(l2, new_l2);
dsb(ishst);
- atomic_add_long(&pmap_l2_mappings, 1);
+ counter_u64_add(pmap_l2_mappings, 1);
CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
va, pmap);
@@ -5423,14 +5870,13 @@ pmap_enter_l3c_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
KASSERT(ADDR_IS_CANONICAL(va),
("%s: Address not in canonical form: %lx", __func__, va));
- l3e = PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT |
+ l3e = VM_PAGE_TO_PTE(m) | pmap_sh_attr |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
ATTR_CONTIGUOUS | L3_PAGE;
- l3e |= pmap_pte_bti(pmap, va);
- if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0)
l3e |= ATTR_SW_MANAGED;
- l3e &= ~ATTR_AF;
- }
+ else
+ l3e |= ATTR_AF;
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
l3e |= ATTR_S1_XN;
@@ -5460,6 +5906,8 @@ pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
("pmap_enter_l3c: va is not aligned"));
KASSERT(!VA_IS_CLEANMAP(va) || (l3e & ATTR_SW_MANAGED) == 0,
("pmap_enter_l3c: managed mapping within the clean submap"));
+ KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
+ ("pmap_enter_l3c: l3e is missing ATTR_CONTIGUOUS"));
/*
* If the L3 PTP is not resident, we attempt to create it here.
@@ -5492,9 +5940,8 @@ retry:
l3p = pmap_demote_l2_locked(pmap, l2p,
va, lockp);
if (l3p != NULL) {
- *ml3p = PHYS_TO_VM_PAGE(
- PTE_TO_PHYS(pmap_load(
- l2p)));
+ *ml3p = PTE_TO_VM_PAGE(
+ pmap_load(l2p));
(*ml3p)->ref_count +=
L3C_ENTRIES;
goto have_l3p;
@@ -5508,8 +5955,7 @@ retry:
* count. Otherwise, we attempt to allocate it.
*/
if (lvl == 2 && pmap_load(pde) != 0) {
- *ml3p = PHYS_TO_VM_PAGE(PTE_TO_PHYS(
- pmap_load(pde)));
+ *ml3p = PTE_TO_VM_PAGE(pmap_load(pde));
(*ml3p)->ref_count += L3C_ENTRIES;
} else {
*ml3p = _pmap_alloc_l3(pmap, l2pindex, (flags &
@@ -5558,7 +6004,7 @@ have_l3p:
* and let vm_fault() cope. Check after L3 allocation, since
* it could sleep.
*/
- if (!pmap_bti_same(pmap, va, va + L3C_SIZE)) {
+ if (!pmap_bti_same(pmap, va, va + L3C_SIZE, &l3e)) {
KASSERT(*ml3p != NULL, ("pmap_enter_l3c: missing L3 PTP"));
(*ml3p)->ref_count -= L3C_ENTRIES - 1;
pmap_abort_ptp(pmap, va, *ml3p);
@@ -5630,7 +6076,7 @@ have_l3p:
}
dsb(ishst);
- atomic_add_long(&pmap_l3c_mappings, 1);
+ counter_u64_add(pmap_l3c_mappings, 1);
CTR2(KTR_PMAP, "pmap_enter_l3c: success for va %#lx in pmap %p",
va, pmap);
return (KERN_SUCCESS);
@@ -5652,37 +6098,46 @@ void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
+ struct pctrie_iter pages;
struct rwlock *lock;
vm_offset_t va;
vm_page_t m, mpte;
- vm_pindex_t diff, psize;
int rv;
VM_OBJECT_ASSERT_LOCKED(m_start->object);
- psize = atop(end - start);
mpte = NULL;
- m = m_start;
+ vm_page_iter_limit_init(&pages, m_start->object,
+ m_start->pindex + atop(end - start));
+ m = vm_radix_iter_lookup(&pages, m_start->pindex);
lock = NULL;
PMAP_LOCK(pmap);
- while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- va = start + ptoa(diff);
+ while (m != NULL) {
+ va = start + ptoa(m->pindex - m_start->pindex);
if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
- m->psind == 1 && pmap_ps_enabled(pmap) &&
+ m->psind == 2 && pmap_ps_enabled(pmap) &&
((rv = pmap_enter_l2_rx(pmap, va, m, prot, &lock)) ==
- KERN_SUCCESS || rv == KERN_NO_SPACE))
- m = &m[L2_SIZE / PAGE_SIZE - 1];
- else if ((va & L3C_OFFSET) == 0 && va + L3C_SIZE <= end &&
- (VM_PAGE_TO_PHYS(m) & L3C_OFFSET) == 0 &&
- vm_reserv_is_populated(m, L3C_ENTRIES) &&
- pmap_ps_enabled(pmap) &&
+ KERN_SUCCESS || rv == KERN_NO_SPACE)) {
+ m = vm_radix_iter_jump(&pages, L2_SIZE / PAGE_SIZE);
+ } else if ((va & L3C_OFFSET) == 0 && va + L3C_SIZE <= end &&
+ m->psind >= 1 && pmap_ps_enabled(pmap) &&
((rv = pmap_enter_l3c_rx(pmap, va, m, &mpte, prot,
- &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE))
- m = &m[L3C_ENTRIES - 1];
- else
- mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
- &lock);
- m = TAILQ_NEXT(m, listq);
+ &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE)) {
+ m = vm_radix_iter_jump(&pages, L3C_ENTRIES);
+ } else {
+ /*
+ * In general, if a superpage mapping were possible,
+ * it would have been created above. That said, if
+ * start and end are not superpage aligned, then
+ * promotion might be possible at the ends of [start,
+ * end). However, in practice, those promotion
+ * attempts are so unlikely to succeed that they are
+ * not worth trying.
+ */
+ mpte = pmap_enter_quick_locked(pmap, va, m, prot |
+ VM_PROT_NO_PROMOTE, mpte, &lock);
+ m = vm_radix_iter_step(&pages);
+ }
}
if (lock != NULL)
rw_wunlock(lock);
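
pmap_enter_object() now iterates the VM object's page trie directly instead of chasing listq pointers. The skeleton of the idiom, using the iterator calls visible above (object, first, and npages are hypothetical placeholders):

	struct pctrie_iter pages;
	vm_page_t m;

	vm_page_iter_limit_init(&pages, object, first + npages);
	for (m = vm_radix_iter_lookup(&pages, first); m != NULL;
	    m = vm_radix_iter_step(&pages)) {
		/* ... map m; or vm_radix_iter_jump(&pages, n) after
		 * installing a superpage covering n base pages. */
	}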
@@ -5715,10 +6170,9 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
- pd_entry_t *pde;
pt_entry_t *l1, *l2, *l3, l3_val;
vm_paddr_t pa;
- int lvl;
+ int full_lvl, lvl;
KASSERT(!VA_IS_CLEANMAP(va) ||
(m->oflags & VPO_UNMANAGED) != 0,
@@ -5762,8 +6216,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
L2_BLOCK)
return (NULL);
- mpte = PHYS_TO_VM_PAGE(
- PTE_TO_PHYS(pmap_load(l2)));
+ mpte = PTE_TO_VM_PAGE(pmap_load(l2));
mpte->ref_count++;
} else {
mpte = _pmap_alloc_l3(pmap, l2pindex,
@@ -5781,13 +6234,13 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
l3 = &l3[pmap_l3_index(va)];
} else {
mpte = NULL;
- pde = pmap_pde(kernel_pmap, va, &lvl);
- KASSERT(pde != NULL,
+ l2 = pmap_pde(kernel_pmap, va, &lvl);
+ KASSERT(l2 != NULL,
("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
va));
KASSERT(lvl == 2,
("pmap_enter_quick_locked: Invalid level %d", lvl));
- l3 = pmap_l2_to_l3(pde, va);
+ l3 = pmap_l2_to_l3(l2, va);
}
/*
@@ -5815,8 +6268,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pmap_resident_count_inc(pmap, 1);
pa = VM_PAGE_TO_PHYS(m);
- l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
- ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+ l3_val = PHYS_TO_PTE(pa) | pmap_sh_attr |
+ ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
l3_val |= pmap_pte_bti(pmap, va);
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
@@ -5831,10 +6284,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
/*
* Now validate mapping with RO protection
*/
- if ((m->oflags & VPO_UNMANAGED) == 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0)
l3_val |= ATTR_SW_MANAGED;
- l3_val &= ~ATTR_AF;
- }
+ else
+ l3_val |= ATTR_AF;
/* Sync icache before the mapping is stored to PTE */
if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
@@ -5846,14 +6299,21 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
#if VM_NRESERVLEVEL > 0
/*
- * If both the PTP and the reservation are fully populated, then
- * attempt promotion.
+ * First, attempt L3C promotion, if the virtual and physical addresses
+ * are aligned with each other and an underlying reservation has the
+ * neighboring L3 pages allocated. The first condition is simply an
+ * optimization that recognizes some eventual promotion failures early
+ * at a lower run-time cost. Then, attempt L2 promotion, if both a
+ * level 1 reservation and the PTP are fully populated.
*/
- if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+ if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+ (va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
(m->flags & PG_FICTITIOUS) == 0 &&
- vm_reserv_level_iffullpop(m) == 0) {
+ (full_lvl = vm_reserv_level_iffullpop(m)) >= 0 &&
+ pmap_promote_l3c(pmap, l3, va) &&
+ full_lvl == 1 && (mpte == NULL || mpte->ref_count == NL3PG)) {
if (l2 == NULL)
- l2 = pmap_pde(pmap, va, &lvl);
+ l2 = pmap_l2(pmap, va);
/*
* If promotion succeeds, then the next call to this function
@@ -6022,8 +6482,7 @@ pmap_copy_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, pt_entry_t l3e,
return (false);
}
- if (!pmap_pv_insert_l3c(pmap, va, PHYS_TO_VM_PAGE(PTE_TO_PHYS(l3e)),
- lockp)) {
+ if (!pmap_pv_insert_l3c(pmap, va, PTE_TO_VM_PAGE(l3e), lockp)) {
if (ml3 != NULL)
pmap_abort_ptp(pmap, va, ml3);
return (false);
@@ -6042,7 +6501,7 @@ pmap_copy_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, pt_entry_t l3e,
l3e += L3_SIZE;
}
pmap_resident_count_inc(pmap, L3C_ENTRIES);
- atomic_add_long(&pmap_l3c_mappings, 1);
+ counter_u64_add(pmap_l3c_mappings, 1);
CTR2(KTR_PMAP, "pmap_copy_l3c: success for va %#lx in pmap %p",
va, pmap);
return (true);
@@ -6112,8 +6571,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
l1 = pmap_l1(dst_pmap, addr);
} else {
l0 = pmap_l0(dst_pmap, addr);
- dst_m = PHYS_TO_VM_PAGE(
- PTE_TO_PHYS(pmap_load(l0)));
+ dst_m = PTE_TO_VM_PAGE(pmap_load(l0));
dst_m->ref_count++;
}
KASSERT(pmap_load(l1) == 0,
@@ -6161,14 +6619,14 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
pmap_store(l2, srcptepaddr);
pmap_resident_count_inc(dst_pmap, L2_SIZE /
PAGE_SIZE);
- atomic_add_long(&pmap_l2_mappings, 1);
+ counter_u64_add(pmap_l2_mappings, 1);
} else
pmap_abort_ptp(dst_pmap, addr, dst_m);
continue;
}
KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
("pmap_copy: invalid L2 entry"));
- srcmpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(srcptepaddr));
+ srcmpte = PTE_TO_VM_PAGE(srcptepaddr);
KASSERT(srcmpte->ref_count > 0,
("pmap_copy: source page table page is unused"));
if (va_next > end_addr)
@@ -6205,7 +6663,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
src_pte += L3C_ENTRIES - 1;
} else if (pmap_load(dst_pte) == 0 &&
pmap_try_insert_pv_entry(dst_pmap, addr,
- PHYS_TO_VM_PAGE(PTE_TO_PHYS(ptetemp)), &lock)) {
+ PTE_TO_VM_PAGE(ptetemp), &lock)) {
/*
* Clear the wired, contiguous, modified, and
* accessed bits from the destination PTE.
@@ -7219,7 +7677,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
* can be avoided by making the page
* dirty now.
*/
- m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(oldl3));
+ m = PTE_TO_VM_PAGE(oldl3);
vm_page_dirty(m);
}
if ((oldl3 & ATTR_CONTIGUOUS) != 0) {
@@ -7392,6 +7850,11 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
pt_entry_t *l2;
int i, lvl, l2_blocks, free_l2_count, start_idx;
+ /* Use the DMAP region if we can */
+ if (PHYS_IN_DMAP(pa) && PHYS_IN_DMAP(pa + size - 1) &&
+ pmap_kmapped_range(PHYS_TO_DMAP(pa), size))
+ return ((void *)PHYS_TO_DMAP(pa));
+
if (!vm_initialized) {
/*
* No L3 ptables so map entire L2 blocks where start VA is:
@@ -7460,9 +7923,9 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
/* Insert L2_BLOCK */
l2 = pmap_l1_to_l2(pde, va);
old_l2e |= pmap_load_store(l2,
- PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
- ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
- L2_BLOCK);
+ PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
+ ATTR_S1_XN | ATTR_KERN_GP |
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
va += L2_SIZE;
pa += L2_SIZE;
@@ -7507,10 +7970,25 @@ pmap_unmapbios(void *p, vm_size_t size)
vm_offset_t offset, va, va_trunc;
pd_entry_t *pde;
pt_entry_t *l2;
- int i, lvl, l2_blocks, block;
+ int error __diagused, i, lvl, l2_blocks, block;
bool preinit_map;
va = (vm_offset_t)p;
+ if (VIRT_IN_DMAP(va)) {
+ KASSERT(VIRT_IN_DMAP(va + size - 1),
+ ("%s: End address not in DMAP region: %lx", __func__,
+ va + size - 1));
+ /* Ensure the attributes are as expected for the DMAP region */
+ PMAP_LOCK(kernel_pmap);
+ error = pmap_change_props_locked(va, size,
+ PROT_READ | PROT_WRITE, VM_MEMATTR_DEFAULT, false);
+ PMAP_UNLOCK(kernel_pmap);
+ KASSERT(error == 0, ("%s: Failed to reset DMAP attributes: %d",
+ __func__, error));
+
+ return;
+ }
+
l2_blocks =
(roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
@@ -7714,8 +8192,9 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
MPASS((pmap_load(ptep) & ATTR_SW_NO_PROMOTE) == 0);
/*
- * Split the entry to an level 3 table, then
- * set the new attribute.
+ * Find the entry and demote it if the requested change
+ * only applies to part of the address range mapped by
+ * the entry.
*/
switch (lvl) {
default:
@@ -7734,6 +8213,16 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
ptep = pmap_l1_to_l2(ptep, tmpva);
/* FALLTHROUGH */
case 2:
+ if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
+ if ((tmpva & L2C_OFFSET) == 0 &&
+ (base + size - tmpva) >= L2C_SIZE) {
+ pte_size = L2C_SIZE;
+ break;
+ }
+ if (!pmap_demote_l2c(kernel_pmap, ptep,
+ tmpva))
+ return (EINVAL);
+ }
if ((tmpva & L2_OFFSET) == 0 &&
(base + size - tmpva) >= L2_SIZE) {
pte_size = L2_SIZE;
@@ -7765,8 +8254,26 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
pte &= ~mask;
pte |= bits;
- pmap_update_entry(kernel_pmap, ptep, pte, tmpva,
- pte_size);
+ switch (pte_size) {
+ case L2C_SIZE:
+ pmap_update_strided(kernel_pmap, ptep, ptep +
+ L2C_ENTRIES, pte, tmpva, L2_SIZE, L2C_SIZE);
+ break;
+ case L3C_SIZE:
+ pmap_update_strided(kernel_pmap, ptep, ptep +
+ L3C_ENTRIES, pte, tmpva, L3_SIZE, L3C_SIZE);
+ break;
+ default:
+ /*
+ * We are updating a single block or page entry,
+ * so regardless of pte_size pass PAGE_SIZE in
+ * order that a single TLB invalidation is
+ * performed.
+ */
+ pmap_update_entry(kernel_pmap, ptep, pte, tmpva,
+ PAGE_SIZE);
+ break;
+ }
pa = PTE_TO_PHYS(pte);
if (!VIRT_IN_DMAP(tmpva) && PHYS_IN_DMAP(pa)) {
@@ -7841,13 +8348,14 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
newl2 = oldl1 & ATTR_MASK;
/* Create the new entries */
+ newl2 |= ATTR_CONTIGUOUS;
for (i = 0; i < Ln_ENTRIES; i++) {
l2[i] = newl2 | phys;
phys += L2_SIZE;
}
- KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
- ("Invalid l2 page (%lx != %lx)", l2[0],
- (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
+ KASSERT(l2[0] == (ATTR_CONTIGUOUS | (oldl1 & ~ATTR_DESCR_MASK) |
+ L2_BLOCK), ("Invalid l2 page (%lx != %lx)", l2[0],
+ ATTR_CONTIGUOUS | (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
if (tmpl1 != 0) {
pmap_kenter(tmpl1, PAGE_SIZE,
@@ -7858,6 +8366,7 @@ pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
+ counter_u64_add(pmap_l1_demotions, 1);
fail:
if (tmpl1 != 0) {
pmap_kremove(tmpl1);
@@ -8071,7 +8580,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
if ((oldl2 & ATTR_SW_MANAGED) != 0)
pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp);
- atomic_add_long(&pmap_l2_demotions, 1);
+ counter_u64_add(pmap_l2_demotions, 1);
CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
" in pmap %p %lx", va, pmap, l3[0]);
@@ -8099,6 +8608,96 @@ pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
}
/*
+ * Demote an L2C superpage mapping to L2C_ENTRIES L2 block mappings.
+ */
+static bool
+pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va)
+{
+ pd_entry_t *l2c_end, *l2c_start, l2e, mask, nbits, *tl2p;
+ vm_offset_t tmpl3;
+ register_t intr;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+ l2c_start = (pd_entry_t *)((uintptr_t)l2p & ~((L2C_ENTRIES *
+ sizeof(pd_entry_t)) - 1));
+ l2c_end = l2c_start + L2C_ENTRIES;
+ tmpl3 = 0;
+ if ((va & ~L2C_OFFSET) < (vm_offset_t)l2c_end &&
+ (vm_offset_t)l2c_start < (va & ~L2C_OFFSET) + L2C_SIZE) {
+ tmpl3 = kva_alloc(PAGE_SIZE);
+ if (tmpl3 == 0)
+ return (false);
+ pmap_kenter(tmpl3, PAGE_SIZE,
+ DMAP_TO_PHYS((vm_offset_t)l2c_start) & ~L3_OFFSET,
+ VM_MEMATTR_WRITE_BACK);
+ l2c_start = (pd_entry_t *)(tmpl3 +
+ ((vm_offset_t)l2c_start & PAGE_MASK));
+ l2c_end = (pd_entry_t *)(tmpl3 +
+ ((vm_offset_t)l2c_end & PAGE_MASK));
+ }
+ mask = 0;
+ nbits = ATTR_DESCR_VALID;
+ intr = intr_disable();
+
+ /*
+ * Break the mappings.
+ */
+ for (tl2p = l2c_start; tl2p < l2c_end; tl2p++) {
+ /*
+ * Clear the mapping's contiguous and valid bits, but leave
+ * the rest of the entry unchanged, so that a lockless,
+ * concurrent pmap_kextract() can still lookup the physical
+ * address.
+ */
+ l2e = pmap_load(tl2p);
+ KASSERT((l2e & ATTR_CONTIGUOUS) != 0,
+ ("pmap_demote_l2c: missing ATTR_CONTIGUOUS"));
+ KASSERT((l2e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
+ (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
+ ("pmap_demote_l2c: missing ATTR_S1_AP_RW"));
+ while (!atomic_fcmpset_64(tl2p, &l2e, l2e & ~(ATTR_CONTIGUOUS |
+ ATTR_DESCR_VALID)))
+ cpu_spinwait();
+
+ /*
+ * Hardware accessed and dirty bit maintenance might only
+ * update a single L2 entry, so we must combine the accessed
+ * and dirty bits from this entire set of contiguous L2
+ * entries.
+ */
+ if ((l2e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+ (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
+ mask = ATTR_S1_AP_RW_BIT;
+ nbits |= l2e & ATTR_AF;
+ }
+ if ((nbits & ATTR_AF) != 0) {
+ pmap_s1_invalidate_strided(pmap, va & ~L2C_OFFSET, (va +
+ L2C_SIZE) & ~L2C_OFFSET, L2_SIZE, true);
+ }
+
+ /*
+ * Remake the mappings, updating the accessed and dirty bits.
+ */
+ l2e = (pmap_load(l2c_start) & ~mask) | nbits;
+ for (tl2p = l2c_start; tl2p < l2c_end; tl2p++) {
+ pmap_store(tl2p, l2e);
+ l2e += L2_SIZE;
+ }
+ dsb(ishst);
+
+ intr_restore(intr);
+ if (tmpl3 != 0) {
+ pmap_kremove(tmpl3);
+ kva_free(tmpl3, PAGE_SIZE);
+ }
+ counter_u64_add(pmap_l2c_demotions, 1);
+ CTR2(KTR_PMAP, "pmap_demote_l2c: success for va %#lx in pmap %p",
+ va, pmap);
+ return (true);
+}
+
+/*
* Demote a L3C superpage mapping to L3C_ENTRIES 4KB page mappings.
*/
static bool
@@ -8169,10 +8768,10 @@ pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va)
/*
* Remake the mappings, updating the accessed and dirty bits.
*/
+ l3e = (pmap_load(l3c_start) & ~mask) | nbits;
for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
- l3e = pmap_load(tl3p);
- while (!atomic_fcmpset_64(tl3p, &l3e, (l3e & ~mask) | nbits))
- cpu_spinwait();
+ pmap_store(tl3p, l3e);
+ l3e += L3_SIZE;
}
dsb(ishst);
@@ -8181,7 +8780,7 @@ pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va)
pmap_kremove(tmpl3);
kva_free(tmpl3, PAGE_SIZE);
}
- atomic_add_long(&pmap_l3c_demotions, 1);
+ counter_u64_add(pmap_l3c_demotions, 1);
CTR2(KTR_PMAP, "pmap_demote_l3c: success for va %#lx in pmap %p",
va, pmap);
return (true);
@@ -8226,7 +8825,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{
pt_entry_t *pte, tpte;
vm_paddr_t mask, pa;
- int lvl, val;
+ int lvl, psind, val;
bool managed;
PMAP_ASSERT_STAGE1(pmap);
@@ -8238,21 +8837,22 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
switch (lvl) {
case 3:
mask = L3_OFFSET;
+ psind = (tpte & ATTR_CONTIGUOUS) != 0 ? 1 : 0;
break;
case 2:
mask = L2_OFFSET;
+ psind = 2;
break;
case 1:
mask = L1_OFFSET;
+ psind = 3;
break;
default:
panic("pmap_mincore: invalid level %d", lvl);
}
managed = (tpte & ATTR_SW_MANAGED) != 0;
- val = MINCORE_INCORE;
- if (lvl != 3)
- val |= MINCORE_PSIND(3 - lvl);
+ val = MINCORE_INCORE | MINCORE_PSIND(psind);
if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
(tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
@@ -8656,20 +9256,16 @@ pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
ptep = pmap_pte(pmap, far, &lvl);
fault_exec:
if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
- if (icache_vmid) {
- pmap_invalidate_vpipt_icache();
- } else {
- /*
- * If accessing an executable page invalidate
- * the I-cache so it will be valid when we
- * continue execution in the guest. The D-cache
- * is assumed to already be clean to the Point
- * of Coherency.
- */
- if ((pte & ATTR_S2_XN_MASK) !=
- ATTR_S2_XN(ATTR_S2_XN_NONE)) {
- invalidate_icache();
- }
+ /*
+ * If accessing an executable page invalidate
+ * the I-cache so it will be valid when we
+ * continue execution in the guest. The D-cache
+ * is assumed to already be clean to the Point
+ * of Coherency.
+ */
+ if ((pte & ATTR_S2_XN_MASK) !=
+ ATTR_S2_XN(ATTR_S2_XN_NONE)) {
+ invalidate_icache();
}
pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
rv = KERN_SUCCESS;
@@ -8758,12 +9354,23 @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
if (pmap_klookup(far, NULL))
rv = KERN_SUCCESS;
} else {
- PMAP_LOCK(pmap);
+ bool owned;
+
+ /*
+ * In the EFIRT driver we lock the pmap before
+ * calling into the runtime service. As the lock
+			 * is already owned by the current thread, skip

+ * locking it again.
+ */
+ owned = PMAP_OWNED(pmap);
+ if (!owned)
+ PMAP_LOCK(pmap);
/* Ask the MMU to check the address. */
intr = intr_disable();
par = arm64_address_translate_s1e0r(far);
intr_restore(intr);
- PMAP_UNLOCK(pmap);
+ if (!owned)
+ PMAP_UNLOCK(pmap);
/*
* If the translation was successful, then we can
@@ -8788,18 +9395,37 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
{
vm_offset_t superpage_offset;
- if (size < L2_SIZE)
+ if (size < L3C_SIZE)
return;
if (object != NULL && (object->flags & OBJ_COLORED) != 0)
offset += ptoa(object->pg_color);
+
+ /*
+ * Considering the object's physical alignment, is the mapping large
+ * enough to encompass an L2 (2MB/32MB) superpage ...
+ */
superpage_offset = offset & L2_OFFSET;
- if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
- (*addr & L2_OFFSET) == superpage_offset)
+ if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) >= L2_SIZE) {
+ /*
+ * If the virtual and physical alignments differ, then
+ * increase the virtual address so that the alignments match.
+ */
+ if ((*addr & L2_OFFSET) < superpage_offset)
+ *addr = (*addr & ~L2_OFFSET) + superpage_offset;
+ else if ((*addr & L2_OFFSET) > superpage_offset)
+ *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) +
+ superpage_offset;
return;
- if ((*addr & L2_OFFSET) < superpage_offset)
- *addr = (*addr & ~L2_OFFSET) + superpage_offset;
- else
- *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
+ }
+ /* ... or an L3C (64KB/2MB) superpage? */
+ superpage_offset = offset & L3C_OFFSET;
+ if (size - ((L3C_SIZE - superpage_offset) & L3C_OFFSET) >= L3C_SIZE) {
+ if ((*addr & L3C_OFFSET) < superpage_offset)
+ *addr = (*addr & ~L3C_OFFSET) + superpage_offset;
+ else if ((*addr & L3C_OFFSET) > superpage_offset)
+ *addr = ((*addr + L3C_OFFSET) & ~L3C_OFFSET) +
+ superpage_offset;
+ }
}
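As a worked example with 4KB pages (L2_SIZE = 2MB, L3C_SIZE = 64KB): for offset & L2_OFFSET = 0x10000 and size = 4MB, size - ((L2_SIZE - 0x10000) & L2_OFFSET) = 0x400000 - 0x1f0000 = 0x210000 >= L2_SIZE, so the first branch adjusts *addr until (*addr & L2_OFFSET) == 0x10000 and returns. A request that fails the L2 test now falls through to the second check, which can still give the mapping matching 64KB (L3C) alignment instead of none at all.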
/**
@@ -8932,10 +9558,18 @@ pmap_bti_deassign_all(pmap_t pmap)
rangeset_remove_all(pmap->pm_bti);
}
+/*
+ * Returns true if the BTI setting is the same across the specified address
+ * range, and false otherwise. When returning true, updates the referenced PTE
+ * to reflect the BTI setting.
+ *
+ * Only stage 1 pmaps support BTI. The kernel pmap is always a stage 1 pmap
+ * that has the same BTI setting implicitly across its entire address range.
+ */
static bool
-pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t *pte)
{
- struct rs_el *prev_rs, *rs;
+ struct rs_el *rs;
vm_offset_t va;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -8943,22 +9577,24 @@ pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
("%s: Start address not in canonical form: %lx", __func__, sva));
KASSERT(ADDR_IS_CANONICAL(eva),
("%s: End address not in canonical form: %lx", __func__, eva));
+ KASSERT((*pte & ATTR_S1_GP) == 0,
+ ("%s: pte %lx has ATTR_S1_GP preset", __func__, *pte));
- if (pmap->pm_bti == NULL || ADDR_IS_KERNEL(sva))
+ if (pmap == kernel_pmap) {
+ *pte |= ATTR_KERN_GP;
+ return (true);
+ }
+ if (pmap->pm_bti == NULL)
return (true);
- MPASS(!ADDR_IS_KERNEL(eva));
- for (va = sva; va < eva; prev_rs = rs) {
- rs = rangeset_lookup(pmap->pm_bti, va);
- if (va == sva)
- prev_rs = rs;
- else if ((rs == NULL) ^ (prev_rs == NULL))
+ PMAP_ASSERT_STAGE1(pmap);
+ rs = rangeset_containing(pmap->pm_bti, sva);
+ if (rs == NULL)
+ return (rangeset_empty(pmap->pm_bti, sva, eva));
+ while ((va = rs->re_end) < eva) {
+ if ((rs = rangeset_beginning(pmap->pm_bti, va)) == NULL)
return (false);
- if (rs == NULL) {
- va += PAGE_SIZE;
- continue;
- }
- va = rs->re_end;
}
+ *pte |= ATTR_S1_GP;
return (true);
}
@@ -8972,7 +9608,8 @@ pmap_pte_bti(pmap_t pmap, vm_offset_t va)
return (0);
if (pmap == kernel_pmap)
return (ATTR_KERN_GP);
- if (pmap->pm_bti != NULL && rangeset_lookup(pmap->pm_bti, va) != NULL)
+ if (pmap->pm_bti != NULL &&
+ rangeset_containing(pmap->pm_bti, va) != NULL)
return (ATTR_S1_GP);
return (0);
}
@@ -9150,18 +9787,17 @@ pmap_san_enter(vm_offset_t va)
MPASS(l1 != NULL);
if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) {
m = pmap_san_enter_alloc_l3();
- pmap_store(l1, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L1_TABLE);
+ pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
}
l2 = pmap_l1_to_l2(l1, va);
if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
m = pmap_san_enter_alloc_l2();
if (m != NULL) {
- pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) |
+ pmap_store(l2, VM_PAGE_TO_PTE(m) |
PMAP_SAN_PTE_BITS | L2_BLOCK);
} else {
m = pmap_san_enter_alloc_l3();
- pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) |
- L2_TABLE);
+ pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
}
dmb(ishst);
}
@@ -9171,8 +9807,7 @@ pmap_san_enter(vm_offset_t va)
if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
return;
m = pmap_san_enter_alloc_l3();
- pmap_store(l3, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) |
- PMAP_SAN_PTE_BITS | L3_PAGE);
+ pmap_store(l3, VM_PAGE_TO_PTE(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
dmb(ishst);
}
#endif /* KASAN || KMSAN */
@@ -9187,6 +9822,7 @@ struct pmap_kernel_map_range {
int l3pages;
int l3contig;
int l2blocks;
+ int l2contig;
int l1blocks;
};
@@ -9225,15 +9861,15 @@ sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
break;
}
- sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d\n",
+ sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d %d\n",
range->sva, eva,
(range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
(range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
(range->attrs & ATTR_S1_UXN) != 0 ? '-' : 'X',
(range->attrs & ATTR_S1_AP(ATTR_S1_AP_USER)) != 0 ? 'u' : 's',
(range->attrs & ATTR_S1_GP) != 0 ? 'g' : '-',
- mode, range->l1blocks, range->l2blocks, range->l3contig,
- range->l3pages);
+ mode, range->l1blocks, range->l2contig, range->l2blocks,
+ range->l3contig, range->l3pages);
/* Reset to sentinel value. */
range->sva = 0xfffffffffffffffful;
@@ -9398,7 +10034,12 @@ sysctl_kmaps(SYSCTL_HANDLER_ARGS)
if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
sysctl_kmaps_check(sb, &range, sva,
l0e, l1e, l2e, 0);
- range.l2blocks++;
+ if ((l2e & ATTR_CONTIGUOUS) != 0)
+ range.l2contig +=
+ k % L2C_ENTRIES == 0 ?
+ 1 : 0;
+ else
+ range.l2blocks++;
sva += L2_SIZE;
continue;
}
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
index 40f9c86d9468..a943220d66cd 100644
--- a/sys/arm64/arm64/ptrauth.c
+++ b/sys/arm64/arm64/ptrauth.c
@@ -43,6 +43,7 @@
#include <machine/armreg.h>
#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
#include <machine/reg.h>
#include <machine/vmparam.h>
@@ -81,10 +82,10 @@ ptrauth_disable(void)
return (false);
}
-void
-ptrauth_init(void)
+static bool
+ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
- uint64_t isar1;
+ uint64_t isar;
int pac_enable;
/*
@@ -96,28 +97,69 @@ ptrauth_init(void)
if (!pac_enable) {
if (boothowto & RB_VERBOSE)
printf("Pointer authentication is disabled\n");
- return;
+ goto out;
}
- if (!get_kernel_reg(ID_AA64ISAR1_EL1, &isar1))
- return;
-
if (ptrauth_disable())
- return;
+ goto out;
/*
* This assumes if there is pointer authentication on the boot CPU
* it will also be available on any non-boot CPUs. If this is ever
* not the case we will have to add a quirk.
*/
- if (ID_AA64ISAR1_APA_VAL(isar1) > 0 ||
- ID_AA64ISAR1_API_VAL(isar1) > 0) {
- enable_ptrauth = true;
- elf64_addr_mask.code |= PAC_ADDR_MASK;
- elf64_addr_mask.data |= PAC_ADDR_MASK;
+
+ /*
+	 * The QARMA5 or implementation-defined algorithms are reported in
+ * ID_AA64ISAR1_EL1.
+ */
+ if (get_kernel_reg(ID_AA64ISAR1_EL1, &isar)) {
+ if (ID_AA64ISAR1_APA_VAL(isar) > 0 ||
+ ID_AA64ISAR1_API_VAL(isar) > 0) {
+ return (true);
+ }
+ }
+
+ /* The QARMA3 algorithm is reported in ID_AA64ISAR2_EL1. */
+ if (get_kernel_reg(ID_AA64ISAR2_EL1, &isar)) {
+ if (ID_AA64ISAR2_APA3_VAL(isar) > 0) {
+ return (true);
+ }
}
+
+out:
+ /*
+	 * Pointer authentication is not being used; mask out the ID fields we
+ * expose to userspace and the rest of the kernel so they don't try
+ * to use it.
+ */
+ update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
+ ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
+ ID_AA64ISAR1_GPI_MASK, 0);
+ update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
+
+ return (false);
}
+static void
+ptrauth_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ enable_ptrauth = true;
+ elf64_addr_mask.code |= PAC_ADDR_MASK;
+ elf64_addr_mask.data |= PAC_ADDR_MASK;
+}
+
+static struct cpu_feat feat_pauth = {
+ .feat_name = "FEAT_PAuth",
+ .feat_check = ptrauth_check,
+ .feat_enable = ptrauth_enable,
+ .feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM,
+};
+DATA_SET(cpu_feat_set, feat_pauth);
+
/* Copy the keys when forking a new process */
void
ptrauth_fork(struct thread *new_td, struct thread *orig_td)
@@ -169,13 +211,11 @@ ptrauth_thread_alloc(struct thread *td)
* Load the userspace keys. We can't use WRITE_SPECIALREG as we need
* to set the architecture extension.
*/
-#define LOAD_KEY(space, name) \
-__asm __volatile( \
- ".arch_extension pauth \n" \
- "msr "#name"keylo_el1, %0 \n" \
- "msr "#name"keyhi_el1, %1 \n" \
- ".arch_extension nopauth \n" \
- :: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo), \
+#define LOAD_KEY(space, name, reg) \
+__asm __volatile( \
+ "msr "__XSTRING(MRS_REG_ALT_NAME(reg ## KeyLo_EL1))", %0 \n" \
+ "msr "__XSTRING(MRS_REG_ALT_NAME(reg ## KeyHi_EL1))", %1 \n" \
+ :: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo), \
"r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
void
@@ -187,7 +227,7 @@ ptrauth_thread0(struct thread *td)
/* TODO: Generate a random number here */
memset(&td->td_md.md_ptrauth_kern, 0,
sizeof(td->td_md.md_ptrauth_kern));
- LOAD_KEY(kern, apia);
+ LOAD_KEY(kern, apia, APIA);
/*
* No isb as this is called before ptrauth_start so can rely on
* the instruction barrier there.
@@ -240,8 +280,8 @@ ptrauth_mp_start(uint64_t cpu)
__asm __volatile(
".arch_extension pauth \n"
- "msr apiakeylo_el1, %0 \n"
- "msr apiakeyhi_el1, %1 \n"
+ "msr "__XSTRING(APIAKeyLo_EL1_REG)", %0 \n"
+ "msr "__XSTRING(APIAKeyHi_EL1_REG)", %1 \n"
".arch_extension nopauth \n"
:: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));
@@ -257,7 +297,7 @@ struct thread *
ptrauth_switch(struct thread *td)
{
if (enable_ptrauth) {
- LOAD_KEY(kern, apia);
+ LOAD_KEY(kern, apia, APIA);
isb();
}
@@ -271,7 +311,7 @@ ptrauth_exit_el0(struct thread *td)
if (!enable_ptrauth)
return;
- LOAD_KEY(kern, apia);
+ LOAD_KEY(kern, apia, APIA);
isb();
}
@@ -282,11 +322,11 @@ ptrauth_enter_el0(struct thread *td)
if (!enable_ptrauth)
return;
- LOAD_KEY(user, apia);
- LOAD_KEY(user, apib);
- LOAD_KEY(user, apda);
- LOAD_KEY(user, apdb);
- LOAD_KEY(user, apga);
+ LOAD_KEY(user, apia, APIA);
+ LOAD_KEY(user, apib, APIB);
+ LOAD_KEY(user, apda, APDA);
+ LOAD_KEY(user, apdb, APDB);
+ LOAD_KEY(user, apga, APGA);
/*
* No isb as this is called from the exception handler so can rely
* on the eret instruction to be the needed context synchronizing event.
diff --git a/sys/arm64/arm64/sdt_machdep.c b/sys/arm64/arm64/sdt_machdep.c
new file mode 100644
index 000000000000..23324ffbf333
--- /dev/null
+++ b/sys/arm64/arm64/sdt_machdep.c
@@ -0,0 +1,77 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Mark Johnston <markj@FreeBSD.org>
+ */
+
+#include <sys/systm.h>
+#include <sys/sdt.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/vmparam.h>
+
+/*
+ * Return true if we can overwrite a nop at "patchpoint" with a jump to the
+ * target address.
+ */
+bool
+sdt_tracepoint_valid(uintptr_t patchpoint, uintptr_t target)
+{
+ void *addr;
+ int64_t offset;
+
+ if (!arm64_get_writable_addr((void *)patchpoint, &addr))
+ return (false);
+
+ if (patchpoint == target ||
+ (patchpoint & (INSN_SIZE - 1)) != 0 ||
+ (target & (INSN_SIZE - 1)) != 0)
+ return (false);
+ offset = target - patchpoint;
+ if (offset < -(1 << 26) || offset > (1 << 26))
+ return (false);
+ return (true);
+}
+
+/*
+ * Overwrite the copy of _SDT_ASM_PATCH_INSTR at the tracepoint with a jump to the
+ * target address.
+ */
+void
+sdt_tracepoint_patch(uintptr_t patchpoint, uintptr_t target)
+{
+ void *addr;
+ uint32_t instr;
+
+ KASSERT(sdt_tracepoint_valid(patchpoint, target),
+ ("%s: invalid tracepoint %#lx -> %#lx",
+ __func__, patchpoint, target));
+
+ if (!arm64_get_writable_addr((void *)patchpoint, &addr))
+ panic("%s: Unable to write new instruction", __func__);
+
+ instr = (((target - patchpoint) >> 2) & 0x3fffffful) | 0x14000000;
+ memcpy(addr, &instr, sizeof(instr));
+ cpu_icache_sync_range((void *)patchpoint, INSN_SIZE);
+}
+
+/*
+ * Overwrite the patchpoint with a nop instruction.
+ */
+void
+sdt_tracepoint_restore(uintptr_t patchpoint)
+{
+ void *addr;
+ uint32_t instr;
+
+ if (!arm64_get_writable_addr((void *)patchpoint, &addr))
+ panic("%s: Unable to write new instruction", __func__);
+
+ instr = 0xd503201f;
+ memcpy(addr, &instr, sizeof(instr));
+ cpu_icache_sync_range((void *)patchpoint, INSN_SIZE);
+}
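The 0x14000000 opcode written by sdt_tracepoint_patch() is the AArch64 unconditional branch (B), which carries a signed 26-bit word offset. A stand-alone sketch of the same round trip, with a hypothetical helper name, is:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical: encode "B target" at patchpoint, as above. */
	static bool
	b_encode(uint64_t patchpoint, uint64_t target, uint32_t *instrp)
	{
		int64_t offset = (int64_t)(target - patchpoint);

		/* Mirror sdt_tracepoint_valid(): aligned, within range. */
		if ((patchpoint & 3) != 0 || (target & 3) != 0 ||
		    offset < -(1 << 26) || offset > (1 << 26))
			return (false);
		/* imm26 is the word offset; 0b000101 in the top six bits. */
		*instrp = ((uint32_t)(offset >> 2) & 0x3ffffffu) | 0x14000000u;
		return (true);
	}

The restore path simply writes back 0xd503201f, the canonical A64 nop.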
diff --git a/sys/arm64/arm64/sigtramp.S b/sys/arm64/arm64/sigtramp.S
index f1936e695f33..3f1bb42c269f 100644
--- a/sys/arm64/arm64/sigtramp.S
+++ b/sys/arm64/arm64/sigtramp.S
@@ -27,6 +27,7 @@
*/
#include "assym.inc"
+#include <sys/elf_common.h>
#include <sys/syscall.h>
#include <machine/asm.h>
@@ -57,3 +58,5 @@ esigcode:
.global szsigcode
szsigcode:
.quad esigcode - sigcode
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/stack_machdep.c b/sys/arm64/arm64/stack_machdep.c
index e5e105aeb955..fde975ffc7d2 100644
--- a/sys/arm64/arm64/stack_machdep.c
+++ b/sys/arm64/arm64/stack_machdep.c
@@ -59,8 +59,6 @@ stack_save_td(struct stack *st, struct thread *td)
struct unwind_state frame;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- KASSERT(!TD_IS_SWAPPED(td),
- ("stack_save_td: thread %p is swapped", td));
if (TD_IS_RUNNING(td))
return (EOPNOTSUPP);
diff --git a/sys/arm64/arm64/strcmp.S b/sys/arm64/arm64/strcmp.S
index 0d66aae07d9e..d31576bbcf34 100644
--- a/sys/arm64/arm64/strcmp.S
+++ b/sys/arm64/arm64/strcmp.S
@@ -12,6 +12,8 @@
* MTE compatible.
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#define L(l) .L ## l
@@ -187,3 +189,4 @@ L(done):
END (strcmp)
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/strncmp.S b/sys/arm64/arm64/strncmp.S
index 595de0312678..1b475b4ce449 100644
--- a/sys/arm64/arm64/strncmp.S
+++ b/sys/arm64/arm64/strncmp.S
@@ -11,6 +11,8 @@
* MTE compatible.
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#define L(l) .L ## l
@@ -305,3 +307,4 @@ L(ret0):
ret
END(strncmp)
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
index bb93cfd521e1..2d067c7f7730 100644
--- a/sys/arm64/arm64/support.S
+++ b/sys/arm64/arm64/support.S
@@ -29,6 +29,8 @@
*
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#include <machine/setjmp.h>
#include <machine/param.h>
@@ -387,3 +389,5 @@ ENTRY(pagezero_cache)
ret
END(pagezero_cache)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
index 9c43de3a9eae..7b6010a5f51f 100644
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -33,6 +33,8 @@
#include "opt_kstack_pages.h"
#include "opt_sched.h"
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#include <machine/armreg.h>
.macro clear_step_flag pcbflags, tmp
@@ -54,6 +56,21 @@
.endm
/*
+ * Lower 32 bits of CONTEXTIDR_EL1 are PID
+ * Upper 32 bits are reserved for future use, e.g. TID
+ */
+.macro pid_in_context_idr
+ adrp x9, arm64_pid_in_contextidr
+ ldrb w10, [x9, :lo12:arm64_pid_in_contextidr]
+ cbz w10, 998f
+ ldr x9, [x1, #TD_PROC]
+ /* PID is always 0 or positive, do not sign extend */
+ ldr w10, [x9, #P_PID]
+ msr contextidr_el1, x10
+998:
+.endm
+
+/*
* void cpu_throw(struct thread *old, struct thread *new)
*/
ENTRY(cpu_throw)
@@ -64,7 +81,10 @@ ENTRY(cpu_throw)
ldr x4, [x0, #TD_PCB]
ldr w5, [x4, #PCB_FLAGS]
clear_step_flag w5, x6
+
1:
+ /* debug/trace: set CONTEXTIDR_EL1 to current PID, if enabled */
+ pid_in_context_idr
#ifdef VFP
/* Backup the new thread pointer around a call to C code */
@@ -145,10 +165,11 @@ ENTRY(cpu_switch)
mov x20, x1
mov x21, x2
+ /* debug/trace: set CONTEXTIDR_EL1 to current PID, if enabled */
+ pid_in_context_idr
+
#ifdef VFP
- /* Load the pcb address */
- mov x1, x4
- bl vfp_save_state
+ bl vfp_save_state_switch
mov x0, x20
#else
mov x0, x1
@@ -267,13 +288,12 @@ ENTRY(savectx)
mrs x6, tpidr_el0
stp x5, x6, [x0, #PCB_SP]
- /* Store the VFP registers */
#ifdef VFP
- mov x28, lr
- bl vfp_save_state_savectx
- mov lr, x28
-#endif
-
+ /* Store the VFP registers */
+ b vfp_save_state_savectx
+#else
ret
+#endif
END(savectx)
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/arm64/sys_machdep.c b/sys/arm64/arm64/sys_machdep.c
index eedc57f7c572..33000b6c223b 100644
--- a/sys/arm64/arm64/sys_machdep.c
+++ b/sys/arm64/arm64/sys_machdep.c
@@ -30,20 +30,26 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
+#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
+#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <machine/vmparam.h>
+#include <security/audit/audit.h>
+
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
struct arm64_guard_page_args gp_args;
+ struct pcb *pcb;
vm_offset_t eva;
+ unsigned long sve_len;
int error;
switch (uap->op) {
@@ -73,6 +79,13 @@ sysarch(struct thread *td, struct sysarch_args *uap)
error = pmap_bti_set(vmspace_pmap(td->td_proc->p_vmspace),
trunc_page(gp_args.addr), round_page(eva));
break;
+ case ARM64_GET_SVE_VL:
+ pcb = td->td_pcb;
+ sve_len = pcb->pcb_sve_len;
+ error = EINVAL;
+ if (sve_len != 0)
+ error = copyout(&sve_len, uap->parms, sizeof(sve_len));
+ break;
default:
error = EINVAL;
break;
@@ -80,3 +93,8 @@ sysarch(struct thread *td, struct sysarch_args *uap)
return (error);
}
+
+bool arm64_pid_in_contextidr = false;
+SYSCTL_BOOL(_machdep, OID_AUTO, pid_in_contextidr, CTLFLAG_RW,
+ &arm64_pid_in_contextidr, false,
+ "Save PID into CONTEXTIDR_EL1 register on context switch");
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
index 6cc1933095c8..bed58095201a 100644
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -85,6 +85,9 @@ static void print_registers(struct trapframe *frame);
int (*dtrace_invop_jump_addr)(struct trapframe *);
+u_long cnt_efirt_faults;
+int print_efirt_faults;
+
typedef void (abort_handler)(struct thread *, struct trapframe *, uint64_t,
uint64_t, int);
@@ -308,10 +311,18 @@ data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
break;
}
}
- intr_enable();
+ if (td->td_md.md_spinlock_count == 0 &&
+ (frame->tf_spsr & PSR_DAIF_INTR) != PSR_DAIF_INTR) {
+ MPASS((frame->tf_spsr & PSR_DAIF_INTR) == 0);
+ intr_enable();
+ }
map = kernel_map;
} else {
- intr_enable();
+ if (td->td_md.md_spinlock_count == 0 &&
+ (frame->tf_spsr & PSR_DAIF_INTR) != PSR_DAIF_INTR) {
+ MPASS((frame->tf_spsr & PSR_DAIF_INTR) == 0);
+ intr_enable();
+ }
map = &td->td_proc->p_vmspace->vm_map;
if (map == NULL)
map = kernel_map;
@@ -338,8 +349,9 @@ data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
td->td_md.md_spinlock_count);
}
#endif
- if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
- WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
+ if ((td->td_pflags & TDP_NOFAULTING) == 0 &&
+ (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
+ WARN_GIANTOK, NULL, "Kernel page fault") != 0)) {
print_registers(frame);
print_gp_register("far", far);
printf(" esr: 0x%.16lx\n", esr);
@@ -375,7 +387,6 @@ data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
bad_far:
if (td->td_intr_nesting_level == 0 &&
pcb->pcb_onfault != 0) {
- frame->tf_x[0] = error;
frame->tf_elr = pcb->pcb_onfault;
return;
}
@@ -540,11 +551,10 @@ do_el1h_sync(struct thread *td, struct trapframe *frame)
break;
case EXCP_BRK:
#ifdef KDTRACE_HOOKS
- if ((esr & ESR_ELx_ISS_MASK) == 0x40d && \
- dtrace_invop_jump_addr != 0) {
- dtrace_invop_jump_addr(frame);
+ if ((esr & ESR_ELx_ISS_MASK) == 0x40d /* BRK_IMM16_VAL */ &&
+ dtrace_invop_jump_addr != NULL &&
+ dtrace_invop_jump_addr(frame) == 0)
break;
- }
#endif
#ifdef KDB
kdb_trap(exception, 0, frame);
@@ -568,8 +578,6 @@ do_el1h_sync(struct thread *td, struct trapframe *frame)
panic("FPAC kernel exception");
break;
case EXCP_UNKNOWN:
- if (undef_insn(1, frame))
- break;
print_registers(frame);
print_gp_register("far", far);
panic("Undefined instruction: %08x",
@@ -639,8 +647,10 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
#endif
break;
case EXCP_SVE:
- call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)frame->tf_elr,
- exception);
+ /* Returns true if this thread can use SVE */
+ if (!sve_restore_state(td))
+ call_trapsignal(td, SIGILL, ILL_ILLTRP,
+ (void *)frame->tf_elr, exception);
userret(td, frame);
break;
case EXCP_SVC32:
@@ -664,7 +674,7 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
}
break;
case EXCP_UNKNOWN:
- if (!undef_insn(0, frame))
+ if (!undef_insn(frame))
call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far,
exception);
userret(td, frame);
@@ -704,7 +714,7 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
* instruction to access a special register userspace doesn't
* have access to.
*/
- if (!undef_insn(0, frame))
+ if (!undef_insn(frame))
call_trapsignal(td, SIGILL, ILL_PRVOPC,
(void *)frame->tf_elr, exception);
userret(td, frame);
@@ -734,7 +744,8 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
break;
}
- KASSERT((td->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
+ KASSERT(
+ (td->td_pcb->pcb_fpflags & ~(PCB_FP_USERMASK|PCB_FP_SVEVALID)) == 0,
("Kernel VFP flags set while entering userspace"));
KASSERT(
td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
diff --git a/sys/arm64/arm64/uio_machdep.c b/sys/arm64/arm64/uio_machdep.c
index c42aee94506e..1c12940419cc 100644
--- a/sys/arm64/arm64/uio_machdep.c
+++ b/sys/arm64/arm64/uio_machdep.c
@@ -95,18 +95,26 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
switch (uio->uio_segflg) {
case UIO_USERSPACE:
maybe_yield();
- if (uio->uio_rw == UIO_READ)
+ switch (uio->uio_rw) {
+ case UIO_READ:
error = copyout(cp, iov->iov_base, cnt);
- else
+ break;
+ case UIO_WRITE:
error = copyin(iov->iov_base, cp, cnt);
+ break;
+ }
if (error)
goto out;
break;
case UIO_SYSSPACE:
- if (uio->uio_rw == UIO_READ)
+ switch (uio->uio_rw) {
+ case UIO_READ:
bcopy(cp, iov->iov_base, cnt);
- else
+ break;
+ case UIO_WRITE:
bcopy(iov->iov_base, cp, cnt);
+ break;
+ }
break;
case UIO_NOCOPY:
break;
diff --git a/sys/arm64/arm64/undefined.c b/sys/arm64/arm64/undefined.c
index c307281ea523..19f34fa91702 100644
--- a/sys/arm64/arm64/undefined.c
+++ b/sys/arm64/arm64/undefined.c
@@ -82,41 +82,23 @@ struct undef_handler {
undef_handler_t uh_handler;
};
-/*
- * Create two undefined instruction handler lists, one for userspace, one for
- * the kernel. This allows us to handle instructions that will trap
- */
-LIST_HEAD(, undef_handler) undef_handlers[2];
+/* System instruction handlers, e.g. msr, mrs, sys */
+struct sys_handler {
+ LIST_ENTRY(sys_handler) sys_link;
+ undef_sys_handler_t sys_handler;
+};
/*
- * Work around a bug in QEMU prior to 2.5.1 where reading unknown ID
- * registers would raise an exception when they should return 0.
+ * Create the undefined instruction handler lists.
+ * This allows us to handle instructions that will trap.
*/
-static int
-id_aa64mmfr2_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
- uint32_t esr)
-{
- int reg;
-
-#define MRS_ID_AA64MMFR2_EL0_MASK (MRS_MASK | 0x000fffe0)
-#define MRS_ID_AA64MMFR2_EL0_VALUE (MRS_VALUE | 0x00080740)
-
- /* mrs xn, id_aa64mfr2_el1 */
- if ((insn & MRS_ID_AA64MMFR2_EL0_MASK) == MRS_ID_AA64MMFR2_EL0_VALUE) {
- reg = MRS_REGISTER(insn);
-
- frame->tf_elr += INSN_SIZE;
- if (reg < nitems(frame->tf_x)) {
- frame->tf_x[reg] = 0;
- } else if (reg == 30) {
- frame->tf_lr = 0;
- }
- /* If reg is 32 then write to xzr, i.e. do nothing */
-
- return (1);
- }
- return (0);
-}
+LIST_HEAD(, sys_handler) sys_handlers = LIST_HEAD_INITIALIZER(sys_handler);
+LIST_HEAD(, undef_handler) undef_handlers =
+ LIST_HEAD_INITIALIZER(undef_handlers);
+#ifdef COMPAT_FREEBSD32
+LIST_HEAD(, undef_handler) undef32_handlers =
+ LIST_HEAD_INITIALIZER(undef32_handlers);
+#endif
static bool
arm_cond_match(uint32_t insn, struct trapframe *frame)
@@ -179,8 +161,7 @@ gdb_trapper(vm_offset_t va, uint32_t insn, struct trapframe *frame,
struct thread *td = curthread;
if (insn == GDB_BREAKPOINT || insn == GDB5_BREAKPOINT) {
- if (SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
- va < VM_MAXUSER_ADDRESS) {
+ if (va < VM_MAXUSER_ADDRESS) {
ksiginfo_t ksi;
ksiginfo_init_trap(&ksi);
@@ -212,8 +193,7 @@ swp_emulate(vm_offset_t va, uint32_t insn, struct trapframe *frame,
* swp, swpb only; there are no Thumb swp/swpb instructions so we can
* safely bail out if we're in Thumb mode.
*/
- if (!compat32_emul_swp || !SV_PROC_FLAG(td->td_proc, SV_ILP32) ||
- (frame->tf_spsr & PSR_T) != 0)
+ if (!compat32_emul_swp || (frame->tf_spsr & PSR_T) != 0)
return (0);
else if ((insn & 0x0fb00ff0) != 0x01000090)
return (0);
@@ -278,29 +258,38 @@ fault:
void
undef_init(void)
{
-
- LIST_INIT(&undef_handlers[0]);
- LIST_INIT(&undef_handlers[1]);
-
- install_undef_handler(false, id_aa64mmfr2_handler);
#ifdef COMPAT_FREEBSD32
- install_undef_handler(true, gdb_trapper);
- install_undef_handler(true, swp_emulate);
+ install_undef32_handler(gdb_trapper);
+ install_undef32_handler(swp_emulate);
#endif
}
void *
-install_undef_handler(bool user, undef_handler_t func)
+install_undef_handler(undef_handler_t func)
{
struct undef_handler *uh;
uh = malloc(sizeof(*uh), M_UNDEF, M_WAITOK);
uh->uh_handler = func;
- LIST_INSERT_HEAD(&undef_handlers[user ? 0 : 1], uh, uh_link);
+ LIST_INSERT_HEAD(&undef_handlers, uh, uh_link);
return (uh);
}
+#ifdef COMPAT_FREEBSD32
+void *
+install_undef32_handler(undef_handler_t func)
+{
+ struct undef_handler *uh;
+
+ uh = malloc(sizeof(*uh), M_UNDEF, M_WAITOK);
+ uh->uh_handler = func;
+ LIST_INSERT_HEAD(&undef32_handlers, uh, uh_link);
+
+ return (uh);
+}
+#endif
+
void
remove_undef_handler(void *handle)
{
@@ -311,24 +300,135 @@ remove_undef_handler(void *handle)
free(handle, M_UNDEF);
}
+void
+install_sys_handler(undef_sys_handler_t func)
+{
+ struct sys_handler *sysh;
+
+ sysh = malloc(sizeof(*sysh), M_UNDEF, M_WAITOK);
+ sysh->sys_handler = func;
+ LIST_INSERT_HEAD(&sys_handlers, sysh, sys_link);
+}
+
+bool
+undef_sys(uint64_t esr, struct trapframe *frame)
+{
+ struct sys_handler *sysh;
+
+ LIST_FOREACH(sysh, &sys_handlers, sys_link) {
+ if (sysh->sys_handler(esr, frame))
+ return (true);
+ }
+
+ return (false);
+}
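A minimal sketch of a consumer, using the undef_sys_handler_t signature above (the ISS_MSR_* field accessors are assumed from armreg.h; a real handler would also match the op0/op1/CRn/CRm/op2 fields against its register):

	/* Hypothetical: emulate reads of one trapped system register as zero. */
	static bool
	zero_reg_handler(uint64_t esr, struct trapframe *frame)
	{
		u_int reg;

		/* Only handle reads; ISS_MSR_DIR is set for mrs. */
		if ((esr & ISS_MSR_DIR) == 0)
			return (false);
		/* ... check the op0/op1/CRn/CRm/op2 fields here ... */
		reg = (esr & ISS_MSR_Rt_MASK) >> ISS_MSR_Rt_SHIFT;
		if (reg < nitems(frame->tf_x))
			frame->tf_x[reg] = 0;
		else if (reg == 30)
			frame->tf_lr = 0;
		/* reg == 31 is xzr: nothing to write. */
		frame->tf_elr += INSN_SIZE;
		return (true);
	}

	/* Registered once during boot: */
	install_sys_handler(zero_reg_handler);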
+
+static bool
+undef_sys_insn(struct trapframe *frame, uint32_t insn)
+{
+ uint64_t esr;
+ bool read;
+
+#define MRS_MASK 0xfff00000
+#define MRS_VALUE 0xd5300000
+#define MSR_REG_VALUE 0xd5100000
+#define MSR_IMM_VALUE 0xd5000000
+#define MRS_REGISTER(insn) ((insn) & 0x0000001f)
+#define MRS_Op0_SHIFT 19
+#define MRS_Op0_MASK 0x00180000
+#define MRS_Op1_SHIFT 16
+#define MRS_Op1_MASK 0x00070000
+#define MRS_CRn_SHIFT 12
+#define MRS_CRn_MASK 0x0000f000
+#define MRS_CRm_SHIFT 8
+#define MRS_CRm_MASK 0x00000f00
+#define MRS_Op2_SHIFT 5
+#define MRS_Op2_MASK 0x000000e0
+
+ read = false;
+ switch (insn & MRS_MASK) {
+ case MRS_VALUE:
+ read = true;
+ break;
+ case MSR_REG_VALUE:
+ break;
+ case MSR_IMM_VALUE:
+ /*
+ * MSR (immediate) needs special handling. The
+ * source register is always 31 (xzr), CRn is 4,
+ * and op0 is hard coded as 0.
+ */
+ if (MRS_REGISTER(insn) != 31)
+ return (false);
+ if ((insn & MRS_CRn_MASK) >> MRS_CRn_SHIFT != 4)
+ return (false);
+ if ((insn & MRS_Op0_MASK) >> MRS_Op0_SHIFT != 0)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+
+ /* Create a fake EXCP_MSR esr value */
+ esr = EXCP_MSR << ESR_ELx_EC_SHIFT;
+ esr |= ESR_ELx_IL;
+ esr |= __ISS_MSR_REG(
+ (insn & MRS_Op0_MASK) >> MRS_Op0_SHIFT,
+ (insn & MRS_Op1_MASK) >> MRS_Op1_SHIFT,
+ (insn & MRS_CRn_MASK) >> MRS_CRn_SHIFT,
+ (insn & MRS_CRm_MASK) >> MRS_CRm_SHIFT,
+ (insn & MRS_Op2_MASK) >> MRS_Op2_SHIFT);
+ esr |= MRS_REGISTER(insn) << ISS_MSR_Rt_SHIFT;
+ if (read)
+ esr |= ISS_MSR_DIR;
+
+#undef MRS_MASK
+#undef MRS_VALUE
+#undef MSR_REG_VALUE
+#undef MSR_IMM_VALUE
+#undef MRS_REGISTER
+#undef MRS_Op0_SHIFT
+#undef MRS_Op0_MASK
+#undef MRS_Op1_SHIFT
+#undef MRS_Op1_MASK
+#undef MRS_CRn_SHIFT
+#undef MRS_CRn_MASK
+#undef MRS_CRm_SHIFT
+#undef MRS_CRm_MASK
+#undef MRS_Op2_SHIFT
+#undef MRS_Op2_MASK
+
+ return (undef_sys(esr, frame));
+}
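As a worked example, mrs x3, id_aa64mmfr2_el1 encodes as 0xd5380743: the top bits match MRS_VALUE, so read is true, and the fields decode to op0 = 3, op1 = 0, CRn = 0, CRm = 7, op2 = 2, Rt = 3. The synthesized ESR therefore carries EXCP_MSR, ISS_MSR_DIR, and the same op0/op1/CRn/CRm/op2/Rt values a real EXCP_MSR trap from hardware would have reported, so the registered sys handlers see no difference.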
+
int
-undef_insn(u_int el, struct trapframe *frame)
+undef_insn(struct trapframe *frame)
{
struct undef_handler *uh;
uint32_t insn;
int ret;
- KASSERT(el < 2, ("Invalid exception level %u", el));
+ ret = fueword32((uint32_t *)frame->tf_elr, &insn);
+ /* Raise a SIGILL if we are unable to read the instruction */
+ if (ret != 0)
+ return (0);
- if (el == 0) {
- ret = fueword32((uint32_t *)frame->tf_elr, &insn);
- if (ret != 0)
- panic("Unable to read userspace faulting instruction");
- } else {
- insn = *(uint32_t *)frame->tf_elr;
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(curthread->td_proc, SV_ILP32)) {
+ LIST_FOREACH(uh, &undef32_handlers, uh_link) {
+ ret = uh->uh_handler(frame->tf_elr, insn, frame,
+ frame->tf_esr);
+ if (ret)
+ return (1);
+ }
+ return (0);
}
+#endif
+
+ if (undef_sys_insn(frame, insn))
+ return (1);
- LIST_FOREACH(uh, &undef_handlers[el], uh_link) {
+ LIST_FOREACH(uh, &undef_handlers, uh_link) {
ret = uh->uh_handler(frame->tf_elr, insn, frame, frame->tf_esr);
if (ret)
return (1);
diff --git a/sys/arm64/arm64/vfp.c b/sys/arm64/arm64/vfp.c
index f35cd960702b..bcddebfaf66e 100644
--- a/sys/arm64/arm64/vfp.c
+++ b/sys/arm64/arm64/vfp.c
@@ -30,11 +30,15 @@
#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/elf.h>
+#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/reg.h>
+#include <sys/smp.h>
#include <vm/uma.h>
@@ -60,6 +64,63 @@ struct fpu_kern_ctx {
static uma_zone_t fpu_save_area_zone;
static struct vfpstate *fpu_initialstate;
+static u_int sve_max_vector_len;
+
+static size_t
+_sve_buf_size(u_int sve_len)
+{
+ size_t len;
+
+ /* 32 vector registers */
+ len = (size_t)sve_len * 32;
+ /*
+	 * 16 predicate registers and the First Fault Register (FFR), each
+ * the size of a vector register.
+ */
+ len += ((size_t)sve_len * 17) / 8;
+ /*
+ * FPSR and FPCR
+ */
+ len += sizeof(uint64_t) * 2;
+
+ return (len);
+}
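For example, at the 128-bit architectural minimum (sve_len = 16) this is 16 * 32 + (16 * 17) / 8 + 16 = 512 + 34 + 16 = 562 bytes, and at the 2048-bit maximum (sve_len = 256) it is 8192 + 544 + 16 = 8752 bytes.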
+
+size_t
+sve_max_buf_size(void)
+{
+ MPASS(sve_max_vector_len > 0);
+ return (_sve_buf_size(sve_max_vector_len));
+}
+
+size_t
+sve_buf_size(struct thread *td)
+{
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+ MPASS(pcb->pcb_svesaved != NULL);
+ MPASS(pcb->pcb_sve_len > 0);
+
+ return (_sve_buf_size(pcb->pcb_sve_len));
+}
+
+static void *
+sve_alloc(void)
+{
+ void *buf;
+
+ buf = malloc(sve_max_buf_size(), M_FPUKERN_CTX, M_WAITOK | M_ZERO);
+
+ return (buf);
+}
+
+static void
+sve_free(void *buf)
+{
+ free(buf, M_FPUKERN_CTX);
+}
+
void
vfp_enable(void)
{
@@ -71,13 +132,30 @@ vfp_enable(void)
isb();
}
+static void
+sve_enable(void)
+{
+ uint32_t cpacr;
+
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ /* Enable FP */
+ cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
+ /* Enable SVE */
+ cpacr = (cpacr & ~CPACR_ZEN_MASK) | CPACR_ZEN_TRAP_NONE;
+ WRITE_SPECIALREG(cpacr_el1, cpacr);
+ isb();
+}
+
void
vfp_disable(void)
{
uint32_t cpacr;
cpacr = READ_SPECIALREG(cpacr_el1);
+ /* Disable FP */
cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
+ /* Disable SVE */
+ cpacr = (cpacr & ~CPACR_ZEN_MASK) | CPACR_ZEN_TRAP_ALL1;
WRITE_SPECIALREG(cpacr_el1, cpacr);
isb();
}
@@ -171,9 +249,266 @@ vfp_restore(struct vfpstate *state)
}
static void
-vfp_save_state_common(struct thread *td, struct pcb *pcb)
+sve_store(void *state, u_int sve_len)
+{
+ vm_offset_t f_start, p_start, z_start;
+ uint64_t fpcr, fpsr;
+
+ /*
+	 * Calculate the start of each register group. There are three
+ * groups depending on size, with the First Fault Register (FFR)
+ * stored with the predicate registers as we use one of them to
+ * temporarily hold it.
+ *
+ * +-------------------------+-------------------+
+ * | Contents | Register size |
+ * z_start -> +-------------------------+-------------------+
+ * | | |
+ * | 32 Z regs | sve_len |
+ * | | |
+ * p_start -> +-------------------------+-------------------+
+ * | | |
+ * | 16 Predicate registers | 1/8 size of Z reg |
+ * | 1 First Fault register | |
+ * | | |
+ * f_start -> +-------------------------+-------------------+
+ * | | |
+ * | FPSR/FPCR | 32 bit |
+ * | | |
+ * +-------------------------+-------------------+
+ */
+ z_start = (vm_offset_t)state;
+ p_start = z_start + sve_len * 32;
+ f_start = p_start + (sve_len / 8) * 17;
+
+ __asm __volatile(
+ ".arch_extension sve \n"
+ "str z0, [%0, #0, MUL VL] \n"
+ "str z1, [%0, #1, MUL VL] \n"
+ "str z2, [%0, #2, MUL VL] \n"
+ "str z3, [%0, #3, MUL VL] \n"
+ "str z4, [%0, #4, MUL VL] \n"
+ "str z5, [%0, #5, MUL VL] \n"
+ "str z6, [%0, #6, MUL VL] \n"
+ "str z7, [%0, #7, MUL VL] \n"
+ "str z8, [%0, #8, MUL VL] \n"
+ "str z9, [%0, #9, MUL VL] \n"
+ "str z10, [%0, #10, MUL VL] \n"
+ "str z11, [%0, #11, MUL VL] \n"
+ "str z12, [%0, #12, MUL VL] \n"
+ "str z13, [%0, #13, MUL VL] \n"
+ "str z14, [%0, #14, MUL VL] \n"
+ "str z15, [%0, #15, MUL VL] \n"
+ "str z16, [%0, #16, MUL VL] \n"
+ "str z17, [%0, #17, MUL VL] \n"
+ "str z18, [%0, #18, MUL VL] \n"
+ "str z19, [%0, #19, MUL VL] \n"
+ "str z20, [%0, #20, MUL VL] \n"
+ "str z21, [%0, #21, MUL VL] \n"
+ "str z22, [%0, #22, MUL VL] \n"
+ "str z23, [%0, #23, MUL VL] \n"
+ "str z24, [%0, #24, MUL VL] \n"
+ "str z25, [%0, #25, MUL VL] \n"
+ "str z26, [%0, #26, MUL VL] \n"
+ "str z27, [%0, #27, MUL VL] \n"
+ "str z28, [%0, #28, MUL VL] \n"
+ "str z29, [%0, #29, MUL VL] \n"
+ "str z30, [%0, #30, MUL VL] \n"
+ "str z31, [%0, #31, MUL VL] \n"
+ /* Store the predicate registers */
+ "str p0, [%1, #0, MUL VL] \n"
+ "str p1, [%1, #1, MUL VL] \n"
+ "str p2, [%1, #2, MUL VL] \n"
+ "str p3, [%1, #3, MUL VL] \n"
+ "str p4, [%1, #4, MUL VL] \n"
+ "str p5, [%1, #5, MUL VL] \n"
+ "str p6, [%1, #6, MUL VL] \n"
+ "str p7, [%1, #7, MUL VL] \n"
+ "str p8, [%1, #8, MUL VL] \n"
+ "str p9, [%1, #9, MUL VL] \n"
+ "str p10, [%1, #10, MUL VL] \n"
+ "str p11, [%1, #11, MUL VL] \n"
+ "str p12, [%1, #12, MUL VL] \n"
+ "str p13, [%1, #13, MUL VL] \n"
+ "str p14, [%1, #14, MUL VL] \n"
+ "str p15, [%1, #15, MUL VL] \n"
+ ".arch_extension nosve \n"
+ : : "r"(z_start), "r"(p_start));
+
+ /* Save the FFR if needed */
+ /* TODO: Skip if in SME streaming mode (when supported) */
+ __asm __volatile(
+ ".arch_extension sve \n"
+ "rdffr p0.b \n"
+ "str p0, [%0, #16, MUL VL] \n"
+ /*
+ * Load the old p0 value to ensure it is consistent if we enable
+ * without calling sve_restore, e.g. switch to a kernel thread and
+ * back.
+ */
+ "ldr p0, [%0, #0, MUL VL] \n"
+ ".arch_extension nosve \n"
+ : : "r"(p_start));
+
+ __asm __volatile(
+ ".arch_extension fp \n"
+ "mrs %0, fpsr \n"
+ "mrs %1, fpcr \n"
+ "stp %w0, %w1, [%2] \n"
+ ".arch_extension nofp \n"
+ : "=&r"(fpsr), "=&r"(fpcr) : "r"(f_start));
+}
+
+static void
+sve_restore(void *state, u_int sve_len)
+{
+ vm_offset_t f_start, p_start, z_start;
+ uint64_t fpcr, fpsr;
+
+ /* See sve_store for the layout of the state buffer */
+ z_start = (vm_offset_t)state;
+ p_start = z_start + sve_len * 32;
+ f_start = p_start + (sve_len / 8) * 17;
+
+ __asm __volatile(
+ ".arch_extension sve \n"
+ "ldr p0, [%0, #16, MUL VL] \n"
+ "wrffr p0.b \n"
+ ".arch_extension nosve \n"
+ : : "r"(p_start));
+
+ __asm __volatile(
+ ".arch_extension sve \n"
+ "ldr z0, [%0, #0, MUL VL] \n"
+ "ldr z1, [%0, #1, MUL VL] \n"
+ "ldr z2, [%0, #2, MUL VL] \n"
+ "ldr z3, [%0, #3, MUL VL] \n"
+ "ldr z4, [%0, #4, MUL VL] \n"
+ "ldr z5, [%0, #5, MUL VL] \n"
+ "ldr z6, [%0, #6, MUL VL] \n"
+ "ldr z7, [%0, #7, MUL VL] \n"
+ "ldr z8, [%0, #8, MUL VL] \n"
+ "ldr z9, [%0, #9, MUL VL] \n"
+ "ldr z10, [%0, #10, MUL VL] \n"
+ "ldr z11, [%0, #11, MUL VL] \n"
+ "ldr z12, [%0, #12, MUL VL] \n"
+ "ldr z13, [%0, #13, MUL VL] \n"
+ "ldr z14, [%0, #14, MUL VL] \n"
+ "ldr z15, [%0, #15, MUL VL] \n"
+ "ldr z16, [%0, #16, MUL VL] \n"
+ "ldr z17, [%0, #17, MUL VL] \n"
+ "ldr z18, [%0, #18, MUL VL] \n"
+ "ldr z19, [%0, #19, MUL VL] \n"
+ "ldr z20, [%0, #20, MUL VL] \n"
+ "ldr z21, [%0, #21, MUL VL] \n"
+ "ldr z22, [%0, #22, MUL VL] \n"
+ "ldr z23, [%0, #23, MUL VL] \n"
+ "ldr z24, [%0, #24, MUL VL] \n"
+ "ldr z25, [%0, #25, MUL VL] \n"
+ "ldr z26, [%0, #26, MUL VL] \n"
+ "ldr z27, [%0, #27, MUL VL] \n"
+ "ldr z28, [%0, #28, MUL VL] \n"
+ "ldr z29, [%0, #29, MUL VL] \n"
+ "ldr z30, [%0, #30, MUL VL] \n"
+ "ldr z31, [%0, #31, MUL VL] \n"
+	    /* Load the predicate registers */
+ "ldr p0, [%1, #0, MUL VL] \n"
+ "ldr p1, [%1, #1, MUL VL] \n"
+ "ldr p2, [%1, #2, MUL VL] \n"
+ "ldr p3, [%1, #3, MUL VL] \n"
+ "ldr p4, [%1, #4, MUL VL] \n"
+ "ldr p5, [%1, #5, MUL VL] \n"
+ "ldr p6, [%1, #6, MUL VL] \n"
+ "ldr p7, [%1, #7, MUL VL] \n"
+ "ldr p8, [%1, #8, MUL VL] \n"
+ "ldr p9, [%1, #9, MUL VL] \n"
+ "ldr p10, [%1, #10, MUL VL] \n"
+ "ldr p11, [%1, #11, MUL VL] \n"
+ "ldr p12, [%1, #12, MUL VL] \n"
+ "ldr p13, [%1, #13, MUL VL] \n"
+ "ldr p14, [%1, #14, MUL VL] \n"
+ "ldr p15, [%1, #15, MUL VL] \n"
+ ".arch_extension nosve \n"
+ : : "r"(z_start), "r"(p_start));
+
+ __asm __volatile(
+ ".arch_extension fp \n"
+ "ldp %w0, %w1, [%2] \n"
+ "msr fpsr, %0 \n"
+ "msr fpcr, %1 \n"
+ ".arch_extension nofp \n"
+ : "=&r"(fpsr), "=&r"(fpcr) : "r"(f_start));
+}
+
+/*
+ * Sync the VFP registers to the SVE register state, e.g. in signal return
+ * when userspace may have changed the VFP register values and expects them
+ * to be used when the signal handler returns.
+ */
+void
+vfp_to_sve_sync(struct thread *td)
+{
+ struct pcb *pcb;
+ uint32_t *fpxr;
+
+ pcb = td->td_pcb;
+ if (pcb->pcb_svesaved == NULL)
+ return;
+
+ MPASS(pcb->pcb_fpusaved != NULL);
+
+ /* Copy the VFP registers to the SVE region */
+ for (int i = 0; i < nitems(pcb->pcb_fpusaved->vfp_regs); i++) {
+ __uint128_t *sve_reg;
+
+ sve_reg = (__uint128_t *)((uintptr_t)pcb->pcb_svesaved +
+ i * pcb->pcb_sve_len);
+ *sve_reg = pcb->pcb_fpusaved->vfp_regs[i];
+ }
+
+ fpxr = (uint32_t *)((uintptr_t)pcb->pcb_svesaved +
+ (32 * pcb->pcb_sve_len) + (17 * pcb->pcb_sve_len / 8));
+ fpxr[0] = pcb->pcb_fpusaved->vfp_fpsr;
+ fpxr[1] = pcb->pcb_fpusaved->vfp_fpcr;
+}
+
+/*
+ * Sync the SVE registers to the VFP register state.
+ */
+void
+sve_to_vfp_sync(struct thread *td)
+{
+ struct pcb *pcb;
+ uint32_t *fpxr;
+
+ pcb = td->td_pcb;
+ if (pcb->pcb_svesaved == NULL)
+ return;
+
+ MPASS(pcb->pcb_fpusaved == &pcb->pcb_fpustate);
+
+ /* Copy the SVE registers to the VFP saved state */
+ for (int i = 0; i < nitems(pcb->pcb_fpusaved->vfp_regs); i++) {
+ __uint128_t *sve_reg;
+
+ sve_reg = (__uint128_t *)((uintptr_t)pcb->pcb_svesaved +
+ i * pcb->pcb_sve_len);
+ pcb->pcb_fpusaved->vfp_regs[i] = *sve_reg;
+ }
+
+ fpxr = (uint32_t *)((uintptr_t)pcb->pcb_svesaved +
+ (32 * pcb->pcb_sve_len) + (17 * pcb->pcb_sve_len / 8));
+ pcb->pcb_fpusaved->vfp_fpsr = fpxr[0];
+ pcb->pcb_fpusaved->vfp_fpcr = fpxr[1];
+}
+
+static void
+vfp_save_state_common(struct thread *td, struct pcb *pcb, bool full_save)
{
uint32_t cpacr;
+ bool save_sve;
+
+ save_sve = false;
critical_enter();
/*
@@ -181,14 +516,49 @@ vfp_save_state_common(struct thread *td, struct pcb *pcb)
* i.e. return if we are trapping on FP access.
*/
cpacr = READ_SPECIALREG(cpacr_el1);
- if ((cpacr & CPACR_FPEN_MASK) == CPACR_FPEN_TRAP_NONE) {
- KASSERT(PCPU_GET(fpcurthread) == td,
- ("Storing an invalid VFP state"));
+ if ((cpacr & CPACR_FPEN_MASK) != CPACR_FPEN_TRAP_NONE)
+ goto done;
+
+ KASSERT(PCPU_GET(fpcurthread) == td,
+ ("Storing an invalid VFP state"));
+ /*
+ * Also save the SVE state. As SVE depends on the VFP being
+	 * enabled, we only need to check this when
+ * the VFP unit has been enabled.
+ */
+ if ((cpacr & CPACR_ZEN_MASK) == CPACR_ZEN_TRAP_NONE) {
+ /* If SVE is enabled it should be valid */
+ MPASS((pcb->pcb_fpflags & PCB_FP_SVEVALID) != 0);
+
+ /*
+		 * If we are switching while in a system call, skip saving
+		 * the SVE registers. The ABI allows us to drop them across
+		 * any system call; always doing so, however, would require
+		 * disabling SVE for all system calls and trapping the next
+		 * use, which is expensive for SVE-heavy userspace code. As
+		 * an optimisation, only drop the SVE state on context switch.
+ */
+ if (td->td_frame == NULL ||
+ (ESR_ELx_EXCEPTION(td->td_frame->tf_esr) != EXCP_SVC64 &&
+ td->td_sa.code != (u_int)-1))
+ save_sve = true;
+ }
+
+ if (save_sve) {
+ KASSERT(pcb->pcb_svesaved != NULL,
+ ("Storing to a NULL SVE state"));
+ sve_store(pcb->pcb_svesaved, pcb->pcb_sve_len);
+ if (full_save)
+ sve_to_vfp_sync(td);
+ } else {
+ pcb->pcb_fpflags &= ~PCB_FP_SVEVALID;
vfp_store(pcb->pcb_fpusaved);
- dsb(ish);
- vfp_disable();
}
+ dsb(ish);
+ vfp_disable();
+
+done:
critical_exit();
}
@@ -199,7 +569,7 @@ vfp_save_state(struct thread *td, struct pcb *pcb)
KASSERT(pcb != NULL, ("NULL vfp pcb"));
KASSERT(td->td_pcb == pcb, ("Invalid vfp pcb"));
- vfp_save_state_common(td, pcb);
+ vfp_save_state_common(td, pcb, true);
}
void
@@ -207,13 +577,24 @@ vfp_save_state_savectx(struct pcb *pcb)
{
/*
* savectx() will be called on panic with dumppcb as an argument,
- * dumppcb doesn't have pcb_fpusaved set, so set it to save
- * the VFP registers.
+ * dumppcb either has no pcb_fpusaved set or it was previously set
+ * to its own fpu state.
+ *
+ * In both cases we can set it here to the pcb fpu state.
*/
- MPASS(pcb->pcb_fpusaved == NULL);
+ MPASS(pcb->pcb_fpusaved == NULL ||
+ pcb->pcb_fpusaved == &pcb->pcb_fpustate);
pcb->pcb_fpusaved = &pcb->pcb_fpustate;
- vfp_save_state_common(curthread, pcb);
+ vfp_save_state_common(curthread, pcb, true);
+}
+
+void
+vfp_save_state_switch(struct thread *td)
+{
+ KASSERT(td != NULL, ("NULL vfp thread"));
+
+ vfp_save_state_common(td, td->td_pcb, false);
}
/*
@@ -223,21 +604,40 @@ vfp_save_state_savectx(struct pcb *pcb)
void
vfp_new_thread(struct thread *newtd, struct thread *oldtd, bool fork)
{
- struct pcb *newpcb;
+ struct pcb *newpcb, *oldpcb;
newpcb = newtd->td_pcb;
+ oldpcb = oldtd->td_pcb;
/* Kernel threads start with clean VFP */
if ((oldtd->td_pflags & TDP_KTHREAD) != 0) {
newpcb->pcb_fpflags &=
- ~(PCB_FP_STARTED | PCB_FP_KERN | PCB_FP_NOSAVE);
+ ~(PCB_FP_STARTED | PCB_FP_SVEVALID | PCB_FP_KERN |
+ PCB_FP_NOSAVE);
} else {
MPASS((newpcb->pcb_fpflags & (PCB_FP_KERN|PCB_FP_NOSAVE)) == 0);
+
+ /*
+ * The only SVE register state guaranteed to be preserved
+ * across a system call is the lower bits of the Z registers,
+ * as these are aliased with the existing FP registers. Because
+ * we can only create a new thread or fork through a system
+ * call, it is safe to drop the SVE state in the new thread.
+ */
+ newpcb->pcb_fpflags &= ~PCB_FP_SVEVALID;
if (!fork) {
newpcb->pcb_fpflags &= ~PCB_FP_STARTED;
}
}
+ newpcb->pcb_svesaved = NULL;
+ if (oldpcb->pcb_svesaved == NULL)
+ newpcb->pcb_sve_len = sve_max_vector_len;
+ else
+ KASSERT(newpcb->pcb_sve_len == oldpcb->pcb_sve_len,
+ ("%s: pcb sve vector length differs: %x != %x", __func__,
+ newpcb->pcb_sve_len, oldpcb->pcb_sve_len));
+
newpcb->pcb_fpusaved = &newpcb->pcb_fpustate;
newpcb->pcb_vfpcpu = UINT_MAX;
}
@@ -264,23 +664,48 @@ vfp_reset_state(struct thread *td, struct pcb *pcb)
("pcb_fpusaved should point to pcb_fpustate."));
pcb->pcb_fpustate.vfp_fpcr = VFPCR_INIT;
pcb->pcb_fpustate.vfp_fpsr = 0;
+ /* XXX: Memory leak when using SVE between fork & exec? */
+ pcb->pcb_svesaved = NULL;
pcb->pcb_vfpcpu = UINT_MAX;
pcb->pcb_fpflags = 0;
}
-void
-vfp_restore_state(void)
+static void
+vfp_restore_state_common(struct thread *td, int flags)
{
struct pcb *curpcb;
u_int cpu;
+ bool restore_sve;
+
+ KASSERT(td == curthread, ("%s: Called with non-current thread",
+ __func__));
critical_enter();
cpu = PCPU_GET(cpuid);
- curpcb = curthread->td_pcb;
- curpcb->pcb_fpflags |= PCB_FP_STARTED;
+ curpcb = td->td_pcb;
- vfp_enable();
+ /*
+ * If SVE has been used and the base VFP state is in use then
+ * restore the SVE registers. A non-base VFP state should only
+ * be used by the kernel and SVE should only be used by userspace.
+ */
+ restore_sve = false;
+ if ((curpcb->pcb_fpflags & PCB_FP_SVEVALID) != 0 &&
+ curpcb->pcb_fpusaved == &curpcb->pcb_fpustate) {
+ MPASS(curpcb->pcb_svesaved != NULL);
+ /* SVE shouldn't be enabled in the kernel */
+ MPASS((flags & PCB_FP_KERN) == 0);
+ restore_sve = true;
+ }
+
+ if (restore_sve) {
+ MPASS((curpcb->pcb_fpflags & PCB_FP_SVEVALID) != 0);
+ sve_enable();
+ } else {
+ curpcb->pcb_fpflags |= PCB_FP_STARTED;
+ vfp_enable();
+ }
/*
* If the previous thread on this cpu to use the VFP was not the
@@ -288,8 +713,19 @@ vfp_restore_state(void)
* cpu we need to restore the old state.
*/
if (PCPU_GET(fpcurthread) != curthread || cpu != curpcb->pcb_vfpcpu) {
- vfp_restore(curthread->td_pcb->pcb_fpusaved);
- PCPU_SET(fpcurthread, curthread);
+ /*
+ * The VFP registers are the lower 128 bits of the SVE
+ * registers. Restore from the saved SVE state if SVE was
+ * previously enabled.
+ */
+ if (restore_sve) {
+ MPASS(td->td_pcb->pcb_svesaved != NULL);
+ sve_restore(td->td_pcb->pcb_svesaved,
+ td->td_pcb->pcb_sve_len);
+ } else {
+ vfp_restore(td->td_pcb->pcb_fpusaved);
+ }
+ PCPU_SET(fpcurthread, td);
curpcb->pcb_vfpcpu = cpu;
}
@@ -297,6 +733,85 @@ vfp_restore_state(void)
}
void
+vfp_restore_state(void)
+{
+ struct thread *td;
+
+ td = curthread;
+ vfp_restore_state_common(td, td->td_pcb->pcb_fpflags);
+}
+
+bool
+sve_restore_state(struct thread *td)
+{
+ struct pcb *curpcb;
+ void *svesaved;
+ uint64_t cpacr;
+
+ KASSERT(td == curthread, ("%s: Called with non-current thread",
+ __func__));
+
+ curpcb = td->td_pcb;
+
+ /* The SVE state should alias the base VFP state */
+ MPASS(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate);
+
+ if (curpcb->pcb_sve_len == 0) {
+ /* There is no SVE support; tell the caller to raise a fault */
+ if (sve_max_vector_len == 0)
+ return (false);
+
+ /*
+ * The init pcb is created before we read the vector length,
+ * so set it to the default length here.
+ */
+ MPASS(curpcb->pcb_svesaved == NULL);
+ curpcb->pcb_sve_len = sve_max_vector_len;
+ }
+
+ if (curpcb->pcb_svesaved == NULL) {
+ /* SVE should be disabled, so the saved state will be invalid */
+ MPASS((curpcb->pcb_fpflags & PCB_FP_SVEVALID) == 0);
+
+ /*
+ * Allocate the SVE buffer for this thread outside of the
+ * critical section, so the allocation is able to sleep.
+ */
+ svesaved = sve_alloc();
+
+ critical_enter();
+
+ /* Restore the VFP state if needed */
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ if ((cpacr & CPACR_FPEN_MASK) != CPACR_FPEN_TRAP_NONE) {
+ vfp_restore_state_common(td, curpcb->pcb_fpflags);
+ }
+
+ /*
+ * Set the flags only after the VFP state has been restored
+ * above, as until then the SVE saved state is invalid.
+ */
+ curpcb->pcb_svesaved = svesaved;
+ curpcb->pcb_fpflags |= PCB_FP_SVEVALID;
+ sve_enable();
+
+ critical_exit();
+ } else {
+ vfp_restore_state_common(td, curpcb->pcb_fpflags);
+
+ /* Enable SVE if it wasn't previously enabled */
+ if ((curpcb->pcb_fpflags & PCB_FP_SVEVALID) == 0) {
+ critical_enter();
+ sve_enable();
+ curpcb->pcb_fpflags |= PCB_FP_SVEVALID;
+ critical_exit();
+ }
+ }
+
+ return (true);
+}
+
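For context, SVE starts disabled for every thread, so the first SVE
instruction a thread executes traps into the kernel, which calls
sve_restore_state() to allocate the save buffer and enable SVE before
the instruction is retried. A hedged userspace sketch that exercises
this path, assuming SVE hardware and a toolchain that accepts SVE
assembly:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t vl;

	/* rdvl is an SVE instruction; a thread's first use traps */
	__asm __volatile(
	    ".arch_extension sve\n"
	    "rdvl %0, #1\n"
	    ".arch_extension nosve\n"
	    : "=r"(vl));
	printf("SVE vector length: %ju bytes\n", (uintmax_t)vl);
	return (0);
}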
+void
vfp_init_secondary(void)
{
uint64_t pfr;
@@ -340,6 +855,213 @@ vfp_init(const void *dummy __unused)
SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);
+static void
+sve_thread_dtor(void *arg __unused, struct thread *td)
+{
+ sve_free(td->td_pcb->pcb_svesaved);
+}
+
+static void
+sve_pcpu_read(void *arg)
+{
+ u_int *len;
+ uint64_t vl;
+
+ len = arg;
+
+ /* Enable SVE to read zcr_el1 and VFP for rdvl */
+ sve_enable();
+
+ /* Set the longest vector length */
+ WRITE_SPECIALREG(ZCR_EL1_REG, ZCR_LEN_MASK);
+ isb();
+
+ /* Read the real vector length */
+ __asm __volatile(
+ ".arch_extension sve \n"
+ "rdvl %0, #1 \n"
+ ".arch_extension nosve \n"
+ : "=&r"(vl));
+
+ vfp_disable();
+
+ len[PCPU_GET(cpuid)] = vl;
+}
+
+static void
+sve_init(const void *dummy __unused)
+{
+ u_int *len_list;
+ uint64_t reg;
+ int i;
+
+ if (!get_kernel_reg(ID_AA64PFR0_EL1, &reg))
+ return;
+
+ if (ID_AA64PFR0_SVE_VAL(reg) == ID_AA64PFR0_SVE_NONE)
+ return;
+
+ len_list = malloc(sizeof(*len_list) * (mp_maxid + 1), M_TEMP,
+ M_WAITOK | M_ZERO);
+ smp_rendezvous(NULL, sve_pcpu_read, NULL, len_list);
+
+ sve_max_vector_len = ZCR_LEN_BYTES(ZCR_LEN_MASK);
+ CPU_FOREACH(i) {
+ if (bootverbose)
+ printf("CPU%d SVE vector length: %u\n", i, len_list[i]);
+ sve_max_vector_len = MIN(sve_max_vector_len, len_list[i]);
+ }
+ free(len_list, M_TEMP);
+
+ if (bootverbose)
+ printf("SVE with %u byte vectors\n", sve_max_vector_len);
+
+ if (sve_max_vector_len > 0) {
+ EVENTHANDLER_REGISTER(thread_dtor, sve_thread_dtor, NULL,
+ EVENTHANDLER_PRI_ANY);
+ }
+}
+SYSINIT(sve, SI_SUB_SMP, SI_ORDER_ANY, sve_init, NULL);
+
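The ZCR_ELx.LEN field encodes the vector length as a count of 128-bit
granules minus one, so sve_pcpu_read() asks for the largest length by
writing all-ones and lets rdvl report what the CPU actually granted;
sve_init() then takes the minimum across all CPUs so a single length is
usable on every core of an asymmetric system. A one-line sketch of the
encoding (the macro name is illustrative only):

/* ZCR_ELx.LEN counts 128-bit (16-byte) granules, minus one. */
#define	SVE_LEN_TO_BYTES(len)	(((len) + 1) * 16)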
+static bool
+get_arm64_sve(struct regset *rs, struct thread *td, void *buf,
+ size_t *sizep)
+{
+ struct svereg_header *header;
+ struct pcb *pcb;
+ size_t buf_size;
+ uint16_t sve_flags;
+
+ pcb = td->td_pcb;
+
+ /* If there is no SVE support in HW then we don't support NT_ARM_SVE */
+ if (pcb->pcb_sve_len == 0)
+ return (false);
+
+ sve_flags = 0;
+ if ((pcb->pcb_fpflags & PCB_FP_SVEVALID) == 0) {
+ /* If SVE hasn't been used yet, provide the VFP registers */
+ buf_size = sizeof(struct fpreg);
+ sve_flags |= SVEREG_FLAG_FP;
+ } else {
+ /* We have SVE registers */
+ buf_size = sve_buf_size(td);
+ sve_flags |= SVEREG_FLAG_SVE;
+ KASSERT(pcb->pcb_svesaved != NULL, ("%s: no saved sve",
+ __func__));
+ }
+
+ if (buf != NULL) {
+ KASSERT(*sizep == sizeof(struct svereg_header) + buf_size,
+ ("%s: invalid size", __func__));
+
+ if (td == curthread && (pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
+ vfp_save_state(td, pcb);
+
+ header = buf;
+ memset(header, 0, sizeof(*header));
+
+ header->sve_size = sizeof(struct svereg_header) + buf_size;
+ header->sve_maxsize = sizeof(struct svereg_header) +
+ sve_max_buf_size();
+ header->sve_vec_len = pcb->pcb_sve_len;
+ header->sve_max_vec_len = sve_max_vector_len;
+ header->sve_flags = sve_flags;
+
+ if ((sve_flags & SVEREG_FLAG_REGS_MASK) == SVEREG_FLAG_FP) {
+ struct fpreg *fpregs;
+
+ fpregs = (void *)(&header[1]);
+ memcpy(fpregs->fp_q, pcb->pcb_fpustate.vfp_regs,
+ sizeof(fpregs->fp_q));
+ fpregs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
+ fpregs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
+ } else {
+ memcpy((void *)(&header[1]), pcb->pcb_svesaved,
+ buf_size);
+ }
+ }
+ *sizep = sizeof(struct svereg_header) + buf_size;
+
+ return (true);
+}
+
+static bool
+set_arm64_sve(struct regset *rs, struct thread *td, void *buf, size_t size)
+{
+ struct svereg_header *header;
+ struct pcb *pcb;
+ size_t buf_size;
+ uint16_t sve_flags;
+
+ pcb = td->td_pcb;
+
+ /* If there is no SVE support in HW then we don't support NT_ARM_SVE */
+ if (pcb->pcb_sve_len == 0)
+ return (false);
+
+ sve_flags = 0;
+ if ((pcb->pcb_fpflags & PCB_FP_SVEVALID) == 0) {
+ /*
+ * If the SVE state is invalid, provide the FP registers.
+ * This may be because SVE hasn't been used yet, or because
+ * the state was dropped over a system call.
+ */
+ buf_size = sizeof(struct fpreg);
+ sve_flags |= SVEREG_FLAG_FP;
+ } else {
+ /* We have SVE registers */
+ MPASS(pcb->pcb_svesaved != NULL);
+ buf_size = sve_buf_size(td);
+ sve_flags |= SVEREG_FLAG_SVE;
+ KASSERT(pcb->pcb_svesaved != NULL, ("%s: no saved sve",
+ __func__));
+ }
+
+ if (size != sizeof(struct svereg_header) + buf_size)
+ return (false);
+
+ header = buf;
+ /* Sanity checks on the header */
+ if (header->sve_size != sizeof(struct svereg_header) + buf_size)
+ return (false);
+
+ if (header->sve_maxsize != sizeof(struct svereg_header) +
+ sve_max_buf_size())
+ return (false);
+
+ if (header->sve_vec_len != pcb->pcb_sve_len)
+ return (false);
+
+ if (header->sve_max_vec_len != sve_max_vector_len)
+ return (false);
+
+ if (header->sve_flags != sve_flags)
+ return (false);
+
+ if ((sve_flags & SVEREG_FLAG_REGS_MASK) == SVEREG_FLAG_FP) {
+ struct fpreg *fpregs;
+
+ fpregs = (void *)(&header[1]);
+ memcpy(pcb->pcb_fpustate.vfp_regs, fpregs->fp_q,
+ sizeof(fpregs->fp_q));
+ pcb->pcb_fpustate.vfp_fpcr = fpregs->fp_cr;
+ pcb->pcb_fpustate.vfp_fpsr = fpregs->fp_sr;
+ } else {
+ /* Restore the SVE registers */
+ memcpy(pcb->pcb_svesaved, (void *)(&header[1]), buf_size);
+ }
+
+ return (true);
+}
+
+static struct regset regset_arm64_sve = {
+ .note = NT_ARM_SVE,
+ .get = get_arm64_sve,
+ .set = set_arm64_sve,
+};
+ELF_REGSET(regset_arm64_sve);
+
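With the regset registered, a debugger can fetch a traced thread's SVE
state through the generic regset ptrace interface. A hedged sketch,
assuming NT_ARM_SVE and struct svereg_header are visible to userspace
through machine/elf.h; see ptrace(2) for the exact conventions:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <machine/elf.h>	/* assumed home of NT_ARM_SVE */

static int
fetch_sve(pid_t pid, void *buf, size_t len)
{
	struct iovec iov;

	iov.iov_base = buf;
	iov.iov_len = len;
	/* On success the kernel updates iov_len to the bytes written */
	return (ptrace(PT_GETREGSET, pid, (caddr_t)&iov, NT_ARM_SVE));
}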
struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c
index 457157e34a6f..38a126ff602f 100644
--- a/sys/arm64/arm64/vm_machdep.c
+++ b/sys/arm64/arm64/vm_machdep.c
@@ -138,16 +138,6 @@ cpu_reset(void)
}
void
-cpu_thread_swapin(struct thread *td)
-{
-}
-
-void
-cpu_thread_swapout(struct thread *td)
-{
-}
-
-void
cpu_set_syscall_retval(struct thread *td, int error)
{
struct trapframe *frame;
@@ -235,7 +225,7 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
}
int
-cpu_set_user_tls(struct thread *td, void *tls_base)
+cpu_set_user_tls(struct thread *td, void *tls_base, int thr_flags __unused)
{
struct pcb *pcb;
@@ -301,6 +291,14 @@ cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
}
void
+cpu_update_pcb(struct thread *td)
+{
+ MPASS(td == curthread);
+ td->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0);
+ td->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
+}
+
+void
cpu_exit(struct thread *td)
{
}
diff --git a/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c b/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c
index 9e73985ded0d..81e79a16bf02 100644
--- a/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c
+++ b/sys/arm64/broadcom/brcmmdio/mdio_mux_iproc.c
@@ -342,7 +342,7 @@ brcm_iproc_mdio_attach(device_t dev)
ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
/* Add newbus device for this FDT node */
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
printf("Failed to add child\n");
resource_list_free(&di->di_rl);
@@ -362,7 +362,8 @@ brcm_iproc_mdio_attach(device_t dev)
node = ofw_bus_get_node(dev);
OF_device_register_xref(OF_xref_from_node(node), dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
error:
brcm_iproc_mdio_detach(dev);
diff --git a/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c b/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c
index f1bf5ac38bf9..2d3185e22041 100644
--- a/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c
+++ b/sys/arm64/broadcom/brcmmdio/mdio_nexus_iproc.c
@@ -174,7 +174,8 @@ brcm_mdionexus_fdt_attach(device_t dev)
if (err != 0)
return (err);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static const struct ofw_bus_devinfo *
@@ -215,7 +216,7 @@ brcm_mdionexus_ofw_bus_attach(device_t dev)
ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
/* Add newbus device for this FDT node */
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
resource_list_free(&di->di_rl);
ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
diff --git a/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c b/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c
index f8f5c670c35b..89db48842e55 100644
--- a/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c
+++ b/sys/arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c
@@ -154,5 +154,6 @@ ns2_pcie_phy_fdt_attach(device_t dev)
if (ns2_pci_phy_init(dev) < 0)
return (EINVAL);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
diff --git a/sys/arm64/broadcom/genet/if_genet.c b/sys/arm64/broadcom/genet/if_genet.c
index e102b6c3a95f..0602f076b257 100644
--- a/sys/arm64/broadcom/genet/if_genet.c
+++ b/sys/arm64/broadcom/genet/if_genet.c
@@ -365,10 +365,7 @@ static void
gen_destroy(struct gen_softc *sc)
{
- if (sc->miibus) { /* can't happen */
- device_delete_child(sc->dev, sc->miibus);
- sc->miibus = NULL;
- }
+ device_delete_children(sc->dev);
bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
gen_bus_dma_teardown(sc);
@@ -1069,6 +1066,10 @@ gen_encap(struct gen_softc *sc, struct mbuf **mp)
GEN_ASSERT_LOCKED(sc);
q = &sc->tx_queue[DEF_TXQUEUE];
+ if (q->queued == q->nentries) {
+ /* tx_queue is full */
+ return (ENOBUFS);
+ }
m = *mp;
diff --git a/sys/arm64/cavium/thunder_pcie_fdt.c b/sys/arm64/cavium/thunder_pcie_fdt.c
index f173a28b637d..87dc113ad781 100644
--- a/sys/arm64/cavium/thunder_pcie_fdt.c
+++ b/sys/arm64/cavium/thunder_pcie_fdt.c
@@ -152,7 +152,7 @@ thunder_pcie_ofw_bus_attach(device_t dev)
ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
/* Add newbus device for this FDT node */
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
resource_list_free(&di->di_rl);
ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c
index dd6ef14f1a6e..b01dfecb347e 100644
--- a/sys/arm64/cavium/thunder_pcie_pem.c
+++ b/sys/arm64/cavium/thunder_pcie_pem.c
@@ -254,16 +254,12 @@ thunder_pem_write_ivar(device_t dev, device_t child, int index,
static int
thunder_pem_activate_resource(device_t dev, device_t child, struct resource *r)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct thunder_pem_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(r)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_activate_bus(sc->id, child, r));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_activate_resource(dev, child, r));
@@ -276,16 +272,12 @@ static int
thunder_pem_deactivate_resource(device_t dev, device_t child,
struct resource *r)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct thunder_pem_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(r)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_deactivate_bus(sc->id, child, r));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_deactivate_resource(dev, child, r));
@@ -350,16 +342,12 @@ static int
thunder_pem_adjust_resource(device_t dev, device_t child, struct resource *res,
rman_res_t start, rman_res_t end)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct thunder_pem_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_adjust_bus(sc->id, child, res, start, end));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_adjust_resource(dev, child, res, start,
@@ -671,11 +659,9 @@ thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid,
device_t parent_dev;
switch (type) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_alloc_bus(sc->id, child, rid, start, end,
count, flags));
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
break;
@@ -718,15 +704,11 @@ static int
thunder_pem_release_resource(device_t dev, device_t child, struct resource *res)
{
device_t parent_dev;
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct thunder_pem_softc *sc = device_get_softc(dev);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_release_bus(sc->id, child, res));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_release_resource(dev, child, res));
@@ -766,7 +748,7 @@ thunder_pem_probe(device_t dev)
if ((pci_vendor_id == THUNDER_PEM_VENDOR_ID) &&
(pci_device_id == THUNDER_PEM_DEVICE_ID)) {
- device_set_desc_copy(dev, THUNDER_PEM_DESC);
+ device_set_desc(dev, THUNDER_PEM_DESC);
return (0);
}
@@ -924,9 +906,9 @@ thunder_pem_attach(device_t dev)
goto fail_io;
}
- device_add_child(dev, "pci", -1);
-
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
fail_io:
rman_fini(&sc->io_rman);
diff --git a/sys/arm64/conf/DEFAULTS b/sys/arm64/conf/DEFAULTS
index 5d267af4b7d7..fb8b74783867 100644
--- a/sys/arm64/conf/DEFAULTS
+++ b/sys/arm64/conf/DEFAULTS
@@ -15,7 +15,6 @@ options GEOM_PART_GPT
# Default congestion control algorithm
options CC_CUBIC # include CUBIC congestion control
-options NEW_PCIB
options INTRNG
options NETLINK # netlink(4) support
diff --git a/sys/arm64/conf/GENERIC.hints b/sys/arm64/conf/GENERIC.hints
new file mode 100644
index 000000000000..9f009dcd175f
--- /dev/null
+++ b/sys/arm64/conf/GENERIC.hints
@@ -0,0 +1,2 @@
+# Empty device.hints
+# Used to wire devices in the system to specific driver instances
diff --git a/sys/arm64/conf/NOTES b/sys/arm64/conf/NOTES
index 3e8499c872b9..e773253da6d7 100644
--- a/sys/arm64/conf/NOTES
+++ b/sys/arm64/conf/NOTES
@@ -16,6 +16,7 @@
# kernel modules.
#
options KDTRACE_HOOKS
+options KDTRACE_MIB_SDT
#
# Most of the following is copied from ARM64 GENERIC.
@@ -43,12 +44,14 @@ options SOC_INTEL_STRATIX10
options SOC_MARVELL_8K
options SOC_NVIDIA_TEGRA210
options SOC_NXP_LS
+options SOC_ROCKCHIP
options SOC_ROCKCHIP_RK3328
options SOC_ROCKCHIP_RK3399
+options SOC_ROCKCHIP_RK3568
options SOC_XILINX_ZYNQ
# Timer drivers
-device a10_timer
+device aw_timer
# Annapurna Alpine drivers
device al_ccu # Alpine Cache Coherency Unit
@@ -95,6 +98,10 @@ device ice_ddp # Intel 800 Series DDP Package
# Etherswitch devices
device e6000sw # Marvell mv88e6085 based switches
+# Storage
+# Broadcom MPT Fusion, version 4, is 64-bit only
+device mpi3mr # LSI-Logic MPT-Fusion 4
+
# MMC/SD/SDIO Card slot support
device sdhci_xenon # Marvell Xenon SD/MMC controller
device aw_mmc # Allwinner SD/MMC controller
@@ -161,7 +168,10 @@ device aw_wdog # Allwinner Watchdog
# Power management controllers
device axp81x # X-Powers AXP81x PMIC
+device rk8xx # RockChip RK8XX base support
device rk805 # RockChip RK805 PMIC
+device rk808 # RockChip RK808 PMIC
+device rk817 # RockChip RK817 PMIC
# EFUSE
device aw_sid # Allwinner Secure ID EFUSE
@@ -240,6 +250,8 @@ nooptions COMPAT_FREEBSD10
# arm64 supports 32-bit FreeBSD/arm binaries (armv[67] ABIs)
options COMPAT_FREEBSD32 # Compatible with FreeBSD/arm
+options IOMMU # ARM64 SMMU/IOMMU
+
#####################################################################
# ZFS support
diff --git a/sys/arm64/conf/std.allwinner b/sys/arm64/conf/std.allwinner
index 2bcbdc32e111..345322cebb0f 100644
--- a/sys/arm64/conf/std.allwinner
+++ b/sys/arm64/conf/std.allwinner
@@ -8,7 +8,7 @@ options SOC_ALLWINNER_H5
options SOC_ALLWINNER_H6
# Timer drivers
-device a10_timer
+device aw_timer
# DMA controller
device a31_dmac
diff --git a/sys/arm64/conf/std.arm64 b/sys/arm64/conf/std.arm64
index cc4a5acbb314..c83e98c17a33 100644
--- a/sys/arm64/conf/std.arm64
+++ b/sys/arm64/conf/std.arm64
@@ -12,6 +12,7 @@ options INET # InterNETworking
options INET6 # IPv6 communications protocols
options CC_CUBIC # include CUBIC congestion control
options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5
+options IPSEC_OFFLOAD # Inline ipsec offload infra
options ROUTE_MPATH # Multipath routing support
options FIB_ALGO # Modular fib lookups
options TCP_OFFLOAD # TCP offload
@@ -101,3 +102,6 @@ options PPS_SYNC
# EFI devices
device efidev # EFI pseudo-device
device efirtc # EFI RTC
+
+# SMBIOS -- all EFI platforms
+device smbios
diff --git a/sys/arm64/conf/std.dev b/sys/arm64/conf/std.dev
index eefff7e3a6e0..c5c364ffda04 100644
--- a/sys/arm64/conf/std.dev
+++ b/sys/arm64/conf/std.dev
@@ -99,7 +99,9 @@ device uhci # UHCI USB interface
device ehci # EHCI USB interface (USB 2.0)
device xhci # XHCI USB interface (USB 3.0)
device usb # USB Bus (required)
-device ukbd # Keyboard
+device usbhid # USB HID Transport
+device hkbd # HID Keyboard
+device ukbd # USB Keyboard
device umass # Disks/Mass storage - Requires scbus and da
# Sound support
@@ -112,6 +114,11 @@ device mmcsd # mmc/sd flash cards
# HID support
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
+device hidbus # Generic HID Bus
# Firmware
device mmio_sram # Generic on-chip SRAM
+
+# Wireless options
+options IEEE80211_DEBUG # enable debug msgs
+options IEEE80211_SUPPORT_MESH # enable 802.11s draft support
diff --git a/sys/arm64/conf/std.qcom b/sys/arm64/conf/std.qcom
index af7e0add8b40..4051df46bf49 100644
--- a/sys/arm64/conf/std.qcom
+++ b/sys/arm64/conf/std.qcom
@@ -13,3 +13,6 @@ device uart_msm # Qualcomm MSM UART driver
device sdhci
options FDT
+
+# DTBs
+makeoptions MODULES_EXTRA+="dtb/qcom"
diff --git a/sys/arm64/conf/std.rockchip b/sys/arm64/conf/std.rockchip
index 3733ddc4eeae..857cd7ed102b 100644
--- a/sys/arm64/conf/std.rockchip
+++ b/sys/arm64/conf/std.rockchip
@@ -3,6 +3,7 @@
#
# SoC support
+options SOC_ROCKCHIP
options SOC_ROCKCHIP_RK3328
options SOC_ROCKCHIP_RK3399
options SOC_ROCKCHIP_RK3568
@@ -20,7 +21,9 @@ device rk_i2c # RockChip I2C controller
device fan53555 # Fairchild Semi FAN53555/SYR82x Regulator
# Power management controllers
+device rk8xx # RockChip RK8XX base support
device rk805 # RockChip RK805 PMIC
+device rk808 # RockChip RK808 PMIC
device rk817 # RockChip RK817 PMIC
device syr827 # Silergy SYR827 PMIC
device tcs4525 # TCS 4525 PMIC
diff --git a/sys/arm64/conf/std.xilinx b/sys/arm64/conf/std.xilinx
index 50ebf5ade53b..2283616e8cdf 100644
--- a/sys/arm64/conf/std.xilinx
+++ b/sys/arm64/conf/std.xilinx
@@ -15,6 +15,7 @@ device cgem # Cadence GEM Gigabit Ethernet device
# MMC/SD/SDIO Card slot support
device sdhci
+device sdhci_xilinx
# IICBUS
device cdnc_i2c
diff --git a/sys/arm64/freescale/imx/clk/imx_clk_composite.c b/sys/arm64/freescale/imx/clk/imx_clk_composite.c
index 1d5ab5908d8c..3d2ef06fb8a2 100644
--- a/sys/arm64/freescale/imx/clk/imx_clk_composite.c
+++ b/sys/arm64/freescale/imx/clk/imx_clk_composite.c
@@ -217,6 +217,7 @@ imx_clk_composite_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout
p_names = clknode_get_parent_names(clk);
best_diff = 0;
+ best_parent = -1;
for (p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) {
p_clk = clknode_find_by_name(p_names[p_idx]);
@@ -243,6 +244,10 @@ imx_clk_composite_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout
if (best_diff == INT64_MAX)
return (ERANGE);
+ /* If we didn't find a new best_parent just return */
+ if (best_parent == -1)
+ return (0);
+
if ((flags & CLK_SET_DRYRUN) != 0) {
*fout = best;
return (0);
diff --git a/sys/arm64/freescale/imx/imx8mp_ccm.c b/sys/arm64/freescale/imx/imx8mp_ccm.c
new file mode 100644
index 000000000000..ed6289c7a096
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx8mp_ccm.c
@@ -0,0 +1,693 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Tom Jones <thj@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/cdefs.h>
+
+/*
+ * Clock driver for the Freescale i.MX 8M Plus SoC.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+
+#include <arm64/freescale/imx/imx_ccm.h>
+#include <arm64/freescale/imx/imx8mp_ccm.h>
+#include <arm64/freescale/imx/clk/imx_clk_gate.h>
+#include <arm64/freescale/imx/clk/imx_clk_mux.h>
+#include <arm64/freescale/imx/clk/imx_clk_composite.h>
+#include <arm64/freescale/imx/clk/imx_clk_sscg_pll.h>
+#include <arm64/freescale/imx/clk/imx_clk_frac_pll.h>
+
+static const char *pll_ref_p[] = {
+ "osc_24m", "dummy", "dummy", "dummy"
+};
+static const char *audio_pll1_bypass_p[] = {
+ "audio_pll1", "audio_pll1_ref_sel"
+};
+static const char *audio_pll2_bypass_p[] = {
+ "audio_pll2", "audio_pll2_ref_sel"
+};
+static const char *video_pll1_bypass_p[] = {
+ "video_pll1", "video_pll1_ref_sel"
+};
+static const char *dram_pll_bypass_p[] = {
+ "dram_pll", "dram_pll_ref_sel"
+};
+static const char *gpu_pll_bypass_p[] = {
+ "gpu_pll", "gpu_pll_ref_sel"
+};
+static const char *vpu_pll_bypass_p[] = {
+ "vpu_pll", "vpu_pll_ref_sel"
+};
+static const char *arm_pll_bypass_p[] = {
+ "arm_pll", "arm_pll_ref_sel"
+};
+static const char *sys_pll1_bypass_p[] = {
+ "sys_pll1", "sys_pll1_ref_sel"
+};
+static const char *sys_pll2_bypass_p[] = {
+ "sys_pll2", "sys_pll2_ref_sel"
+};
+static const char *sys_pll3_bypass_p[] = {
+ "sys_pll3", "sys_pll3_ref_sel"
+};
+
+/*
+ * Table 5-1 of "i.MX 8M Plus Applications Processor Reference Manual" provides
+ * the Clock Root Table.
+ */
+static const char *a53_p[] = {
+ "osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
+ "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out"
+};
+static const char *a53_core_p[] = {
+ "arm_a53_div", "arm_pll_out"
+};
+static const char *ahb_p[] = {
+ "osc_24m", "sys_pll1_133m", "sys_pll1_800m", "sys_pll1_400m",
+ "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out"
+};
+static const char *audio_ahb_p[] = {
+ "osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out"
+};
+static const char *audio_axi_p[] = {
+ "osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *can_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out"
+};
+static const char *clkout_p[] = {
+ "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "dummy", "dummy",
+ "gpu_pll_out", "vpu_pll_out", "arm_pll_out", "sys_pll1", "sys_pll2",
+ "sys_pll3", "dummy", "dummy", "osc_24m", "dummy", "osc_32k"
+};
+static const char *dram_alt_p[] = {
+ "osc_24m", "sys_pll1_800m", "sys_pll1_100m", "sys_pll2_500m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll1_out", "sys_pll1_266m"
+};
+static const char *dram_apb_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out"
+};
+static const char *dram_core_p[] = {
+ "dram_pll_out", "dram_alt_root"
+};
+static const char *ecspi_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out"
+};
+static const char *enet_axi_p[] = {
+ "osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_250m",
+ "sys_pll2_200m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out"
+};
+static const char *enet_phy_ref_p[] = {
+ "osc_24m", "sys_pll2_50m", "sys_pll2_125m", "sys_pll2_200m",
+ "sys_pll2_500m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out",
+};
+static const char *enet_qos_p[] = {
+ "osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m",
+ "sys_pll1_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4",
+};
+static const char *enet_qos_timer_p[] = {
+ "osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "video_pll1_out",
+};
+static const char *enet_ref_p[] = {
+ "osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m",
+ "sys_pll1_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4",
+};
+static const char *enet_timer_p[] = {
+ "osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "video_pll1_out",
+};
+static const char *gic_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_500m", "clk_ext4", "audio_pll2_out"
+};
+static const char *gpt_p[] = {
+ "osc_24m", "sys_pll2_100m", "sys_pll1_400m", "sys_pll1_40m",
+ "video_pll1_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1"
+};
+static const char *gpu_p[] = {
+ "osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *gpu_ahb_p[] = {
+ "osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *gpu_axi_p[] = {
+ "osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *hdmi_24m_p[] = {
+ "osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys_pll1_133m"
+};
+static const char *hdmi_fdcc_tst_p[] = {
+ "osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out"
+};
+static const char *hdmi_ref_266m_p[] = {
+ "osc_24m", "sys_pll1_400m", "sys_pll3_out", "sys_pll2_333m",
+ "sys_pll1_266m", "sys_pll2_200m", "audio_pll1_out", "video_pll1_out"
+};
+static const char *hsio_axi_p[] = {
+ "osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext4", "audio_pll2_out"
+};
+static const char *i2c_p[] = {
+ "osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys_pll1_133m"
+};
+static const char *ipp_do_clko1_p[] = {
+ "osc_24m", "sys_pll1_800m", "sys_pll1_133m", "sys_pll1_200m",
+ "audio_pll2_out", "sys_pll2_500m", "vpu_pll_out", "sys_pll1_80m"
+};
+static const char *ipp_do_clko2_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k"
+};
+static const char *m7_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out",
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out"
+};
+static const char *main_axi_p[] = {
+ "osc_24m", "sys_pll2_333m", "sys_pll1_800m", "sys_pll2_250m",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "sys_pll1_100m"
+};
+static const char *media_apb_p[] = {
+ "osc_24m", "sys_pll2_125m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "sys_pll1_133m"
+};
+static const char *media_axi_p[] = {
+ "osc_24m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "sys_pll2_500m"
+};
+static const char *media_cam1_pix_p[] = {
+ "osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out"
+};
+static const char *media_cam2_pix_p[] = {
+ "osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out",
+ "video_pll1_out"
+};
+static const char *media_disp_pix_p[] = {
+ "osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4"
+};
+static const char *media_isp_p[] = {
+ "osc_24m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_400m", "audio_pll2_out", "clk_ext1", "sys_pll2_500m"
+};
+static const char *media_mipi_phy1_ref_p[] = {
+ "osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out"
+};
+static const char *media_mipi_test_byte_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
+ "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m"
+};
+static const char *media_ldb_p[] = {
+ "osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out"
+};
+static const char *memrepair_p[] = {
+ "osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out"
+};
+static const char *mipi_dsi_esc_rx_p[] = {
+ "osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out"
+};
+static const char *ml_p[] = {
+ "osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *ml_ahb_p[] = {
+ "osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *ml_axi_p[] = {
+ "osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *nand_p[] = {
+ "osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m",
+ "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out"
+};
+static const char *noc_p[] = {
+ "osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m",
+ "sys_pll2_500m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *noc_io_p[] = {
+ "osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m",
+ "sys_pll2_500m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out"
+};
+static const char *pcie_aux_p[] = {
+ "osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
+ "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m"
+};
+static const char *pdm_p[] = {
+ "osc_24m", "sys_pll2_100m", "audio_pll1_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out"
+};
+static const char *pwm_p[] = {
+ "osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out"
+};
+static const char *qspi_p[] = {
+ "osc_24m", "sys_pll1_400m", "sys_pll2_333m", "sys_pll2_500m",
+ "audio_pll2_out", "sys_pll1_266m", "sys_pll3_out", "sys_pll1_100m"
+};
+static const char *sai1_p[] = {
+ "osc_24m", "sys_pll1_133m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "clk_ext1", "clk_ext2", "dummy",
+};
+static const char *sai2_p[] = {
+ "osc_24m", "sys_pll1_133m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "clk_ext2", "clk_ext3", "dummy",
+};
+static const char *sai3_p[] = {
+ "osc_24m", "sys_pll1_133m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "clk_ext3", "clk_ext4", "dummy",
+};
+static const char *sai5_p[] = {
+ "osc_24m", "sys_pll1_133m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "clk_ext2", "clk_ext3", "dummy",
+};
+static const char *sai6_p[] = {
+ "osc_24m", "sys_pll1_133m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "clk_ext3", "clk_ext4", "dummy",
+};
+static const char *sai7_p[] = {
+ "osc_24m", "sys_pll1_133m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "clk_ext3", "clk_ext4", "dummy",
+};
+static const char *uart_p[] = {
+ "osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out"
+};
+static const char *usb_core_ref_p[] = {
+ "osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out"
+};
+static const char *usdhc_p[] = {
+ "osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m"
+};
+static const char *usb_phy_ref_p[] = {
+ "osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out"
+};
+static const char *usdhc_nand_p[] = {
+ "osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_200m",
+ "sys_pll1_133m", "sys_pll3_out", "sys_pll2_250m", "audio_pll1_out"
+};
+static const char *vpu_bus_p[] = {
+ "osc_24m", "sys_pll1_800m", "vpu_pll_out", "audio_pll2_out",
+ "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_200m", "sys_pll1_100m"
+};
+static const char *vpu_g_p[] = {
+ "osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out"
+};
+static const char *vpu_vc8000e_p[] = {
+ "osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "audio_pll2_out", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out"
+};
+static const char *wdog_p[] = {
+ "osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out",
+ "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m"
+};
+static const char *wrclk_p[] = {
+ "osc_24m", "sys_pll1_40m", "vpu_pll_out", "sys_pll3_out",
+ "sys_pll2_200m", "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m"
+};
+
+static struct imx_clk imx8mp_clks[] = {
+ FIXED(IMX8MP_CLK_DUMMY, "dummy", 0),
+
+ LINK(IMX8MP_CLK_32K, "osc_32k"),
+ LINK(IMX8MP_CLK_24M, "osc_24m"),
+ LINK(IMX8MP_CLK_EXT1, "clk_ext1"),
+ LINK(IMX8MP_CLK_EXT2, "clk_ext2"),
+ LINK(IMX8MP_CLK_EXT3, "clk_ext3"),
+ LINK(IMX8MP_CLK_EXT4, "clk_ext4"),
+
+ MUX(IMX8MP_AUDIO_PLL1_REF_SEL, "audio_pll1_ref_sel", pll_ref_p, 0, 0x00, 0, 2),
+ MUX(IMX8MP_AUDIO_PLL2_REF_SEL, "audio_pll2_ref_sel", pll_ref_p, 0, 0x14, 0, 2),
+ MUX(IMX8MP_VIDEO_PLL1_REF_SEL, "video_pll1_ref_sel", pll_ref_p, 0, 0x28, 0, 2),
+ MUX(IMX8MP_DRAM_PLL_REF_SEL, "dram_pll_ref_sel", pll_ref_p, 0, 0x50, 0, 2),
+ MUX(IMX8MP_GPU_PLL_REF_SEL, "gpu_pll_ref_sel", pll_ref_p, 0, 0x64, 0, 2),
+ MUX(IMX8MP_VPU_PLL_REF_SEL, "vpu_pll_ref_sel", pll_ref_p, 0, 0x74, 0, 2),
+ MUX(IMX8MP_ARM_PLL_REF_SEL, "arm_pll_ref_sel", pll_ref_p, 0, 0x84, 0, 2),
+ MUX(IMX8MP_SYS_PLL1_REF_SEL, "sys_pll1_ref_sel", pll_ref_p, 0, 0x94, 0, 2),
+ MUX(IMX8MP_SYS_PLL2_REF_SEL, "sys_pll2_ref_sel", pll_ref_p, 0, 0x104, 0, 2),
+ MUX(IMX8MP_SYS_PLL3_REF_SEL, "sys_pll3_ref_sel", pll_ref_p, 0, 0x114, 0, 2),
+
+ FRAC_PLL(IMX8MP_AUDIO_PLL1, "audio_pll1", "audio_pll1_ref_sel", 0x00),
+ FRAC_PLL(IMX8MP_AUDIO_PLL2, "audio_pll2", "audio_pll2_ref_sel", 0x14),
+ FRAC_PLL(IMX8MP_VIDEO_PLL1, "video_pll1", "video_pll1_ref_sel", 0x28),
+ FRAC_PLL(IMX8MP_DRAM_PLL, "dram_pll", "dram_pll_ref_sel", 0x50),
+ FRAC_PLL(IMX8MP_GPU_PLL, "gpu_pll", "gpu_pll_ref_sel", 0x64),
+ FRAC_PLL(IMX8MP_VPU_PLL, "vpu_pll", "vpu_pll_ref_sel", 0x74),
+ FRAC_PLL(IMX8MP_ARM_PLL, "arm_pll", "arm_pll_ref_sel", 0x84),
+ FRAC_PLL(IMX8MP_SYS_PLL1, "sys_pll1", "sys_pll1_ref_sel", 0x94),
+ FRAC_PLL(IMX8MP_SYS_PLL2, "sys_pll2", "sys_pll2_ref_sel", 0x104),
+ FRAC_PLL(IMX8MP_SYS_PLL3, "sys_pll3", "sys_pll3_ref_sel", 0x114),
+
+ MUX(IMX8MP_AUDIO_PLL1_BYPASS, "audio_pll1_bypass", audio_pll1_bypass_p, 1, 0x00, 16, 1),
+ MUX(IMX8MP_AUDIO_PLL2_BYPASS, "audio_pll2_bypass", audio_pll2_bypass_p, 1, 0x14, 16, 1),
+ MUX(IMX8MP_VIDEO_PLL1_BYPASS, "video_pll1_bypass", video_pll1_bypass_p, 1, 0x28, 16, 1),
+ MUX(IMX8MP_DRAM_PLL_BYPASS, "dram_pll_bypass", dram_pll_bypass_p, 1, 0x50, 16, 1),
+ MUX(IMX8MP_GPU_PLL_BYPASS, "gpu_pll_bypass", gpu_pll_bypass_p, 1, 0x64, 28, 1),
+ MUX(IMX8MP_VPU_PLL_BYPASS, "vpu_pll_bypass", vpu_pll_bypass_p, 1, 0x74, 28, 1),
+ MUX(IMX8MP_ARM_PLL_BYPASS, "arm_pll_bypass", arm_pll_bypass_p, 1, 0x84, 28, 1),
+ MUX(IMX8MP_SYS_PLL1_BYPASS, "sys_pll1_bypass", sys_pll1_bypass_p, 1, 0x94, 28, 1),
+ MUX(IMX8MP_SYS_PLL2_BYPASS, "sys_pll2_bypass", sys_pll2_bypass_p, 1, 0x104, 28, 1),
+ MUX(IMX8MP_SYS_PLL3_BYPASS, "sys_pll3_bypass", sys_pll3_bypass_p, 1, 0x114, 28, 1),
+
+ GATE(IMX8MP_AUDIO_PLL1_OUT, "audio_pll1_out", "audio_pll1_bypass", 0x00, 13),
+ GATE(IMX8MP_AUDIO_PLL2_OUT, "audio_pll2_out", "audio_pll2_bypass", 0x14, 13),
+ GATE(IMX8MP_VIDEO_PLL1_OUT, "video_pll1_out", "video_pll1_bypass", 0x28, 13),
+ GATE(IMX8MP_DRAM_PLL_OUT, "dram_pll_out", "dram_pll_bypass", 0x50, 13),
+ GATE(IMX8MP_GPU_PLL_OUT, "gpu_pll_out", "gpu_pll_bypass", 0x64, 11),
+ GATE(IMX8MP_VPU_PLL_OUT, "vpu_pll_out", "vpu_pll_bypass", 0x74, 11),
+ GATE(IMX8MP_ARM_PLL_OUT, "arm_pll_out", "arm_pll_bypass", 0x84, 11),
+ GATE(IMX8MP_SYS_PLL1_OUT, "sys_pll1_out", "sys_pll1_bypass", 0x94, 11),
+ GATE(IMX8MP_SYS_PLL2_OUT, "sys_pll2_out", "sys_pll2_bypass", 0x104, 11),
+ GATE(IMX8MP_SYS_PLL3_OUT, "sys_pll3_out", "sys_pll3_bypass", 0x114, 11),
+
+ FFACT(IMX8MP_SYS_PLL1_40M, "sys_pll1_40m", "sys_pll1_out", 1, 20),
+ FFACT(IMX8MP_SYS_PLL1_80M, "sys_pll1_80m", "sys_pll1_out", 1, 10),
+ FFACT(IMX8MP_SYS_PLL1_100M, "sys_pll1_100m", "sys_pll1_out", 1, 8),
+ FFACT(IMX8MP_SYS_PLL1_133M, "sys_pll1_133m", "sys_pll1_out", 1, 6),
+ FFACT(IMX8MP_SYS_PLL1_160M, "sys_pll1_160m", "sys_pll1_out", 1, 5),
+ FFACT(IMX8MP_SYS_PLL1_200M, "sys_pll1_200m", "sys_pll1_out", 1, 4),
+ FFACT(IMX8MP_SYS_PLL1_266M, "sys_pll1_266m", "sys_pll1_out", 1, 3),
+ FFACT(IMX8MP_SYS_PLL1_400M, "sys_pll1_400m", "sys_pll1_out", 1, 2),
+ FFACT(IMX8MP_SYS_PLL1_800M, "sys_pll1_800m", "sys_pll1_out", 1, 1),
+
+ FFACT(IMX8MP_SYS_PLL2_50M, "sys_pll2_50m", "sys_pll2_out", 1, 20),
+ FFACT(IMX8MP_SYS_PLL2_100M, "sys_pll2_100m", "sys_pll2_out", 1, 10),
+ FFACT(IMX8MP_SYS_PLL2_125M, "sys_pll2_125m", "sys_pll2_out", 1, 8),
+ FFACT(IMX8MP_SYS_PLL2_166M, "sys_pll2_166m", "sys_pll2_out", 1, 6),
+ FFACT(IMX8MP_SYS_PLL2_200M, "sys_pll2_200m", "sys_pll2_out", 1, 5),
+ FFACT(IMX8MP_SYS_PLL2_250M, "sys_pll2_250m", "sys_pll2_out", 1, 4),
+ FFACT(IMX8MP_SYS_PLL2_333M, "sys_pll2_333m", "sys_pll2_out", 1, 3),
+ FFACT(IMX8MP_SYS_PLL2_500M, "sys_pll2_500m", "sys_pll2_out", 1, 2),
+ FFACT(IMX8MP_SYS_PLL2_1000M, "sys_pll2_1000m", "sys_pll2_out", 1, 1),
+
+ MUX(IMX8MP_CLK_CLKOUT1_SEL, "clkout1_sel", clkout_p, 0x128, 4, 4, 1),
+ DIV(IMX8MP_CLK_CLKOUT1_DIV, "clkout1_div", "clkout1_sel", 0x128, 0, 4),
+ GATE(IMX8MP_CLK_CLKOUT1, "clkout1", "clkout1_div", 0x128, 8),
+
+ MUX(IMX8MP_CLK_CLKOUT2_SEL, "clkout2_sel", clkout_p, 0x128, 20, 4, 1),
+ DIV(IMX8MP_CLK_CLKOUT2_DIV, "clkout2_div", "clkout2_sel", 0x128, 16, 4),
+ GATE(IMX8MP_CLK_CLKOUT2, "clkout2", "clkout2_div", 0x128, 24),
+
+ COMPOSITE(IMX8MP_CLK_A53_DIV, "arm_a53_div", a53_p, 0x8000, 0),
+ COMPOSITE(IMX8MP_CLK_M7_CORE, "m7_core", m7_p, 0x8080, 0),
+ COMPOSITE(IMX8MP_CLK_ML_CORE, "ml_core", ml_p, 0x8100, 0),
+ COMPOSITE(IMX8MP_CLK_GPU3D_CORE, "gpu3d_core", gpu_p, 0x8180, 0),
+ COMPOSITE(IMX8MP_CLK_GPU3D_SHADER_CORE, "gpu3d_shader", gpu_p, 0x8200, 0),
+ COMPOSITE(IMX8MP_CLK_GPU2D_CORE, "gpu2d_core", gpu_p, 0x8280, 0),
+ COMPOSITE(IMX8MP_CLK_AUDIO_AXI, "audio_axi", audio_axi_p, 0x8300, 0),
+ COMPOSITE(IMX8MP_CLK_HSIO_AXI, "hsio_axi", hsio_axi_p, 0x8380, 0),
+ COMPOSITE(IMX8MP_CLK_MEDIA_ISP, "media_isp", media_isp_p, 0x8400, 0),
+ COMPOSITE(IMX8MP_CLK_NAND_USDHC_BUS, "nand_usdhc_bus", usdhc_nand_p, 0x8900, 1),
+
+ MUX(IMX8MP_CLK_A53_CORE, "arm_a53_core", a53_core_p, 0x9880, 24, 1, 1),
+
+ COMPOSITE(IMX8MP_CLK_MAIN_AXI, "main_axi", main_axi_p, 0x8800, 1),
+ COMPOSITE(IMX8MP_CLK_ENET_AXI, "enet_axi", enet_axi_p, 0x8880, 1),
+ COMPOSITE(IMX8MP_CLK_VPU_BUS, "vpu_bus", vpu_bus_p, 0x8980, 1),
+ COMPOSITE(IMX8MP_CLK_MEDIA_AXI, "media_axi", media_axi_p, 0x8a00, 1),
+ COMPOSITE(IMX8MP_CLK_MEDIA_APB, "media_apb", media_apb_p, 0x8a80, 1),
+ COMPOSITE(IMX8MP_CLK_HDMI_APB, "hdmi_apb", media_apb_p, 0x8b00, 1),
+ COMPOSITE(IMX8MP_CLK_HDMI_AXI, "hdmi_axi", media_axi_p, 0x8b80, 1),
+ COMPOSITE(IMX8MP_CLK_GPU_AXI, "gpu_axi", gpu_axi_p, 0x8c00, 1),
+ COMPOSITE(IMX8MP_CLK_GPU_AHB, "gpu_ahb", gpu_ahb_p, 0x8c80, 1),
+ COMPOSITE(IMX8MP_CLK_NOC, "noc", noc_p, 0x8d00, 1),
+ COMPOSITE(IMX8MP_CLK_NOC_IO, "noc_io", noc_io_p, 0x8d80, 1),
+ COMPOSITE(IMX8MP_CLK_ML_AXI, "ml_axi", ml_axi_p, 0x8e00, 1),
+ COMPOSITE(IMX8MP_CLK_ML_AHB, "ml_ahb", ml_ahb_p, 0x8e80, 1),
+
+ COMPOSITE(IMX8MP_CLK_AHB, "ahb_root", ahb_p, 0x9000, 1),
+ COMPOSITE(IMX8MP_CLK_AUDIO_AHB, "audio_ahb", audio_ahb_p, 0x9100, 1),
+ COMPOSITE(IMX8MP_CLK_MIPI_DSI_ESC_RX, "mipi_dsi_esc_rx", mipi_dsi_esc_rx_p, 0x9200, 1),
+ COMPOSITE(IMX8MP_CLK_MEDIA_DISP2_PIX, "media_disp2_pix", media_disp_pix_p, 0x9300, 1),
+
+ DIV(IMX8MP_CLK_IPG_ROOT, "ipg_root", "ahb_root", 0x9080, 0, 1),
+
+ COMPOSITE(IMX8MP_CLK_DRAM_ALT, "dram_alt", dram_alt_p, 0xa000, 0),
+ COMPOSITE(IMX8MP_CLK_DRAM_APB, "dram_apb", dram_apb_p, 0xa080, 0),
+
+ COMPOSITE(IMX8MP_CLK_VPU_G1, "vpu_g1", vpu_g_p, 0xa100, 0),
+ COMPOSITE(IMX8MP_CLK_VPU_G2, "vpu_g2", vpu_g_p, 0xa180, 0),
+
+ COMPOSITE(IMX8MP_CLK_CAN1, "can1", can_p, 0xa200, 0),
+ COMPOSITE(IMX8MP_CLK_CAN2, "can2", can_p, 0xa280, 0),
+
+ COMPOSITE(IMX8MP_CLK_PCIE_AUX, "pcie_aux", pcie_aux_p, 0xa400, 0),
+
+ COMPOSITE(IMX8MP_CLK_SAI1, "sai1", sai1_p, 0xa580, 0),
+ COMPOSITE(IMX8MP_CLK_SAI2, "sai2", sai2_p, 0xa600, 0),
+ COMPOSITE(IMX8MP_CLK_SAI3, "sai3", sai3_p, 0xa680, 0),
+ COMPOSITE(IMX8MP_CLK_SAI5, "sai5", sai5_p, 0xa780, 0),
+ COMPOSITE(IMX8MP_CLK_SAI6, "sai6", sai6_p, 0xa800, 0),
+ COMPOSITE(IMX8MP_CLK_SAI7, "sai7", sai7_p, 0xc300, 0),
+
+ COMPOSITE(IMX8MP_CLK_ENET_QOS, "enet_qos", enet_qos_p, 0xa880, 0),
+ COMPOSITE(IMX8MP_CLK_ENET_QOS_TIMER, "enet_qos_timer", enet_qos_timer_p, 0xa900, 0),
+ COMPOSITE(IMX8MP_CLK_ENET_REF, "enet_ref", enet_ref_p, 0xa980, 0),
+ COMPOSITE(IMX8MP_CLK_ENET_TIMER, "enet_timer", enet_timer_p, 0xaa00, 0),
+ COMPOSITE(IMX8MP_CLK_ENET_PHY_REF, "enet_phy_ref", enet_phy_ref_p, 0xaa80, 0),
+
+ COMPOSITE(IMX8MP_CLK_NAND, "nand", nand_p, 0xab00, 0),
+ COMPOSITE(IMX8MP_CLK_QSPI, "qspi", qspi_p, 0xab80, 0),
+
+ COMPOSITE(IMX8MP_CLK_USDHC1, "usdhc1", usdhc_p, 0xac00, 0),
+ COMPOSITE(IMX8MP_CLK_USDHC2, "usdhc2", usdhc_p, 0xac80, 0),
+ COMPOSITE(IMX8MP_CLK_USDHC3, "usdhc3", usdhc_p, 0xbc80, 0),
+
+ COMPOSITE(IMX8MP_CLK_I2C1, "i2c1", i2c_p, 0xad00, 0),
+ COMPOSITE(IMX8MP_CLK_I2C2, "i2c2", i2c_p, 0xad80, 0),
+ COMPOSITE(IMX8MP_CLK_I2C3, "i2c3", i2c_p, 0xae00, 0),
+ COMPOSITE(IMX8MP_CLK_I2C4, "i2c4", i2c_p, 0xae80, 0),
+ COMPOSITE(IMX8MP_CLK_I2C5, "i2c5", i2c_p, 0xa480, 0),
+ COMPOSITE(IMX8MP_CLK_I2C6, "i2c6", i2c_p, 0xa500, 0),
+
+ COMPOSITE(IMX8MP_CLK_UART1, "uart1", uart_p, 0xaf00, 0),
+ COMPOSITE(IMX8MP_CLK_UART2, "uart2", uart_p, 0xaf80, 0),
+ COMPOSITE(IMX8MP_CLK_UART3, "uart3", uart_p, 0xb000, 0),
+ COMPOSITE(IMX8MP_CLK_UART4, "uart4", uart_p, 0xb080, 0),
+
+ COMPOSITE(IMX8MP_CLK_USB_CORE_REF, "usb_core_ref", usb_core_ref_p, 0xb100, 0),
+ COMPOSITE(IMX8MP_CLK_USB_PHY_REF, "usb_phy_ref", usb_phy_ref_p, 0xb180, 0),
+
+ COMPOSITE(IMX8MP_CLK_GIC, "gic", gic_p, 0xb200, 0),
+
+ COMPOSITE(IMX8MP_CLK_ECSPI1, "ecspi1", ecspi_p, 0xb280, 0),
+ COMPOSITE(IMX8MP_CLK_ECSPI2, "ecspi2", ecspi_p, 0xb300, 0),
+ COMPOSITE(IMX8MP_CLK_ECSPI3, "ecspi3", ecspi_p, 0xc180, 0),
+
+ COMPOSITE(IMX8MP_CLK_PWM1, "pwm1", pwm_p, 0xb380, 0),
+ COMPOSITE(IMX8MP_CLK_PWM2, "pwm2", pwm_p, 0xb400, 0),
+ COMPOSITE(IMX8MP_CLK_PWM3, "pwm3", pwm_p, 0xb480, 0),
+ COMPOSITE(IMX8MP_CLK_PWM4, "pwm4", pwm_p, 0xb500, 0),
+
+ COMPOSITE(IMX8MP_CLK_GPT1, "gpt1", gpt_p, 0xb580, 0),
+ COMPOSITE(IMX8MP_CLK_GPT2, "gpt2", gpt_p, 0xb600, 0),
+ COMPOSITE(IMX8MP_CLK_GPT3, "gpt3", gpt_p, 0xb680, 0),
+ COMPOSITE(IMX8MP_CLK_GPT4, "gpt4", gpt_p, 0xb700, 0),
+ COMPOSITE(IMX8MP_CLK_GPT5, "gpt5", gpt_p, 0xb780, 0),
+ COMPOSITE(IMX8MP_CLK_GPT6, "gpt6", gpt_p, 0xb800, 0),
+
+ COMPOSITE(IMX8MP_CLK_WDOG, "wdog", wdog_p, 0xb900, 0),
+ COMPOSITE(IMX8MP_CLK_WRCLK, "wrclk", wrclk_p, 0xb980, 0),
+
+ COMPOSITE(IMX8MP_CLK_IPP_DO_CLKO1, "ipp_do_clko1", ipp_do_clko1_p, 0xba00, 0),
+ COMPOSITE(IMX8MP_CLK_IPP_DO_CLKO2, "ipp_do_clko2", ipp_do_clko2_p, 0xba80, 0),
+
+ COMPOSITE(IMX8MP_CLK_HDMI_FDCC_TST, "hdmi_fdcc_tst", hdmi_fdcc_tst_p, 0xbb00, 0),
+ COMPOSITE(IMX8MP_CLK_HDMI_24M, "hdmi_24m", hdmi_24m_p, 0xbb80, 0),
+ COMPOSITE(IMX8MP_CLK_HDMI_REF_266M, "hdmi_ref_266m", hdmi_ref_266m_p, 0xbc00, 0),
+ COMPOSITE(IMX8MP_CLK_MEDIA_CAM1_PIX, "media_cam1_pix", media_cam1_pix_p, 0xbd00, 0),
+ COMPOSITE(IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, "media_mipi_phy1_ref", media_mipi_phy1_ref_p, 0xbd80, 0),
+ COMPOSITE(IMX8MP_CLK_MEDIA_DISP1_PIX, "media_disp1_pix", media_disp_pix_p, 0xbe00, 0),
+ COMPOSITE(IMX8MP_CLK_MEDIA_CAM2_PIX, "media_cam2_pix", media_cam2_pix_p, 0xbe80, 0),
+ COMPOSITE(IMX8MP_CLK_MEDIA_LDB, "media_ldb", media_ldb_p, 0xbf00, 0),
+
+ COMPOSITE(IMX8MP_CLK_MEMREPAIR, "mem_repair", memrepair_p, 0xbf80, 0),
+
+ COMPOSITE(IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, "media_mipi_test_byte", media_mipi_test_byte_p, 0xc100, 0),
+ COMPOSITE(IMX8MP_CLK_PDM, "pdm", pdm_p, 0xc200, 0),
+ COMPOSITE(IMX8MP_CLK_VPU_VC8000E, "vpu_vc8000e", vpu_vc8000e_p, 0xc280, 0),
+
+ FFACT(IMX8MP_CLK_DRAM_ALT_ROOT, "dram_alt_root", "dram_alt", 1, 4),
+
+ MUX(IMX8MP_CLK_DRAM_CORE, "dram_core_clk", dram_core_p, 0x9800, 24, 1, 1),
+ ROOT_GATE(IMX8MP_CLK_DRAM1_ROOT, "dram1_root_clk", "dram_core_clk", 0x4050),
+
+ ROOT_GATE(IMX8MP_CLK_ECSPI1_ROOT, "ecspi1_root_clk", "ecspi1", 0x4070),
+ ROOT_GATE(IMX8MP_CLK_ECSPI2_ROOT, "ecspi2_root_clk", "ecspi2", 0x4080),
+ ROOT_GATE(IMX8MP_CLK_ECSPI3_ROOT, "ecspi3_root_clk", "ecspi3", 0x4090),
+ ROOT_GATE(IMX8MP_CLK_ENET1_ROOT, "enet1_root_clk", "enet_axi", 0x40a0),
+
+ ROOT_GATE(IMX8MP_CLK_GPIO1_ROOT, "gpio1_root_clk", "ipg_root", 0x40b0),
+ ROOT_GATE(IMX8MP_CLK_GPIO2_ROOT, "gpio2_root_clk", "ipg_root", 0x40c0),
+ ROOT_GATE(IMX8MP_CLK_GPIO3_ROOT, "gpio3_root_clk", "ipg_root", 0x40d0),
+ ROOT_GATE(IMX8MP_CLK_GPIO4_ROOT, "gpio4_root_clk", "ipg_root", 0x40e0),
+ ROOT_GATE(IMX8MP_CLK_GPIO5_ROOT, "gpio5_root_clk", "ipg_root", 0x40f0),
+
+ ROOT_GATE(IMX8MP_CLK_GPT1_ROOT, "gpt1_root_clk", "gpt1", 0x4100),
+ ROOT_GATE(IMX8MP_CLK_GPT2_ROOT, "gpt2_root_clk", "gpt2", 0x4110),
+ ROOT_GATE(IMX8MP_CLK_GPT3_ROOT, "gpt3_root_clk", "gpt3", 0x4120),
+ ROOT_GATE(IMX8MP_CLK_GPT4_ROOT, "gpt4_root_clk", "gpt4", 0x4130),
+ ROOT_GATE(IMX8MP_CLK_GPT5_ROOT, "gpt5_root_clk", "gpt5", 0x4140),
+ ROOT_GATE(IMX8MP_CLK_GPT6_ROOT, "gpt6_root_clk", "gpt6", 0x4150),
+
+ ROOT_GATE(IMX8MP_CLK_I2C1_ROOT, "i2c1_root_clk", "i2c1", 0x4170),
+ ROOT_GATE(IMX8MP_CLK_I2C2_ROOT, "i2c2_root_clk", "i2c2", 0x4180),
+ ROOT_GATE(IMX8MP_CLK_I2C3_ROOT, "i2c3_root_clk", "i2c3", 0x4190),
+ ROOT_GATE(IMX8MP_CLK_I2C4_ROOT, "i2c4_root_clk", "i2c4", 0x41a0),
+ ROOT_GATE(IMX8MP_CLK_I2C5_ROOT, "i2c5_root_clk", "i2c5", 0x4330),
+ ROOT_GATE(IMX8MP_CLK_I2C6_ROOT, "i2c6_root_clk", "i2c6", 0x4340),
+
+ ROOT_GATE(IMX8MP_CLK_MU_ROOT, "mu_root_clk", "ipg_root", 0x4210),
+ ROOT_GATE(IMX8MP_CLK_OCOTP_ROOT, "ocotp_root_clk", "ipg_root", 0x4220),
+ ROOT_GATE(IMX8MP_CLK_PCIE_ROOT, "pcie_root_clk", "pcie_aux", 0x4250),
+
+ ROOT_GATE(IMX8MP_CLK_PWM1_ROOT, "pwm1_root_clk", "pwm1", 0x4280),
+ ROOT_GATE(IMX8MP_CLK_PWM2_ROOT, "pwm2_root_clk", "pwm2", 0x4290),
+ ROOT_GATE(IMX8MP_CLK_PWM3_ROOT, "pwm3_root_clk", "pwm3", 0x42a0),
+ ROOT_GATE(IMX8MP_CLK_PWM4_ROOT, "pwm4_root_clk", "pwm4", 0x42b0),
+ ROOT_GATE(IMX8MP_CLK_QOS_ROOT, "qos_root_clk", "ipg_root", 0x42c0),
+ ROOT_GATE(IMX8MP_CLK_QOS_ENET_ROOT, "qos_enet_root_clk", "ipg_root", 0x42e0),
+ ROOT_GATE(IMX8MP_CLK_QSPI_ROOT, "qspi_root_clk", "qspi", 0x42f0),
+
+ ROOT_GATE(IMX8MP_CLK_NAND_ROOT, "nand_root_clk", "nand", 0x4300),
+ ROOT_GATE(IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK, "nand_usdhc_rawnand_clk", "nand_usdhc_bus", 0x4300),
+
+ ROOT_GATE(IMX8MP_CLK_CAN1_ROOT, "can1_root_clk", "can1", 0x4350),
+ ROOT_GATE(IMX8MP_CLK_CAN2_ROOT, "can2_root_clk", "can2", 0x4360),
+
+ ROOT_GATE(IMX8MP_CLK_SDMA1_ROOT, "sdma1_root_clk", "ipg_root", 0x43a0),
+ ROOT_GATE(IMX8MP_CLK_SIM_ENET_ROOT, "sim_enet_root_clk", "enet_axi", 0x4400),
+ ROOT_GATE(IMX8MP_CLK_ENET_QOS_ROOT, "enet_qos_root_clk", "sim_enet_root_clk", 0x43b0),
+ ROOT_GATE(IMX8MP_CLK_GPU2D_ROOT, "gpu2d_root_clk", "gpu2d_core", 0x4450),
+ ROOT_GATE(IMX8MP_CLK_GPU3D_ROOT, "gpu3d_root_clk", "gpu3d_core", 0x4460),
+
+ ROOT_GATE(IMX8MP_CLK_UART1_ROOT, "uart1_root_clk", "uart1", 0x4490),
+ ROOT_GATE(IMX8MP_CLK_UART2_ROOT, "uart2_root_clk", "uart2", 0x44a0),
+ ROOT_GATE(IMX8MP_CLK_UART3_ROOT, "uart3_root_clk", "uart3", 0x44b0),
+ ROOT_GATE(IMX8MP_CLK_UART4_ROOT, "uart4_root_clk", "uart4", 0x44c0),
+
+ ROOT_GATE(IMX8MP_CLK_USB_ROOT, "usb_root_clk", "hsio_axi", 0x44d0),
+ ROOT_GATE(IMX8MP_CLK_USB_SUSP, "usb_suspend_clk", "osc_32k", 0x44d0),
+ ROOT_GATE(IMX8MP_CLK_USB_PHY_ROOT, "usb_phy_root_clk", "usb_phy_ref", 0x44f0),
+ ROOT_GATE(IMX8MP_CLK_USDHC1_ROOT, "usdhc1_root_clk", "usdhc1", 0x4510),
+ ROOT_GATE(IMX8MP_CLK_USDHC2_ROOT, "usdhc2_root_clk", "usdhc2", 0x4520),
+ ROOT_GATE(IMX8MP_CLK_USDHC3_ROOT, "usdhc3_root_clk", "usdhc3", 0x45e0),
+
+ ROOT_GATE(IMX8MP_CLK_HSIO_ROOT, "hsio_root_clk", "ipg_root", 0x45c0),
+
+ ROOT_GATE(IMX8MP_CLK_WDOG1_ROOT, "wdog1_root_clk", "wdog", 0x4530),
+ ROOT_GATE(IMX8MP_CLK_WDOG2_ROOT, "wdog2_root_clk", "wdog", 0x4540),
+ ROOT_GATE(IMX8MP_CLK_WDOG3_ROOT, "wdog3_root_clk", "wdog", 0x4550),
+ ROOT_GATE(IMX8MP_CLK_VPU_G1_ROOT, "vpu_g1_root_clk", "vpu_g1", 0x4560),
+ ROOT_GATE(IMX8MP_CLK_GPU_ROOT, "gpu_root_clk", "gpu_axi", 0x4570),
+ ROOT_GATE(IMX8MP_CLK_VPU_VC8KE_ROOT, "vpu_vc8ke_root_clk", "vpu_vc8000e", 0x4590),
+ ROOT_GATE(IMX8MP_CLK_VPU_G2_ROOT, "vpu_g2_root_clk", "vpu_g2", 0x45a0),
+ ROOT_GATE(IMX8MP_CLK_NPU_ROOT, "npu_root_clk", "ml_core", 0x45b0),
+
+ ROOT_GATE(IMX8MP_CLK_MEDIA_APB_ROOT, "media_apb_root_clk", "media_apb", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_AXI_ROOT, "media_axi_root_clk", "media_axi", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT, "media_cam1_pix_root_clk", "media_cam1_pix", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT, "media_cam2_pix_root_clk", "media_cam2_pix", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT, "media_disp1_pix_root_clk", "media_disp1_pix", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT, "media_disp2_pix_root_clk", "media_disp2_pix", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT, "media_mipi_phy1_ref_root", "media_mipi_phy1_ref", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_LDB_ROOT, "media_ldb_root_clk", "media_ldb", 0x45d0),
+ ROOT_GATE(IMX8MP_CLK_MEDIA_ISP_ROOT, "media_isp_root_clk", "media_isp", 0x45d0),
+
+ ROOT_GATE(IMX8MP_CLK_HDMI_ROOT, "hdmi_root_clk", "hdmi_axi", 0x45f0),
+ ROOT_GATE(IMX8MP_CLK_TSENSOR_ROOT, "tsensor_root_clk", "ipg_root", 0x4620),
+ ROOT_GATE(IMX8MP_CLK_VPU_ROOT, "vpu_root_clk", "vpu_bus", 0x4630),
+
+ ROOT_GATE(IMX8MP_CLK_AUDIO_AHB_ROOT, "audio_ahb_root", "audio_ahb", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_AUDIO_AXI_ROOT, "audio_axi_root", "audio_axi", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_SAI1_ROOT, "sai1_root", "sai1", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_SAI2_ROOT, "sai2_root", "sai2", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_SAI3_ROOT, "sai3_root", "sai3", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_SAI5_ROOT, "sai5_root", "sai5", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_SAI6_ROOT, "sai6_root", "sai6", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_SAI7_ROOT, "sai7_root", "sai7", 0x4650),
+ ROOT_GATE(IMX8MP_CLK_PDM_ROOT, "pdm_root", "pdm", 0x4650),
+};
+
+static int
+imx8mp_ccm_attach(device_t dev)
+{
+ struct imx_ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ sc->clks = imx8mp_clks;
+ sc->nclks = nitems(imx8mp_clks);
+
+ return (imx_ccm_attach(dev));
+}
+
+static int
+imx8mp_ccm_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "fsl,imx8mp-ccm") == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Freescale i.MX 8M Plus Clock Control Module");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static device_method_t imx8mp_ccm_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, imx8mp_ccm_probe),
+ DEVMETHOD(device_attach, imx8mp_ccm_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(imx8mp_ccm, imx8mp_ccm_driver, imx8mp_ccm_methods,
+ sizeof(struct imx_ccm_softc), imx_ccm_driver);
+
+EARLY_DRIVER_MODULE(imx8mp_ccm, simplebus, imx8mp_ccm_driver, 0, 0,
+ BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
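A hedged consumer-side sketch of how a peripheral driver would pick up
one of the clocks this CCM exports, using the clk framework's OFW
helpers; the "per" clock name and the include path are assumptions for
illustration:

#include <dev/extres/clk/clk.h>

static int
example_clk_setup(device_t dev)
{
	clk_t clk;
	uint64_t freq;
	int error;

	/* Resolved through the node's "clocks"/"clock-names" properties */
	error = clk_get_by_ofw_name(dev, 0, "per", &clk);
	if (error != 0)
		return (error);
	if ((error = clk_enable(clk)) != 0)
		return (error);
	if ((error = clk_get_freq(clk, &freq)) == 0)
		device_printf(dev, "per clock at %ju Hz\n", (uintmax_t)freq);
	return (error);
}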
diff --git a/sys/arm64/freescale/imx/imx8mp_ccm.h b/sys/arm64/freescale/imx/imx8mp_ccm.h
new file mode 100644
index 000000000000..7f0bf5e979c1
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx8mp_ccm.h
@@ -0,0 +1,456 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Tom Jones <thj@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ */
+
+#ifndef __IMX8MP_CCM_H__
+#define __IMX8MP_CCM_H__
+
+#define IMX8MP_CLK_DUMMY 0
+#define IMX8MP_CLK_32K 1
+#define IMX8MP_CLK_24M 2
+#define IMX8MP_OSC_HDMI_CLK 3
+#define IMX8MP_CLK_EXT1 4
+#define IMX8MP_CLK_EXT2 5
+#define IMX8MP_CLK_EXT3 6
+#define IMX8MP_CLK_EXT4 7
+
+#define IMX8MP_AUDIO_PLL1_REF_SEL 8
+#define IMX8MP_AUDIO_PLL2_REF_SEL 9
+
+#define IMX8MP_VIDEO_PLL1_REF_SEL 10
+
+#define IMX8MP_DRAM_PLL_REF_SEL 11
+#define IMX8MP_GPU_PLL_REF_SEL 12
+#define IMX8MP_VPU_PLL_REF_SEL 13
+#define IMX8MP_ARM_PLL_REF_SEL 14
+
+#define IMX8MP_SYS_PLL1_REF_SEL 15
+#define IMX8MP_SYS_PLL2_REF_SEL 16
+#define IMX8MP_SYS_PLL3_REF_SEL 17
+
+#define IMX8MP_AUDIO_PLL1 18
+#define IMX8MP_AUDIO_PLL2 19
+#define IMX8MP_VIDEO_PLL1 20
+#define IMX8MP_DRAM_PLL 21
+#define IMX8MP_GPU_PLL 22
+#define IMX8MP_VPU_PLL 23
+#define IMX8MP_ARM_PLL 24
+
+#define IMX8MP_SYS_PLL1 25
+#define IMX8MP_SYS_PLL2 26
+#define IMX8MP_SYS_PLL3 27
+
+#define IMX8MP_AUDIO_PLL1_BYPASS 28
+#define IMX8MP_AUDIO_PLL2_BYPASS 29
+#define IMX8MP_VIDEO_PLL1_BYPASS 30
+#define IMX8MP_DRAM_PLL_BYPASS 31
+#define IMX8MP_GPU_PLL_BYPASS 32
+#define IMX8MP_VPU_PLL_BYPASS 33
+#define IMX8MP_ARM_PLL_BYPASS 34
+#define IMX8MP_SYS_PLL1_BYPASS 35
+#define IMX8MP_SYS_PLL2_BYPASS 36
+#define IMX8MP_SYS_PLL3_BYPASS 37
+
+#define IMX8MP_AUDIO_PLL1_OUT 38
+#define IMX8MP_AUDIO_PLL2_OUT 39
+#define IMX8MP_VIDEO_PLL1_OUT 40
+#define IMX8MP_DRAM_PLL_OUT 41
+#define IMX8MP_GPU_PLL_OUT 42
+#define IMX8MP_VPU_PLL_OUT 43
+#define IMX8MP_ARM_PLL_OUT 44
+
+#define IMX8MP_SYS_PLL1_OUT 45
+#define IMX8MP_SYS_PLL2_OUT 46
+#define IMX8MP_SYS_PLL3_OUT 47
+
+#define IMX8MP_SYS_PLL1_40M 48
+#define IMX8MP_SYS_PLL1_80M 49
+#define IMX8MP_SYS_PLL1_100M 50
+#define IMX8MP_SYS_PLL1_133M 51
+#define IMX8MP_SYS_PLL1_160M 52
+#define IMX8MP_SYS_PLL1_200M 53
+#define IMX8MP_SYS_PLL1_266M 54
+#define IMX8MP_SYS_PLL1_400M 55
+#define IMX8MP_SYS_PLL1_800M 56
+#define IMX8MP_SYS_PLL2_50M 57
+#define IMX8MP_SYS_PLL2_100M 58
+#define IMX8MP_SYS_PLL2_125M 59
+#define IMX8MP_SYS_PLL2_166M 60
+#define IMX8MP_SYS_PLL2_200M 61
+#define IMX8MP_SYS_PLL2_250M 62
+#define IMX8MP_SYS_PLL2_333M 63
+#define IMX8MP_SYS_PLL2_500M 64
+#define IMX8MP_SYS_PLL2_1000M 65
+
+#define IMX8MP_CLK_A53_SRC 66
+#define IMX8MP_CLK_M7_SRC 67
+#define IMX8MP_CLK_ML_SRC 68
+#define IMX8MP_CLK_GPU3D_CORE_SRC 69
+#define IMX8MP_CLK_GPU3D_SHADER_SRC 70
+#define IMX8MP_CLK_GPU2D_SRC 71
+#define IMX8MP_CLK_AUDIO_AXI_SRC 72
+#define IMX8MP_CLK_HSIO_AXI_SRC 73
+#define IMX8MP_CLK_MEDIA_ISP_SRC 74
+
+#define IMX8MP_CLK_A53_CG 75
+#define IMX8MP_CLK_M4_CG 76
+#define IMX8MP_CLK_ML_CG 77
+#define IMX8MP_CLK_GPU3D_CORE_CG 78
+#define IMX8MP_CLK_GPU3D_SHADER_CG 79
+#define IMX8MP_CLK_GPU2D_CG 80
+#define IMX8MP_CLK_AUDIO_AXI_CG 81
+#define IMX8MP_CLK_HSIO_AXI_CG 82
+#define IMX8MP_CLK_MEDIA_ISP_CG 83
+
+#define IMX8MP_CLK_A53_DIV 84
+#define IMX8MP_CLK_M7_DIV 85
+#define IMX8MP_CLK_ML_DIV 86
+#define IMX8MP_CLK_GPU3D_CORE_DIV 87
+#define IMX8MP_CLK_GPU3D_SHADER_DIV 88
+#define IMX8MP_CLK_GPU2D_DIV 89
+#define IMX8MP_CLK_AUDIO_AXI_DIV 90
+#define IMX8MP_CLK_HSIO_AXI_DIV 91
+#define IMX8MP_CLK_MEDIA_ISP_DIV 92
+
+#define IMX8MP_CLK_MAIN_AXI 93
+#define IMX8MP_CLK_ENET_AXI 94
+#define IMX8MP_CLK_NAND_USDHC_BUS 95
+#define IMX8MP_CLK_VPU_BUS 96
+#define IMX8MP_CLK_MEDIA_AXI 97
+#define IMX8MP_CLK_MEDIA_APB 98
+#define IMX8MP_CLK_HDMI_APB 99
+#define IMX8MP_CLK_HDMI_AXI 100
+#define IMX8MP_CLK_GPU_AXI 101
+#define IMX8MP_CLK_GPU_AHB 102
+#define IMX8MP_CLK_NOC 103
+#define IMX8MP_CLK_NOC_IO 104
+#define IMX8MP_CLK_ML_AXI 105
+#define IMX8MP_CLK_ML_AHB 106
+#define IMX8MP_CLK_AHB 107
+#define IMX8MP_CLK_AUDIO_AHB 108
+#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
+#define IMX8MP_CLK_IPG_ROOT 110
+#define IMX8MP_CLK_DRAM_ALT 112
+#define IMX8MP_CLK_DRAM_APB 113
+
+#define IMX8MP_CLK_VPU_G1 114
+#define IMX8MP_CLK_VPU_G2 115
+
+#define IMX8MP_CLK_CAN1 116
+#define IMX8MP_CLK_CAN2 117
+#define IMX8MP_CLK_MEMREPAIR 118
+#define IMX8MP_CLK_PCIE_AUX 120
+
+#define IMX8MP_CLK_I2C5 121
+#define IMX8MP_CLK_I2C6 122
+
+#define IMX8MP_CLK_SAI1 123
+#define IMX8MP_CLK_SAI2 124
+#define IMX8MP_CLK_SAI3 125
+#define IMX8MP_CLK_SAI5 127
+#define IMX8MP_CLK_SAI6 128
+
+#define IMX8MP_CLK_ENET_QOS 129
+#define IMX8MP_CLK_ENET_QOS_TIMER 130
+#define IMX8MP_CLK_ENET_REF 131
+#define IMX8MP_CLK_ENET_TIMER 132
+#define IMX8MP_CLK_ENET_PHY_REF 133
+
+#define IMX8MP_CLK_NAND 134
+#define IMX8MP_CLK_QSPI 135
+
+#define IMX8MP_CLK_USDHC1 136
+#define IMX8MP_CLK_USDHC2 137
+
+#define IMX8MP_CLK_I2C1 138
+#define IMX8MP_CLK_I2C2 139
+#define IMX8MP_CLK_I2C3 140
+#define IMX8MP_CLK_I2C4 141
+
+#define IMX8MP_CLK_UART1 142
+#define IMX8MP_CLK_UART2 143
+#define IMX8MP_CLK_UART3 144
+#define IMX8MP_CLK_UART4 145
+
+#define IMX8MP_CLK_USB_CORE_REF 146
+#define IMX8MP_CLK_USB_PHY_REF 147
+
+#define IMX8MP_CLK_GIC 148
+
+#define IMX8MP_CLK_ECSPI1 149
+#define IMX8MP_CLK_ECSPI2 150
+
+#define IMX8MP_CLK_PWM1 151
+#define IMX8MP_CLK_PWM2 152
+#define IMX8MP_CLK_PWM3 153
+#define IMX8MP_CLK_PWM4 154
+
+#define IMX8MP_CLK_GPT1 155
+#define IMX8MP_CLK_GPT2 156
+#define IMX8MP_CLK_GPT3 157
+#define IMX8MP_CLK_GPT4 158
+#define IMX8MP_CLK_GPT5 159
+#define IMX8MP_CLK_GPT6 160
+
+#define IMX8MP_CLK_TRACE 161
+#define IMX8MP_CLK_WDOG 162
+#define IMX8MP_CLK_WRCLK 163
+#define IMX8MP_CLK_IPP_DO_CLKO1 164
+#define IMX8MP_CLK_IPP_DO_CLKO2 165
+#define IMX8MP_CLK_HDMI_FDCC_TST 166
+#define IMX8MP_CLK_HDMI_24M 167
+#define IMX8MP_CLK_HDMI_REF_266M 168
+#define IMX8MP_CLK_USDHC3 169
+
+#define IMX8MP_CLK_MEDIA_CAM1_PIX 170
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF 171
+#define IMX8MP_CLK_MEDIA_DISP1_PIX 172
+#define IMX8MP_CLK_MEDIA_CAM2_PIX 173
+#define IMX8MP_CLK_MEDIA_LDB 174
+#define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC 175
+#define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE 178
+
+#define IMX8MP_CLK_ECSPI3 179
+#define IMX8MP_CLK_PDM 180
+#define IMX8MP_CLK_VPU_VC8000E 181
+#define IMX8MP_CLK_SAI7 182
+#define IMX8MP_CLK_GPC_ROOT 183
+#define IMX8MP_CLK_ANAMIX_ROOT 184
+#define IMX8MP_CLK_CPU_ROOT 185
+#define IMX8MP_CLK_CSU_ROOT 186
+
+#define IMX8MP_CLK_DEBUG_ROOT 187
+#define IMX8MP_CLK_DRAM1_ROOT 188
+
+#define IMX8MP_CLK_ECSPI1_ROOT 189
+#define IMX8MP_CLK_ECSPI2_ROOT 190
+#define IMX8MP_CLK_ECSPI3_ROOT 191
+#define IMX8MP_CLK_ENET1_ROOT 192
+
+#define IMX8MP_CLK_GPIO1_ROOT 193
+#define IMX8MP_CLK_GPIO2_ROOT 194
+#define IMX8MP_CLK_GPIO3_ROOT 195
+#define IMX8MP_CLK_GPIO4_ROOT 196
+#define IMX8MP_CLK_GPIO5_ROOT 197
+
+#define IMX8MP_CLK_GPT1_ROOT 198
+#define IMX8MP_CLK_GPT2_ROOT 199
+#define IMX8MP_CLK_GPT3_ROOT 200
+#define IMX8MP_CLK_GPT4_ROOT 201
+#define IMX8MP_CLK_GPT5_ROOT 202
+#define IMX8MP_CLK_GPT6_ROOT 203
+
+#define IMX8MP_CLK_HS_ROOT 204
+
+#define IMX8MP_CLK_I2C1_ROOT 205
+#define IMX8MP_CLK_I2C2_ROOT 206
+#define IMX8MP_CLK_I2C3_ROOT 207
+#define IMX8MP_CLK_I2C4_ROOT 208
+
+#define IMX8MP_CLK_IOMUX_ROOT 209
+#define IMX8MP_CLK_IPMUX1_ROOT 210
+#define IMX8MP_CLK_IPMUX2_ROOT 211
+#define IMX8MP_CLK_IPMUX3_ROOT 212
+
+#define IMX8MP_CLK_MU_ROOT 213
+#define IMX8MP_CLK_OCOTP_ROOT 214
+#define IMX8MP_CLK_OCRAM_ROOT 215
+#define IMX8MP_CLK_OCRAM_S_ROOT 216
+#define IMX8MP_CLK_PCIE_ROOT 217
+#define IMX8MP_CLK_PERFMON1_ROOT 218
+#define IMX8MP_CLK_PERFMON2_ROOT 219
+
+#define IMX8MP_CLK_PWM1_ROOT 220
+#define IMX8MP_CLK_PWM2_ROOT 221
+#define IMX8MP_CLK_PWM3_ROOT 222
+#define IMX8MP_CLK_PWM4_ROOT 223
+
+#define IMX8MP_CLK_QOS_ROOT 224
+#define IMX8MP_CLK_QOS_ENET_ROOT 225
+#define IMX8MP_CLK_QSPI_ROOT 226
+#define IMX8MP_CLK_NAND_ROOT 227
+#define IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK 228
+#define IMX8MP_CLK_RDC_ROOT 229
+#define IMX8MP_CLK_ROM_ROOT 230
+
+#define IMX8MP_CLK_I2C5_ROOT 231
+#define IMX8MP_CLK_I2C6_ROOT 232
+
+#define IMX8MP_CLK_CAN1_ROOT 233
+#define IMX8MP_CLK_CAN2_ROOT 234
+
+#define IMX8MP_CLK_SCTR_ROOT 235
+#define IMX8MP_CLK_SDMA1_ROOT 236
+#define IMX8MP_CLK_ENET_QOS_ROOT 237
+#define IMX8MP_CLK_SEC_DEBUG_ROOT 238
+#define IMX8MP_CLK_SEMA1_ROOT 239
+#define IMX8MP_CLK_SEMA2_ROOT 240
+#define IMX8MP_CLK_IRQ_STEER_ROOT 241
+#define IMX8MP_CLK_SIM_ENET_ROOT 242
+#define IMX8MP_CLK_SIM_M_ROOT 243
+#define IMX8MP_CLK_SIM_MAIN_ROOT 244
+#define IMX8MP_CLK_SIM_S_ROOT 245
+#define IMX8MP_CLK_SIM_WAKEUP_ROOT 246
+#define IMX8MP_CLK_GPU2D_ROOT 247
+#define IMX8MP_CLK_GPU3D_ROOT 248
+#define IMX8MP_CLK_SNVS_ROOT 249
+#define IMX8MP_CLK_TRACE_ROOT 250
+
+#define IMX8MP_CLK_UART1_ROOT 251
+#define IMX8MP_CLK_UART2_ROOT 252
+#define IMX8MP_CLK_UART3_ROOT 253
+#define IMX8MP_CLK_UART4_ROOT 254
+
+#define IMX8MP_CLK_USB_ROOT 255
+#define IMX8MP_CLK_USB_PHY_ROOT 256
+#define IMX8MP_CLK_USDHC1_ROOT 257
+#define IMX8MP_CLK_USDHC2_ROOT 258
+
+#define IMX8MP_CLK_WDOG1_ROOT 259
+#define IMX8MP_CLK_WDOG2_ROOT 260
+#define IMX8MP_CLK_WDOG3_ROOT 261
+
+#define IMX8MP_CLK_VPU_G1_ROOT 262
+#define IMX8MP_CLK_GPU_ROOT 263
+#define IMX8MP_CLK_NOC_WRAPPER_ROOT 264
+#define IMX8MP_CLK_VPU_VC8KE_ROOT 265
+#define IMX8MP_CLK_VPU_G2_ROOT 266
+#define IMX8MP_CLK_NPU_ROOT 267
+#define IMX8MP_CLK_HSIO_ROOT 268
+
+#define IMX8MP_CLK_MEDIA_APB_ROOT 269
+#define IMX8MP_CLK_MEDIA_AXI_ROOT 270
+#define IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT 271
+#define IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT 272
+#define IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT 273
+#define IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT 274
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT 275
+#define IMX8MP_CLK_MEDIA_ISP_ROOT 276
+
+#define IMX8MP_CLK_USDHC3_ROOT 277
+#define IMX8MP_CLK_HDMI_ROOT 278
+#define IMX8MP_CLK_XTAL_ROOT 279
+#define IMX8MP_CLK_PLL_ROOT 280
+#define IMX8MP_CLK_TSENSOR_ROOT 281
+#define IMX8MP_CLK_VPU_ROOT 282
+#define IMX8MP_CLK_MRPR_ROOT 283
+#define IMX8MP_CLK_AUDIO_ROOT 284
+#define IMX8MP_CLK_DRAM_ALT_ROOT 285
+#define IMX8MP_CLK_DRAM_CORE 286
+#define IMX8MP_CLK_ARM 287
+#define IMX8MP_CLK_A53_CORE 288
+
+#define IMX8MP_SYS_PLL1_40M_CG 289
+#define IMX8MP_SYS_PLL1_80M_CG 290
+#define IMX8MP_SYS_PLL1_100M_CG 291
+#define IMX8MP_SYS_PLL1_133M_CG 292
+#define IMX8MP_SYS_PLL1_160M_CG 293
+#define IMX8MP_SYS_PLL1_200M_CG 294
+#define IMX8MP_SYS_PLL1_266M_CG 295
+#define IMX8MP_SYS_PLL1_400M_CG 296
+#define IMX8MP_SYS_PLL2_50M_CG 297
+#define IMX8MP_SYS_PLL2_100M_CG 298
+#define IMX8MP_SYS_PLL2_125M_CG 299
+#define IMX8MP_SYS_PLL2_166M_CG 300
+#define IMX8MP_SYS_PLL2_200M_CG 301
+#define IMX8MP_SYS_PLL2_250M_CG 302
+#define IMX8MP_SYS_PLL2_333M_CG 303
+#define IMX8MP_SYS_PLL2_500M_CG 304
+
+#define IMX8MP_CLK_M7_CORE 305
+#define IMX8MP_CLK_ML_CORE 306
+#define IMX8MP_CLK_GPU3D_CORE 307
+#define IMX8MP_CLK_GPU3D_SHADER_CORE 308
+#define IMX8MP_CLK_GPU2D_CORE 309
+#define IMX8MP_CLK_AUDIO_AXI 310
+#define IMX8MP_CLK_HSIO_AXI 311
+#define IMX8MP_CLK_MEDIA_ISP 312
+#define IMX8MP_CLK_MEDIA_DISP2_PIX 313
+#define IMX8MP_CLK_CLKOUT1_SEL 314
+#define IMX8MP_CLK_CLKOUT1_DIV 315
+#define IMX8MP_CLK_CLKOUT1 316
+#define IMX8MP_CLK_CLKOUT2_SEL 317
+#define IMX8MP_CLK_CLKOUT2_DIV 318
+#define IMX8MP_CLK_CLKOUT2 319
+#define IMX8MP_CLK_USB_SUSP 320
+#define IMX8MP_CLK_AUDIO_AHB_ROOT IMX8MP_CLK_AUDIO_ROOT
+#define IMX8MP_CLK_AUDIO_AXI_ROOT 321
+
+#define IMX8MP_CLK_SAI1_ROOT 322
+#define IMX8MP_CLK_SAI2_ROOT 323
+#define IMX8MP_CLK_SAI3_ROOT 324
+#define IMX8MP_CLK_SAI5_ROOT 325
+#define IMX8MP_CLK_SAI6_ROOT 326
+#define IMX8MP_CLK_SAI7_ROOT 327
+
+#define IMX8MP_CLK_PDM_ROOT 328
+#define IMX8MP_CLK_MEDIA_LDB_ROOT 329
+#define IMX8MP_CLK_END 330
+
+#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2 2
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK3 3
+#define IMX8MP_CLK_AUDIOMIX_SAI2_IPG 4
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1 5
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2 6
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK3 7
+#define IMX8MP_CLK_AUDIOMIX_SAI3_IPG 8
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1 9
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2 10
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK3 11
+#define IMX8MP_CLK_AUDIOMIX_SAI5_IPG 12
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1 13
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2 14
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK3 15
+#define IMX8MP_CLK_AUDIOMIX_SAI6_IPG 16
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1 17
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2 18
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK3 19
+#define IMX8MP_CLK_AUDIOMIX_SAI7_IPG 20
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1 21
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2 22
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK3 23
+#define IMX8MP_CLK_AUDIOMIX_ASRC_IPG 24
+#define IMX8MP_CLK_AUDIOMIX_PDM_IPG 25
+#define IMX8MP_CLK_AUDIOMIX_SDMA2_ROOT 26
+#define IMX8MP_CLK_AUDIOMIX_SDMA3_ROOT 27
+#define IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT 28
+#define IMX8MP_CLK_AUDIOMIX_DSP_ROOT 29
+#define IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT 30
+#define IMX8MP_CLK_AUDIOMIX_EARC_IPG 31
+#define IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG 32
+#define IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG 33
+#define IMX8MP_CLK_AUDIOMIX_EDMA_ROOT 34
+#define IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT 35
+#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
+#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
+#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2_SEL 43
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1_SEL 44
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2_SEL 45
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK1_SEL 46
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK2_SEL 47
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1_SEL 48
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2_SEL 49
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1_SEL 50
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2_SEL 51
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1_SEL 52
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2_SEL 53
+#define IMX8MP_CLK_AUDIOMIX_PDM_SEL 54
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL 55
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL 56
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS 57
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT 58
+
+#define IMX8MP_CLK_AUDIOMIX_END 59
+
+#endif /* __IMX8MP_CCM_H__ */
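
The numeric IDs above mirror the devicetree binding header, and in this driver they double as the clknode IDs registered under the CCM's clock domain. As a sketch, an in-kernel lookup by ID could go through clk_get_by_id(), assuming a pointer to the clkdom created at attach time (the variable names are illustrative):

	clk_t uart_root;

	if (clk_get_by_id(dev, clkdom, IMX8MP_CLK_UART1_ROOT, &uart_root) == 0)
		(void)clk_enable(uart_root);
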
diff --git a/sys/arm64/freescale/imx/imx8mq_ccm.c b/sys/arm64/freescale/imx/imx8mq_ccm.c
index e89300c19d51..17bea24e9b39 100644
--- a/sys/arm64/freescale/imx/imx8mq_ccm.c
+++ b/sys/arm64/freescale/imx/imx8mq_ccm.c
@@ -27,7 +27,7 @@
#include <sys/cdefs.h>
/*
- * Clocks driver for Freescale i.MX8MQ SoC
+ * Clocks driver for Freescale i.MX 8M Quad SoC.
*/
#include <sys/param.h>
@@ -43,7 +43,7 @@
#include <machine/bus.h>
-#include <arm64/freescale/imx/imx_ccm_clk.h>
+#include <arm64/freescale/imx/imx_ccm.h>
#include <arm64/freescale/imx/imx8mq_ccm.h>
#include <arm64/freescale/imx/clk/imx_clk_gate.h>
#include <arm64/freescale/imx/clk/imx_clk_mux.h>
@@ -118,7 +118,7 @@ static const char *ahb_p[] = {
"sys3_pll_out", "audio_pll1_out", "video_pll1_out"
};
-static struct imx_clk imx_clks[] = {
+static struct imx_clk imx8mq_clks[] = {
FIXED(IMX8MQ_CLK_DUMMY, "dummy", 0),
LINK(IMX8MQ_CLK_32K, "ckil"),
@@ -275,119 +275,22 @@ struct ccm_softc {
int nclks;
};
-static inline uint32_t
-CCU_READ4(struct ccm_softc *sc, bus_size_t off)
-{
-
- return (bus_read_4(sc->mem_res, off));
-}
-
-static inline void
-CCU_WRITE4(struct ccm_softc *sc, bus_size_t off, uint32_t val)
-{
-
- bus_write_4(sc->mem_res, off, val);
-}
-
static int
-ccm_detach(device_t dev)
+imx8mq_ccm_attach(device_t dev)
{
- struct ccm_softc *sc;
+ struct imx_ccm_softc *sc;
sc = device_get_softc(dev);
+ sc->dev = dev;
- if (sc->mem_res != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
-
- return (0);
-}
-
-static int
-ccm_attach(device_t dev)
-{
- struct ccm_softc *sc;
- int err, rid;
- phandle_t node;
- int i;
+ sc->clks = imx8mq_clks;
+ sc->nclks = nitems(imx8mq_clks);
- sc = device_get_softc(dev);
- err = 0;
-
- /* Allocate bus_space resources. */
- rid = 0;
- sc->clks = imx_clks;
- sc->nclks = nitems(imx_clks);
- sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (sc->mem_res == NULL) {
- device_printf(dev, "Cannot allocate memory resources\n");
- err = ENXIO;
- goto out;
- }
-
- mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
-
- sc->clkdom = clkdom_create(dev);
- if (sc->clkdom == NULL)
- panic("Cannot create clkdom\n");
-
- for (i = 0; i < sc->nclks; i++) {
- switch (sc->clks[i].type) {
- case IMX_CLK_UNDEFINED:
- break;
- case IMX_CLK_LINK:
- clknode_link_register(sc->clkdom,
- sc->clks[i].clk.link);
- break;
- case IMX_CLK_FIXED:
- clknode_fixed_register(sc->clkdom,
- sc->clks[i].clk.fixed);
- break;
- case IMX_CLK_MUX:
- imx_clk_mux_register(sc->clkdom, sc->clks[i].clk.mux);
- break;
- case IMX_CLK_GATE:
- imx_clk_gate_register(sc->clkdom, sc->clks[i].clk.gate);
- break;
- case IMX_CLK_COMPOSITE:
- imx_clk_composite_register(sc->clkdom, sc->clks[i].clk.composite);
- break;
- case IMX_CLK_SSCG_PLL:
- imx_clk_sscg_pll_register(sc->clkdom, sc->clks[i].clk.sscg_pll);
- break;
- case IMX_CLK_FRAC_PLL:
- imx_clk_frac_pll_register(sc->clkdom, sc->clks[i].clk.frac_pll);
- break;
- case IMX_CLK_DIV:
- clknode_div_register(sc->clkdom, sc->clks[i].clk.div);
- break;
- default:
- device_printf(dev, "Unknown clock type %d\n", sc->clks[i].type);
- return (ENXIO);
- }
- }
-
- if (clkdom_finit(sc->clkdom) != 0)
- panic("cannot finalize clkdom initialization\n");
-
- if (bootverbose)
- clkdom_dump(sc->clkdom);
-
- node = ofw_bus_get_node(dev);
- clk_set_assigned(dev, node);
-
- err = 0;
-
-out:
-
- if (err != 0)
- ccm_detach(dev);
-
- return (err);
+ return (imx_ccm_attach(dev));
}
static int
-ccm_probe(device_t dev)
+imx8mq_ccm_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
@@ -396,87 +299,21 @@ ccm_probe(device_t dev)
if (ofw_bus_is_compatible(dev, "fsl,imx8mq-ccm") == 0)
return (ENXIO);
- device_set_desc(dev, "Freescale i.MX8 Clock Control Module");
+ device_set_desc(dev, "Freescale i.MX 8M Quad Clock Control Module");
return (BUS_PROBE_DEFAULT);
}
-static int
-imx_ccm_write_4(device_t dev, bus_addr_t addr, uint32_t val)
-{
- struct ccm_softc *sc;
-
- sc = device_get_softc(dev);
- CCU_WRITE4(sc, addr, val);
- return (0);
-}
-
-static int
-imx_ccm_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
-{
- struct ccm_softc *sc;
-
- sc = device_get_softc(dev);
-
- *val = CCU_READ4(sc, addr);
- return (0);
-}
-
-static int
-imx_ccm_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set)
-{
- struct ccm_softc *sc;
- uint32_t reg;
-
- sc = device_get_softc(dev);
-
- reg = CCU_READ4(sc, addr);
- reg &= ~clr;
- reg |= set;
- CCU_WRITE4(sc, addr, reg);
-
- return (0);
-}
-
-static void
-imx_ccm_device_lock(device_t dev)
-{
- struct ccm_softc *sc;
-
- sc = device_get_softc(dev);
- mtx_lock(&sc->mtx);
-}
-
-static void
-imx_ccm_device_unlock(device_t dev)
-{
- struct ccm_softc *sc;
-
- sc = device_get_softc(dev);
- mtx_unlock(&sc->mtx);
-}
-
-static device_method_t ccm_methods[] = {
+static device_method_t imx8mq_ccm_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ccm_probe),
- DEVMETHOD(device_attach, ccm_attach),
- DEVMETHOD(device_detach, ccm_detach),
-
- /* clkdev interface */
- DEVMETHOD(clkdev_write_4, imx_ccm_write_4),
- DEVMETHOD(clkdev_read_4, imx_ccm_read_4),
- DEVMETHOD(clkdev_modify_4, imx_ccm_modify_4),
- DEVMETHOD(clkdev_device_lock, imx_ccm_device_lock),
- DEVMETHOD(clkdev_device_unlock, imx_ccm_device_unlock),
+ DEVMETHOD(device_probe, imx8mq_ccm_probe),
+ DEVMETHOD(device_attach, imx8mq_ccm_attach),
DEVMETHOD_END
};
-static driver_t ccm_driver = {
- "ccm",
- ccm_methods,
- sizeof(struct ccm_softc)
-};
+DEFINE_CLASS_1(imx8mq_ccm, imx8mq_ccm_driver, imx8mq_ccm_methods,
+ sizeof(struct imx_ccm_softc), imx_ccm_driver);
-EARLY_DRIVER_MODULE(ccm, simplebus, ccm_driver, 0, 0,
+EARLY_DRIVER_MODULE(imx8mq_ccm, simplebus, imx8mq_ccm_driver, 0, 0,
BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
diff --git a/sys/arm64/freescale/imx/imx_ccm.c b/sys/arm64/freescale/imx/imx_ccm.c
new file mode 100644
index 000000000000..a5660c5d0a5d
--- /dev/null
+++ b/sys/arm64/freescale/imx/imx_ccm.c
@@ -0,0 +1,237 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Oleksandr Tymoshenko <gonzo@FreeBSD.org>
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Tom Jones <thj@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+/*
+ * Clock Control Module driver for the Freescale i.MX 8M SoC family.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+
+#include <arm64/freescale/imx/imx_ccm.h>
+#include <arm64/freescale/imx/clk/imx_clk_gate.h>
+#include <arm64/freescale/imx/clk/imx_clk_mux.h>
+#include <arm64/freescale/imx/clk/imx_clk_composite.h>
+#include <arm64/freescale/imx/clk/imx_clk_sscg_pll.h>
+#include <arm64/freescale/imx/clk/imx_clk_frac_pll.h>
+
+#include "clkdev_if.h"
+
+static inline uint32_t
+CCU_READ4(struct imx_ccm_softc *sc, bus_size_t off)
+{
+
+ return (bus_read_4(sc->mem_res, off));
+}
+
+static inline void
+CCU_WRITE4(struct imx_ccm_softc *sc, bus_size_t off, uint32_t val)
+{
+
+ bus_write_4(sc->mem_res, off, val);
+}
+
+int
+imx_ccm_detach(device_t dev)
+{
+ struct imx_ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->mem_res != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
+
+ return (0);
+}
+
+int
+imx_ccm_attach(device_t dev)
+{
+ struct imx_ccm_softc *sc;
+ int err, rid;
+ phandle_t node;
+ int i;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ err = 0;
+
+ /* Allocate bus_space resources. */
+ rid = 0;
+ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->mem_res == NULL) {
+ device_printf(dev, "Cannot allocate memory resources\n");
+ err = ENXIO;
+ goto out;
+ }
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL)
+ panic("Cannot create clkdom\n");
+
+ for (i = 0; i < sc->nclks; i++) {
+ switch (sc->clks[i].type) {
+ case IMX_CLK_UNDEFINED:
+ break;
+ case IMX_CLK_LINK:
+ clknode_link_register(sc->clkdom,
+ sc->clks[i].clk.link);
+ break;
+ case IMX_CLK_FIXED:
+ clknode_fixed_register(sc->clkdom,
+ sc->clks[i].clk.fixed);
+ break;
+ case IMX_CLK_MUX:
+ imx_clk_mux_register(sc->clkdom, sc->clks[i].clk.mux);
+ break;
+ case IMX_CLK_GATE:
+ imx_clk_gate_register(sc->clkdom, sc->clks[i].clk.gate);
+ break;
+ case IMX_CLK_COMPOSITE:
+ imx_clk_composite_register(sc->clkdom, sc->clks[i].clk.composite);
+ break;
+ case IMX_CLK_SSCG_PLL:
+ imx_clk_sscg_pll_register(sc->clkdom, sc->clks[i].clk.sscg_pll);
+ break;
+ case IMX_CLK_FRAC_PLL:
+ imx_clk_frac_pll_register(sc->clkdom, sc->clks[i].clk.frac_pll);
+ break;
+ case IMX_CLK_DIV:
+ clknode_div_register(sc->clkdom, sc->clks[i].clk.div);
+ break;
+ default:
+ device_printf(dev, "Unknown clock type %d\n", sc->clks[i].type);
+ err = ENXIO;
+ goto out;
+ }
+ }
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("cannot finalize clkdom initialization\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ node = ofw_bus_get_node(dev);
+ clk_set_assigned(dev, node);
+
+ err = 0;
+
+out:
+ if (err != 0)
+ imx_ccm_detach(dev);
+
+ return (err);
+}
+
+static int
+imx_ccm_write_4(device_t dev, bus_addr_t addr, uint32_t val)
+{
+ struct imx_ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ CCU_WRITE4(sc, addr, val);
+ return (0);
+}
+
+static int
+imx_ccm_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
+{
+ struct imx_ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ *val = CCU_READ4(sc, addr);
+ return (0);
+}
+
+static int
+imx_ccm_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set)
+{
+ struct imx_ccm_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ reg = CCU_READ4(sc, addr);
+ reg &= ~clr;
+ reg |= set;
+ CCU_WRITE4(sc, addr, reg);
+
+ return (0);
+}
+
+static void
+imx_ccm_device_lock(device_t dev)
+{
+ struct imx_ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+imx_ccm_device_unlock(device_t dev)
+{
+ struct imx_ccm_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static device_method_t imx_ccm_methods[] = {
+ /* clkdev interface */
+ DEVMETHOD(clkdev_write_4, imx_ccm_write_4),
+ DEVMETHOD(clkdev_read_4, imx_ccm_read_4),
+ DEVMETHOD(clkdev_modify_4, imx_ccm_modify_4),
+ DEVMETHOD(clkdev_device_lock, imx_ccm_device_lock),
+ DEVMETHOD(clkdev_device_unlock, imx_ccm_device_unlock),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(imx_ccm, imx_ccm_driver, imx_ccm_methods,
+ sizeof(struct imx_ccm_softc));
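
The clkdev methods above are the register-access backend for every clknode type registered in imx_ccm_attach(): the gate, mux, composite and PLL implementations call them instead of touching the bus space themselves. A minimal sketch of that pattern, assuming a hypothetical gate method (the softc layout is illustrative; the real gate clknodes keep their own offset/mask fields):

	static int
	example_gate_set(struct clknode *clknode, bool enable)
	{
		struct { bus_addr_t offset; uint32_t mask; } *gsc;
		device_t ccmdev;

		gsc = clknode_get_softc(clknode);
		ccmdev = clknode_get_device(clknode);

		CLKDEV_DEVICE_LOCK(ccmdev);	/* imx_ccm_device_lock() */
		CLKDEV_MODIFY_4(ccmdev, gsc->offset, gsc->mask,
		    enable ? gsc->mask : 0);	/* imx_ccm_modify_4() */
		CLKDEV_DEVICE_UNLOCK(ccmdev);	/* imx_ccm_device_unlock() */
		return (0);
	}
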
diff --git a/sys/arm64/freescale/imx/imx_ccm_clk.h b/sys/arm64/freescale/imx/imx_ccm.h
index 4c16fa00fe6b..9c34789a648e 100644
--- a/sys/arm64/freescale/imx/imx_ccm_clk.h
+++ b/sys/arm64/freescale/imx/imx_ccm.h
@@ -25,8 +25,8 @@
* SUCH DAMAGE.
*/
-#ifndef IMX6_CCM_CLK_H
-#define IMX6_CCM_CLK_H
+#ifndef IMX8_CCM_H
+#define IMX8_CCM_H
#include <dev/clk/clk.h>
#include <dev/clk/clk_div.h>
@@ -34,6 +34,20 @@
#include <dev/clk/clk_gate.h>
#include <dev/clk/clk_link.h>
+int imx_ccm_attach(device_t);
+int imx_ccm_detach(device_t);
+
+struct imx_ccm_softc {
+ device_t dev;
+ struct resource *mem_res;
+ struct clkdom *clkdom;
+ struct mtx mtx;
+ struct imx_clk *clks;
+ int nclks;
+};
+
+DECLARE_CLASS(imx_ccm_driver);
+
enum imx_clk_type {
IMX_CLK_UNDEFINED = 0,
IMX_CLK_FIXED,
@@ -207,4 +221,4 @@ struct imx_clk {
}, \
}
-#endif
+#endif /* IMX8_CCM_H */
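
With attach/detach and the clkdev plumbing now shared, a front end for another i.MX 8M variant reduces to a clock table plus the thin probe/attach pair seen twice above. A sketch using the FIXED and LINK initializer macros from this header; the IDs and the table are illustrative, not a real SoC:

	enum { MYSOC_CLK_DUMMY, MYSOC_CLK_32K };	/* illustrative IDs */

	static struct imx_clk mysoc_clks[] = {
		FIXED(MYSOC_CLK_DUMMY, "dummy", 0),
		LINK(MYSOC_CLK_32K, "ckil"),
	};

The attach routine then points sc->clks/sc->nclks at the table and tail-calls imx_ccm_attach(), exactly as the 8M Plus and 8M Quad front ends do.
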
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 9391b1e2c1b7..cd770386f852 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -36,29 +36,6 @@
#define INSN_SIZE 4
-#define MRS_MASK 0xfff00000
-#define MRS_VALUE 0xd5300000
-#define MRS_SPECIAL(insn) ((insn) & 0x000fffe0)
-#define MRS_REGISTER(insn) ((insn) & 0x0000001f)
-#define MRS_Op0_SHIFT 19
-#define MRS_Op0_MASK 0x00080000
-#define MRS_Op1_SHIFT 16
-#define MRS_Op1_MASK 0x00070000
-#define MRS_CRn_SHIFT 12
-#define MRS_CRn_MASK 0x0000f000
-#define MRS_CRm_SHIFT 8
-#define MRS_CRm_MASK 0x00000f00
-#define MRS_Op2_SHIFT 5
-#define MRS_Op2_MASK 0x000000e0
-#define MRS_Rt_SHIFT 0
-#define MRS_Rt_MASK 0x0000001f
-#define __MRS_REG(op0, op1, crn, crm, op2) \
- (((op0) << MRS_Op0_SHIFT) | ((op1) << MRS_Op1_SHIFT) | \
- ((crn) << MRS_CRn_SHIFT) | ((crm) << MRS_CRm_SHIFT) | \
- ((op2) << MRS_Op2_SHIFT))
-#define MRS_REG(reg) \
- __MRS_REG(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
-
#define __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
S##op0##_##op1##_C##crn##_C##crm##_##op2
#define _MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
@@ -77,6 +54,134 @@
#define UL(x) UINT64_C(x)
+/* AFSR0_EL1 - Auxiliary Fault Status Register 0 */
+#define AFSR0_EL1_REG MRS_REG_ALT_NAME(AFSR0_EL1)
+#define AFSR0_EL1_op0 3
+#define AFSR0_EL1_op1 0
+#define AFSR0_EL1_CRn 5
+#define AFSR0_EL1_CRm 1
+#define AFSR0_EL1_op2 0
+
+/* AFSR0_EL12 */
+#define AFSR0_EL12_REG MRS_REG_ALT_NAME(AFSR0_EL12)
+#define AFSR0_EL12_op0 3
+#define AFSR0_EL12_op1 5
+#define AFSR0_EL12_CRn 5
+#define AFSR0_EL12_CRm 1
+#define AFSR0_EL12_op2 0
+
+/* AFSR1_EL1 - Auxiliary Fault Status Register 1 */
+#define AFSR1_EL1_REG MRS_REG_ALT_NAME(AFSR1_EL1)
+#define AFSR1_EL1_op0 3
+#define AFSR1_EL1_op1 0
+#define AFSR1_EL1_CRn 5
+#define AFSR1_EL1_CRm 1
+#define AFSR1_EL1_op2 1
+
+/* AFSR1_EL12 */
+#define AFSR1_EL12_REG MRS_REG_ALT_NAME(AFSR1_EL12)
+#define AFSR1_EL12_op0 3
+#define AFSR1_EL12_op1 5
+#define AFSR1_EL12_CRn 5
+#define AFSR1_EL12_CRm 1
+#define AFSR1_EL12_op2 1
+
+/* AMAIR_EL1 - Auxiliary Memory Attribute Indirection Register */
+#define AMAIR_EL1_REG MRS_REG_ALT_NAME(AMAIR_EL1)
+#define AMAIR_EL1_op0 3
+#define AMAIR_EL1_op1 0
+#define AMAIR_EL1_CRn 10
+#define AMAIR_EL1_CRm 3
+#define AMAIR_EL1_op2 0
+
+/* AMAIR_EL12 */
+#define AMAIR_EL12_REG MRS_REG_ALT_NAME(AMAIR_EL12)
+#define AMAIR_EL12_op0 3
+#define AMAIR_EL12_op1 5
+#define AMAIR_EL12_CRn 10
+#define AMAIR_EL12_CRm 3
+#define AMAIR_EL12_op2 0
+
+/* APDAKeyHi_EL1 */
+#define APDAKeyHi_EL1_REG MRS_REG_ALT_NAME(APDAKeyHi_EL1)
+#define APDAKeyHi_EL1_op0 3
+#define APDAKeyHi_EL1_op1 0
+#define APDAKeyHi_EL1_CRn 2
+#define APDAKeyHi_EL1_CRm 2
+#define APDAKeyHi_EL1_op2 1
+
+/* APDAKeyLo_EL1 */
+#define APDAKeyLo_EL1_REG MRS_REG_ALT_NAME(APDAKeyLo_EL1)
+#define APDAKeyLo_EL1_op0 3
+#define APDAKeyLo_EL1_op1 0
+#define APDAKeyLo_EL1_CRn 2
+#define APDAKeyLo_EL1_CRm 2
+#define APDAKeyLo_EL1_op2 0
+
+/* APDBKeyHi_EL1 */
+#define APDBKeyHi_EL1_REG MRS_REG_ALT_NAME(APDBKeyHi_EL1)
+#define APDBKeyHi_EL1_op0 3
+#define APDBKeyHi_EL1_op1 0
+#define APDBKeyHi_EL1_CRn 2
+#define APDBKeyHi_EL1_CRm 2
+#define APDBKeyHi_EL1_op2 3
+
+/* APDBKeyLo_EL1 */
+#define APDBKeyLo_EL1_REG MRS_REG_ALT_NAME(APDBKeyLo_EL1)
+#define APDBKeyLo_EL1_op0 3
+#define APDBKeyLo_EL1_op1 0
+#define APDBKeyLo_EL1_CRn 2
+#define APDBKeyLo_EL1_CRm 2
+#define APDBKeyLo_EL1_op2 2
+
+/* APGAKeyHi_EL1 */
+#define APGAKeyHi_EL1_REG MRS_REG_ALT_NAME(APGAKeyHi_EL1)
+#define APGAKeyHi_EL1_op0 3
+#define APGAKeyHi_EL1_op1 0
+#define APGAKeyHi_EL1_CRn 2
+#define APGAKeyHi_EL1_CRm 3
+#define APGAKeyHi_EL1_op2 1
+
+/* APGAKeyLo_EL1 */
+#define APGAKeyLo_EL1_REG MRS_REG_ALT_NAME(APGAKeyLo_EL1)
+#define APGAKeyLo_EL1_op0 3
+#define APGAKeyLo_EL1_op1 0
+#define APGAKeyLo_EL1_CRn 2
+#define APGAKeyLo_EL1_CRm 3
+#define APGAKeyLo_EL1_op2 0
+
+/* APIAKeyHi_EL1 */
+#define APIAKeyHi_EL1_REG MRS_REG_ALT_NAME(APIAKeyHi_EL1)
+#define APIAKeyHi_EL1_op0 3
+#define APIAKeyHi_EL1_op1 0
+#define APIAKeyHi_EL1_CRn 2
+#define APIAKeyHi_EL1_CRm 1
+#define APIAKeyHi_EL1_op2 1
+
+/* APIAKeyLo_EL1 */
+#define APIAKeyLo_EL1_REG MRS_REG_ALT_NAME(APIAKeyLo_EL1)
+#define APIAKeyLo_EL1_op0 3
+#define APIAKeyLo_EL1_op1 0
+#define APIAKeyLo_EL1_CRn 2
+#define APIAKeyLo_EL1_CRm 1
+#define APIAKeyLo_EL1_op2 0
+
+/* APIBKeyHi_EL1 */
+#define APIBKeyHi_EL1_REG MRS_REG_ALT_NAME(APIBKeyHi_EL1)
+#define APIBKeyHi_EL1_op0 3
+#define APIBKeyHi_EL1_op1 0
+#define APIBKeyHi_EL1_CRn 2
+#define APIBKeyHi_EL1_CRm 1
+#define APIBKeyHi_EL1_op2 3
+
+/* APIBKeyLo_EL1 */
+#define APIBKeyLo_EL1_REG MRS_REG_ALT_NAME(APIBKeyLo_EL1)
+#define APIBKeyLo_EL1_op0 3
+#define APIBKeyLo_EL1_op1 0
+#define APIBKeyLo_EL1_CRn 2
+#define APIBKeyLo_EL1_CRm 1
+#define APIBKeyLo_EL1_op2 2
+
/* CCSIDR_EL1 - Cache Size ID Register */
#define CCSIDR_NumSets_MASK 0x0FFFE000
#define CCSIDR_NumSets64_MASK 0x00FFFFFF00000000
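
For reference, the alternative-name scheme resolves each register to the generic S<op0>_<op1>_C<CRn>_C<CRm>_<op2> spelling, which assemblers accept even for registers they do not know by name. Written out by hand for the first entry above, assuming _MRS_REG_ALT_NAME (whose body falls outside this hunk) simply forwards to the __ variant:

	/*
	 * MRS_REG_ALT_NAME(AFSR0_EL1)
	 *   -> __MRS_REG_ALT_NAME(3, 0, 5, 1, 0)   (AFSR0_EL1_op0 .. _op2)
	 *   -> S3_0_C5_C1_0
	 *
	 * so e.g.:  mrs x0, S3_0_C5_C1_0   // reads AFSR0_EL1
	 */
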
@@ -103,8 +208,21 @@
#define CLIDR_CTYPE_ID 0x3 /* Split instruction and data */
#define CLIDR_CTYPE_UNIFIED 0x4 /* Unified */
+/* CNTKCTL_EL1 - Counter-timer Kernel Control Register */
+#define CNTKCTL_EL1_op0 3
+#define CNTKCTL_EL1_op1 0
+#define CNTKCTL_EL1_CRn 14
+#define CNTKCTL_EL1_CRm 1
+#define CNTKCTL_EL1_op2 0
+
+/* CNTKCTL_EL12 - Counter-timer Kernel Control Register */
+#define CNTKCTL_EL12_op0 3
+#define CNTKCTL_EL12_op1 5
+#define CNTKCTL_EL12_CRn 14
+#define CNTKCTL_EL12_CRm 1
+#define CNTKCTL_EL12_op2 0
+
/* CNTP_CTL_EL0 - Counter-timer Physical Timer Control register */
-#define CNTP_CTL_EL0 MRS_REG(CNTP_CTL_EL0)
#define CNTP_CTL_EL0_op0 3
#define CNTP_CTL_EL0_op1 3
#define CNTP_CTL_EL0_CRn 14
@@ -115,7 +233,6 @@
#define CNTP_CTL_ISTATUS (1 << 2)
/* CNTP_CVAL_EL0 - Counter-timer Physical Timer CompareValue register */
-#define CNTP_CVAL_EL0 MRS_REG(CNTP_CVAL_EL0)
#define CNTP_CVAL_EL0_op0 3
#define CNTP_CVAL_EL0_op1 3
#define CNTP_CVAL_EL0_CRn 14
@@ -123,7 +240,6 @@
#define CNTP_CVAL_EL0_op2 2
/* CNTP_TVAL_EL0 - Counter-timer Physical Timer TimerValue register */
-#define CNTP_TVAL_EL0 MRS_REG(CNTP_TVAL_EL0)
#define CNTP_TVAL_EL0_op0 3
#define CNTP_TVAL_EL0_op1 3
#define CNTP_TVAL_EL0_CRn 14
@@ -131,14 +247,64 @@
#define CNTP_TVAL_EL0_op2 0
/* CNTPCT_EL0 - Counter-timer Physical Count register */
-#define CNTPCT_EL0 MRS_REG(CNTPCT_EL0)
+#define CNTPCT_EL0_ISS ISS_MSR_REG(CNTPCT_EL0)
#define CNTPCT_EL0_op0 3
#define CNTPCT_EL0_op1 3
#define CNTPCT_EL0_CRn 14
#define CNTPCT_EL0_CRm 0
#define CNTPCT_EL0_op2 1
+/* CNTV_CTL_EL0 - Counter-timer Virtual Timer Control register */
+#define CNTV_CTL_EL0_op0 3
+#define CNTV_CTL_EL0_op1 3
+#define CNTV_CTL_EL0_CRn 14
+#define CNTV_CTL_EL0_CRm 3
+#define CNTV_CTL_EL0_op2 1
+
+/* CNTV_CTL_EL02 - Counter-timer Virtual Timer Control register */
+#define CNTV_CTL_EL02_op0 3
+#define CNTV_CTL_EL02_op1 5
+#define CNTV_CTL_EL02_CRn 14
+#define CNTV_CTL_EL02_CRm 3
+#define CNTV_CTL_EL02_op2 1
+
+/* CNTV_CVAL_EL0 - Counter-timer Virtual Timer CompareValue register */
+#define CNTV_CVAL_EL0_op0 3
+#define CNTV_CVAL_EL0_op1 3
+#define CNTV_CVAL_EL0_CRn 14
+#define CNTV_CVAL_EL0_CRm 3
+#define CNTV_CVAL_EL0_op2 2
+
+/* CNTV_CVAL_EL02 - Counter-timer Virtual Timer CompareValue register */
+#define CNTV_CVAL_EL02_op0 3
+#define CNTV_CVAL_EL02_op1 5
+#define CNTV_CVAL_EL02_CRn 14
+#define CNTV_CVAL_EL02_CRm 3
+#define CNTV_CVAL_EL02_op2 2
+
+/* CONTEXTIDR_EL1 - Context ID register */
+#define CONTEXTIDR_EL1_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL1)
+#define CONTEXTIDR_EL1_op0 3
+#define CONTEXTIDR_EL1_op1 0
+#define CONTEXTIDR_EL1_CRn 13
+#define CONTEXTIDR_EL1_CRm 0
+#define CONTEXTIDR_EL1_op2 1
+
+/* CONTEXTIDR_EL12 */
+#define CONTEXTIDR_EL12_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL12)
+#define CONTEXTIDR_EL12_op0 3
+#define CONTEXTIDR_EL12_op1 5
+#define CONTEXTIDR_EL12_CRn 13
+#define CONTEXTIDR_EL12_CRm 0
+#define CONTEXTIDR_EL12_op2 1
+
/* CPACR_EL1 */
+#define CPACR_EL1_REG MRS_REG_ALT_NAME(CPACR_EL1)
+#define CPACR_EL1_op0 3
+#define CPACR_EL1_op1 0
+#define CPACR_EL1_CRn 1
+#define CPACR_EL1_CRm 0
+#define CPACR_EL1_op2 2
#define CPACR_ZEN_MASK (0x3 << 16)
#define CPACR_ZEN_TRAP_ALL1 (0x0 << 16) /* Traps from EL0 and EL1 */
#define CPACR_ZEN_TRAP_EL0 (0x1 << 16) /* Traps from EL0 */
@@ -151,41 +317,65 @@
#define CPACR_FPEN_TRAP_NONE (0x3 << 20) /* No traps */
#define CPACR_TTA (0x1 << 28)
+/* CPACR_EL12 */
+#define CPACR_EL12_REG MRS_REG_ALT_NAME(CPACR_EL12)
+#define CPACR_EL12_op0 3
+#define CPACR_EL12_op1 5
+#define CPACR_EL12_CRn 1
+#define CPACR_EL12_CRm 0
+#define CPACR_EL12_op2 2
+
/* CSSELR_EL1 - Cache size selection register */
#define CSSELR_Level(i) (i << 1)
#define CSSELR_InD 0x00000001
/* CTR_EL0 - Cache Type Register */
+#define CTR_EL0_REG MRS_REG_ALT_NAME(CTR_EL0)
+#define CTR_EL0_ISS ISS_MSR_REG(CTR_EL0)
+#define CTR_EL0_op0 3
+#define CTR_EL0_op1 3
+#define CTR_EL0_CRn 0
+#define CTR_EL0_CRm 0
+#define CTR_EL0_op2 1
#define CTR_RES1 (1 << 31)
#define CTR_TminLine_SHIFT 32
#define CTR_TminLine_MASK (UL(0x3f) << CTR_TminLine_SHIFT)
#define CTR_TminLine_VAL(reg) ((reg) & CTR_TminLine_MASK)
#define CTR_DIC_SHIFT 29
+#define CTR_DIC_WIDTH 1
#define CTR_DIC_MASK (0x1 << CTR_DIC_SHIFT)
#define CTR_DIC_VAL(reg) ((reg) & CTR_DIC_MASK)
+#define CTR_DIC_NONE (0x0 << CTR_DIC_SHIFT)
+#define CTR_DIC_IMPL (0x1 << CTR_DIC_SHIFT)
#define CTR_IDC_SHIFT 28
+#define CTR_IDC_WIDTH 1
#define CTR_IDC_MASK (0x1 << CTR_IDC_SHIFT)
#define CTR_IDC_VAL(reg) ((reg) & CTR_IDC_MASK)
+#define CTR_IDC_NONE (0x0 << CTR_IDC_SHIFT)
+#define CTR_IDC_IMPL (0x1 << CTR_IDC_SHIFT)
#define CTR_CWG_SHIFT 24
+#define CTR_CWG_WIDTH 4
#define CTR_CWG_MASK (0xf << CTR_CWG_SHIFT)
#define CTR_CWG_VAL(reg) ((reg) & CTR_CWG_MASK)
#define CTR_CWG_SIZE(reg) (4 << (CTR_CWG_VAL(reg) >> CTR_CWG_SHIFT))
#define CTR_ERG_SHIFT 20
+#define CTR_ERG_WIDTH 4
#define CTR_ERG_MASK (0xf << CTR_ERG_SHIFT)
#define CTR_ERG_VAL(reg) ((reg) & CTR_ERG_MASK)
#define CTR_ERG_SIZE(reg) (4 << (CTR_ERG_VAL(reg) >> CTR_ERG_SHIFT))
#define CTR_DLINE_SHIFT 16
+#define CTR_DLINE_WIDTH 4
#define CTR_DLINE_MASK (0xf << CTR_DLINE_SHIFT)
#define CTR_DLINE_VAL(reg) ((reg) & CTR_DLINE_MASK)
#define CTR_DLINE_SIZE(reg) (4 << (CTR_DLINE_VAL(reg) >> CTR_DLINE_SHIFT))
#define CTR_L1IP_SHIFT 14
+#define CTR_L1IP_WIDTH 2
#define CTR_L1IP_MASK (0x3 << CTR_L1IP_SHIFT)
#define CTR_L1IP_VAL(reg) ((reg) & CTR_L1IP_MASK)
-#define CTR_L1IP_VPIPT (0 << CTR_L1IP_SHIFT)
-#define CTR_L1IP_AIVIVT (1 << CTR_L1IP_SHIFT)
#define CTR_L1IP_VIPT (2 << CTR_L1IP_SHIFT)
#define CTR_L1IP_PIPT (3 << CTR_L1IP_SHIFT)
#define CTR_ILINE_SHIFT 0
+#define CTR_ILINE_WIDTH 4
#define CTR_ILINE_MASK (0xf << CTR_ILINE_SHIFT)
#define CTR_ILINE_VAL(reg) ((reg) & CTR_ILINE_MASK)
#define CTR_ILINE_SIZE(reg) (4 << (CTR_ILINE_VAL(reg) >> CTR_ILINE_SHIFT))
@@ -204,7 +394,7 @@
#define DAIF_I (1 << 1)
#define DAIF_F (1 << 0)
#define DAIF_ALL (DAIF_D | DAIF_A | DAIF_I | DAIF_F)
-#define DAIF_INTR (DAIF_I) /* All exceptions that pass */
+#define DAIF_INTR (DAIF_I | DAIF_F) /* All exceptions that pass */
/* through the intr framework */
/* DBGBCR<n>_EL1 - Debug Breakpoint Control Registers */
@@ -276,7 +466,6 @@
#define DCZID_BS_SIZE(reg) (((reg) & DCZID_BS_MASK) >> DCZID_BS_SHIFT)
/* DBGAUTHSTATUS_EL1 */
-#define DBGAUTHSTATUS_EL1 MRS_REG(DBGAUTHSTATUS_EL1)
#define DBGAUTHSTATUS_EL1_op0 2
#define DBGAUTHSTATUS_EL1_op1 0
#define DBGAUTHSTATUS_EL1_CRn 7
@@ -284,7 +473,6 @@
#define DBGAUTHSTATUS_EL1_op2 6
/* DBGCLAIMCLR_EL1 */
-#define DBGCLAIMCLR_EL1 MRS_REG(DBGCLAIMCLR_EL1)
#define DBGCLAIMCLR_EL1_op0 2
#define DBGCLAIMCLR_EL1_op1 0
#define DBGCLAIMCLR_EL1_CRn 7
@@ -292,7 +480,6 @@
#define DBGCLAIMCLR_EL1_op2 6
/* DBGCLAIMSET_EL1 */
-#define DBGCLAIMSET_EL1 MRS_REG(DBGCLAIMSET_EL1)
#define DBGCLAIMSET_EL1_op0 2
#define DBGCLAIMSET_EL1_op1 0
#define DBGCLAIMSET_EL1_CRn 7
@@ -300,13 +487,28 @@
#define DBGCLAIMSET_EL1_op2 6
/* DBGPRCR_EL1 */
-#define DBGPRCR_EL1 MRS_REG(DBGPRCR_EL1)
#define DBGPRCR_EL1_op0 2
#define DBGPRCR_EL1_op1 0
#define DBGPRCR_EL1_CRn 1
#define DBGPRCR_EL1_CRm 4
#define DBGPRCR_EL1_op2 4
+/* ELR_EL1 */
+#define ELR_EL1_REG MRS_REG_ALT_NAME(ELR_EL1)
+#define ELR_EL1_op0 3
+#define ELR_EL1_op1 0
+#define ELR_EL1_CRn 4
+#define ELR_EL1_CRm 0
+#define ELR_EL1_op2 1
+
+/* ELR_EL12 */
+#define ELR_EL12_REG MRS_REG_ALT_NAME(ELR_EL12)
+#define ELR_EL12_op0 3
+#define ELR_EL12_op1 5
+#define ELR_EL12_CRn 4
+#define ELR_EL12_CRm 0
+#define ELR_EL12_op2 1
+
/* ESR_ELx */
#define ESR_ELx_ISS_MASK 0x01ffffff
#define ISS_FP_TFV_SHIFT 23
@@ -365,12 +567,14 @@
#define ISS_MSR_REG_MASK \
(ISS_MSR_OP0_MASK | ISS_MSR_OP2_MASK | ISS_MSR_OP1_MASK | \
ISS_MSR_CRn_MASK | ISS_MSR_CRm_MASK)
+#define __ISS_MSR_REG(op0, op1, crn, crm, op2) \
+ (((op0) << ISS_MSR_OP0_SHIFT) | \
+ ((op1) << ISS_MSR_OP1_SHIFT) | \
+ ((crn) << ISS_MSR_CRn_SHIFT) | \
+ ((crm) << ISS_MSR_CRm_SHIFT) | \
+ ((op2) << ISS_MSR_OP2_SHIFT))
#define ISS_MSR_REG(reg) \
- (((reg ## _op0) << ISS_MSR_OP0_SHIFT) | \
- ((reg ## _op1) << ISS_MSR_OP1_SHIFT) | \
- ((reg ## _CRn) << ISS_MSR_CRn_SHIFT) | \
- ((reg ## _CRm) << ISS_MSR_CRm_SHIFT) | \
- ((reg ## _op2) << ISS_MSR_OP2_SHIFT))
+ __ISS_MSR_REG(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
#define ISS_DATA_ISV_SHIFT 24
#define ISS_DATA_ISV (0x01 << ISS_DATA_ISV_SHIFT)
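
The reworked ISS_MSR_REG() derives the trap encoding from the same per-register op constants. Written out for the one user visible in this hunk:

	/*
	 * CNTPCT_EL0_ISS = ISS_MSR_REG(CNTPCT_EL0)
	 *   -> __ISS_MSR_REG(3, 3, 14, 0, 1)
	 *   -> (3 << ISS_MSR_OP0_SHIFT) | (3 << ISS_MSR_OP1_SHIFT) |
	 *      (14 << ISS_MSR_CRn_SHIFT) | (0 << ISS_MSR_CRm_SHIFT) |
	 *      (1 << ISS_MSR_OP2_SHIFT)
	 *
	 * i.e. the ESR_ELx ISS a trapped "mrs xN, cntpct_el0" reports, up to
	 * the Rt field, which ISS_MSR_REG_MASK excludes when matching.
	 */
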
@@ -447,6 +651,38 @@
#define EXCP_BRKPT_32 0x38 /* 32bits breakpoint */
#define EXCP_BRK 0x3c /* Breakpoint */
+/* ESR_EL1 */
+#define ESR_EL1_REG MRS_REG_ALT_NAME(ESR_EL1)
+#define ESR_EL1_op0 3
+#define ESR_EL1_op1 0
+#define ESR_EL1_CRn 5
+#define ESR_EL1_CRm 2
+#define ESR_EL1_op2 0
+
+/* ESR_EL12 */
+#define ESR_EL12_REG MRS_REG_ALT_NAME(ESR_EL12)
+#define ESR_EL12_op0 3
+#define ESR_EL12_op1 5
+#define ESR_EL12_CRn 5
+#define ESR_EL12_CRm 2
+#define ESR_EL12_op2 0
+
+/* FAR_EL1 */
+#define FAR_EL1_REG MRS_REG_ALT_NAME(FAR_EL1)
+#define FAR_EL1_op0 3
+#define FAR_EL1_op1 0
+#define FAR_EL1_CRn 6
+#define FAR_EL1_CRm 0
+#define FAR_EL1_op2 0
+
+/* FAR_EL12 */
+#define FAR_EL12_REG MRS_REG_ALT_NAME(FAR_EL12)
+#define FAR_EL12_op0 3
+#define FAR_EL12_op1 5
+#define FAR_EL12_CRn 6
+#define FAR_EL12_CRm 0
+#define FAR_EL12_op2 0
+
/* ICC_CTLR_EL1 */
#define ICC_CTLR_EL1_EOIMODE (1U << 1)
@@ -460,7 +696,6 @@
#define ICC_PMR_EL1_PRIO_MASK (0xFFUL)
/* ICC_SGI1R_EL1 */
-#define ICC_SGI1R_EL1 MRS_REG(ICC_SGI1R_EL1)
#define ICC_SGI1R_EL1_op0 3
#define ICC_SGI1R_EL1_op1 0
#define ICC_SGI1R_EL1_CRn 12
@@ -490,7 +725,8 @@
#define ICC_SRE_EL1_SRE (1U << 0)
/* ID_AA64AFR0_EL1 */
-#define ID_AA64AFR0_EL1 MRS_REG(ID_AA64AFR0_EL1)
+#define ID_AA64AFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64AFR0_EL1)
+#define ID_AA64AFR0_EL1_ISS ISS_MSR_REG(ID_AA64AFR0_EL1)
#define ID_AA64AFR0_EL1_op0 3
#define ID_AA64AFR0_EL1_op1 0
#define ID_AA64AFR0_EL1_CRn 0
@@ -498,7 +734,8 @@
#define ID_AA64AFR0_EL1_op2 4
/* ID_AA64AFR1_EL1 */
-#define ID_AA64AFR1_EL1 MRS_REG(ID_AA64AFR1_EL1)
+#define ID_AA64AFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64AFR1_EL1)
+#define ID_AA64AFR1_EL1_ISS ISS_MSR_REG(ID_AA64AFR1_EL1)
#define ID_AA64AFR1_EL1_op0 3
#define ID_AA64AFR1_EL1_op1 0
#define ID_AA64AFR1_EL1_CRn 0
@@ -506,13 +743,15 @@
#define ID_AA64AFR1_EL1_op2 5
/* ID_AA64DFR0_EL1 */
-#define ID_AA64DFR0_EL1 MRS_REG(ID_AA64DFR0_EL1)
+#define ID_AA64DFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64DFR0_EL1)
+#define ID_AA64DFR0_EL1_ISS ISS_MSR_REG(ID_AA64DFR0_EL1)
#define ID_AA64DFR0_EL1_op0 3
#define ID_AA64DFR0_EL1_op1 0
#define ID_AA64DFR0_EL1_CRn 0
#define ID_AA64DFR0_EL1_CRm 5
#define ID_AA64DFR0_EL1_op2 0
#define ID_AA64DFR0_DebugVer_SHIFT 0
+#define ID_AA64DFR0_DebugVer_WIDTH 4
#define ID_AA64DFR0_DebugVer_MASK (UL(0xf) << ID_AA64DFR0_DebugVer_SHIFT)
#define ID_AA64DFR0_DebugVer_VAL(x) ((x) & ID_AA64DFR0_DebugVer_MASK)
#define ID_AA64DFR0_DebugVer_8 (UL(0x6) << ID_AA64DFR0_DebugVer_SHIFT)
@@ -520,12 +759,15 @@
#define ID_AA64DFR0_DebugVer_8_2 (UL(0x8) << ID_AA64DFR0_DebugVer_SHIFT)
#define ID_AA64DFR0_DebugVer_8_4 (UL(0x9) << ID_AA64DFR0_DebugVer_SHIFT)
#define ID_AA64DFR0_DebugVer_8_8 (UL(0xa) << ID_AA64DFR0_DebugVer_SHIFT)
+#define ID_AA64DFR0_DebugVer_8_9 (UL(0xb) << ID_AA64DFR0_DebugVer_SHIFT)
#define ID_AA64DFR0_TraceVer_SHIFT 4
+#define ID_AA64DFR0_TraceVer_WIDTH 4
#define ID_AA64DFR0_TraceVer_MASK (UL(0xf) << ID_AA64DFR0_TraceVer_SHIFT)
#define ID_AA64DFR0_TraceVer_VAL(x) ((x) & ID_AA64DFR0_TraceVer_MASK)
#define ID_AA64DFR0_TraceVer_NONE (UL(0x0) << ID_AA64DFR0_TraceVer_SHIFT)
#define ID_AA64DFR0_TraceVer_IMPL (UL(0x1) << ID_AA64DFR0_TraceVer_SHIFT)
#define ID_AA64DFR0_PMUVer_SHIFT 8
+#define ID_AA64DFR0_PMUVer_WIDTH 4
#define ID_AA64DFR0_PMUVer_MASK (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
#define ID_AA64DFR0_PMUVer_VAL(x) ((x) & ID_AA64DFR0_PMUVer_MASK)
#define ID_AA64DFR0_PMUVer_NONE (UL(0x0) << ID_AA64DFR0_PMUVer_SHIFT)
@@ -535,25 +777,31 @@
#define ID_AA64DFR0_PMUVer_3_5 (UL(0x6) << ID_AA64DFR0_PMUVer_SHIFT)
#define ID_AA64DFR0_PMUVer_3_7 (UL(0x7) << ID_AA64DFR0_PMUVer_SHIFT)
#define ID_AA64DFR0_PMUVer_3_8 (UL(0x8) << ID_AA64DFR0_PMUVer_SHIFT)
+#define ID_AA64DFR0_PMUVer_3_9 (UL(0x9) << ID_AA64DFR0_PMUVer_SHIFT)
#define ID_AA64DFR0_PMUVer_IMPL (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
#define ID_AA64DFR0_BRPs_SHIFT 12
+#define ID_AA64DFR0_BRPs_WIDTH 4
#define ID_AA64DFR0_BRPs_MASK (UL(0xf) << ID_AA64DFR0_BRPs_SHIFT)
#define ID_AA64DFR0_BRPs_VAL(x) \
((((x) >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) + 1)
#define ID_AA64DFR0_PMSS_SHIFT 16
+#define ID_AA64DFR0_PMSS_WIDTH 4
#define ID_AA64DFR0_PMSS_MASK (UL(0xf) << ID_AA64DFR0_PMSS_SHIFT)
#define ID_AA64DFR0_PMSS_VAL(x) ((x) & ID_AA64DFR0_PMSS_MASK)
#define ID_AA64DFR0_PMSS_NONE (UL(0x0) << ID_AA64DFR0_PMSS_SHIFT)
#define ID_AA64DFR0_PMSS_IMPL (UL(0x1) << ID_AA64DFR0_PMSS_SHIFT)
#define ID_AA64DFR0_WRPs_SHIFT 20
+#define ID_AA64DFR0_WRPs_WIDTH 4
#define ID_AA64DFR0_WRPs_MASK (UL(0xf) << ID_AA64DFR0_WRPs_SHIFT)
#define ID_AA64DFR0_WRPs_VAL(x) \
((((x) >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) + 1)
#define ID_AA64DFR0_CTX_CMPs_SHIFT 28
+#define ID_AA64DFR0_CTX_CMPs_WIDTH 4
#define ID_AA64DFR0_CTX_CMPs_MASK (UL(0xf) << ID_AA64DFR0_CTX_CMPs_SHIFT)
#define ID_AA64DFR0_CTX_CMPs_VAL(x) \
((((x) >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) + 1)
#define ID_AA64DFR0_PMSVer_SHIFT 32
+#define ID_AA64DFR0_PMSVer_WIDTH 4
#define ID_AA64DFR0_PMSVer_MASK (UL(0xf) << ID_AA64DFR0_PMSVer_SHIFT)
#define ID_AA64DFR0_PMSVer_VAL(x) ((x) & ID_AA64DFR0_PMSVer_MASK)
#define ID_AA64DFR0_PMSVer_NONE (UL(0x0) << ID_AA64DFR0_PMSVer_SHIFT)
@@ -561,147 +809,192 @@
#define ID_AA64DFR0_PMSVer_SPE_1_1 (UL(0x2) << ID_AA64DFR0_PMSVer_SHIFT)
#define ID_AA64DFR0_PMSVer_SPE_1_2 (UL(0x3) << ID_AA64DFR0_PMSVer_SHIFT)
#define ID_AA64DFR0_PMSVer_SPE_1_3 (UL(0x4) << ID_AA64DFR0_PMSVer_SHIFT)
+#define ID_AA64DFR0_PMSVer_SPE_1_4 (UL(0x5) << ID_AA64DFR0_PMSVer_SHIFT)
#define ID_AA64DFR0_DoubleLock_SHIFT 36
+#define ID_AA64DFR0_DoubleLock_WIDTH 4
#define ID_AA64DFR0_DoubleLock_MASK (UL(0xf) << ID_AA64DFR0_DoubleLock_SHIFT)
#define ID_AA64DFR0_DoubleLock_VAL(x) ((x) & ID_AA64DFR0_DoubleLock_MASK)
#define ID_AA64DFR0_DoubleLock_IMPL (UL(0x0) << ID_AA64DFR0_DoubleLock_SHIFT)
#define ID_AA64DFR0_DoubleLock_NONE (UL(0xf) << ID_AA64DFR0_DoubleLock_SHIFT)
#define ID_AA64DFR0_TraceFilt_SHIFT 40
+#define ID_AA64DFR0_TraceFilt_WIDTH 4
#define ID_AA64DFR0_TraceFilt_MASK (UL(0xf) << ID_AA64DFR0_TraceFilt_SHIFT)
#define ID_AA64DFR0_TraceFilt_VAL(x) ((x) & ID_AA64DFR0_TraceFilt_MASK)
#define ID_AA64DFR0_TraceFilt_NONE (UL(0x0) << ID_AA64DFR0_TraceFilt_SHIFT)
#define ID_AA64DFR0_TraceFilt_8_4 (UL(0x1) << ID_AA64DFR0_TraceFilt_SHIFT)
#define ID_AA64DFR0_TraceBuffer_SHIFT 44
+#define ID_AA64DFR0_TraceBuffer_WIDTH 4
#define ID_AA64DFR0_TraceBuffer_MASK (UL(0xf) << ID_AA64DFR0_TraceBuffer_SHIFT)
#define ID_AA64DFR0_TraceBuffer_VAL(x) ((x) & ID_AA64DFR0_TraceBuffer_MASK)
#define ID_AA64DFR0_TraceBuffer_NONE (UL(0x0) << ID_AA64DFR0_TraceBuffer_SHIFT)
#define ID_AA64DFR0_TraceBuffer_IMPL (UL(0x1) << ID_AA64DFR0_TraceBuffer_SHIFT)
#define ID_AA64DFR0_MTPMU_SHIFT 48
+#define ID_AA64DFR0_MTPMU_WIDTH 4
#define ID_AA64DFR0_MTPMU_MASK (UL(0xf) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_MTPMU_VAL(x) ((x) & ID_AA64DFR0_MTPMU_MASK)
#define ID_AA64DFR0_MTPMU_NONE (UL(0x0) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_MTPMU_IMPL (UL(0x1) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_MTPMU_NONE_MT_RES0 (UL(0xf) << ID_AA64DFR0_MTPMU_SHIFT)
#define ID_AA64DFR0_BRBE_SHIFT 52
+#define ID_AA64DFR0_BRBE_WIDTH 4
#define ID_AA64DFR0_BRBE_MASK (UL(0xf) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_BRBE_VAL(x) ((x) & ID_AA64DFR0_BRBE_MASK)
#define ID_AA64DFR0_BRBE_NONE (UL(0x0) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_BRBE_IMPL (UL(0x1) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_BRBE_EL3 (UL(0x2) << ID_AA64DFR0_BRBE_SHIFT)
#define ID_AA64DFR0_HPMN0_SHIFT 60
+#define ID_AA64DFR0_HPMN0_WIDTH 4
#define ID_AA64DFR0_HPMN0_MASK (UL(0xf) << ID_AA64DFR0_HPMN0_SHIFT)
#define ID_AA64DFR0_HPMN0_VAL(x) ((x) & ID_AA64DFR0_HPMN0_MASK)
#define ID_AA64DFR0_HPMN0_CONSTR (UL(0x0) << ID_AA64DFR0_HPMN0_SHIFT)
#define ID_AA64DFR0_HPMN0_DEFINED (UL(0x1) << ID_AA64DFR0_HPMN0_SHIFT)
/* ID_AA64DFR1_EL1 */
-#define ID_AA64DFR1_EL1 MRS_REG(ID_AA64DFR1_EL1)
+#define ID_AA64DFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64DFR1_EL1)
+#define ID_AA64DFR1_EL1_ISS ISS_MSR_REG(ID_AA64DFR1_EL1)
#define ID_AA64DFR1_EL1_op0 3
#define ID_AA64DFR1_EL1_op1 0
#define ID_AA64DFR1_EL1_CRn 0
#define ID_AA64DFR1_EL1_CRm 5
#define ID_AA64DFR1_EL1_op2 1
+#define ID_AA64DFR1_SPMU_SHIFT 32
+#define ID_AA64DFR1_SPMU_WIDTH 4
+#define ID_AA64DFR1_SPMU_MASK (UL(0xf) << ID_AA64DFR1_SPMU_SHIFT)
+#define ID_AA64DFR1_SPMU_VAL(x) ((x) & ID_AA64DFR1_SPMU_MASK)
+#define ID_AA64DFR1_SPMU_NONE (UL(0x0) << ID_AA64DFR1_SPMU_SHIFT)
+#define ID_AA64DFR1_SPMU_IMPL (UL(0x1) << ID_AA64DFR1_SPMU_SHIFT)
+#define ID_AA64DFR1_PMICNTR_SHIFT 36
+#define ID_AA64DFR1_PMICNTR_WIDTH 4
+#define ID_AA64DFR1_PMICNTR_MASK (UL(0xf) << ID_AA64DFR1_PMICNTR_SHIFT)
+#define ID_AA64DFR1_PMICNTR_VAL(x) ((x) & ID_AA64DFR1_PMICNTR_MASK)
+#define ID_AA64DFR1_PMICNTR_NONE (UL(0x0) << ID_AA64DFR1_PMICNTR_SHIFT)
+#define ID_AA64DFR1_PMICNTR_IMPL (UL(0x1) << ID_AA64DFR1_PMICNTR_SHIFT)
+#define ID_AA64DFR1_DPFZS_SHIFT 52
+#define ID_AA64DFR1_DPFZS_WIDTH 4
+#define ID_AA64DFR1_DPFZS_MASK (UL(0xf) << ID_AA64DFR1_DPFZS_SHIFT)
+#define ID_AA64DFR1_DPFZS_VAL(x) ((x) & ID_AA64DFR1_DPFZS_MASK)
+#define ID_AA64DFR1_DPFZS_NONE (UL(0x0) << ID_AA64DFR1_DPFZS_SHIFT)
+#define ID_AA64DFR1_DPFZS_IMPL (UL(0x1) << ID_AA64DFR1_DPFZS_SHIFT)
/* ID_AA64ISAR0_EL1 */
-#define ID_AA64ISAR0_EL1 MRS_REG(ID_AA64ISAR0_EL1)
+#define ID_AA64ISAR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64ISAR0_EL1)
+#define ID_AA64ISAR0_EL1_ISS ISS_MSR_REG(ID_AA64ISAR0_EL1)
#define ID_AA64ISAR0_EL1_op0 3
#define ID_AA64ISAR0_EL1_op1 0
#define ID_AA64ISAR0_EL1_CRn 0
#define ID_AA64ISAR0_EL1_CRm 6
#define ID_AA64ISAR0_EL1_op2 0
#define ID_AA64ISAR0_AES_SHIFT 4
+#define ID_AA64ISAR0_AES_WIDTH 4
#define ID_AA64ISAR0_AES_MASK (UL(0xf) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_AES_VAL(x) ((x) & ID_AA64ISAR0_AES_MASK)
#define ID_AA64ISAR0_AES_NONE (UL(0x0) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_AES_BASE (UL(0x1) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_AES_PMULL (UL(0x2) << ID_AA64ISAR0_AES_SHIFT)
#define ID_AA64ISAR0_SHA1_SHIFT 8
+#define ID_AA64ISAR0_SHA1_WIDTH 4
#define ID_AA64ISAR0_SHA1_MASK (UL(0xf) << ID_AA64ISAR0_SHA1_SHIFT)
#define ID_AA64ISAR0_SHA1_VAL(x) ((x) & ID_AA64ISAR0_SHA1_MASK)
#define ID_AA64ISAR0_SHA1_NONE (UL(0x0) << ID_AA64ISAR0_SHA1_SHIFT)
#define ID_AA64ISAR0_SHA1_BASE (UL(0x1) << ID_AA64ISAR0_SHA1_SHIFT)
#define ID_AA64ISAR0_SHA2_SHIFT 12
+#define ID_AA64ISAR0_SHA2_WIDTH 4
#define ID_AA64ISAR0_SHA2_MASK (UL(0xf) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_SHA2_VAL(x) ((x) & ID_AA64ISAR0_SHA2_MASK)
#define ID_AA64ISAR0_SHA2_NONE (UL(0x0) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_SHA2_BASE (UL(0x1) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_SHA2_512 (UL(0x2) << ID_AA64ISAR0_SHA2_SHIFT)
#define ID_AA64ISAR0_CRC32_SHIFT 16
+#define ID_AA64ISAR0_CRC32_WIDTH 4
#define ID_AA64ISAR0_CRC32_MASK (UL(0xf) << ID_AA64ISAR0_CRC32_SHIFT)
#define ID_AA64ISAR0_CRC32_VAL(x) ((x) & ID_AA64ISAR0_CRC32_MASK)
#define ID_AA64ISAR0_CRC32_NONE (UL(0x0) << ID_AA64ISAR0_CRC32_SHIFT)
#define ID_AA64ISAR0_CRC32_BASE (UL(0x1) << ID_AA64ISAR0_CRC32_SHIFT)
#define ID_AA64ISAR0_Atomic_SHIFT 20
+#define ID_AA64ISAR0_Atomic_WIDTH 4
#define ID_AA64ISAR0_Atomic_MASK (UL(0xf) << ID_AA64ISAR0_Atomic_SHIFT)
#define ID_AA64ISAR0_Atomic_VAL(x) ((x) & ID_AA64ISAR0_Atomic_MASK)
#define ID_AA64ISAR0_Atomic_NONE (UL(0x0) << ID_AA64ISAR0_Atomic_SHIFT)
#define ID_AA64ISAR0_Atomic_IMPL (UL(0x2) << ID_AA64ISAR0_Atomic_SHIFT)
#define ID_AA64ISAR0_TME_SHIFT 24
+#define ID_AA64ISAR0_TME_WIDTH 4
#define ID_AA64ISAR0_TME_MASK (UL(0xf) << ID_AA64ISAR0_TME_SHIFT)
#define ID_AA64ISAR0_TME_NONE (UL(0x0) << ID_AA64ISAR0_TME_SHIFT)
#define ID_AA64ISAR0_TME_IMPL (UL(0x1) << ID_AA64ISAR0_TME_SHIFT)
#define ID_AA64ISAR0_RDM_SHIFT 28
+#define ID_AA64ISAR0_RDM_WIDTH 4
#define ID_AA64ISAR0_RDM_MASK (UL(0xf) << ID_AA64ISAR0_RDM_SHIFT)
#define ID_AA64ISAR0_RDM_VAL(x) ((x) & ID_AA64ISAR0_RDM_MASK)
#define ID_AA64ISAR0_RDM_NONE (UL(0x0) << ID_AA64ISAR0_RDM_SHIFT)
#define ID_AA64ISAR0_RDM_IMPL (UL(0x1) << ID_AA64ISAR0_RDM_SHIFT)
#define ID_AA64ISAR0_SHA3_SHIFT 32
+#define ID_AA64ISAR0_SHA3_WIDTH 4
#define ID_AA64ISAR0_SHA3_MASK (UL(0xf) << ID_AA64ISAR0_SHA3_SHIFT)
#define ID_AA64ISAR0_SHA3_VAL(x) ((x) & ID_AA64ISAR0_SHA3_MASK)
#define ID_AA64ISAR0_SHA3_NONE (UL(0x0) << ID_AA64ISAR0_SHA3_SHIFT)
#define ID_AA64ISAR0_SHA3_IMPL (UL(0x1) << ID_AA64ISAR0_SHA3_SHIFT)
#define ID_AA64ISAR0_SM3_SHIFT 36
+#define ID_AA64ISAR0_SM3_WIDTH 4
#define ID_AA64ISAR0_SM3_MASK (UL(0xf) << ID_AA64ISAR0_SM3_SHIFT)
#define ID_AA64ISAR0_SM3_VAL(x) ((x) & ID_AA64ISAR0_SM3_MASK)
#define ID_AA64ISAR0_SM3_NONE (UL(0x0) << ID_AA64ISAR0_SM3_SHIFT)
#define ID_AA64ISAR0_SM3_IMPL (UL(0x1) << ID_AA64ISAR0_SM3_SHIFT)
#define ID_AA64ISAR0_SM4_SHIFT 40
+#define ID_AA64ISAR0_SM4_WIDTH 4
#define ID_AA64ISAR0_SM4_MASK (UL(0xf) << ID_AA64ISAR0_SM4_SHIFT)
#define ID_AA64ISAR0_SM4_VAL(x) ((x) & ID_AA64ISAR0_SM4_MASK)
#define ID_AA64ISAR0_SM4_NONE (UL(0x0) << ID_AA64ISAR0_SM4_SHIFT)
#define ID_AA64ISAR0_SM4_IMPL (UL(0x1) << ID_AA64ISAR0_SM4_SHIFT)
#define ID_AA64ISAR0_DP_SHIFT 44
+#define ID_AA64ISAR0_DP_WIDTH 4
#define ID_AA64ISAR0_DP_MASK (UL(0xf) << ID_AA64ISAR0_DP_SHIFT)
#define ID_AA64ISAR0_DP_VAL(x) ((x) & ID_AA64ISAR0_DP_MASK)
#define ID_AA64ISAR0_DP_NONE (UL(0x0) << ID_AA64ISAR0_DP_SHIFT)
#define ID_AA64ISAR0_DP_IMPL (UL(0x1) << ID_AA64ISAR0_DP_SHIFT)
#define ID_AA64ISAR0_FHM_SHIFT 48
+#define ID_AA64ISAR0_FHM_WIDTH 4
#define ID_AA64ISAR0_FHM_MASK (UL(0xf) << ID_AA64ISAR0_FHM_SHIFT)
#define ID_AA64ISAR0_FHM_VAL(x) ((x) & ID_AA64ISAR0_FHM_MASK)
#define ID_AA64ISAR0_FHM_NONE (UL(0x0) << ID_AA64ISAR0_FHM_SHIFT)
#define ID_AA64ISAR0_FHM_IMPL (UL(0x1) << ID_AA64ISAR0_FHM_SHIFT)
#define ID_AA64ISAR0_TS_SHIFT 52
+#define ID_AA64ISAR0_TS_WIDTH 4
#define ID_AA64ISAR0_TS_MASK (UL(0xf) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TS_VAL(x) ((x) & ID_AA64ISAR0_TS_MASK)
#define ID_AA64ISAR0_TS_NONE (UL(0x0) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TS_CondM_8_4 (UL(0x1) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TS_CondM_8_5 (UL(0x2) << ID_AA64ISAR0_TS_SHIFT)
#define ID_AA64ISAR0_TLB_SHIFT 56
+#define ID_AA64ISAR0_TLB_WIDTH 4
#define ID_AA64ISAR0_TLB_MASK (UL(0xf) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_TLB_VAL(x) ((x) & ID_AA64ISAR0_TLB_MASK)
#define ID_AA64ISAR0_TLB_NONE (UL(0x0) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_TLB_TLBIOS (UL(0x1) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_TLB_TLBIOSR (UL(0x2) << ID_AA64ISAR0_TLB_SHIFT)
#define ID_AA64ISAR0_RNDR_SHIFT 60
+#define ID_AA64ISAR0_RNDR_WIDTH 4
#define ID_AA64ISAR0_RNDR_MASK (UL(0xf) << ID_AA64ISAR0_RNDR_SHIFT)
#define ID_AA64ISAR0_RNDR_VAL(x) ((x) & ID_AA64ISAR0_RNDR_MASK)
#define ID_AA64ISAR0_RNDR_NONE (UL(0x0) << ID_AA64ISAR0_RNDR_SHIFT)
#define ID_AA64ISAR0_RNDR_IMPL (UL(0x1) << ID_AA64ISAR0_RNDR_SHIFT)
/* ID_AA64ISAR1_EL1 */
-#define ID_AA64ISAR1_EL1 MRS_REG(ID_AA64ISAR1_EL1)
+#define ID_AA64ISAR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64ISAR1_EL1)
+#define ID_AA64ISAR1_EL1_ISS ISS_MSR_REG(ID_AA64ISAR1_EL1)
#define ID_AA64ISAR1_EL1_op0 3
#define ID_AA64ISAR1_EL1_op1 0
#define ID_AA64ISAR1_EL1_CRn 0
#define ID_AA64ISAR1_EL1_CRm 6
#define ID_AA64ISAR1_EL1_op2 1
#define ID_AA64ISAR1_DPB_SHIFT 0
+#define ID_AA64ISAR1_DPB_WIDTH 4
#define ID_AA64ISAR1_DPB_MASK (UL(0xf) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_DPB_VAL(x) ((x) & ID_AA64ISAR1_DPB_MASK)
#define ID_AA64ISAR1_DPB_NONE (UL(0x0) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_DPB_DCCVAP (UL(0x1) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_DPB_DCCVADP (UL(0x2) << ID_AA64ISAR1_DPB_SHIFT)
#define ID_AA64ISAR1_APA_SHIFT 4
+#define ID_AA64ISAR1_APA_WIDTH 4
#define ID_AA64ISAR1_APA_MASK (UL(0xf) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_VAL(x) ((x) & ID_AA64ISAR1_APA_MASK)
#define ID_AA64ISAR1_APA_NONE (UL(0x0) << ID_AA64ISAR1_APA_SHIFT)
@@ -711,6 +1004,7 @@
#define ID_AA64ISAR1_APA_FPAC (UL(0x4) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_APA_FPAC_COMBINED (UL(0x5) << ID_AA64ISAR1_APA_SHIFT)
#define ID_AA64ISAR1_API_SHIFT 8
+#define ID_AA64ISAR1_API_WIDTH 4
#define ID_AA64ISAR1_API_MASK (UL(0xf) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_VAL(x) ((x) & ID_AA64ISAR1_API_MASK)
#define ID_AA64ISAR1_API_NONE (UL(0x0) << ID_AA64ISAR1_API_SHIFT)
@@ -720,68 +1014,82 @@
#define ID_AA64ISAR1_API_FPAC (UL(0x4) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_API_FPAC_COMBINED (UL(0x5) << ID_AA64ISAR1_API_SHIFT)
#define ID_AA64ISAR1_JSCVT_SHIFT 12
+#define ID_AA64ISAR1_JSCVT_WIDTH 4
#define ID_AA64ISAR1_JSCVT_MASK (UL(0xf) << ID_AA64ISAR1_JSCVT_SHIFT)
#define ID_AA64ISAR1_JSCVT_VAL(x) ((x) & ID_AA64ISAR1_JSCVT_MASK)
#define ID_AA64ISAR1_JSCVT_NONE (UL(0x0) << ID_AA64ISAR1_JSCVT_SHIFT)
#define ID_AA64ISAR1_JSCVT_IMPL (UL(0x1) << ID_AA64ISAR1_JSCVT_SHIFT)
#define ID_AA64ISAR1_FCMA_SHIFT 16
+#define ID_AA64ISAR1_FCMA_WIDTH 4
#define ID_AA64ISAR1_FCMA_MASK (UL(0xf) << ID_AA64ISAR1_FCMA_SHIFT)
#define ID_AA64ISAR1_FCMA_VAL(x) ((x) & ID_AA64ISAR1_FCMA_MASK)
#define ID_AA64ISAR1_FCMA_NONE (UL(0x0) << ID_AA64ISAR1_FCMA_SHIFT)
#define ID_AA64ISAR1_FCMA_IMPL (UL(0x1) << ID_AA64ISAR1_FCMA_SHIFT)
#define ID_AA64ISAR1_LRCPC_SHIFT 20
+#define ID_AA64ISAR1_LRCPC_WIDTH 4
#define ID_AA64ISAR1_LRCPC_MASK (UL(0xf) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_LRCPC_VAL(x) ((x) & ID_AA64ISAR1_LRCPC_MASK)
#define ID_AA64ISAR1_LRCPC_NONE (UL(0x0) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_LRCPC_RCPC_8_3 (UL(0x1) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_LRCPC_RCPC_8_4 (UL(0x2) << ID_AA64ISAR1_LRCPC_SHIFT)
#define ID_AA64ISAR1_GPA_SHIFT 24
+#define ID_AA64ISAR1_GPA_WIDTH 4
#define ID_AA64ISAR1_GPA_MASK (UL(0xf) << ID_AA64ISAR1_GPA_SHIFT)
#define ID_AA64ISAR1_GPA_VAL(x) ((x) & ID_AA64ISAR1_GPA_MASK)
#define ID_AA64ISAR1_GPA_NONE (UL(0x0) << ID_AA64ISAR1_GPA_SHIFT)
#define ID_AA64ISAR1_GPA_IMPL (UL(0x1) << ID_AA64ISAR1_GPA_SHIFT)
#define ID_AA64ISAR1_GPI_SHIFT 28
+#define ID_AA64ISAR1_GPI_WIDTH 4
#define ID_AA64ISAR1_GPI_MASK (UL(0xf) << ID_AA64ISAR1_GPI_SHIFT)
#define ID_AA64ISAR1_GPI_VAL(x) ((x) & ID_AA64ISAR1_GPI_MASK)
#define ID_AA64ISAR1_GPI_NONE (UL(0x0) << ID_AA64ISAR1_GPI_SHIFT)
#define ID_AA64ISAR1_GPI_IMPL (UL(0x1) << ID_AA64ISAR1_GPI_SHIFT)
#define ID_AA64ISAR1_FRINTTS_SHIFT 32
+#define ID_AA64ISAR1_FRINTTS_WIDTH 4
#define ID_AA64ISAR1_FRINTTS_MASK (UL(0xf) << ID_AA64ISAR1_FRINTTS_SHIFT)
#define ID_AA64ISAR1_FRINTTS_VAL(x) ((x) & ID_AA64ISAR1_FRINTTS_MASK)
#define ID_AA64ISAR1_FRINTTS_NONE (UL(0x0) << ID_AA64ISAR1_FRINTTS_SHIFT)
#define ID_AA64ISAR1_FRINTTS_IMPL (UL(0x1) << ID_AA64ISAR1_FRINTTS_SHIFT)
#define ID_AA64ISAR1_SB_SHIFT 36
+#define ID_AA64ISAR1_SB_WIDTH 4
#define ID_AA64ISAR1_SB_MASK (UL(0xf) << ID_AA64ISAR1_SB_SHIFT)
#define ID_AA64ISAR1_SB_VAL(x) ((x) & ID_AA64ISAR1_SB_MASK)
#define ID_AA64ISAR1_SB_NONE (UL(0x0) << ID_AA64ISAR1_SB_SHIFT)
#define ID_AA64ISAR1_SB_IMPL (UL(0x1) << ID_AA64ISAR1_SB_SHIFT)
#define ID_AA64ISAR1_SPECRES_SHIFT 40
+#define ID_AA64ISAR1_SPECRES_WIDTH 4
#define ID_AA64ISAR1_SPECRES_MASK (UL(0xf) << ID_AA64ISAR1_SPECRES_SHIFT)
#define ID_AA64ISAR1_SPECRES_VAL(x) ((x) & ID_AA64ISAR1_SPECRES_MASK)
#define ID_AA64ISAR1_SPECRES_NONE (UL(0x0) << ID_AA64ISAR1_SPECRES_SHIFT)
-#define ID_AA64ISAR1_SPECRES_IMPL (UL(0x1) << ID_AA64ISAR1_SPECRES_SHIFT)
+#define ID_AA64ISAR1_SPECRES_8_5 (UL(0x1) << ID_AA64ISAR1_SPECRES_SHIFT)
+#define ID_AA64ISAR1_SPECRES_8_9 (UL(0x2) << ID_AA64ISAR1_SPECRES_SHIFT)
#define ID_AA64ISAR1_BF16_SHIFT 44
+#define ID_AA64ISAR1_BF16_WIDTH 4
#define ID_AA64ISAR1_BF16_MASK (UL(0xf) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_BF16_VAL(x) ((x) & ID_AA64ISAR1_BF16_MASK)
#define ID_AA64ISAR1_BF16_NONE (UL(0x0) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_BF16_IMPL (UL(0x1) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_BF16_EBF (UL(0x2) << ID_AA64ISAR1_BF16_SHIFT)
#define ID_AA64ISAR1_DGH_SHIFT 48
+#define ID_AA64ISAR1_DGH_WIDTH 4
#define ID_AA64ISAR1_DGH_MASK (UL(0xf) << ID_AA64ISAR1_DGH_SHIFT)
#define ID_AA64ISAR1_DGH_VAL(x) ((x) & ID_AA64ISAR1_DGH_MASK)
#define ID_AA64ISAR1_DGH_NONE (UL(0x0) << ID_AA64ISAR1_DGH_SHIFT)
#define ID_AA64ISAR1_DGH_IMPL (UL(0x1) << ID_AA64ISAR1_DGH_SHIFT)
#define ID_AA64ISAR1_I8MM_SHIFT 52
+#define ID_AA64ISAR1_I8MM_WIDTH 4
#define ID_AA64ISAR1_I8MM_MASK (UL(0xf) << ID_AA64ISAR1_I8MM_SHIFT)
#define ID_AA64ISAR1_I8MM_VAL(x) ((x) & ID_AA64ISAR1_I8MM_MASK)
#define ID_AA64ISAR1_I8MM_NONE (UL(0x0) << ID_AA64ISAR1_I8MM_SHIFT)
#define ID_AA64ISAR1_I8MM_IMPL (UL(0x1) << ID_AA64ISAR1_I8MM_SHIFT)
#define ID_AA64ISAR1_XS_SHIFT 56
+#define ID_AA64ISAR1_XS_WIDTH 4
#define ID_AA64ISAR1_XS_MASK (UL(0xf) << ID_AA64ISAR1_XS_SHIFT)
#define ID_AA64ISAR1_XS_VAL(x) ((x) & ID_AA64ISAR1_XS_MASK)
#define ID_AA64ISAR1_XS_NONE (UL(0x0) << ID_AA64ISAR1_XS_SHIFT)
#define ID_AA64ISAR1_XS_IMPL (UL(0x1) << ID_AA64ISAR1_XS_SHIFT)
#define ID_AA64ISAR1_LS64_SHIFT 60
+#define ID_AA64ISAR1_LS64_WIDTH 4
#define ID_AA64ISAR1_LS64_MASK (UL(0xf) << ID_AA64ISAR1_LS64_SHIFT)
#define ID_AA64ISAR1_LS64_VAL(x) ((x) & ID_AA64ISAR1_LS64_MASK)
#define ID_AA64ISAR1_LS64_NONE (UL(0x0) << ID_AA64ISAR1_LS64_SHIFT)
@@ -790,28 +1098,33 @@
#define ID_AA64ISAR1_LS64_ACCDATA (UL(0x3) << ID_AA64ISAR1_LS64_SHIFT)
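/*
 * Editorial sketch, not part of this change: the *_VAL() macros return
 * the field masked in place, so results compare directly against the
 * shifted constants above. READ_SPECIALREG() is FreeBSD's MSR accessor
 * from machine/cpufunc.h; the variable names are hypothetical.
 */
#if 0	/* example only */
	uint64_t isar1 = READ_SPECIALREG(id_aa64isar1_el1);
	if (ID_AA64ISAR1_APA_VAL(isar1) != ID_AA64ISAR1_APA_NONE)
		has_pauth = true;	/* QARMA5 address authentication */
#endif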
/* ID_AA64ISAR2_EL1 */
-#define ID_AA64ISAR2_EL1 MRS_REG(ID_AA64ISAR2_EL1)
+#define ID_AA64ISAR2_EL1_REG MRS_REG_ALT_NAME(ID_AA64ISAR2_EL1)
+#define ID_AA64ISAR2_EL1_ISS ISS_MSR_REG(ID_AA64ISAR2_EL1)
#define ID_AA64ISAR2_EL1_op0 3
#define ID_AA64ISAR2_EL1_op1 0
#define ID_AA64ISAR2_EL1_CRn 0
#define ID_AA64ISAR2_EL1_CRm 6
#define ID_AA64ISAR2_EL1_op2 2
#define ID_AA64ISAR2_WFxT_SHIFT 0
+#define ID_AA64ISAR2_WFxT_WIDTH 4
#define ID_AA64ISAR2_WFxT_MASK (UL(0xf) << ID_AA64ISAR2_WFxT_SHIFT)
#define ID_AA64ISAR2_WFxT_VAL(x) ((x) & ID_AA64ISAR2_WFxT_MASK)
#define ID_AA64ISAR2_WFxT_NONE (UL(0x0) << ID_AA64ISAR2_WFxT_SHIFT)
-#define ID_AA64ISAR2_WFxT_IMPL (UL(0x1) << ID_AA64ISAR2_WFxT_SHIFT)
+#define ID_AA64ISAR2_WFxT_IMPL (UL(0x2) << ID_AA64ISAR2_WFxT_SHIFT)
#define ID_AA64ISAR2_RPRES_SHIFT 4
+#define ID_AA64ISAR2_RPRES_WIDTH 4
#define ID_AA64ISAR2_RPRES_MASK (UL(0xf) << ID_AA64ISAR2_RPRES_SHIFT)
#define ID_AA64ISAR2_RPRES_VAL(x) ((x) & ID_AA64ISAR2_RPRES_MASK)
#define ID_AA64ISAR2_RPRES_NONE (UL(0x0) << ID_AA64ISAR2_RPRES_SHIFT)
#define ID_AA64ISAR2_RPRES_IMPL (UL(0x1) << ID_AA64ISAR2_RPRES_SHIFT)
#define ID_AA64ISAR2_GPA3_SHIFT 8
+#define ID_AA64ISAR2_GPA3_WIDTH 4
#define ID_AA64ISAR2_GPA3_MASK (UL(0xf) << ID_AA64ISAR2_GPA3_SHIFT)
#define ID_AA64ISAR2_GPA3_VAL(x) ((x) & ID_AA64ISAR2_GPA3_MASK)
#define ID_AA64ISAR2_GPA3_NONE (UL(0x0) << ID_AA64ISAR2_GPA3_SHIFT)
#define ID_AA64ISAR2_GPA3_IMPL (UL(0x1) << ID_AA64ISAR2_GPA3_SHIFT)
#define ID_AA64ISAR2_APA3_SHIFT 12
+#define ID_AA64ISAR2_APA3_WIDTH 4
#define ID_AA64ISAR2_APA3_MASK (UL(0xf) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_VAL(x) ((x) & ID_AA64ISAR2_APA3_MASK)
#define ID_AA64ISAR2_APA3_NONE (UL(0x0) << ID_AA64ISAR2_APA3_SHIFT)
@@ -821,29 +1134,64 @@
#define ID_AA64ISAR2_APA3_FPAC (UL(0x4) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_APA3_FPAC_COMBINED (UL(0x5) << ID_AA64ISAR2_APA3_SHIFT)
#define ID_AA64ISAR2_MOPS_SHIFT 16
+#define ID_AA64ISAR2_MOPS_WIDTH 4
#define ID_AA64ISAR2_MOPS_MASK (UL(0xf) << ID_AA64ISAR2_MOPS_SHIFT)
#define ID_AA64ISAR2_MOPS_VAL(x) ((x) & ID_AA64ISAR2_MOPS_MASK)
#define ID_AA64ISAR2_MOPS_NONE (UL(0x0) << ID_AA64ISAR2_MOPS_SHIFT)
#define ID_AA64ISAR2_MOPS_IMPL (UL(0x1) << ID_AA64ISAR2_MOPS_SHIFT)
#define ID_AA64ISAR2_BC_SHIFT 20
+#define ID_AA64ISAR2_BC_WIDTH 4
#define ID_AA64ISAR2_BC_MASK (UL(0xf) << ID_AA64ISAR2_BC_SHIFT)
#define ID_AA64ISAR2_BC_VAL(x) ((x) & ID_AA64ISAR2_BC_MASK)
#define ID_AA64ISAR2_BC_NONE (UL(0x0) << ID_AA64ISAR2_BC_SHIFT)
#define ID_AA64ISAR2_BC_IMPL (UL(0x1) << ID_AA64ISAR2_BC_SHIFT)
-#define ID_AA64ISAR2_PAC_frac_SHIFT 28
+#define ID_AA64ISAR2_PAC_frac_SHIFT 24
+#define ID_AA64ISAR2_PAC_frac_WIDTH 4
#define ID_AA64ISAR2_PAC_frac_MASK (UL(0xf) << ID_AA64ISAR2_PAC_frac_SHIFT)
#define ID_AA64ISAR2_PAC_frac_VAL(x) ((x) & ID_AA64ISAR2_PAC_frac_MASK)
#define ID_AA64ISAR2_PAC_frac_NONE (UL(0x0) << ID_AA64ISAR2_PAC_frac_SHIFT)
#define ID_AA64ISAR2_PAC_frac_IMPL (UL(0x1) << ID_AA64ISAR2_PAC_frac_SHIFT)
+#define ID_AA64ISAR2_CLRBHB_SHIFT 28
+#define ID_AA64ISAR2_CLRBHB_WIDTH 4
+#define ID_AA64ISAR2_CLRBHB_MASK (UL(0xf) << ID_AA64ISAR2_CLRBHB_SHIFT)
+#define ID_AA64ISAR2_CLRBHB_VAL(x) ((x) & ID_AA64ISAR2_CLRBHB_MASK)
+#define ID_AA64ISAR2_CLRBHB_NONE (UL(0x0) << ID_AA64ISAR2_CLRBHB_SHIFT)
+#define ID_AA64ISAR2_CLRBHB_IMPL (UL(0x1) << ID_AA64ISAR2_CLRBHB_SHIFT)
+#define ID_AA64ISAR2_PRFMSLC_SHIFT 40
+#define ID_AA64ISAR2_PRFMSLC_WIDTH 4
+#define ID_AA64ISAR2_PRFMSLC_MASK (UL(0xf) << ID_AA64ISAR2_PRFMSLC_SHIFT)
+#define ID_AA64ISAR2_PRFMSLC_VAL(x) ((x) & ID_AA64ISAR2_PRFMSLC_MASK)
+#define ID_AA64ISAR2_PRFMSLC_NONE (UL(0x0) << ID_AA64ISAR2_PRFMSLC_SHIFT)
+#define ID_AA64ISAR2_PRFMSLC_IMPL (UL(0x1) << ID_AA64ISAR2_PRFMSLC_SHIFT)
+#define ID_AA64ISAR2_RPRFM_SHIFT 48
+#define ID_AA64ISAR2_RPRFM_WIDTH 4
+#define ID_AA64ISAR2_RPRFM_MASK (UL(0xf) << ID_AA64ISAR2_RPRFM_SHIFT)
+#define ID_AA64ISAR2_RPRFM_VAL(x) ((x) & ID_AA64ISAR2_RPRFM_MASK)
+#define ID_AA64ISAR2_RPRFM_NONE (UL(0x0) << ID_AA64ISAR2_RPRFM_SHIFT)
+#define ID_AA64ISAR2_RPRFM_IMPL (UL(0x1) << ID_AA64ISAR2_RPRFM_SHIFT)
+#define ID_AA64ISAR2_CSSC_SHIFT 52
+#define ID_AA64ISAR2_CSSC_WIDTH 4
+#define ID_AA64ISAR2_CSSC_MASK (UL(0xf) << ID_AA64ISAR2_CSSC_SHIFT)
+#define ID_AA64ISAR2_CSSC_VAL(x) ((x) & ID_AA64ISAR2_CSSC_MASK)
+#define ID_AA64ISAR2_CSSC_NONE (UL(0x0) << ID_AA64ISAR2_CSSC_SHIFT)
+#define ID_AA64ISAR2_CSSC_IMPL (UL(0x1) << ID_AA64ISAR2_CSSC_SHIFT)
+#define ID_AA64ISAR2_ATS1A_SHIFT 60
+#define ID_AA64ISAR2_ATS1A_WIDTH 4
+#define ID_AA64ISAR2_ATS1A_MASK (UL(0xf) << ID_AA64ISAR2_ATS1A_SHIFT)
+#define ID_AA64ISAR2_ATS1A_VAL(x) ((x) & ID_AA64ISAR2_ATS1A_MASK)
+#define ID_AA64ISAR2_ATS1A_NONE (UL(0x0) << ID_AA64ISAR2_ATS1A_SHIFT)
+#define ID_AA64ISAR2_ATS1A_IMPL (UL(0x1) << ID_AA64ISAR2_ATS1A_SHIFT)
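/*
 * Editorial sketch, not part of this change: with WFxT_IMPL corrected
 * to 2 above, a >= comparison on the masked field still works.
 * Hypothetical consumer code.
 */
#if 0	/* example only */
	uint64_t isar2 = READ_SPECIALREG(id_aa64isar2_el1);
	if (ID_AA64ISAR2_WFxT_VAL(isar2) >= ID_AA64ISAR2_WFxT_IMPL)
		has_wfet = true;	/* FEAT_WFxT: WFET/WFIT available */
#endif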
/* ID_AA64MMFR0_EL1 */
-#define ID_AA64MMFR0_EL1 MRS_REG(ID_AA64MMFR0_EL1)
+#define ID_AA64MMFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR0_EL1)
+#define ID_AA64MMFR0_EL1_ISS ISS_MSR_REG(ID_AA64MMFR0_EL1)
#define ID_AA64MMFR0_EL1_op0 3
#define ID_AA64MMFR0_EL1_op1 0
#define ID_AA64MMFR0_EL1_CRn 0
#define ID_AA64MMFR0_EL1_CRm 7
#define ID_AA64MMFR0_EL1_op2 0
#define ID_AA64MMFR0_PARange_SHIFT 0
+#define ID_AA64MMFR0_PARange_WIDTH 4
#define ID_AA64MMFR0_PARange_MASK (UL(0xf) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_VAL(x) ((x) & ID_AA64MMFR0_PARange_MASK)
#define ID_AA64MMFR0_PARange_4G (UL(0x0) << ID_AA64MMFR0_PARange_SHIFT)
@@ -854,43 +1202,51 @@
#define ID_AA64MMFR0_PARange_256T (UL(0x5) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_PARange_4P (UL(0x6) << ID_AA64MMFR0_PARange_SHIFT)
#define ID_AA64MMFR0_ASIDBits_SHIFT 4
+#define ID_AA64MMFR0_ASIDBits_WIDTH 4
#define ID_AA64MMFR0_ASIDBits_MASK (UL(0xf) << ID_AA64MMFR0_ASIDBits_SHIFT)
#define ID_AA64MMFR0_ASIDBits_VAL(x) ((x) & ID_AA64MMFR0_ASIDBits_MASK)
#define ID_AA64MMFR0_ASIDBits_8 (UL(0x0) << ID_AA64MMFR0_ASIDBits_SHIFT)
#define ID_AA64MMFR0_ASIDBits_16 (UL(0x2) << ID_AA64MMFR0_ASIDBits_SHIFT)
#define ID_AA64MMFR0_BigEnd_SHIFT 8
+#define ID_AA64MMFR0_BigEnd_WIDTH 4
#define ID_AA64MMFR0_BigEnd_MASK (UL(0xf) << ID_AA64MMFR0_BigEnd_SHIFT)
#define ID_AA64MMFR0_BigEnd_VAL(x) ((x) & ID_AA64MMFR0_BigEnd_MASK)
#define ID_AA64MMFR0_BigEnd_FIXED (UL(0x0) << ID_AA64MMFR0_BigEnd_SHIFT)
#define ID_AA64MMFR0_BigEnd_MIXED (UL(0x1) << ID_AA64MMFR0_BigEnd_SHIFT)
#define ID_AA64MMFR0_SNSMem_SHIFT 12
+#define ID_AA64MMFR0_SNSMem_WIDTH 4
#define ID_AA64MMFR0_SNSMem_MASK (UL(0xf) << ID_AA64MMFR0_SNSMem_SHIFT)
#define ID_AA64MMFR0_SNSMem_VAL(x) ((x) & ID_AA64MMFR0_SNSMem_MASK)
#define ID_AA64MMFR0_SNSMem_NONE (UL(0x0) << ID_AA64MMFR0_SNSMem_SHIFT)
#define ID_AA64MMFR0_SNSMem_DISTINCT (UL(0x1) << ID_AA64MMFR0_SNSMem_SHIFT)
#define ID_AA64MMFR0_BigEndEL0_SHIFT 16
+#define ID_AA64MMFR0_BigEndEL0_WIDTH 4
#define ID_AA64MMFR0_BigEndEL0_MASK (UL(0xf) << ID_AA64MMFR0_BigEndEL0_SHIFT)
#define ID_AA64MMFR0_BigEndEL0_VAL(x) ((x) & ID_AA64MMFR0_BigEndEL0_MASK)
#define ID_AA64MMFR0_BigEndEL0_FIXED (UL(0x0) << ID_AA64MMFR0_BigEndEL0_SHIFT)
#define ID_AA64MMFR0_BigEndEL0_MIXED (UL(0x1) << ID_AA64MMFR0_BigEndEL0_SHIFT)
#define ID_AA64MMFR0_TGran16_SHIFT 20
+#define ID_AA64MMFR0_TGran16_WIDTH 4
#define ID_AA64MMFR0_TGran16_MASK (UL(0xf) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran16_VAL(x) ((x) & ID_AA64MMFR0_TGran16_MASK)
#define ID_AA64MMFR0_TGran16_NONE (UL(0x0) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran16_IMPL (UL(0x1) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran16_LPA2 (UL(0x2) << ID_AA64MMFR0_TGran16_SHIFT)
#define ID_AA64MMFR0_TGran64_SHIFT 24
+#define ID_AA64MMFR0_TGran64_WIDTH 4
#define ID_AA64MMFR0_TGran64_MASK (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
#define ID_AA64MMFR0_TGran64_VAL(x) ((x) & ID_AA64MMFR0_TGran64_MASK)
#define ID_AA64MMFR0_TGran64_IMPL (UL(0x0) << ID_AA64MMFR0_TGran64_SHIFT)
#define ID_AA64MMFR0_TGran64_NONE (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
#define ID_AA64MMFR0_TGran4_SHIFT 28
+#define ID_AA64MMFR0_TGran4_WIDTH 4
#define ID_AA64MMFR0_TGran4_MASK (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran4_VAL(x) ((x) & ID_AA64MMFR0_TGran4_MASK)
#define ID_AA64MMFR0_TGran4_IMPL (UL(0x0) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran4_LPA2 (UL(0x1) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran4_NONE (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
#define ID_AA64MMFR0_TGran16_2_SHIFT 32
+#define ID_AA64MMFR0_TGran16_2_WIDTH 4
#define ID_AA64MMFR0_TGran16_2_MASK (UL(0xf) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran16_2_VAL(x) ((x) & ID_AA64MMFR0_TGran16_2_MASK)
#define ID_AA64MMFR0_TGran16_2_TGran16 (UL(0x0) << ID_AA64MMFR0_TGran16_2_SHIFT)
@@ -898,12 +1254,14 @@
#define ID_AA64MMFR0_TGran16_2_IMPL (UL(0x2) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran16_2_LPA2 (UL(0x3) << ID_AA64MMFR0_TGran16_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_SHIFT 36
+#define ID_AA64MMFR0_TGran64_2_WIDTH 4
#define ID_AA64MMFR0_TGran64_2_MASK (UL(0xf) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_VAL(x) ((x) & ID_AA64MMFR0_TGran64_2_MASK)
#define ID_AA64MMFR0_TGran64_2_TGran64 (UL(0x0) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_NONE (UL(0x1) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran64_2_IMPL (UL(0x2) << ID_AA64MMFR0_TGran64_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_SHIFT 40
+#define ID_AA64MMFR0_TGran4_2_WIDTH 4
#define ID_AA64MMFR0_TGran4_2_MASK (UL(0xf) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_VAL(x) ((x) & ID_AA64MMFR0_TGran4_2_MASK)
#define ID_AA64MMFR0_TGran4_2_TGran4 (UL(0x0) << ID_AA64MMFR0_TGran4_2_SHIFT)
@@ -911,16 +1269,20 @@
#define ID_AA64MMFR0_TGran4_2_IMPL (UL(0x2) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_TGran4_2_LPA2 (UL(0x3) << ID_AA64MMFR0_TGran4_2_SHIFT)
#define ID_AA64MMFR0_ExS_SHIFT 44
+#define ID_AA64MMFR0_ExS_WIDTH 4
#define ID_AA64MMFR0_ExS_MASK (UL(0xf) << ID_AA64MMFR0_ExS_SHIFT)
#define ID_AA64MMFR0_ExS_VAL(x) ((x) & ID_AA64MMFR0_ExS_MASK)
#define ID_AA64MMFR0_ExS_ALL (UL(0x0) << ID_AA64MMFR0_ExS_SHIFT)
#define ID_AA64MMFR0_ExS_IMPL (UL(0x1) << ID_AA64MMFR0_ExS_SHIFT)
#define ID_AA64MMFR0_FGT_SHIFT 56
+#define ID_AA64MMFR0_FGT_WIDTH 4
#define ID_AA64MMFR0_FGT_MASK (UL(0xf) << ID_AA64MMFR0_FGT_SHIFT)
#define ID_AA64MMFR0_FGT_VAL(x) ((x) & ID_AA64MMFR0_FGT_MASK)
#define ID_AA64MMFR0_FGT_NONE (UL(0x0) << ID_AA64MMFR0_FGT_SHIFT)
-#define ID_AA64MMFR0_FGT_IMPL (UL(0x1) << ID_AA64MMFR0_FGT_SHIFT)
+#define ID_AA64MMFR0_FGT_8_6 (UL(0x1) << ID_AA64MMFR0_FGT_SHIFT)
+#define ID_AA64MMFR0_FGT_8_9 (UL(0x2) << ID_AA64MMFR0_FGT_SHIFT)
#define ID_AA64MMFR0_ECV_SHIFT 60
+#define ID_AA64MMFR0_ECV_WIDTH 4
#define ID_AA64MMFR0_ECV_MASK (UL(0xf) << ID_AA64MMFR0_ECV_SHIFT)
#define ID_AA64MMFR0_ECV_VAL(x) ((x) & ID_AA64MMFR0_ECV_MASK)
#define ID_AA64MMFR0_ECV_NONE (UL(0x0) << ID_AA64MMFR0_ECV_SHIFT)
@@ -928,208 +1290,307 @@
#define ID_AA64MMFR0_ECV_CNTHCTL (UL(0x2) << ID_AA64MMFR0_ECV_SHIFT)
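/*
 * Editorial sketch, not part of this change: a typical consumer of the
 * ASIDBits field, in the style of the arm64 pmap. Hypothetical names.
 */
#if 0	/* example only */
	uint64_t mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
	int asid_bits =
	    (ID_AA64MMFR0_ASIDBits_VAL(mmfr0) == ID_AA64MMFR0_ASIDBits_16) ?
	    16 : 8;
#endif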
/* ID_AA64MMFR1_EL1 */
-#define ID_AA64MMFR1_EL1 MRS_REG(ID_AA64MMFR1_EL1)
+#define ID_AA64MMFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR1_EL1)
+#define ID_AA64MMFR1_EL1_ISS ISS_MSR_REG(ID_AA64MMFR1_EL1)
#define ID_AA64MMFR1_EL1_op0 3
#define ID_AA64MMFR1_EL1_op1 0
#define ID_AA64MMFR1_EL1_CRn 0
#define ID_AA64MMFR1_EL1_CRm 7
#define ID_AA64MMFR1_EL1_op2 1
#define ID_AA64MMFR1_HAFDBS_SHIFT 0
+#define ID_AA64MMFR1_HAFDBS_WIDTH 4
#define ID_AA64MMFR1_HAFDBS_MASK (UL(0xf) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_HAFDBS_VAL(x) ((x) & ID_AA64MMFR1_HAFDBS_MASK)
#define ID_AA64MMFR1_HAFDBS_NONE (UL(0x0) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_HAFDBS_AF (UL(0x1) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_HAFDBS_AF_DBS (UL(0x2) << ID_AA64MMFR1_HAFDBS_SHIFT)
#define ID_AA64MMFR1_VMIDBits_SHIFT 4
+#define ID_AA64MMFR1_VMIDBits_WIDTH 4
#define ID_AA64MMFR1_VMIDBits_MASK (UL(0xf) << ID_AA64MMFR1_VMIDBits_SHIFT)
#define ID_AA64MMFR1_VMIDBits_VAL(x) ((x) & ID_AA64MMFR1_VMIDBits_MASK)
#define ID_AA64MMFR1_VMIDBits_8 (UL(0x0) << ID_AA64MMFR1_VMIDBits_SHIFT)
#define ID_AA64MMFR1_VMIDBits_16 (UL(0x2) << ID_AA64MMFR1_VMIDBits_SHIFT)
#define ID_AA64MMFR1_VH_SHIFT 8
+#define ID_AA64MMFR1_VH_WIDTH 4
#define ID_AA64MMFR1_VH_MASK (UL(0xf) << ID_AA64MMFR1_VH_SHIFT)
#define ID_AA64MMFR1_VH_VAL(x) ((x) & ID_AA64MMFR1_VH_MASK)
#define ID_AA64MMFR1_VH_NONE (UL(0x0) << ID_AA64MMFR1_VH_SHIFT)
#define ID_AA64MMFR1_VH_IMPL (UL(0x1) << ID_AA64MMFR1_VH_SHIFT)
#define ID_AA64MMFR1_HPDS_SHIFT 12
+#define ID_AA64MMFR1_HPDS_WIDTH 4
#define ID_AA64MMFR1_HPDS_MASK (UL(0xf) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_HPDS_VAL(x) ((x) & ID_AA64MMFR1_HPDS_MASK)
#define ID_AA64MMFR1_HPDS_NONE (UL(0x0) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_HPDS_HPD (UL(0x1) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_HPDS_TTPBHA (UL(0x2) << ID_AA64MMFR1_HPDS_SHIFT)
#define ID_AA64MMFR1_LO_SHIFT 16
+#define ID_AA64MMFR1_LO_WIDTH 4
#define ID_AA64MMFR1_LO_MASK (UL(0xf) << ID_AA64MMFR1_LO_SHIFT)
#define ID_AA64MMFR1_LO_VAL(x) ((x) & ID_AA64MMFR1_LO_MASK)
#define ID_AA64MMFR1_LO_NONE (UL(0x0) << ID_AA64MMFR1_LO_SHIFT)
#define ID_AA64MMFR1_LO_IMPL (UL(0x1) << ID_AA64MMFR1_LO_SHIFT)
#define ID_AA64MMFR1_PAN_SHIFT 20
+#define ID_AA64MMFR1_PAN_WIDTH 4
#define ID_AA64MMFR1_PAN_MASK (UL(0xf) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_VAL(x) ((x) & ID_AA64MMFR1_PAN_MASK)
#define ID_AA64MMFR1_PAN_NONE (UL(0x0) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_IMPL (UL(0x1) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_PAN_ATS1E1 (UL(0x2) << ID_AA64MMFR1_PAN_SHIFT)
-#define ID_AA64MMFR1_PAN_EPAN (UL(0x2) << ID_AA64MMFR1_PAN_SHIFT)
+#define ID_AA64MMFR1_PAN_EPAN (UL(0x3) << ID_AA64MMFR1_PAN_SHIFT)
#define ID_AA64MMFR1_SpecSEI_SHIFT 24
+#define ID_AA64MMFR1_SpecSEI_WIDTH 4
#define ID_AA64MMFR1_SpecSEI_MASK (UL(0xf) << ID_AA64MMFR1_SpecSEI_SHIFT)
#define ID_AA64MMFR1_SpecSEI_VAL(x) ((x) & ID_AA64MMFR1_SpecSEI_MASK)
#define ID_AA64MMFR1_SpecSEI_NONE (UL(0x0) << ID_AA64MMFR1_SpecSEI_SHIFT)
#define ID_AA64MMFR1_SpecSEI_IMPL (UL(0x1) << ID_AA64MMFR1_SpecSEI_SHIFT)
#define ID_AA64MMFR1_XNX_SHIFT 28
+#define ID_AA64MMFR1_XNX_WIDTH 4
#define ID_AA64MMFR1_XNX_MASK (UL(0xf) << ID_AA64MMFR1_XNX_SHIFT)
#define ID_AA64MMFR1_XNX_VAL(x) ((x) & ID_AA64MMFR1_XNX_MASK)
#define ID_AA64MMFR1_XNX_NONE (UL(0x0) << ID_AA64MMFR1_XNX_SHIFT)
#define ID_AA64MMFR1_XNX_IMPL (UL(0x1) << ID_AA64MMFR1_XNX_SHIFT)
#define ID_AA64MMFR1_TWED_SHIFT 32
+#define ID_AA64MMFR1_TWED_WIDTH 4
#define ID_AA64MMFR1_TWED_MASK (UL(0xf) << ID_AA64MMFR1_TWED_SHIFT)
#define ID_AA64MMFR1_TWED_VAL(x) ((x) & ID_AA64MMFR1_TWED_MASK)
#define ID_AA64MMFR1_TWED_NONE (UL(0x0) << ID_AA64MMFR1_TWED_SHIFT)
#define ID_AA64MMFR1_TWED_IMPL (UL(0x1) << ID_AA64MMFR1_TWED_SHIFT)
#define ID_AA64MMFR1_ETS_SHIFT 36
+#define ID_AA64MMFR1_ETS_WIDTH 4
#define ID_AA64MMFR1_ETS_MASK (UL(0xf) << ID_AA64MMFR1_ETS_SHIFT)
#define ID_AA64MMFR1_ETS_VAL(x) ((x) & ID_AA64MMFR1_ETS_MASK)
#define ID_AA64MMFR1_ETS_NONE (UL(0x0) << ID_AA64MMFR1_ETS_SHIFT)
-#define ID_AA64MMFR1_ETS_IMPL (UL(0x1) << ID_AA64MMFR1_ETS_SHIFT)
+#define ID_AA64MMFR1_ETS_NONE2 (UL(0x1) << ID_AA64MMFR1_ETS_SHIFT)
+#define ID_AA64MMFR1_ETS_IMPL (UL(0x2) << ID_AA64MMFR1_ETS_SHIFT)
#define ID_AA64MMFR1_HCX_SHIFT 40
+#define ID_AA64MMFR1_HCX_WIDTH 4
#define ID_AA64MMFR1_HCX_MASK (UL(0xf) << ID_AA64MMFR1_HCX_SHIFT)
#define ID_AA64MMFR1_HCX_VAL(x) ((x) & ID_AA64MMFR1_HCX_MASK)
#define ID_AA64MMFR1_HCX_NONE (UL(0x0) << ID_AA64MMFR1_HCX_SHIFT)
#define ID_AA64MMFR1_HCX_IMPL (UL(0x1) << ID_AA64MMFR1_HCX_SHIFT)
#define ID_AA64MMFR1_AFP_SHIFT 44
+#define ID_AA64MMFR1_AFP_WIDTH 4
#define ID_AA64MMFR1_AFP_MASK (UL(0xf) << ID_AA64MMFR1_AFP_SHIFT)
#define ID_AA64MMFR1_AFP_VAL(x) ((x) & ID_AA64MMFR1_AFP_MASK)
#define ID_AA64MMFR1_AFP_NONE (UL(0x0) << ID_AA64MMFR1_AFP_SHIFT)
#define ID_AA64MMFR1_AFP_IMPL (UL(0x1) << ID_AA64MMFR1_AFP_SHIFT)
#define ID_AA64MMFR1_nTLBPA_SHIFT 48
+#define ID_AA64MMFR1_nTLBPA_WIDTH 4
#define ID_AA64MMFR1_nTLBPA_MASK (UL(0xf) << ID_AA64MMFR1_nTLBPA_SHIFT)
#define ID_AA64MMFR1_nTLBPA_VAL(x) ((x) & ID_AA64MMFR1_nTLBPA_MASK)
#define ID_AA64MMFR1_nTLBPA_NONE (UL(0x0) << ID_AA64MMFR1_nTLBPA_SHIFT)
#define ID_AA64MMFR1_nTLBPA_IMPL (UL(0x1) << ID_AA64MMFR1_nTLBPA_SHIFT)
#define ID_AA64MMFR1_TIDCP1_SHIFT 52
+#define ID_AA64MMFR1_TIDCP1_WIDTH 4
#define ID_AA64MMFR1_TIDCP1_MASK (UL(0xf) << ID_AA64MMFR1_TIDCP1_SHIFT)
#define ID_AA64MMFR1_TIDCP1_VAL(x) ((x) & ID_AA64MMFR1_TIDCP1_MASK)
#define ID_AA64MMFR1_TIDCP1_NONE (UL(0x0) << ID_AA64MMFR1_TIDCP1_SHIFT)
#define ID_AA64MMFR1_TIDCP1_IMPL (UL(0x1) << ID_AA64MMFR1_TIDCP1_SHIFT)
#define ID_AA64MMFR1_CMOVW_SHIFT 56
+#define ID_AA64MMFR1_CMOVW_WIDTH 4
#define ID_AA64MMFR1_CMOVW_MASK (UL(0xf) << ID_AA64MMFR1_CMOVW_SHIFT)
#define ID_AA64MMFR1_CMOVW_VAL(x) ((x) & ID_AA64MMFR1_CMOVW_MASK)
#define ID_AA64MMFR1_CMOVW_NONE (UL(0x0) << ID_AA64MMFR1_CMOVW_SHIFT)
#define ID_AA64MMFR1_CMOVW_IMPL (UL(0x1) << ID_AA64MMFR1_CMOVW_SHIFT)
+#define ID_AA64MMFR1_ECBHB_SHIFT 60
+#define ID_AA64MMFR1_ECBHB_WIDTH 4
+#define ID_AA64MMFR1_ECBHB_MASK (UL(0xf) << ID_AA64MMFR1_ECBHB_SHIFT)
+#define ID_AA64MMFR1_ECBHB_VAL(x) ((x) & ID_AA64MMFR1_ECBHB_MASK)
+#define ID_AA64MMFR1_ECBHB_NONE (UL(0x0) << ID_AA64MMFR1_ECBHB_SHIFT)
+#define ID_AA64MMFR1_ECBHB_IMPL (UL(0x1) << ID_AA64MMFR1_ECBHB_SHIFT)
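/*
 * Editorial sketch, not part of this change: after the ETS change above
 * a field value of 1 (ETS_NONE2) still means "not implemented", so
 * feature tests must compare against ETS_IMPL. Hypothetical code.
 */
#if 0	/* example only */
	uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
	if (ID_AA64MMFR1_ETS_VAL(mmfr1) >= ID_AA64MMFR1_ETS_IMPL)
		has_ets2 = true;	/* FEAT_ETS2 */
#endif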
/* ID_AA64MMFR2_EL1 */
-#define ID_AA64MMFR2_EL1 MRS_REG(ID_AA64MMFR2_EL1)
+#define ID_AA64MMFR2_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR2_EL1)
+#define ID_AA64MMFR2_EL1_ISS ISS_MSR_REG(ID_AA64MMFR2_EL1)
#define ID_AA64MMFR2_EL1_op0 3
#define ID_AA64MMFR2_EL1_op1 0
#define ID_AA64MMFR2_EL1_CRn 0
#define ID_AA64MMFR2_EL1_CRm 7
#define ID_AA64MMFR2_EL1_op2 2
#define ID_AA64MMFR2_CnP_SHIFT 0
+#define ID_AA64MMFR2_CnP_WIDTH 4
#define ID_AA64MMFR2_CnP_MASK (UL(0xf) << ID_AA64MMFR2_CnP_SHIFT)
#define ID_AA64MMFR2_CnP_VAL(x) ((x) & ID_AA64MMFR2_CnP_MASK)
#define ID_AA64MMFR2_CnP_NONE (UL(0x0) << ID_AA64MMFR2_CnP_SHIFT)
#define ID_AA64MMFR2_CnP_IMPL (UL(0x1) << ID_AA64MMFR2_CnP_SHIFT)
#define ID_AA64MMFR2_UAO_SHIFT 4
+#define ID_AA64MMFR2_UAO_WIDTH 4
#define ID_AA64MMFR2_UAO_MASK (UL(0xf) << ID_AA64MMFR2_UAO_SHIFT)
#define ID_AA64MMFR2_UAO_VAL(x) ((x) & ID_AA64MMFR2_UAO_MASK)
#define ID_AA64MMFR2_UAO_NONE (UL(0x0) << ID_AA64MMFR2_UAO_SHIFT)
#define ID_AA64MMFR2_UAO_IMPL (UL(0x1) << ID_AA64MMFR2_UAO_SHIFT)
#define ID_AA64MMFR2_LSM_SHIFT 8
+#define ID_AA64MMFR2_LSM_WIDTH 4
#define ID_AA64MMFR2_LSM_MASK (UL(0xf) << ID_AA64MMFR2_LSM_SHIFT)
#define ID_AA64MMFR2_LSM_VAL(x) ((x) & ID_AA64MMFR2_LSM_MASK)
#define ID_AA64MMFR2_LSM_NONE (UL(0x0) << ID_AA64MMFR2_LSM_SHIFT)
#define ID_AA64MMFR2_LSM_IMPL (UL(0x1) << ID_AA64MMFR2_LSM_SHIFT)
#define ID_AA64MMFR2_IESB_SHIFT 12
+#define ID_AA64MMFR2_IESB_WIDTH 4
#define ID_AA64MMFR2_IESB_MASK (UL(0xf) << ID_AA64MMFR2_IESB_SHIFT)
#define ID_AA64MMFR2_IESB_VAL(x) ((x) & ID_AA64MMFR2_IESB_MASK)
#define ID_AA64MMFR2_IESB_NONE (UL(0x0) << ID_AA64MMFR2_IESB_SHIFT)
#define ID_AA64MMFR2_IESB_IMPL (UL(0x1) << ID_AA64MMFR2_IESB_SHIFT)
#define ID_AA64MMFR2_VARange_SHIFT 16
+#define ID_AA64MMFR2_VARange_WIDTH 4
#define ID_AA64MMFR2_VARange_MASK (UL(0xf) << ID_AA64MMFR2_VARange_SHIFT)
#define ID_AA64MMFR2_VARange_VAL(x) ((x) & ID_AA64MMFR2_VARange_MASK)
#define ID_AA64MMFR2_VARange_48 (UL(0x0) << ID_AA64MMFR2_VARange_SHIFT)
#define ID_AA64MMFR2_VARange_52 (UL(0x1) << ID_AA64MMFR2_VARange_SHIFT)
#define ID_AA64MMFR2_CCIDX_SHIFT 20
+#define ID_AA64MMFR2_CCIDX_WIDTH 4
#define ID_AA64MMFR2_CCIDX_MASK (UL(0xf) << ID_AA64MMFR2_CCIDX_SHIFT)
#define ID_AA64MMFR2_CCIDX_VAL(x) ((x) & ID_AA64MMFR2_CCIDX_MASK)
#define ID_AA64MMFR2_CCIDX_32 (UL(0x0) << ID_AA64MMFR2_CCIDX_SHIFT)
#define ID_AA64MMFR2_CCIDX_64 (UL(0x1) << ID_AA64MMFR2_CCIDX_SHIFT)
#define ID_AA64MMFR2_NV_SHIFT 24
+#define ID_AA64MMFR2_NV_WIDTH 4
#define ID_AA64MMFR2_NV_MASK (UL(0xf) << ID_AA64MMFR2_NV_SHIFT)
#define ID_AA64MMFR2_NV_VAL(x) ((x) & ID_AA64MMFR2_NV_MASK)
#define ID_AA64MMFR2_NV_NONE (UL(0x0) << ID_AA64MMFR2_NV_SHIFT)
#define ID_AA64MMFR2_NV_8_3 (UL(0x1) << ID_AA64MMFR2_NV_SHIFT)
#define ID_AA64MMFR2_NV_8_4 (UL(0x2) << ID_AA64MMFR2_NV_SHIFT)
#define ID_AA64MMFR2_ST_SHIFT 28
+#define ID_AA64MMFR2_ST_WIDTH 4
#define ID_AA64MMFR2_ST_MASK (UL(0xf) << ID_AA64MMFR2_ST_SHIFT)
#define ID_AA64MMFR2_ST_VAL(x) ((x) & ID_AA64MMFR2_ST_MASK)
#define ID_AA64MMFR2_ST_NONE (UL(0x0) << ID_AA64MMFR2_ST_SHIFT)
#define ID_AA64MMFR2_ST_IMPL (UL(0x1) << ID_AA64MMFR2_ST_SHIFT)
#define ID_AA64MMFR2_AT_SHIFT 32
+#define ID_AA64MMFR2_AT_WIDTH 4
#define ID_AA64MMFR2_AT_MASK (UL(0xf) << ID_AA64MMFR2_AT_SHIFT)
#define ID_AA64MMFR2_AT_VAL(x) ((x) & ID_AA64MMFR2_AT_MASK)
#define ID_AA64MMFR2_AT_NONE (UL(0x0) << ID_AA64MMFR2_AT_SHIFT)
#define ID_AA64MMFR2_AT_IMPL (UL(0x1) << ID_AA64MMFR2_AT_SHIFT)
#define ID_AA64MMFR2_IDS_SHIFT 36
+#define ID_AA64MMFR2_IDS_WIDTH 4
#define ID_AA64MMFR2_IDS_MASK (UL(0xf) << ID_AA64MMFR2_IDS_SHIFT)
#define ID_AA64MMFR2_IDS_VAL(x) ((x) & ID_AA64MMFR2_IDS_MASK)
#define ID_AA64MMFR2_IDS_NONE (UL(0x0) << ID_AA64MMFR2_IDS_SHIFT)
#define ID_AA64MMFR2_IDS_IMPL (UL(0x1) << ID_AA64MMFR2_IDS_SHIFT)
#define ID_AA64MMFR2_FWB_SHIFT 40
+#define ID_AA64MMFR2_FWB_WIDTH 4
#define ID_AA64MMFR2_FWB_MASK (UL(0xf) << ID_AA64MMFR2_FWB_SHIFT)
#define ID_AA64MMFR2_FWB_VAL(x) ((x) & ID_AA64MMFR2_FWB_MASK)
#define ID_AA64MMFR2_FWB_NONE (UL(0x0) << ID_AA64MMFR2_FWB_SHIFT)
#define ID_AA64MMFR2_FWB_IMPL (UL(0x1) << ID_AA64MMFR2_FWB_SHIFT)
#define ID_AA64MMFR2_TTL_SHIFT 48
+#define ID_AA64MMFR2_TTL_WIDTH 4
#define ID_AA64MMFR2_TTL_MASK (UL(0xf) << ID_AA64MMFR2_TTL_SHIFT)
#define ID_AA64MMFR2_TTL_VAL(x) ((x) & ID_AA64MMFR2_TTL_MASK)
#define ID_AA64MMFR2_TTL_NONE (UL(0x0) << ID_AA64MMFR2_TTL_SHIFT)
#define ID_AA64MMFR2_TTL_IMPL (UL(0x1) << ID_AA64MMFR2_TTL_SHIFT)
#define ID_AA64MMFR2_BBM_SHIFT 52
+#define ID_AA64MMFR2_BBM_WIDTH 4
#define ID_AA64MMFR2_BBM_MASK (UL(0xf) << ID_AA64MMFR2_BBM_SHIFT)
#define ID_AA64MMFR2_BBM_VAL(x) ((x) & ID_AA64MMFR2_BBM_MASK)
#define ID_AA64MMFR2_BBM_LEVEL0 (UL(0x0) << ID_AA64MMFR2_BBM_SHIFT)
#define ID_AA64MMFR2_BBM_LEVEL1 (UL(0x1) << ID_AA64MMFR2_BBM_SHIFT)
#define ID_AA64MMFR2_BBM_LEVEL2 (UL(0x2) << ID_AA64MMFR2_BBM_SHIFT)
#define ID_AA64MMFR2_EVT_SHIFT 56
+#define ID_AA64MMFR2_EVT_WIDTH 4
#define ID_AA64MMFR2_EVT_MASK (UL(0xf) << ID_AA64MMFR2_EVT_SHIFT)
#define ID_AA64MMFR2_EVT_VAL(x) ((x) & ID_AA64MMFR2_EVT_MASK)
#define ID_AA64MMFR2_EVT_NONE (UL(0x0) << ID_AA64MMFR2_EVT_SHIFT)
#define ID_AA64MMFR2_EVT_8_2 (UL(0x1) << ID_AA64MMFR2_EVT_SHIFT)
#define ID_AA64MMFR2_EVT_8_5 (UL(0x2) << ID_AA64MMFR2_EVT_SHIFT)
#define ID_AA64MMFR2_E0PD_SHIFT 60
+#define ID_AA64MMFR2_E0PD_WIDTH 4
#define ID_AA64MMFR2_E0PD_MASK (UL(0xf) << ID_AA64MMFR2_E0PD_SHIFT)
#define ID_AA64MMFR2_E0PD_VAL(x) ((x) & ID_AA64MMFR2_E0PD_MASK)
#define ID_AA64MMFR2_E0PD_NONE (UL(0x0) << ID_AA64MMFR2_E0PD_SHIFT)
#define ID_AA64MMFR2_E0PD_IMPL (UL(0x1) << ID_AA64MMFR2_E0PD_SHIFT)
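/*
 * Editorial sketch, not part of this change: probing for 52-bit virtual
 * addressing (FEAT_LVA). Hypothetical consumer code.
 */
#if 0	/* example only */
	uint64_t mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
	if (ID_AA64MMFR2_VARange_VAL(mmfr2) == ID_AA64MMFR2_VARange_52)
		va_bits = 52;
#endif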
/* ID_AA64MMFR3_EL1 */
-#define ID_AA64MMFR3_EL1 MRS_REG(ID_AA64MMFR3_EL1)
+#define ID_AA64MMFR3_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR3_EL1)
+#define ID_AA64MMFR3_EL1_ISS ISS_MSR_REG(ID_AA64MMFR3_EL1)
#define ID_AA64MMFR3_EL1_op0 3
#define ID_AA64MMFR3_EL1_op1 0
#define ID_AA64MMFR3_EL1_CRn 0
#define ID_AA64MMFR3_EL1_CRm 7
#define ID_AA64MMFR3_EL1_op2 3
#define ID_AA64MMFR3_TCRX_SHIFT 0
+#define ID_AA64MMFR3_TCRX_WIDTH 4
#define ID_AA64MMFR3_TCRX_MASK (UL(0xf) << ID_AA64MMFR3_TCRX_SHIFT)
#define ID_AA64MMFR3_TCRX_VAL(x) ((x) & ID_AA64MMFR3_TCRX_MASK)
#define ID_AA64MMFR3_TCRX_NONE (UL(0x0) << ID_AA64MMFR3_TCRX_SHIFT)
#define ID_AA64MMFR3_TCRX_IMPL (UL(0x1) << ID_AA64MMFR3_TCRX_SHIFT)
#define ID_AA64MMFR3_SCTLRX_SHIFT 4
+#define ID_AA64MMFR3_SCTLRX_WIDTH 4
#define ID_AA64MMFR3_SCTLRX_MASK (UL(0xf) << ID_AA64MMFR3_SCTLRX_SHIFT)
#define ID_AA64MMFR3_SCTLRX_VAL(x) ((x) & ID_AA64MMFR3_SCTLRX_MASK)
#define ID_AA64MMFR3_SCTLRX_NONE (UL(0x0) << ID_AA64MMFR3_SCTLRX_SHIFT)
#define ID_AA64MMFR3_SCTLRX_IMPL (UL(0x1) << ID_AA64MMFR3_SCTLRX_SHIFT)
+#define ID_AA64MMFR3_S1PIE_SHIFT 8
+#define ID_AA64MMFR3_S1PIE_WIDTH 4
+#define ID_AA64MMFR3_S1PIE_MASK (UL(0xf) << ID_AA64MMFR3_S1PIE_SHIFT)
+#define ID_AA64MMFR3_S1PIE_VAL(x) ((x) & ID_AA64MMFR3_S1PIE_MASK)
+#define ID_AA64MMFR3_S1PIE_NONE (UL(0x0) << ID_AA64MMFR3_S1PIE_SHIFT)
+#define ID_AA64MMFR3_S1PIE_IMPL (UL(0x1) << ID_AA64MMFR3_S1PIE_SHIFT)
+#define ID_AA64MMFR3_S2PIE_SHIFT 12
+#define ID_AA64MMFR3_S2PIE_WIDTH 4
+#define ID_AA64MMFR3_S2PIE_MASK (UL(0xf) << ID_AA64MMFR3_S2PIE_SHIFT)
+#define ID_AA64MMFR3_S2PIE_VAL(x) ((x) & ID_AA64MMFR3_S2PIE_MASK)
+#define ID_AA64MMFR3_S2PIE_NONE (UL(0x0) << ID_AA64MMFR3_S2PIE_SHIFT)
+#define ID_AA64MMFR3_S2PIE_IMPL (UL(0x1) << ID_AA64MMFR3_S2PIE_SHIFT)
+#define ID_AA64MMFR3_S1POE_SHIFT 16
+#define ID_AA64MMFR3_S1POE_WIDTH 4
+#define ID_AA64MMFR3_S1POE_MASK (UL(0xf) << ID_AA64MMFR3_S1POE_SHIFT)
+#define ID_AA64MMFR3_S1POE_VAL(x) ((x) & ID_AA64MMFR3_S1POE_MASK)
+#define ID_AA64MMFR3_S1POE_NONE (UL(0x0) << ID_AA64MMFR3_S1POE_SHIFT)
+#define ID_AA64MMFR3_S1POE_IMPL (UL(0x1) << ID_AA64MMFR3_S1POE_SHIFT)
+#define ID_AA64MMFR3_S2POE_SHIFT 20
+#define ID_AA64MMFR3_S2POE_WIDTH 4
+#define ID_AA64MMFR3_S2POE_MASK (UL(0xf) << ID_AA64MMFR3_S2POE_SHIFT)
+#define ID_AA64MMFR3_S2POE_VAL(x) ((x) & ID_AA64MMFR3_S2POE_MASK)
+#define ID_AA64MMFR3_S2POE_NONE (UL(0x0) << ID_AA64MMFR3_S2POE_SHIFT)
+#define ID_AA64MMFR3_S2POE_IMPL (UL(0x1) << ID_AA64MMFR3_S2POE_SHIFT)
+#define ID_AA64MMFR3_AIE_SHIFT 24
+#define ID_AA64MMFR3_AIE_WIDTH 4
+#define ID_AA64MMFR3_AIE_MASK (UL(0xf) << ID_AA64MMFR3_AIE_SHIFT)
+#define ID_AA64MMFR3_AIE_VAL(x) ((x) & ID_AA64MMFR3_AIE_MASK)
+#define ID_AA64MMFR3_AIE_NONE (UL(0x0) << ID_AA64MMFR3_AIE_SHIFT)
+#define ID_AA64MMFR3_AIE_IMPL (UL(0x1) << ID_AA64MMFR3_AIE_SHIFT)
#define ID_AA64MMFR3_MEC_SHIFT 28
+#define ID_AA64MMFR3_MEC_WIDTH 4
#define ID_AA64MMFR3_MEC_MASK (UL(0xf) << ID_AA64MMFR3_MEC_SHIFT)
#define ID_AA64MMFR3_MEC_VAL(x) ((x) & ID_AA64MMFR3_MEC_MASK)
#define ID_AA64MMFR3_MEC_NONE (UL(0x0) << ID_AA64MMFR3_MEC_SHIFT)
#define ID_AA64MMFR3_MEC_IMPL (UL(0x1) << ID_AA64MMFR3_MEC_SHIFT)
+#define ID_AA64MMFR3_SNERR_SHIFT 40
+#define ID_AA64MMFR3_SNERR_WIDTH 4
+#define ID_AA64MMFR3_SNERR_MASK (UL(0xf) << ID_AA64MMFR3_SNERR_SHIFT)
+#define ID_AA64MMFR3_SNERR_VAL(x) ((x) & ID_AA64MMFR3_SNERR_MASK)
+#define ID_AA64MMFR3_SNERR_NONE (UL(0x0) << ID_AA64MMFR3_SNERR_SHIFT)
+#define ID_AA64MMFR3_SNERR_ALL (UL(0x1) << ID_AA64MMFR3_SNERR_SHIFT)
+#define ID_AA64MMFR3_ANERR_SHIFT 44
+#define ID_AA64MMFR3_ANERR_WIDTH 4
+#define ID_AA64MMFR3_ANERR_MASK (UL(0xf) << ID_AA64MMFR3_ANERR_SHIFT)
+#define ID_AA64MMFR3_ANERR_VAL(x) ((x) & ID_AA64MMFR3_ANERR_MASK)
+#define ID_AA64MMFR3_ANERR_NONE (UL(0x0) << ID_AA64MMFR3_ANERR_SHIFT)
+#define ID_AA64MMFR3_ANERR_SOME (UL(0x1) << ID_AA64MMFR3_ANERR_SHIFT)
+#define ID_AA64MMFR3_SDERR_SHIFT 52
+#define ID_AA64MMFR3_SDERR_WIDTH 4
+#define ID_AA64MMFR3_SDERR_MASK (UL(0xf) << ID_AA64MMFR3_SDERR_SHIFT)
+#define ID_AA64MMFR3_SDERR_VAL(x) ((x) & ID_AA64MMFR3_SDERR_MASK)
+#define ID_AA64MMFR3_SDERR_NONE (UL(0x0) << ID_AA64MMFR3_SDERR_SHIFT)
+#define ID_AA64MMFR3_SDERR_ALL (UL(0x1) << ID_AA64MMFR3_SDERR_SHIFT)
+#define ID_AA64MMFR3_ADERR_SHIFT 56
+#define ID_AA64MMFR3_ADERR_WIDTH 4
+#define ID_AA64MMFR3_ADERR_MASK (UL(0xf) << ID_AA64MMFR3_ADERR_SHIFT)
+#define ID_AA64MMFR3_ADERR_VAL(x) ((x) & ID_AA64MMFR3_ADERR_MASK)
+#define ID_AA64MMFR3_ADERR_NONE (UL(0x0) << ID_AA64MMFR3_ADERR_SHIFT)
+#define ID_AA64MMFR3_ADERR_SOME (UL(0x1) << ID_AA64MMFR3_ADERR_SHIFT)
#define ID_AA64MMFR3_Spec_FPACC_SHIFT 60
+#define ID_AA64MMFR3_Spec_FPACC_WIDTH 4
#define ID_AA64MMFR3_Spec_FPACC_MASK (UL(0xf) << ID_AA64MMFR3_Spec_FPACC_SHIFT)
#define ID_AA64MMFR3_Spec_FPACC_VAL(x) ((x) & ID_AA64MMFR3_Spec_FPACC_MASK)
#define ID_AA64MMFR3_Spec_FPACC_NONE (UL(0x0) << ID_AA64MMFR3_Spec_FPACC_SHIFT)
#define ID_AA64MMFR3_Spec_FPACC_IMPL (UL(0x1) << ID_AA64MMFR3_Spec_FPACC_SHIFT)
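/*
 * Editorial sketch, not part of this change: testing one of the new
 * ID_AA64MMFR3_EL1 fields introduced above. Hypothetical code.
 */
#if 0	/* example only */
	uint64_t mmfr3 = READ_SPECIALREG(id_aa64mmfr3_el1);
	if (ID_AA64MMFR3_S1PIE_VAL(mmfr3) >= ID_AA64MMFR3_S1PIE_IMPL)
		has_s1pie = true;	/* FEAT_S1PIE permission indirection */
#endif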
/* ID_AA64MMFR4_EL1 */
-#define ID_AA64MMFR4_EL1 MRS_REG(ID_AA64MMFR4_EL1)
+#define ID_AA64MMFR4_EL1_REG MRS_REG_ALT_NAME(ID_AA64MMFR4_EL1)
+#define ID_AA64MMFR4_EL1_ISS ISS_MSR_REG(ID_AA64MMFR4_EL1)
#define ID_AA64MMFR4_EL1_op0 3
#define ID_AA64MMFR4_EL1_op1 0
#define ID_AA64MMFR4_EL1_CRn 0
@@ -1137,41 +1598,48 @@
#define ID_AA64MMFR4_EL1_op2 4
/* ID_AA64PFR0_EL1 */
-#define ID_AA64PFR0_EL1 MRS_REG(ID_AA64PFR0_EL1)
+#define ID_AA64PFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64PFR0_EL1)
+#define ID_AA64PFR0_EL1_ISS ISS_MSR_REG(ID_AA64PFR0_EL1)
#define ID_AA64PFR0_EL1_op0 3
#define ID_AA64PFR0_EL1_op1 0
#define ID_AA64PFR0_EL1_CRn 0
#define ID_AA64PFR0_EL1_CRm 4
#define ID_AA64PFR0_EL1_op2 0
#define ID_AA64PFR0_EL0_SHIFT 0
+#define ID_AA64PFR0_EL0_WIDTH 4
#define ID_AA64PFR0_EL0_MASK (UL(0xf) << ID_AA64PFR0_EL0_SHIFT)
#define ID_AA64PFR0_EL0_VAL(x) ((x) & ID_AA64PFR0_EL0_MASK)
#define ID_AA64PFR0_EL0_64 (UL(0x1) << ID_AA64PFR0_EL0_SHIFT)
#define ID_AA64PFR0_EL0_64_32 (UL(0x2) << ID_AA64PFR0_EL0_SHIFT)
#define ID_AA64PFR0_EL1_SHIFT 4
+#define ID_AA64PFR0_EL1_WIDTH 4
#define ID_AA64PFR0_EL1_MASK (UL(0xf) << ID_AA64PFR0_EL1_SHIFT)
#define ID_AA64PFR0_EL1_VAL(x) ((x) & ID_AA64PFR0_EL1_MASK)
#define ID_AA64PFR0_EL1_64 (UL(0x1) << ID_AA64PFR0_EL1_SHIFT)
#define ID_AA64PFR0_EL1_64_32 (UL(0x2) << ID_AA64PFR0_EL1_SHIFT)
#define ID_AA64PFR0_EL2_SHIFT 8
+#define ID_AA64PFR0_EL2_WIDTH 4
#define ID_AA64PFR0_EL2_MASK (UL(0xf) << ID_AA64PFR0_EL2_SHIFT)
#define ID_AA64PFR0_EL2_VAL(x) ((x) & ID_AA64PFR0_EL2_MASK)
#define ID_AA64PFR0_EL2_NONE (UL(0x0) << ID_AA64PFR0_EL2_SHIFT)
#define ID_AA64PFR0_EL2_64 (UL(0x1) << ID_AA64PFR0_EL2_SHIFT)
#define ID_AA64PFR0_EL2_64_32 (UL(0x2) << ID_AA64PFR0_EL2_SHIFT)
#define ID_AA64PFR0_EL3_SHIFT 12
+#define ID_AA64PFR0_EL3_WIDTH 4
#define ID_AA64PFR0_EL3_MASK (UL(0xf) << ID_AA64PFR0_EL3_SHIFT)
#define ID_AA64PFR0_EL3_VAL(x) ((x) & ID_AA64PFR0_EL3_MASK)
#define ID_AA64PFR0_EL3_NONE (UL(0x0) << ID_AA64PFR0_EL3_SHIFT)
#define ID_AA64PFR0_EL3_64 (UL(0x1) << ID_AA64PFR0_EL3_SHIFT)
#define ID_AA64PFR0_EL3_64_32 (UL(0x2) << ID_AA64PFR0_EL3_SHIFT)
#define ID_AA64PFR0_FP_SHIFT 16
+#define ID_AA64PFR0_FP_WIDTH 4
#define ID_AA64PFR0_FP_MASK (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
#define ID_AA64PFR0_FP_VAL(x) ((x) & ID_AA64PFR0_FP_MASK)
#define ID_AA64PFR0_FP_IMPL (UL(0x0) << ID_AA64PFR0_FP_SHIFT)
#define ID_AA64PFR0_FP_HP (UL(0x1) << ID_AA64PFR0_FP_SHIFT)
#define ID_AA64PFR0_FP_NONE (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+#define ID_AA64PFR0_AdvSIMD_WIDTH 4
#define ID_AA64PFR0_AdvSIMD_MASK (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
#define ID_AA64PFR0_AdvSIMD_VAL(x) ((x) & ID_AA64PFR0_AdvSIMD_MASK)
#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
@@ -1179,49 +1647,59 @@
#define ID_AA64PFR0_AdvSIMD_NONE (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
#define ID_AA64PFR0_GIC_BITS 0x4 /* Number of bits in GIC field */
#define ID_AA64PFR0_GIC_SHIFT 24
+#define ID_AA64PFR0_GIC_WIDTH 4
#define ID_AA64PFR0_GIC_MASK (UL(0xf) << ID_AA64PFR0_GIC_SHIFT)
#define ID_AA64PFR0_GIC_VAL(x) ((x) & ID_AA64PFR0_GIC_MASK)
#define ID_AA64PFR0_GIC_CPUIF_NONE (UL(0x0) << ID_AA64PFR0_GIC_SHIFT)
#define ID_AA64PFR0_GIC_CPUIF_EN (UL(0x1) << ID_AA64PFR0_GIC_SHIFT)
#define ID_AA64PFR0_GIC_CPUIF_4_1 (UL(0x3) << ID_AA64PFR0_GIC_SHIFT)
#define ID_AA64PFR0_RAS_SHIFT 28
+#define ID_AA64PFR0_RAS_WIDTH 4
#define ID_AA64PFR0_RAS_MASK (UL(0xf) << ID_AA64PFR0_RAS_SHIFT)
#define ID_AA64PFR0_RAS_VAL(x) ((x) & ID_AA64PFR0_RAS_MASK)
#define ID_AA64PFR0_RAS_NONE (UL(0x0) << ID_AA64PFR0_RAS_SHIFT)
#define ID_AA64PFR0_RAS_IMPL (UL(0x1) << ID_AA64PFR0_RAS_SHIFT)
#define ID_AA64PFR0_RAS_8_4 (UL(0x2) << ID_AA64PFR0_RAS_SHIFT)
+#define ID_AA64PFR0_RAS_8_9 (UL(0x3) << ID_AA64PFR0_RAS_SHIFT)
#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_SVE_WIDTH 4
#define ID_AA64PFR0_SVE_MASK (UL(0xf) << ID_AA64PFR0_SVE_SHIFT)
#define ID_AA64PFR0_SVE_VAL(x) ((x) & ID_AA64PFR0_SVE_MASK)
#define ID_AA64PFR0_SVE_NONE (UL(0x0) << ID_AA64PFR0_SVE_SHIFT)
#define ID_AA64PFR0_SVE_IMPL (UL(0x1) << ID_AA64PFR0_SVE_SHIFT)
#define ID_AA64PFR0_SEL2_SHIFT 36
+#define ID_AA64PFR0_SEL2_WIDTH 4
#define ID_AA64PFR0_SEL2_MASK (UL(0xf) << ID_AA64PFR0_SEL2_SHIFT)
#define ID_AA64PFR0_SEL2_VAL(x) ((x) & ID_AA64PFR0_SEL2_MASK)
#define ID_AA64PFR0_SEL2_NONE (UL(0x0) << ID_AA64PFR0_SEL2_SHIFT)
#define ID_AA64PFR0_SEL2_IMPL (UL(0x1) << ID_AA64PFR0_SEL2_SHIFT)
#define ID_AA64PFR0_MPAM_SHIFT 40
+#define ID_AA64PFR0_MPAM_WIDTH 4
#define ID_AA64PFR0_MPAM_MASK (UL(0xf) << ID_AA64PFR0_MPAM_SHIFT)
#define ID_AA64PFR0_MPAM_VAL(x) ((x) & ID_AA64PFR0_MPAM_MASK)
#define ID_AA64PFR0_MPAM_NONE (UL(0x0) << ID_AA64PFR0_MPAM_SHIFT)
#define ID_AA64PFR0_MPAM_IMPL (UL(0x1) << ID_AA64PFR0_MPAM_SHIFT)
#define ID_AA64PFR0_AMU_SHIFT 44
+#define ID_AA64PFR0_AMU_WIDTH 4
#define ID_AA64PFR0_AMU_MASK (UL(0xf) << ID_AA64PFR0_AMU_SHIFT)
#define ID_AA64PFR0_AMU_VAL(x) ((x) & ID_AA64PFR0_AMU_MASK)
#define ID_AA64PFR0_AMU_NONE (UL(0x0) << ID_AA64PFR0_AMU_SHIFT)
#define ID_AA64PFR0_AMU_V1 (UL(0x1) << ID_AA64PFR0_AMU_SHIFT)
#define ID_AA64PFR0_AMU_V1_1 (UL(0x2) << ID_AA64PFR0_AMU_SHIFT)
#define ID_AA64PFR0_DIT_SHIFT 48
+#define ID_AA64PFR0_DIT_WIDTH 4
#define ID_AA64PFR0_DIT_MASK (UL(0xf) << ID_AA64PFR0_DIT_SHIFT)
#define ID_AA64PFR0_DIT_VAL(x) ((x) & ID_AA64PFR0_DIT_MASK)
#define ID_AA64PFR0_DIT_NONE (UL(0x0) << ID_AA64PFR0_DIT_SHIFT)
#define ID_AA64PFR0_DIT_PSTATE (UL(0x1) << ID_AA64PFR0_DIT_SHIFT)
#define ID_AA64PFR0_RME_SHIFT 52
+#define ID_AA64PFR0_RME_WIDTH 4
#define ID_AA64PFR0_RME_MASK (UL(0xf) << ID_AA64PFR0_RME_SHIFT)
#define ID_AA64PFR0_RME_VAL(x) ((x) & ID_AA64PFR0_RME_MASK)
#define ID_AA64PFR0_RME_NONE (UL(0x0) << ID_AA64PFR0_RME_SHIFT)
#define ID_AA64PFR0_RME_IMPL (UL(0x1) << ID_AA64PFR0_RME_SHIFT)
#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_CSV2_WIDTH 4
#define ID_AA64PFR0_CSV2_MASK (UL(0xf) << ID_AA64PFR0_CSV2_SHIFT)
#define ID_AA64PFR0_CSV2_VAL(x) ((x) & ID_AA64PFR0_CSV2_MASK)
#define ID_AA64PFR0_CSV2_NONE (UL(0x0) << ID_AA64PFR0_CSV2_SHIFT)
@@ -1229,30 +1707,35 @@
#define ID_AA64PFR0_CSV2_SCXTNUM (UL(0x2) << ID_AA64PFR0_CSV2_SHIFT)
#define ID_AA64PFR0_CSV2_3 (UL(0x3) << ID_AA64PFR0_CSV2_SHIFT)
#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV3_WIDTH 4
#define ID_AA64PFR0_CSV3_MASK (UL(0xf) << ID_AA64PFR0_CSV3_SHIFT)
#define ID_AA64PFR0_CSV3_VAL(x) ((x) & ID_AA64PFR0_CSV3_MASK)
#define ID_AA64PFR0_CSV3_NONE (UL(0x0) << ID_AA64PFR0_CSV3_SHIFT)
#define ID_AA64PFR0_CSV3_ISOLATED (UL(0x1) << ID_AA64PFR0_CSV3_SHIFT)
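/*
 * Editorial sketch, not part of this change: the GIC field is the usual
 * way to probe for the system-register CPU interface, as the GICv3
 * driver does. Hypothetical consumer code.
 */
#if 0	/* example only */
	uint64_t pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
	if (ID_AA64PFR0_GIC_VAL(pfr0) != ID_AA64PFR0_GIC_CPUIF_NONE)
		has_gic_sysregs = true;
#endif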
/* ID_AA64PFR1_EL1 */
-#define ID_AA64PFR1_EL1 MRS_REG(ID_AA64PFR1_EL1)
+#define ID_AA64PFR1_EL1_REG MRS_REG_ALT_NAME(ID_AA64PFR1_EL1)
+#define ID_AA64PFR1_EL1_ISS ISS_MSR_REG(ID_AA64PFR1_EL1)
#define ID_AA64PFR1_EL1_op0 3
#define ID_AA64PFR1_EL1_op1 0
#define ID_AA64PFR1_EL1_CRn 0
#define ID_AA64PFR1_EL1_CRm 4
#define ID_AA64PFR1_EL1_op2 1
#define ID_AA64PFR1_BT_SHIFT 0
+#define ID_AA64PFR1_BT_WIDTH 4
#define ID_AA64PFR1_BT_MASK (UL(0xf) << ID_AA64PFR1_BT_SHIFT)
#define ID_AA64PFR1_BT_VAL(x) ((x) & ID_AA64PFR1_BT_MASK)
#define ID_AA64PFR1_BT_NONE (UL(0x0) << ID_AA64PFR1_BT_SHIFT)
#define ID_AA64PFR1_BT_IMPL (UL(0x1) << ID_AA64PFR1_BT_SHIFT)
#define ID_AA64PFR1_SSBS_SHIFT 4
+#define ID_AA64PFR1_SSBS_WIDTH 4
#define ID_AA64PFR1_SSBS_MASK (UL(0xf) << ID_AA64PFR1_SSBS_SHIFT)
#define ID_AA64PFR1_SSBS_VAL(x) ((x) & ID_AA64PFR1_SSBS_MASK)
#define ID_AA64PFR1_SSBS_NONE (UL(0x0) << ID_AA64PFR1_SSBS_SHIFT)
#define ID_AA64PFR1_SSBS_PSTATE (UL(0x1) << ID_AA64PFR1_SSBS_SHIFT)
#define ID_AA64PFR1_SSBS_PSTATE_MSR (UL(0x2) << ID_AA64PFR1_SSBS_SHIFT)
#define ID_AA64PFR1_MTE_SHIFT 8
+#define ID_AA64PFR1_MTE_WIDTH 4
#define ID_AA64PFR1_MTE_MASK (UL(0xf) << ID_AA64PFR1_MTE_SHIFT)
#define ID_AA64PFR1_MTE_VAL(x) ((x) & ID_AA64PFR1_MTE_MASK)
#define ID_AA64PFR1_MTE_NONE (UL(0x0) << ID_AA64PFR1_MTE_SHIFT)
@@ -1260,40 +1743,77 @@
#define ID_AA64PFR1_MTE_MTE2 (UL(0x2) << ID_AA64PFR1_MTE_SHIFT)
#define ID_AA64PFR1_MTE_MTE3 (UL(0x3) << ID_AA64PFR1_MTE_SHIFT)
#define ID_AA64PFR1_RAS_frac_SHIFT 12
+#define ID_AA64PFR1_RAS_frac_WIDTH 4
#define ID_AA64PFR1_RAS_frac_MASK (UL(0xf) << ID_AA64PFR1_RAS_frac_SHIFT)
#define ID_AA64PFR1_RAS_frac_VAL(x) ((x) & ID_AA64PFR1_RAS_frac_MASK)
#define ID_AA64PFR1_RAS_frac_p0 (UL(0x0) << ID_AA64PFR1_RAS_frac_SHIFT)
#define ID_AA64PFR1_RAS_frac_p1 (UL(0x1) << ID_AA64PFR1_RAS_frac_SHIFT)
#define ID_AA64PFR1_MPAM_frac_SHIFT 16
+#define ID_AA64PFR1_MPAM_frac_WIDTH 4
#define ID_AA64PFR1_MPAM_frac_MASK (UL(0xf) << ID_AA64PFR1_MPAM_frac_SHIFT)
#define ID_AA64PFR1_MPAM_frac_VAL(x) ((x) & ID_AA64PFR1_MPAM_frac_MASK)
#define ID_AA64PFR1_MPAM_frac_p0 (UL(0x0) << ID_AA64PFR1_MPAM_frac_SHIFT)
#define ID_AA64PFR1_MPAM_frac_p1 (UL(0x1) << ID_AA64PFR1_MPAM_frac_SHIFT)
#define ID_AA64PFR1_SME_SHIFT 24
+#define ID_AA64PFR1_SME_WIDTH 4
#define ID_AA64PFR1_SME_MASK (UL(0xf) << ID_AA64PFR1_SME_SHIFT)
#define ID_AA64PFR1_SME_VAL(x) ((x) & ID_AA64PFR1_SME_MASK)
#define ID_AA64PFR1_SME_NONE (UL(0x0) << ID_AA64PFR1_SME_SHIFT)
#define ID_AA64PFR1_SME_SME (UL(0x1) << ID_AA64PFR1_SME_SHIFT)
#define ID_AA64PFR1_SME_SME2 (UL(0x2) << ID_AA64PFR1_SME_SHIFT)
#define ID_AA64PFR1_RNDR_trap_SHIFT 28
+#define ID_AA64PFR1_RNDR_trap_WIDTH 4
#define ID_AA64PFR1_RNDR_trap_MASK (UL(0xf) << ID_AA64PFR1_RNDR_trap_SHIFT)
#define ID_AA64PFR1_RNDR_trap_VAL(x) ((x) & ID_AA64PFR1_RNDR_trap_MASK)
#define ID_AA64PFR1_RNDR_trap_NONE (UL(0x0) << ID_AA64PFR1_RNDR_trap_SHIFT)
#define ID_AA64PFR1_RNDR_trap_IMPL (UL(0x1) << ID_AA64PFR1_RNDR_trap_SHIFT)
#define ID_AA64PFR1_CSV2_frac_SHIFT 32
+#define ID_AA64PFR1_CSV2_frac_WIDTH 4
#define ID_AA64PFR1_CSV2_frac_MASK (UL(0xf) << ID_AA64PFR1_CSV2_frac_SHIFT)
#define ID_AA64PFR1_CSV2_frac_VAL(x) ((x) & ID_AA64PFR1_CSV2_frac_MASK)
#define ID_AA64PFR1_CSV2_frac_p0 (UL(0x0) << ID_AA64PFR1_CSV2_frac_SHIFT)
#define ID_AA64PFR1_CSV2_frac_p1 (UL(0x1) << ID_AA64PFR1_CSV2_frac_SHIFT)
#define ID_AA64PFR1_CSV2_frac_p2 (UL(0x2) << ID_AA64PFR1_CSV2_frac_SHIFT)
#define ID_AA64PFR1_NMI_SHIFT 36
+#define ID_AA64PFR1_NMI_WIDTH 4
#define ID_AA64PFR1_NMI_MASK (UL(0xf) << ID_AA64PFR1_NMI_SHIFT)
#define ID_AA64PFR1_NMI_VAL(x) ((x) & ID_AA64PFR1_NMI_MASK)
#define ID_AA64PFR1_NMI_NONE (UL(0x0) << ID_AA64PFR1_NMI_SHIFT)
#define ID_AA64PFR1_NMI_IMPL (UL(0x1) << ID_AA64PFR1_NMI_SHIFT)
+#define ID_AA64PFR1_MTE_frac_SHIFT 40
+#define ID_AA64PFR1_MTE_frac_WIDTH 4
+#define ID_AA64PFR1_MTE_frac_MASK (UL(0xf) << ID_AA64PFR1_MTE_frac_SHIFT)
+#define ID_AA64PFR1_MTE_frac_VAL(x) ((x) & ID_AA64PFR1_MTE_frac_MASK)
+#define ID_AA64PFR1_MTE_frac_IMPL (UL(0x0) << ID_AA64PFR1_MTE_frac_SHIFT)
+#define ID_AA64PFR1_MTE_frac_NONE (UL(0xf) << ID_AA64PFR1_MTE_frac_SHIFT)
+#define ID_AA64PFR1_THE_SHIFT 48
+#define ID_AA64PFR1_THE_WIDTH 4
+#define ID_AA64PFR1_THE_MASK (UL(0xf) << ID_AA64PFR1_THE_SHIFT)
+#define ID_AA64PFR1_THE_VAL(x) ((x) & ID_AA64PFR1_THE_MASK)
+#define ID_AA64PFR1_THE_NONE (UL(0x0) << ID_AA64PFR1_THE_SHIFT)
+#define ID_AA64PFR1_THE_IMPL (UL(0x1) << ID_AA64PFR1_THE_SHIFT)
+#define ID_AA64PFR1_MTEX_SHIFT 52
+#define ID_AA64PFR1_MTEX_WIDTH 4
+#define ID_AA64PFR1_MTEX_MASK (UL(0xf) << ID_AA64PFR1_MTEX_SHIFT)
+#define ID_AA64PFR1_MTEX_VAL(x) ((x) & ID_AA64PFR1_MTEX_MASK)
+#define ID_AA64PFR1_MTEX_NONE (UL(0x0) << ID_AA64PFR1_MTEX_SHIFT)
+#define ID_AA64PFR1_MTEX_IMPL (UL(0x1) << ID_AA64PFR1_MTEX_SHIFT)
+#define ID_AA64PFR1_DF2_SHIFT 56
+#define ID_AA64PFR1_DF2_WIDTH 4
+#define ID_AA64PFR1_DF2_MASK (UL(0xf) << ID_AA64PFR1_DF2_SHIFT)
+#define ID_AA64PFR1_DF2_VAL(x) ((x) & ID_AA64PFR1_DF2_MASK)
+#define ID_AA64PFR1_DF2_NONE (UL(0x0) << ID_AA64PFR1_DF2_SHIFT)
+#define ID_AA64PFR1_DF2_IMPL (UL(0x1) << ID_AA64PFR1_DF2_SHIFT)
+#define ID_AA64PFR1_PFAR_SHIFT 60
+#define ID_AA64PFR1_PFAR_WIDTH 4
+#define ID_AA64PFR1_PFAR_MASK (UL(0xf) << ID_AA64PFR1_PFAR_SHIFT)
+#define ID_AA64PFR1_PFAR_VAL(x) ((x) & ID_AA64PFR1_PFAR_MASK)
+#define ID_AA64PFR1_PFAR_NONE (UL(0x0) << ID_AA64PFR1_PFAR_SHIFT)
+#define ID_AA64PFR1_PFAR_IMPL (UL(0x1) << ID_AA64PFR1_PFAR_SHIFT)
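/*
 * Editorial sketch, not part of this change: probing for Branch Target
 * Identification (FEAT_BTI). Hypothetical consumer code.
 */
#if 0	/* example only */
	uint64_t pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
	if (ID_AA64PFR1_BT_VAL(pfr1) == ID_AA64PFR1_BT_IMPL)
		has_bti = true;
#endif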
/* ID_AA64PFR2_EL1 */
-#define ID_AA64PFR2_EL1 MRS_REG(ID_AA64PFR2_EL1)
+#define ID_AA64PFR2_EL1_REG MRS_REG_ALT_NAME(ID_AA64PFR2_EL1)
+#define ID_AA64PFR2_EL1_ISS ISS_MSR_REG(ID_AA64PFR2_EL1)
#define ID_AA64PFR2_EL1_op0 3
#define ID_AA64PFR2_EL1_op1 0
#define ID_AA64PFR2_EL1_CRn 0
@@ -1301,116 +1821,146 @@
#define ID_AA64PFR2_EL1_op2 2
/* ID_AA64ZFR0_EL1 */
-#define ID_AA64ZFR0_EL1 MRS_REG(ID_AA64ZFR0_EL1)
#define ID_AA64ZFR0_EL1_REG MRS_REG_ALT_NAME(ID_AA64ZFR0_EL1)
+#define ID_AA64ZFR0_EL1_ISS ISS_MSR_REG(ID_AA64ZFR0_EL1)
#define ID_AA64ZFR0_EL1_op0 3
#define ID_AA64ZFR0_EL1_op1 0
#define ID_AA64ZFR0_EL1_CRn 0
#define ID_AA64ZFR0_EL1_CRm 4
#define ID_AA64ZFR0_EL1_op2 4
#define ID_AA64ZFR0_SVEver_SHIFT 0
+#define ID_AA64ZFR0_SVEver_WIDTH 4
#define ID_AA64ZFR0_SVEver_MASK (UL(0xf) << ID_AA64ZFR0_SVEver_SHIFT)
-#define ID_AA64ZFR0_SVEver_VAL(x) ((x) & ID_AA64ZFR0_SVEver_MASK
-#define ID_AA64ZFR0_SVEver_SVE1 (UL(0x0) << ID_AA64ZFR0_SVEver_SHIFT)
-#define ID_AA64ZFR0_SVEver_SVE2 (UL(0x1) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_SVEver_VAL(x) ((x) & ID_AA64ZFR0_SVEver_MASK)
+#define ID_AA64ZFR0_SVEver_SVE1 (UL(0x0) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_SVEver_SVE2 (UL(0x1) << ID_AA64ZFR0_SVEver_SHIFT)
+#define ID_AA64ZFR0_SVEver_SVE2P1 (UL(0x2) << ID_AA64ZFR0_SVEver_SHIFT)
#define ID_AA64ZFR0_AES_SHIFT 4
+#define ID_AA64ZFR0_AES_WIDTH 4
#define ID_AA64ZFR0_AES_MASK (UL(0xf) << ID_AA64ZFR0_AES_SHIFT)
-#define ID_AA64ZFR0_AES_VAL(x) ((x) & ID_AA64ZFR0_AES_MASK
-#define ID_AA64ZFR0_AES_NONE (UL(0x0) << ID_AA64ZFR0_AES_SHIFT)
-#define ID_AA64ZFR0_AES_BASE (UL(0x1) << ID_AA64ZFR0_AES_SHIFT)
-#define ID_AA64ZFR0_AES_PMULL (UL(0x2) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_AES_VAL(x) ((x) & ID_AA64ZFR0_AES_MASK)
+#define ID_AA64ZFR0_AES_NONE (UL(0x0) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_AES_BASE (UL(0x1) << ID_AA64ZFR0_AES_SHIFT)
+#define ID_AA64ZFR0_AES_PMULL (UL(0x2) << ID_AA64ZFR0_AES_SHIFT)
#define ID_AA64ZFR0_BitPerm_SHIFT 16
+#define ID_AA64ZFR0_BitPerm_WIDTH 4
#define ID_AA64ZFR0_BitPerm_MASK (UL(0xf) << ID_AA64ZFR0_BitPerm_SHIFT)
-#define ID_AA64ZFR0_BitPerm_VAL(x) ((x) & ID_AA64ZFR0_BitPerm_MASK
-#define ID_AA64ZFR0_BitPerm_NONE (UL(0x0) << ID_AA64ZFR0_BitPerm_SHIFT)
-#define ID_AA64ZFR0_BitPerm_IMPL (UL(0x1) << ID_AA64ZFR0_BitPerm_SHIFT)
+#define ID_AA64ZFR0_BitPerm_VAL(x) ((x) & ID_AA64ZFR0_BitPerm_MASK)
+#define ID_AA64ZFR0_BitPerm_NONE (UL(0x0) << ID_AA64ZFR0_BitPerm_SHIFT)
+#define ID_AA64ZFR0_BitPerm_IMPL (UL(0x1) << ID_AA64ZFR0_BitPerm_SHIFT)
#define ID_AA64ZFR0_BF16_SHIFT 20
+#define ID_AA64ZFR0_BF16_WIDTH 4
#define ID_AA64ZFR0_BF16_MASK (UL(0xf) << ID_AA64ZFR0_BF16_SHIFT)
-#define ID_AA64ZFR0_BF16_VAL(x) ((x) & ID_AA64ZFR0_BF16_MASK
-#define ID_AA64ZFR0_BF16_NONE (UL(0x0) << ID_AA64ZFR0_BF16_SHIFT)
-#define ID_AA64ZFR0_BF16_BASE (UL(0x1) << ID_AA64ZFR0_BF16_SHIFT)
-#define ID_AA64ZFR0_BF16_EBF (UL(0x1) << ID_AA64ZFR0_BF16_SHIFT)
+#define ID_AA64ZFR0_BF16_VAL(x) ((x) & ID_AA64ZFR0_BF16_MASK)
+#define ID_AA64ZFR0_BF16_NONE (UL(0x0) << ID_AA64ZFR0_BF16_SHIFT)
+#define ID_AA64ZFR0_BF16_BASE (UL(0x1) << ID_AA64ZFR0_BF16_SHIFT)
+#define ID_AA64ZFR0_BF16_EBF (UL(0x2) << ID_AA64ZFR0_BF16_SHIFT)
#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_SHA3_WIDTH 4
#define ID_AA64ZFR0_SHA3_MASK (UL(0xf) << ID_AA64ZFR0_SHA3_SHIFT)
-#define ID_AA64ZFR0_SHA3_VAL(x) ((x) & ID_AA64ZFR0_SHA3_MASK
-#define ID_AA64ZFR0_SHA3_NONE (UL(0x0) << ID_AA64ZFR0_SHA3_SHIFT)
-#define ID_AA64ZFR0_SHA3_IMPL (UL(0x1) << ID_AA64ZFR0_SHA3_SHIFT)
+#define ID_AA64ZFR0_SHA3_VAL(x) ((x) & ID_AA64ZFR0_SHA3_MASK)
+#define ID_AA64ZFR0_SHA3_NONE (UL(0x0) << ID_AA64ZFR0_SHA3_SHIFT)
+#define ID_AA64ZFR0_SHA3_IMPL (UL(0x1) << ID_AA64ZFR0_SHA3_SHIFT)
#define ID_AA64ZFR0_SM4_SHIFT 40
+#define ID_AA64ZFR0_SM4_WIDTH 4
#define ID_AA64ZFR0_SM4_MASK (UL(0xf) << ID_AA64ZFR0_SM4_SHIFT)
-#define ID_AA64ZFR0_SM4_VAL(x) ((x) & ID_AA64ZFR0_SM4_MASK
-#define ID_AA64ZFR0_SM4_NONE (UL(0x0) << ID_AA64ZFR0_SM4_SHIFT)
-#define ID_AA64ZFR0_SM4_IMPL (UL(0x1) << ID_AA64ZFR0_SM4_SHIFT)
+#define ID_AA64ZFR0_SM4_VAL(x) ((x) & ID_AA64ZFR0_SM4_MASK)
+#define ID_AA64ZFR0_SM4_NONE (UL(0x0) << ID_AA64ZFR0_SM4_SHIFT)
+#define ID_AA64ZFR0_SM4_IMPL (UL(0x1) << ID_AA64ZFR0_SM4_SHIFT)
#define ID_AA64ZFR0_I8MM_SHIFT 44
+#define ID_AA64ZFR0_I8MM_WIDTH 4
#define ID_AA64ZFR0_I8MM_MASK (UL(0xf) << ID_AA64ZFR0_I8MM_SHIFT)
-#define ID_AA64ZFR0_I8MM_VAL(x) ((x) & ID_AA64ZFR0_I8MM_MASK
-#define ID_AA64ZFR0_I8MM_NONE (UL(0x0) << ID_AA64ZFR0_I8MM_SHIFT)
-#define ID_AA64ZFR0_I8MM_IMPL (UL(0x1) << ID_AA64ZFR0_I8MM_SHIFT)
+#define ID_AA64ZFR0_I8MM_VAL(x) ((x) & ID_AA64ZFR0_I8MM_MASK)
+#define ID_AA64ZFR0_I8MM_NONE (UL(0x0) << ID_AA64ZFR0_I8MM_SHIFT)
+#define ID_AA64ZFR0_I8MM_IMPL (UL(0x1) << ID_AA64ZFR0_I8MM_SHIFT)
#define ID_AA64ZFR0_F32MM_SHIFT 52
+#define ID_AA64ZFR0_F32MM_WIDTH 4
#define ID_AA64ZFR0_F32MM_MASK (UL(0xf) << ID_AA64ZFR0_F32MM_SHIFT)
-#define ID_AA64ZFR0_F32MM_VAL(x) ((x) & ID_AA64ZFR0_F32MM_MASK
-#define ID_AA64ZFR0_F32MM_NONE (UL(0x0) << ID_AA64ZFR0_F32MM_SHIFT)
-#define ID_AA64ZFR0_F32MM_IMPL (UL(0x1) << ID_AA64ZFR0_F32MM_SHIFT)
+#define ID_AA64ZFR0_F32MM_VAL(x) ((x) & ID_AA64ZFR0_F32MM_MASK)
+#define ID_AA64ZFR0_F32MM_NONE (UL(0x0) << ID_AA64ZFR0_F32MM_SHIFT)
+#define ID_AA64ZFR0_F32MM_IMPL (UL(0x1) << ID_AA64ZFR0_F32MM_SHIFT)
#define ID_AA64ZFR0_F64MM_SHIFT 56
+#define ID_AA64ZFR0_F64MM_WIDTH 4
#define ID_AA64ZFR0_F64MM_MASK (UL(0xf) << ID_AA64ZFR0_F64MM_SHIFT)
-#define ID_AA64ZFR0_F64MM_VAL(x) ((x) & ID_AA64ZFR0_F64MM_MASK
-#define ID_AA64ZFR0_F64MM_NONE (UL(0x0) << ID_AA64ZFR0_F64MM_SHIFT)
-#define ID_AA64ZFR0_F64MM_IMPL (UL(0x1) << ID_AA64ZFR0_F64MM_SHIFT)
+#define ID_AA64ZFR0_F64MM_VAL(x) ((x) & ID_AA64ZFR0_F64MM_MASK)
+#define ID_AA64ZFR0_F64MM_NONE (UL(0x0) << ID_AA64ZFR0_F64MM_SHIFT)
+#define ID_AA64ZFR0_F64MM_IMPL (UL(0x1) << ID_AA64ZFR0_F64MM_SHIFT)
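/*
 * Editorial sketch, not part of this change: ID_AA64ZFR0_EL1 is only
 * meaningful when ID_AA64PFR0_EL1.SVE is nonzero. Hypothetical code.
 */
#if 0	/* example only */
	uint64_t pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
	if (ID_AA64PFR0_SVE_VAL(pfr0) != ID_AA64PFR0_SVE_NONE) {
		uint64_t zfr0 = READ_SPECIALREG(id_aa64zfr0_el1);
		if (ID_AA64ZFR0_SVEver_VAL(zfr0) >= ID_AA64ZFR0_SVEver_SVE2)
			has_sve2 = true;
	}
#endif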
/* ID_ISAR5_EL1 */
-#define ID_ISAR5_EL1 MRS_REG(ID_ISAR5_EL1)
+#define ID_ISAR5_EL1_ISS ISS_MSR_REG(ID_ISAR5_EL1)
#define ID_ISAR5_EL1_op0 0x3
#define ID_ISAR5_EL1_op1 0x0
#define ID_ISAR5_EL1_CRn 0x0
#define ID_ISAR5_EL1_CRm 0x2
#define ID_ISAR5_EL1_op2 0x5
#define ID_ISAR5_SEVL_SHIFT 0
+#define ID_ISAR5_SEVL_WIDTH 4
#define ID_ISAR5_SEVL_MASK (UL(0xf) << ID_ISAR5_SEVL_SHIFT)
#define ID_ISAR5_SEVL_VAL(x) ((x) & ID_ISAR5_SEVL_MASK)
#define ID_ISAR5_SEVL_NOP (UL(0x0) << ID_ISAR5_SEVL_SHIFT)
#define ID_ISAR5_SEVL_IMPL (UL(0x1) << ID_ISAR5_SEVL_SHIFT)
#define ID_ISAR5_AES_SHIFT 4
+#define ID_ISAR5_AES_WIDTH 4
#define ID_ISAR5_AES_MASK (UL(0xf) << ID_ISAR5_AES_SHIFT)
#define ID_ISAR5_AES_VAL(x) ((x) & ID_ISAR5_AES_MASK)
#define ID_ISAR5_AES_NONE (UL(0x0) << ID_ISAR5_AES_SHIFT)
#define ID_ISAR5_AES_BASE (UL(0x1) << ID_ISAR5_AES_SHIFT)
#define ID_ISAR5_AES_VMULL (UL(0x2) << ID_ISAR5_AES_SHIFT)
#define ID_ISAR5_SHA1_SHIFT 8
+#define ID_ISAR5_SHA1_WIDTH 4
#define ID_ISAR5_SHA1_MASK (UL(0xf) << ID_ISAR5_SHA1_SHIFT)
#define ID_ISAR5_SHA1_VAL(x) ((x) & ID_ISAR5_SHA1_MASK)
#define ID_ISAR5_SHA1_NONE (UL(0x0) << ID_ISAR5_SHA1_SHIFT)
#define ID_ISAR5_SHA1_IMPL (UL(0x1) << ID_ISAR5_SHA1_SHIFT)
#define ID_ISAR5_SHA2_SHIFT 12
+#define ID_ISAR5_SHA2_WIDTH 4
#define ID_ISAR5_SHA2_MASK (UL(0xf) << ID_ISAR5_SHA2_SHIFT)
#define ID_ISAR5_SHA2_VAL(x) ((x) & ID_ISAR5_SHA2_MASK)
#define ID_ISAR5_SHA2_NONE (UL(0x0) << ID_ISAR5_SHA2_SHIFT)
#define ID_ISAR5_SHA2_IMPL (UL(0x1) << ID_ISAR5_SHA2_SHIFT)
#define ID_ISAR5_CRC32_SHIFT 16
+#define ID_ISAR5_CRC32_WIDTH 4
#define ID_ISAR5_CRC32_MASK (UL(0xf) << ID_ISAR5_CRC32_SHIFT)
#define ID_ISAR5_CRC32_VAL(x) ((x) & ID_ISAR5_CRC32_MASK)
#define ID_ISAR5_CRC32_NONE (UL(0x0) << ID_ISAR5_CRC32_SHIFT)
#define ID_ISAR5_CRC32_IMPL (UL(0x1) << ID_ISAR5_CRC32_SHIFT)
#define ID_ISAR5_RDM_SHIFT 24
+#define ID_ISAR5_RDM_WIDTH 4
#define ID_ISAR5_RDM_MASK (UL(0xf) << ID_ISAR5_RDM_SHIFT)
#define ID_ISAR5_RDM_VAL(x) ((x) & ID_ISAR5_RDM_MASK)
#define ID_ISAR5_RDM_NONE (UL(0x0) << ID_ISAR5_RDM_SHIFT)
#define ID_ISAR5_RDM_IMPL (UL(0x1) << ID_ISAR5_RDM_SHIFT)
#define ID_ISAR5_VCMA_SHIFT 28
+#define ID_ISAR5_VCMA_WIDTH 4
#define ID_ISAR5_VCMA_MASK (UL(0xf) << ID_ISAR5_VCMA_SHIFT)
#define ID_ISAR5_VCMA_VAL(x) ((x) & ID_ISAR5_VCMA_MASK)
#define ID_ISAR5_VCMA_NONE (UL(0x0) << ID_ISAR5_VCMA_SHIFT)
#define ID_ISAR5_VCMA_IMPL (UL(0x1) << ID_ISAR5_VCMA_SHIFT)
/* MAIR_EL1 - Memory Attribute Indirection Register */
-#define MAIR_ATTR_MASK(idx) (UL(0xff) << ((n)* 8))
-#define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
-#define MAIR_DEVICE_nGnRnE UL(0x00)
-#define MAIR_DEVICE_nGnRE UL(0x04)
-#define MAIR_NORMAL_NC UL(0x44)
-#define MAIR_NORMAL_WT UL(0xbb)
-#define MAIR_NORMAL_WB UL(0xff)
+#define MAIR_EL1_REG MRS_REG_ALT_NAME(MAIR_EL1)
+#define MAIR_EL1_op0 3
+#define MAIR_EL1_op1 0
+#define MAIR_EL1_CRn 10
+#define MAIR_EL1_CRm 2
+#define MAIR_EL1_op2 0
+#define MAIR_ATTR_MASK(idx) (UL(0xff) << ((idx) * 8))
+#define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
+#define MAIR_DEVICE_nGnRnE UL(0x00)
+#define MAIR_DEVICE_nGnRE UL(0x04)
+#define MAIR_NORMAL_NC UL(0x44)
+#define MAIR_NORMAL_WT UL(0xbb)
+#define MAIR_NORMAL_WB UL(0xff)
+
+/* MAIR_EL12 */
+#define MAIR_EL12_REG MRS_REG_ALT_NAME(MAIR_EL12)
+#define MAIR_EL12_op0 3
+#define MAIR_EL12_op1 5
+#define MAIR_EL12_CRn 10
+#define MAIR_EL12_CRm 2
+#define MAIR_EL12_op2 0
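/*
 * Editorial sketch, not part of this change: composing a MAIR_EL1 value
 * with MAIR_ATTR(). The index assignment below is illustrative, not
 * FreeBSD's actual memory-attribute layout; WRITE_SPECIALREG() is the
 * existing MSR write accessor.
 */
#if 0	/* example only */
	uint64_t mair = MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) |
	    MAIR_ATTR(MAIR_NORMAL_NC, 1) |
	    MAIR_ATTR(MAIR_NORMAL_WB, 2) |
	    MAIR_ATTR(MAIR_NORMAL_WT, 3);
	WRITE_SPECIALREG(mair_el1, mair);
#endif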
/* MDCCINT_EL1 */
-#define MDCCINT_EL1 MRS_REG(MDCCINT_EL1)
#define MDCCINT_EL1_op0 2
#define MDCCINT_EL1_op1 0
#define MDCCINT_EL1_CRn 0
@@ -1418,7 +1968,6 @@
#define MDCCINT_EL1_op2 0
/* MDCCSR_EL0 */
-#define MDCCSR_EL0 MRS_REG(MDCCSR_EL0)
#define MDCCSR_EL0_op0 2
#define MDCCSR_EL0_op1 3
#define MDCCSR_EL0_CRn 0
@@ -1426,7 +1975,6 @@
#define MDCCSR_EL0_op2 0
/* MDSCR_EL1 - Monitor Debug System Control Register */
-#define MDSCR_EL1 MRS_REG(MDSCR_EL1)
#define MDSCR_EL1_op0 2
#define MDSCR_EL1_op1 0
#define MDSCR_EL1_CRn 0
@@ -1440,7 +1988,6 @@
#define MDSCR_MDE (UL(0x1) << MDSCR_MDE_SHIFT)
/* MIDR_EL1 - Main ID Register */
-#define MIDR_EL1 MRS_REG(MIDR_EL1)
#define MIDR_EL1_op0 3
#define MIDR_EL1_op1 0
#define MIDR_EL1_CRn 0
@@ -1448,7 +1995,6 @@
#define MIDR_EL1_op2 0
/* MPIDR_EL1 - Multiprocessor Affinity Register */
-#define MPIDR_EL1 MRS_REG(MPIDR_EL1)
#define MPIDR_EL1_op0 3
#define MPIDR_EL1_op1 0
#define MPIDR_EL1_CRn 0
@@ -1472,95 +2018,110 @@
#define MPIDR_AFF3_VAL(x) ((x) & MPIDR_AFF3_MASK)
/* MVFR0_EL1 */
-#define MVFR0_EL1 MRS_REG(MVFR0_EL1)
+#define MVFR0_EL1_ISS ISS_MSR_REG(MVFR0_EL1)
#define MVFR0_EL1_op0 0x3
#define MVFR0_EL1_op1 0x0
#define MVFR0_EL1_CRn 0x0
#define MVFR0_EL1_CRm 0x3
#define MVFR0_EL1_op2 0x0
#define MVFR0_SIMDReg_SHIFT 0
+#define MVFR0_SIMDReg_WIDTH 4
#define MVFR0_SIMDReg_MASK (UL(0xf) << MVFR0_SIMDReg_SHIFT)
#define MVFR0_SIMDReg_VAL(x) ((x) & MVFR0_SIMDReg_MASK)
#define MVFR0_SIMDReg_NONE (UL(0x0) << MVFR0_SIMDReg_SHIFT)
#define MVFR0_SIMDReg_FP (UL(0x1) << MVFR0_SIMDReg_SHIFT)
#define MVFR0_SIMDReg_AdvSIMD (UL(0x2) << MVFR0_SIMDReg_SHIFT)
#define MVFR0_FPSP_SHIFT 4
+#define MVFR0_FPSP_WIDTH 4
#define MVFR0_FPSP_MASK (UL(0xf) << MVFR0_FPSP_SHIFT)
#define MVFR0_FPSP_VAL(x) ((x) & MVFR0_FPSP_MASK)
#define MVFR0_FPSP_NONE (UL(0x0) << MVFR0_FPSP_SHIFT)
#define MVFR0_FPSP_VFP_v2 (UL(0x1) << MVFR0_FPSP_SHIFT)
#define MVFR0_FPSP_VFP_v3_v4 (UL(0x2) << MVFR0_FPSP_SHIFT)
#define MVFR0_FPDP_SHIFT 8
+#define MVFR0_FPDP_WIDTH 4
#define MVFR0_FPDP_MASK (UL(0xf) << MVFR0_FPDP_SHIFT)
#define MVFR0_FPDP_VAL(x) ((x) & MVFR0_FPDP_MASK)
#define MVFR0_FPDP_NONE (UL(0x0) << MVFR0_FPDP_SHIFT)
#define MVFR0_FPDP_VFP_v2 (UL(0x1) << MVFR0_FPDP_SHIFT)
#define MVFR0_FPDP_VFP_v3_v4 (UL(0x2) << MVFR0_FPDP_SHIFT)
#define MVFR0_FPTrap_SHIFT 12
+#define MVFR0_FPTrap_WIDTH 4
#define MVFR0_FPTrap_MASK (UL(0xf) << MVFR0_FPTrap_SHIFT)
#define MVFR0_FPTrap_VAL(x) ((x) & MVFR0_FPTrap_MASK)
#define MVFR0_FPTrap_NONE (UL(0x0) << MVFR0_FPTrap_SHIFT)
#define MVFR0_FPTrap_IMPL (UL(0x1) << MVFR0_FPTrap_SHIFT)
#define MVFR0_FPDivide_SHIFT 16
+#define MVFR0_FPDivide_WIDTH 4
#define MVFR0_FPDivide_MASK (UL(0xf) << MVFR0_FPDivide_SHIFT)
#define MVFR0_FPDivide_VAL(x) ((x) & MVFR0_FPDivide_MASK)
#define MVFR0_FPDivide_NONE (UL(0x0) << MVFR0_FPDivide_SHIFT)
#define MVFR0_FPDivide_IMPL (UL(0x1) << MVFR0_FPDivide_SHIFT)
#define MVFR0_FPSqrt_SHIFT 20
+#define MVFR0_FPSqrt_WIDTH 4
#define MVFR0_FPSqrt_MASK (UL(0xf) << MVFR0_FPSqrt_SHIFT)
#define MVFR0_FPSqrt_VAL(x) ((x) & MVFR0_FPSqrt_MASK)
#define MVFR0_FPSqrt_NONE (UL(0x0) << MVFR0_FPSqrt_SHIFT)
#define MVFR0_FPSqrt_IMPL (UL(0x1) << MVFR0_FPSqrt_SHIFT)
#define MVFR0_FPShVec_SHIFT 24
+#define MVFR0_FPShVec_WIDTH 4
#define MVFR0_FPShVec_MASK (UL(0xf) << MVFR0_FPShVec_SHIFT)
#define MVFR0_FPShVec_VAL(x) ((x) & MVFR0_FPShVec_MASK)
#define MVFR0_FPShVec_NONE (UL(0x0) << MVFR0_FPShVec_SHIFT)
#define MVFR0_FPShVec_IMPL (UL(0x1) << MVFR0_FPShVec_SHIFT)
#define MVFR0_FPRound_SHIFT 28
+#define MVFR0_FPRound_WIDTH 4
#define MVFR0_FPRound_MASK (UL(0xf) << MVFR0_FPRound_SHIFT)
#define MVFR0_FPRound_VAL(x) ((x) & MVFR0_FPRound_MASK)
#define MVFR0_FPRound_NONE (UL(0x0) << MVFR0_FPRound_SHIFT)
#define MVFR0_FPRound_IMPL (UL(0x1) << MVFR0_FPRound_SHIFT)
/* MVFR1_EL1 */
-#define MVFR1_EL1 MRS_REG(MVFR1_EL1)
+#define MVFR1_EL1_ISS ISS_MSR_REG(MVFR1_EL1)
#define MVFR1_EL1_op0 0x3
#define MVFR1_EL1_op1 0x0
#define MVFR1_EL1_CRn 0x0
#define MVFR1_EL1_CRm 0x3
#define MVFR1_EL1_op2 0x1
#define MVFR1_FPFtZ_SHIFT 0
+#define MVFR1_FPFtZ_WIDTH 4
#define MVFR1_FPFtZ_MASK (UL(0xf) << MVFR1_FPFtZ_SHIFT)
#define MVFR1_FPFtZ_VAL(x) ((x) & MVFR1_FPFtZ_MASK)
#define MVFR1_FPFtZ_NONE (UL(0x0) << MVFR1_FPFtZ_SHIFT)
#define MVFR1_FPFtZ_IMPL (UL(0x1) << MVFR1_FPFtZ_SHIFT)
#define MVFR1_FPDNaN_SHIFT 4
+#define MVFR1_FPDNaN_WIDTH 4
#define MVFR1_FPDNaN_MASK (UL(0xf) << MVFR1_FPDNaN_SHIFT)
#define MVFR1_FPDNaN_VAL(x) ((x) & MVFR1_FPDNaN_MASK)
#define MVFR1_FPDNaN_NONE (UL(0x0) << MVFR1_FPDNaN_SHIFT)
#define MVFR1_FPDNaN_IMPL (UL(0x1) << MVFR1_FPDNaN_SHIFT)
#define MVFR1_SIMDLS_SHIFT 8
+#define MVFR1_SIMDLS_WIDTH 4
#define MVFR1_SIMDLS_MASK (UL(0xf) << MVFR1_SIMDLS_SHIFT)
#define MVFR1_SIMDLS_VAL(x) ((x) & MVFR1_SIMDLS_MASK)
#define MVFR1_SIMDLS_NONE (UL(0x0) << MVFR1_SIMDLS_SHIFT)
#define MVFR1_SIMDLS_IMPL (UL(0x1) << MVFR1_SIMDLS_SHIFT)
#define MVFR1_SIMDInt_SHIFT 12
+#define MVFR1_SIMDInt_WIDTH 4
#define MVFR1_SIMDInt_MASK (UL(0xf) << MVFR1_SIMDInt_SHIFT)
#define MVFR1_SIMDInt_VAL(x) ((x) & MVFR1_SIMDInt_MASK)
#define MVFR1_SIMDInt_NONE (UL(0x0) << MVFR1_SIMDInt_SHIFT)
#define MVFR1_SIMDInt_IMPL (UL(0x1) << MVFR1_SIMDInt_SHIFT)
#define MVFR1_SIMDSP_SHIFT 16
+#define MVFR1_SIMDSP_WIDTH 4
#define MVFR1_SIMDSP_MASK (UL(0xf) << MVFR1_SIMDSP_SHIFT)
#define MVFR1_SIMDSP_VAL(x) ((x) & MVFR1_SIMDSP_MASK)
#define MVFR1_SIMDSP_NONE (UL(0x0) << MVFR1_SIMDSP_SHIFT)
#define MVFR1_SIMDSP_IMPL (UL(0x1) << MVFR1_SIMDSP_SHIFT)
#define MVFR1_SIMDHP_SHIFT 20
+#define MVFR1_SIMDHP_WIDTH 4
#define MVFR1_SIMDHP_MASK (UL(0xf) << MVFR1_SIMDHP_SHIFT)
#define MVFR1_SIMDHP_VAL(x) ((x) & MVFR1_SIMDHP_MASK)
#define MVFR1_SIMDHP_NONE (UL(0x0) << MVFR1_SIMDHP_SHIFT)
#define MVFR1_SIMDHP_CONV_SP (UL(0x1) << MVFR1_SIMDHP_SHIFT)
#define MVFR1_SIMDHP_ARITH (UL(0x2) << MVFR1_SIMDHP_SHIFT)
#define MVFR1_FPHP_SHIFT 24
+#define MVFR1_FPHP_WIDTH 4
#define MVFR1_FPHP_MASK (UL(0xf) << MVFR1_FPHP_SHIFT)
#define MVFR1_FPHP_VAL(x) ((x) & MVFR1_FPHP_MASK)
#define MVFR1_FPHP_NONE (UL(0x0) << MVFR1_FPHP_SHIFT)
@@ -1568,13 +2129,13 @@
#define MVFR1_FPHP_CONV_DP (UL(0x2) << MVFR1_FPHP_SHIFT)
#define MVFR1_FPHP_ARITH (UL(0x3) << MVFR1_FPHP_SHIFT)
#define MVFR1_SIMDFMAC_SHIFT 28
+#define MVFR1_SIMDFMAC_WIDTH 4
#define MVFR1_SIMDFMAC_MASK (UL(0xf) << MVFR1_SIMDFMAC_SHIFT)
#define MVFR1_SIMDFMAC_VAL(x) ((x) & MVFR1_SIMDFMAC_MASK)
#define MVFR1_SIMDFMAC_NONE (UL(0x0) << MVFR1_SIMDFMAC_SHIFT)
#define MVFR1_SIMDFMAC_IMPL (UL(0x1) << MVFR1_SIMDFMAC_SHIFT)
/* OSDLR_EL1 */
-#define OSDLR_EL1 MRS_REG(OSDLR_EL1)
#define OSDLR_EL1_op0 2
#define OSDLR_EL1_op1 0
#define OSDLR_EL1_CRn 1
@@ -1582,7 +2143,6 @@
#define OSDLR_EL1_op2 4
/* OSLAR_EL1 */
-#define OSLAR_EL1 MRS_REG(OSLAR_EL1)
#define OSLAR_EL1_op0 2
#define OSLAR_EL1_op1 0
#define OSLAR_EL1_CRn 1
@@ -1590,7 +2150,6 @@
#define OSLAR_EL1_op2 4
/* OSLSR_EL1 */
-#define OSLSR_EL1 MRS_REG(OSLSR_EL1)
#define OSLSR_EL1_op0 2
#define OSLSR_EL1_op1 0
#define OSLSR_EL1_CRn 1
@@ -1608,7 +2167,7 @@
#define PAR_NS_SHIFT 9
#define PAR_NS_MASK (0x3 << PAR_NS_SHIFT)
#define PAR_PA_SHIFT 12
-#define PAR_PA_MASK 0x0000fffffffff000
+#define PAR_PA_MASK 0x000ffffffffff000
#define PAR_ATTR_SHIFT 56
#define PAR_ATTR_MASK (0xff << PAR_ATTR_SHIFT)
/* When PAR_F == 1 (aborted) */
@@ -1620,12 +2179,12 @@
#define PAR_S_MASK (0x1 << PAR_S_SHIFT)
/* PMBIDR_EL1 */
-#define PMBIDR_EL1 MRS_REG(PMBIDR_EL1)
-#define PMBIDR_EL1_op0 0x3
-#define PMBIDR_EL1_op1 0x0
-#define PMBIDR_EL1_CRn 0x9
-#define PMBIDR_EL1_CRm 0xa
-#define PMBIDR_EL1_op2 0x7
+#define PMBIDR_EL1_REG MRS_REG_ALT_NAME(PMBIDR_EL1)
+#define PMBIDR_EL1_op0 3
+#define PMBIDR_EL1_op1 0
+#define PMBIDR_EL1_CRn 9
+#define PMBIDR_EL1_CRm 10
+#define PMBIDR_EL1_op2 7
#define PMBIDR_Align_SHIFT 0
#define PMBIDR_Align_MASK (UL(0xf) << PMBIDR_Align_SHIFT)
#define PMBIDR_P_SHIFT 4
@@ -1634,12 +2193,12 @@
#define PMBIDR_F (UL(0x1) << PMBIDR_F_SHIFT)
/* PMBLIMITR_EL1 */
-#define PMBLIMITR_EL1 MRS_REG(PMBLIMITR_EL1)
-#define PMBLIMITR_EL1_op0 0x3
-#define PMBLIMITR_EL1_op1 0x0
-#define PMBLIMITR_EL1_CRn 0x9
-#define PMBLIMITR_EL1_CRm 0xa
-#define PMBLIMITR_EL1_op2 0x0
+#define PMBLIMITR_EL1_REG MRS_REG_ALT_NAME(PMBLIMITR_EL1)
+#define PMBLIMITR_EL1_op0 3
+#define PMBLIMITR_EL1_op1 0
+#define PMBLIMITR_EL1_CRn 9
+#define PMBLIMITR_EL1_CRm 10
+#define PMBLIMITR_EL1_op2 0
#define PMBLIMITR_E_SHIFT 0
#define PMBLIMITR_E (UL(0x1) << PMBLIMITR_E_SHIFT)
#define PMBLIMITR_FM_SHIFT 1
@@ -1651,25 +2210,27 @@
(UL(0xfffffffffffff) << PMBLIMITR_LIMIT_SHIFT)
/* PMBPTR_EL1 */
-#define PMBPTR_EL1 MRS_REG(PMBPTR_EL1)
-#define PMBPTR_EL1_op0 0x3
-#define PMBPTR_EL1_op1 0x0
-#define PMBPTR_EL1_CRn 0x9
-#define PMBPTR_EL1_CRm 0xa
-#define PMBPTR_EL1_op2 0x1
+#define PMBPTR_EL1_REG MRS_REG_ALT_NAME(PMBPTR_EL1)
+#define PMBPTR_EL1_op0 3
+#define PMBPTR_EL1_op1 0
+#define PMBPTR_EL1_CRn 9
+#define PMBPTR_EL1_CRm 10
+#define PMBPTR_EL1_op2 1
#define PMBPTR_PTR_SHIFT 0
#define PMBPTR_PTR_MASK \
(UL(0xffffffffffffffff) << PMBPTR_PTR_SHIFT)
/* PMBSR_EL1 */
-#define PMBSR_EL1 MRS_REG(PMBSR_EL1)
-#define PMBSR_EL1_op0 0x3
-#define PMBSR_EL1_op1 0x0
-#define PMBSR_EL1_CRn 0x9
-#define PMBSR_EL1_CRm 0xa
-#define PMBSR_EL1_op2 0x3
+#define PMBSR_EL1_REG MRS_REG_ALT_NAME(PMBSR_EL1)
+#define PMBSR_EL1_op0 3
+#define PMBSR_EL1_op1 0
+#define PMBSR_EL1_CRn 9
+#define PMBSR_EL1_CRm 10
+#define PMBSR_EL1_op2 3
#define PMBSR_MSS_SHIFT 0
#define PMBSR_MSS_MASK (UL(0xffff) << PMBSR_MSS_SHIFT)
+#define PMBSR_MSS_BSC_MASK (UL(0x3f) << PMBSR_MSS_SHIFT)
+#define PMBSR_MSS_FSC_MASK (UL(0x3f) << PMBSR_MSS_SHIFT)
#define PMBSR_COLL_SHIFT 16
#define PMBSR_COLL (UL(0x1) << PMBSR_COLL_SHIFT)
#define PMBSR_S_SHIFT 17
@@ -1682,7 +2243,6 @@
#define PMBSR_EC_MASK (UL(0x3f) << PMBSR_EC_SHIFT)
/* PMCCFILTR_EL0 */
-#define PMCCFILTR_EL0 MRS_REG(PMCCFILTR_EL0)
#define PMCCFILTR_EL0_op0 3
#define PMCCFILTR_EL0_op1 3
#define PMCCFILTR_EL0_CRn 14
@@ -1690,7 +2250,6 @@
#define PMCCFILTR_EL0_op2 7
/* PMCCNTR_EL0 */
-#define PMCCNTR_EL0 MRS_REG(PMCCNTR_EL0)
#define PMCCNTR_EL0_op0 3
#define PMCCNTR_EL0_op1 3
#define PMCCNTR_EL0_CRn 9
@@ -1698,7 +2257,6 @@
#define PMCCNTR_EL0_op2 0
/* PMCEID0_EL0 */
-#define PMCEID0_EL0 MRS_REG(PMCEID0_EL0)
#define PMCEID0_EL0_op0 3
#define PMCEID0_EL0_op1 3
#define PMCEID0_EL0_CRn 9
@@ -1706,7 +2264,6 @@
#define PMCEID0_EL0_op2 6
/* PMCEID1_EL0 */
-#define PMCEID1_EL0 MRS_REG(PMCEID1_EL0)
#define PMCEID1_EL0_op0 3
#define PMCEID1_EL0_op1 3
#define PMCEID1_EL0_CRn 9
@@ -1714,7 +2271,6 @@
#define PMCEID1_EL0_op2 7
/* PMCNTENCLR_EL0 */
-#define PMCNTENCLR_EL0 MRS_REG(PMCNTENCLR_EL0)
#define PMCNTENCLR_EL0_op0 3
#define PMCNTENCLR_EL0_op1 3
#define PMCNTENCLR_EL0_CRn 9
@@ -1722,7 +2278,6 @@
#define PMCNTENCLR_EL0_op2 2
/* PMCNTENSET_EL0 */
-#define PMCNTENSET_EL0 MRS_REG(PMCNTENSET_EL0)
#define PMCNTENSET_EL0_op0 3
#define PMCNTENSET_EL0_op1 3
#define PMCNTENSET_EL0_CRn 9
@@ -1730,24 +2285,24 @@
#define PMCNTENSET_EL0_op2 1
/* PMCR_EL0 - Performance Monitoring Counters */
-#define PMCR_EL0 MRS_REG(PMCR_EL0)
#define PMCR_EL0_op0 3
#define PMCR_EL0_op1 3
#define PMCR_EL0_CRn 9
#define PMCR_EL0_CRm 12
#define PMCR_EL0_op2 0
-#define PMCR_E (1 << 0) /* Enable all counters */
-#define PMCR_P (1 << 1) /* Reset all counters */
-#define PMCR_C (1 << 2) /* Clock counter reset */
-#define PMCR_D (1 << 3) /* CNTR counts every 64 clk cycles */
-#define PMCR_X (1 << 4) /* Export to ext. monitoring (ETM) */
-#define PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define PMCR_LC (1 << 6) /* Long cycle count enable */
-#define PMCR_IMP_SHIFT 24 /* Implementer code */
-#define PMCR_IMP_MASK (0xff << PMCR_IMP_SHIFT)
-#define PMCR_IMP_ARM 0x41
+#define PMCR_E (1ul << 0) /* Enable all counters */
+#define PMCR_P (1ul << 1) /* Reset all counters */
+#define PMCR_C (1ul << 2) /* Clock counter reset */
+#define PMCR_D (1ul << 3) /* CNTR counts every 64 clk cycles */
+#define PMCR_X (1ul << 4) /* Export to ext. monitoring (ETM) */
+#define PMCR_DP (1ul << 5) /* Disable CCNT if non-invasive debug */
+#define PMCR_LC (1ul << 6) /* Long cycle count enable */
+#define PMCR_LP (1ul << 7) /* Long event count enable */
+#define PMCR_FZO (1ul << 9) /* Freeze-on-overflow */
+#define PMCR_N_SHIFT 11 /* Number of counters implemented */
+#define PMCR_N_MASK (0x1ful << PMCR_N_SHIFT)
#define PMCR_IDCODE_SHIFT 16 /* Identification code */
-#define PMCR_IDCODE_MASK (0xff << PMCR_IDCODE_SHIFT)
+#define PMCR_IDCODE_MASK (0xfful << PMCR_IDCODE_SHIFT)
#define PMCR_IDCODE_CORTEX_A57 0x01
#define PMCR_IDCODE_CORTEX_A72 0x02
#define PMCR_IDCODE_CORTEX_A53 0x03
@@ -1759,8 +2314,10 @@
#define PMCR_IDCODE_CORTEX_A55 0x45
#define PMCR_IDCODE_NEOVERSE_E1 0x46
#define PMCR_IDCODE_CORTEX_A75 0x4a
-#define PMCR_N_SHIFT 11 /* Number of counters implemented */
-#define PMCR_N_MASK (0x1f << PMCR_N_SHIFT)
+#define PMCR_IMP_SHIFT 24 /* Implementer code */
+#define PMCR_IMP_MASK (0xfful << PMCR_IMP_SHIFT)
+#define PMCR_IMP_ARM 0x41
+#define PMCR_FZS (1ul << 32) /* Freeze-on-SPE event */
/* PMEVCNTR<n>_EL0 */
#define PMEVCNTR_EL0_op0 3
@@ -1792,7 +2349,6 @@
#define PMEVTYPER_P (1 << 31) /* Privileged filtering */
/* PMINTENCLR_EL1 */
-#define PMINTENCLR_EL1 MRS_REG(PMINTENCLR_EL1)
#define PMINTENCLR_EL1_op0 3
#define PMINTENCLR_EL1_op1 0
#define PMINTENCLR_EL1_CRn 9
@@ -1800,7 +2356,6 @@
#define PMINTENCLR_EL1_op2 2
/* PMINTENSET_EL1 */
-#define PMINTENSET_EL1 MRS_REG(PMINTENSET_EL1)
#define PMINTENSET_EL1_op0 3
#define PMINTENSET_EL1_op1 0
#define PMINTENSET_EL1_CRn 9
@@ -1808,7 +2363,6 @@
#define PMINTENSET_EL1_op2 1
/* PMMIR_EL1 */
-#define PMMIR_EL1 MRS_REG(PMMIR_EL1)
#define PMMIR_EL1_op0 3
#define PMMIR_EL1_op1 0
#define PMMIR_EL1_CRn 9
@@ -1816,7 +2370,6 @@
#define PMMIR_EL1_op2 6
/* PMOVSCLR_EL0 */
-#define PMOVSCLR_EL0 MRS_REG(PMOVSCLR_EL0)
#define PMOVSCLR_EL0_op0 3
#define PMOVSCLR_EL0_op1 3
#define PMOVSCLR_EL0_CRn 9
@@ -1824,7 +2377,6 @@
#define PMOVSCLR_EL0_op2 3
/* PMOVSSET_EL0 */
-#define PMOVSSET_EL0 MRS_REG(PMOVSSET_EL0)
#define PMOVSSET_EL0_op0 3
#define PMOVSSET_EL0_op1 3
#define PMOVSSET_EL0_CRn 9
@@ -1832,12 +2384,12 @@
#define PMOVSSET_EL0_op2 3
/* PMSCR_EL1 */
-#define PMSCR_EL1 MRS_REG(PMSCR_EL1)
-#define PMSCR_EL1_op0 0x3
-#define PMSCR_EL1_op1 0x0
-#define PMSCR_EL1_CRn 0x9
-#define PMSCR_EL1_CRm 0x9
-#define PMSCR_EL1_op2 0x0
+#define PMSCR_EL1_REG MRS_REG_ALT_NAME(PMSCR_EL1)
+#define PMSCR_EL1_op0 3
+#define PMSCR_EL1_op1 0
+#define PMSCR_EL1_CRn 9
+#define PMSCR_EL1_CRm 9
+#define PMSCR_EL1_op2 0
#define PMSCR_E0SPE_SHIFT 0
#define PMSCR_E0SPE (UL(0x1) << PMSCR_E0SPE_SHIFT)
#define PMSCR_E1SPE_SHIFT 1
@@ -1852,7 +2404,6 @@
#define PMSCR_PCT_MASK (UL(0x3) << PMSCR_PCT_SHIFT)
/* PMSELR_EL0 */
-#define PMSELR_EL0 MRS_REG(PMSELR_EL0)
#define PMSELR_EL0_op0 3
#define PMSELR_EL0_op1 3
#define PMSELR_EL0_CRn 9
@@ -1861,20 +2412,20 @@
#define PMSELR_SEL_MASK 0x1f
/* PMSEVFR_EL1 */
-#define PMSEVFR_EL1 MRS_REG(PMSEVFR_EL1)
-#define PMSEVFR_EL1_op0 0x3
-#define PMSEVFR_EL1_op1 0x0
-#define PMSEVFR_EL1_CRn 0x9
-#define PMSEVFR_EL1_CRm 0x9
-#define PMSEVFR_EL1_op2 0x5
+#define PMSEVFR_EL1_REG MRS_REG_ALT_NAME(PMSEVFR_EL1)
+#define PMSEVFR_EL1_op0 3
+#define PMSEVFR_EL1_op1 0
+#define PMSEVFR_EL1_CRn 9
+#define PMSEVFR_EL1_CRm 9
+#define PMSEVFR_EL1_op2 5
/* PMSFCR_EL1 */
-#define PMSFCR_EL1 MRS_REG(PMSFCR_EL1)
-#define PMSFCR_EL1_op0 0x3
-#define PMSFCR_EL1_op1 0x0
-#define PMSFCR_EL1_CRn 0x9
-#define PMSFCR_EL1_CRm 0x9
-#define PMSFCR_EL1_op2 0x4
+#define PMSFCR_EL1_REG MRS_REG_ALT_NAME(PMSFCR_EL1)
+#define PMSFCR_EL1_op0 3
+#define PMSFCR_EL1_op1 0
+#define PMSFCR_EL1_CRn 9
+#define PMSFCR_EL1_CRm 9
+#define PMSFCR_EL1_op2 4
#define PMSFCR_FE_SHIFT 0
#define PMSFCR_FE (UL(0x1) << PMSFCR_FE_SHIFT)
#define PMSFCR_FT_SHIFT 1
@@ -1891,24 +2442,24 @@
#define PMSFCR_ST (UL(0x1) << PMSFCR_ST_SHIFT)
/* PMSICR_EL1 */
-#define PMSICR_EL1 MRS_REG(PMSICR_EL1)
-#define PMSICR_EL1_op0 0x3
-#define PMSICR_EL1_op1 0x0
-#define PMSICR_EL1_CRn 0x9
-#define PMSICR_EL1_CRm 0x9
-#define PMSICR_EL1_op2 0x2
+#define PMSICR_EL1_REG MRS_REG_ALT_NAME(PMSICR_EL1)
+#define PMSICR_EL1_op0 3
+#define PMSICR_EL1_op1 0
+#define PMSICR_EL1_CRn 9
+#define PMSICR_EL1_CRm 9
+#define PMSICR_EL1_op2 2
#define PMSICR_COUNT_SHIFT 0
#define PMSICR_COUNT_MASK (UL(0xffffffff) << PMSICR_COUNT_SHIFT)
#define PMSICR_ECOUNT_SHIFT 56
#define PMSICR_ECOUNT_MASK (UL(0xff) << PMSICR_ECOUNT_SHIFT)
/* PMSIDR_EL1 */
-#define PMSIDR_EL1 MRS_REG(PMSIDR_EL1)
-#define PMSIDR_EL1_op0 0x3
-#define PMSIDR_EL1_op1 0x0
-#define PMSIDR_EL1_CRn 0x9
-#define PMSIDR_EL1_CRm 0x9
-#define PMSIDR_EL1_op2 0x7
+#define PMSIDR_EL1_REG MRS_REG_ALT_NAME(PMSIDR_EL1)
+#define PMSIDR_EL1_op0 3
+#define PMSIDR_EL1_op1 0
+#define PMSIDR_EL1_CRn 9
+#define PMSIDR_EL1_CRm 9
+#define PMSIDR_EL1_op2 7
#define PMSIDR_FE_SHIFT 0
#define PMSIDR_FE (UL(0x1) << PMSIDR_FE_SHIFT)
#define PMSIDR_FT_SHIFT 1
@@ -1935,37 +2486,36 @@
#define PMSIDR_PBT (UL(0x1) << PMSIDR_PBT_SHIFT)
/* PMSIRR_EL1 */
-#define PMSIRR_EL1 MRS_REG(PMSIRR_EL1)
-#define PMSIRR_EL1_op0 0x3
-#define PMSIRR_EL1_op1 0x0
-#define PMSIRR_EL1_CRn 0x9
-#define PMSIRR_EL1_CRm 0x9
-#define PMSIRR_EL1_op2 0x3
+#define PMSIRR_EL1_REG MRS_REG_ALT_NAME(PMSIRR_EL1)
+#define PMSIRR_EL1_op0 3
+#define PMSIRR_EL1_op1 0
+#define PMSIRR_EL1_CRn 9
+#define PMSIRR_EL1_CRm 9
+#define PMSIRR_EL1_op2 3
#define PMSIRR_RND_SHIFT 0
#define PMSIRR_RND (UL(0x1) << PMSIRR_RND_SHIFT)
#define PMSIRR_INTERVAL_SHIFT 8
#define PMSIRR_INTERVAL_MASK (UL(0xffffff) << PMSIRR_INTERVAL_SHIFT)
/* PMSLATFR_EL1 */
-#define PMSLATFR_EL1 MRS_REG(PMSLATFR_EL1)
-#define PMSLATFR_EL1_op0 0x3
-#define PMSLATFR_EL1_op1 0x0
-#define PMSLATFR_EL1_CRn 0x9
-#define PMSLATFR_EL1_CRm 0x9
-#define PMSLATFR_EL1_op2 0x6
+#define PMSLATFR_EL1_REG MRS_REG_ALT_NAME(PMSLATFR_EL1)
+#define PMSLATFR_EL1_op0 3
+#define PMSLATFR_EL1_op1 0
+#define PMSLATFR_EL1_CRn 9
+#define PMSLATFR_EL1_CRm 9
+#define PMSLATFR_EL1_op2 6
#define PMSLATFR_MINLAT_SHIFT 0
#define PMSLATFR_MINLAT_MASK (UL(0xfff) << PMSLATFR_MINLAT_SHIFT)
/* PMSNEVFR_EL1 */
-#define PMSNEVFR_EL1 MRS_REG(PMSNEVFR_EL1)
-#define PMSNEVFR_EL1_op0 0x3
-#define PMSNEVFR_EL1_op1 0x0
-#define PMSNEVFR_EL1_CRn 0x9
-#define PMSNEVFR_EL1_CRm 0x9
-#define PMSNEVFR_EL1_op2 0x1
+#define PMSNEVFR_EL1_REG MRS_REG_ALT_NAME(PMSNEVFR_EL1)
+#define PMSNEVFR_EL1_op0 3
+#define PMSNEVFR_EL1_op1 0
+#define PMSNEVFR_EL1_CRn 9
+#define PMSNEVFR_EL1_CRm 9
+#define PMSNEVFR_EL1_op2 1
/* PMSWINC_EL0 */
-#define PMSWINC_EL0 MRS_REG(PMSWINC_EL0)
#define PMSWINC_EL0_op0 3
#define PMSWINC_EL0_op1 3
#define PMSWINC_EL0_CRn 9
@@ -1973,7 +2523,6 @@
#define PMSWINC_EL0_op2 4
/* PMUSERENR_EL0 */
-#define PMUSERENR_EL0 MRS_REG(PMUSERENR_EL0)
#define PMUSERENR_EL0_op0 3
#define PMUSERENR_EL0_op1 3
#define PMUSERENR_EL0_CRn 9
@@ -1981,7 +2530,6 @@
#define PMUSERENR_EL0_op2 0
/* PMXEVCNTR_EL0 */
-#define PMXEVCNTR_EL0 MRS_REG(PMXEVCNTR_EL0)
#define PMXEVCNTR_EL0_op0 3
#define PMXEVCNTR_EL0_op1 3
#define PMXEVCNTR_EL0_CRn 9
@@ -1989,7 +2537,6 @@
#define PMXEVCNTR_EL0_op2 2
/* PMXEVTYPER_EL0 */
-#define PMXEVTYPER_EL0 MRS_REG(PMXEVTYPER_EL0)
#define PMXEVTYPER_EL0_op0 3
#define PMXEVTYPER_EL0_op1 3
#define PMXEVTYPER_EL0_CRn 9
@@ -1997,7 +2544,6 @@
#define PMXEVTYPER_EL0_op2 1
/* RNDRRS */
-#define RNDRRS MRS_REG(RNDRRS)
#define RNDRRS_REG MRS_REG_ALT_NAME(RNDRRS)
#define RNDRRS_op0 3
#define RNDRRS_op1 3
@@ -2006,6 +2552,12 @@
#define RNDRRS_op2 1
/* SCTLR_EL1 - System Control Register */
+#define SCTLR_EL1_REG MRS_REG_ALT_NAME(SCTLR_EL1)
+#define SCTLR_EL1_op0 3
+#define SCTLR_EL1_op1 0
+#define SCTLR_EL1_CRn 1
+#define SCTLR_EL1_CRm 0
+#define SCTLR_EL1_op2 0
#define SCTLR_RES1 0x30d00800 /* Reserved ARMv8.0, write 1 */
#define SCTLR_M (UL(0x1) << 0)
#define SCTLR_A (UL(0x1) << 1)
@@ -2056,7 +2608,21 @@
#define SCTLR_EnALS (UL(0x1) << 56)
#define SCTLR_EPAN (UL(0x1) << 57)
+/* SCTLR_EL12 */
+#define SCTLR_EL12_REG MRS_REG_ALT_NAME(SCTLR_EL12)
+#define SCTLR_EL12_op0 3
+#define SCTLR_EL12_op1 5
+#define SCTLR_EL12_CRn 1
+#define SCTLR_EL12_CRm 0
+#define SCTLR_EL12_op2 0
+
/* SPSR_EL1 */
+#define SPSR_EL1_REG MRS_REG_ALT_NAME(SPSR_EL1)
+#define SPSR_EL1_op0 3
+#define SPSR_EL1_op1 0
+#define SPSR_EL1_CRn 4
+#define SPSR_EL1_CRm 0
+#define SPSR_EL1_op2 0
/*
* When the exception is taken in AArch64:
* M[3:2] is the exception level
@@ -2083,7 +2649,8 @@
#define PSR_D 0x00000200UL
#define PSR_DAIF (PSR_D | PSR_A | PSR_I | PSR_F)
/* The default DAIF mask. These bits are valid in spsr_el1 and daif */
-#define PSR_DAIF_DEFAULT (PSR_F)
+#define PSR_DAIF_DEFAULT (0)
+#define PSR_DAIF_INTR (PSR_I | PSR_F)
#define PSR_BTYPE 0x00000c00UL
#define PSR_SSBS 0x00001000UL
#define PSR_ALLINT 0x00002000UL
@@ -2102,8 +2669,15 @@
#define PSR_SETTABLE_32 PSR_FLAGS
#define PSR_SETTABLE_64 (PSR_FLAGS | PSR_SS)
+/* SPSR_EL12 */
+#define SPSR_EL12_REG MRS_REG_ALT_NAME(SPSR_EL12)
+#define SPSR_EL12_op0 3
+#define SPSR_EL12_op1 5
+#define SPSR_EL12_CRn 4
+#define SPSR_EL12_CRm 0
+#define SPSR_EL12_op2 0
+
/* REVIDR_EL1 - Revision ID Register */
-#define REVIDR_EL1 MRS_REG(REVIDR_EL1)
#define REVIDR_EL1_op0 3
#define REVIDR_EL1_op1 0
#define REVIDR_EL1_CRn 0
@@ -2111,111 +2685,119 @@
#define REVIDR_EL1_op2 6
/* TCR_EL1 - Translation Control Register */
+#define TCR_EL1_REG MRS_REG_ALT_NAME(TCR_EL1)
+#define TCR_EL1_op0 3
+#define TCR_EL1_op1 0
+#define TCR_EL1_CRn 2
+#define TCR_EL1_CRm 0
+#define TCR_EL1_op2 2
/* Bits 63:59 are reserved */
+#define TCR_DS_SHIFT 59
+#define TCR_DS (UL(1) << TCR_DS_SHIFT)
#define TCR_TCMA1_SHIFT 58
-#define TCR_TCMA1 (1UL << TCR_TCMA1_SHIFT)
+#define TCR_TCMA1 (UL(1) << TCR_TCMA1_SHIFT)
#define TCR_TCMA0_SHIFT 57
-#define TCR_TCMA0 (1UL << TCR_TCMA0_SHIFT)
+#define TCR_TCMA0 (UL(1) << TCR_TCMA0_SHIFT)
#define TCR_E0PD1_SHIFT 56
-#define TCR_E0PD1 (1UL << TCR_E0PD1_SHIFT)
+#define TCR_E0PD1 (UL(1) << TCR_E0PD1_SHIFT)
#define TCR_E0PD0_SHIFT 55
-#define TCR_E0PD0 (1UL << TCR_E0PD0_SHIFT)
+#define TCR_E0PD0 (UL(1) << TCR_E0PD0_SHIFT)
#define TCR_NFD1_SHIFT 54
-#define TCR_NFD1 (1UL << TCR_NFD1_SHIFT)
+#define TCR_NFD1 (UL(1) << TCR_NFD1_SHIFT)
#define TCR_NFD0_SHIFT 53
-#define TCR_NFD0 (1UL << TCR_NFD0_SHIFT)
+#define TCR_NFD0 (UL(1) << TCR_NFD0_SHIFT)
#define TCR_TBID1_SHIFT 52
-#define TCR_TBID1 (1UL << TCR_TBID1_SHIFT)
+#define TCR_TBID1 (UL(1) << TCR_TBID1_SHIFT)
#define TCR_TBID0_SHIFT 51
-#define TCR_TBID0 (1UL << TCR_TBID0_SHIFT)
+#define TCR_TBID0 (UL(1) << TCR_TBID0_SHIFT)
#define TCR_HWU162_SHIFT 50
-#define TCR_HWU162 (1UL << TCR_HWU162_SHIFT)
+#define TCR_HWU162 (UL(1) << TCR_HWU162_SHIFT)
#define TCR_HWU161_SHIFT 49
-#define TCR_HWU161 (1UL << TCR_HWU161_SHIFT)
+#define TCR_HWU161 (UL(1) << TCR_HWU161_SHIFT)
#define TCR_HWU160_SHIFT 48
-#define TCR_HWU160 (1UL << TCR_HWU160_SHIFT)
+#define TCR_HWU160 (UL(1) << TCR_HWU160_SHIFT)
#define TCR_HWU159_SHIFT 47
-#define TCR_HWU159 (1UL << TCR_HWU159_SHIFT)
+#define TCR_HWU159 (UL(1) << TCR_HWU159_SHIFT)
#define TCR_HWU1 \
(TCR_HWU159 | TCR_HWU160 | TCR_HWU161 | TCR_HWU162)
#define TCR_HWU062_SHIFT 46
-#define TCR_HWU062 (1UL << TCR_HWU062_SHIFT)
+#define TCR_HWU062 (UL(1) << TCR_HWU062_SHIFT)
#define TCR_HWU061_SHIFT 45
-#define TCR_HWU061 (1UL << TCR_HWU061_SHIFT)
+#define TCR_HWU061 (UL(1) << TCR_HWU061_SHIFT)
#define TCR_HWU060_SHIFT 44
-#define TCR_HWU060 (1UL << TCR_HWU060_SHIFT)
+#define TCR_HWU060 (UL(1) << TCR_HWU060_SHIFT)
#define TCR_HWU059_SHIFT 43
-#define TCR_HWU059 (1UL << TCR_HWU059_SHIFT)
+#define TCR_HWU059 (UL(1) << TCR_HWU059_SHIFT)
#define TCR_HWU0 \
(TCR_HWU059 | TCR_HWU060 | TCR_HWU061 | TCR_HWU062)
#define TCR_HPD1_SHIFT 42
-#define TCR_HPD1 (1UL << TCR_HPD1_SHIFT)
+#define TCR_HPD1 (UL(1) << TCR_HPD1_SHIFT)
#define TCR_HPD0_SHIFT 41
-#define TCR_HPD0 (1UL << TCR_HPD0_SHIFT)
+#define TCR_HPD0 (UL(1) << TCR_HPD0_SHIFT)
#define TCR_HD_SHIFT 40
-#define TCR_HD (1UL << TCR_HD_SHIFT)
+#define TCR_HD (UL(1) << TCR_HD_SHIFT)
#define TCR_HA_SHIFT 39
-#define TCR_HA (1UL << TCR_HA_SHIFT)
+#define TCR_HA (UL(1) << TCR_HA_SHIFT)
#define TCR_TBI1_SHIFT 38
-#define TCR_TBI1 (1UL << TCR_TBI1_SHIFT)
+#define TCR_TBI1 (UL(1) << TCR_TBI1_SHIFT)
#define TCR_TBI0_SHIFT 37
-#define TCR_TBI0 (1UL << TCR_TBI0_SHIFT)
+#define TCR_TBI0 (UL(1) << TCR_TBI0_SHIFT)
#define TCR_ASID_SHIFT 36
#define TCR_ASID_WIDTH 1
-#define TCR_ASID_16 (1UL << TCR_ASID_SHIFT)
+#define TCR_ASID_16 (UL(1) << TCR_ASID_SHIFT)
/* Bit 35 is reserved */
#define TCR_IPS_SHIFT 32
#define TCR_IPS_WIDTH 3
-#define TCR_IPS_32BIT (0UL << TCR_IPS_SHIFT)
-#define TCR_IPS_36BIT (1UL << TCR_IPS_SHIFT)
-#define TCR_IPS_40BIT (2UL << TCR_IPS_SHIFT)
-#define TCR_IPS_42BIT (3UL << TCR_IPS_SHIFT)
-#define TCR_IPS_44BIT (4UL << TCR_IPS_SHIFT)
-#define TCR_IPS_48BIT (5UL << TCR_IPS_SHIFT)
+#define TCR_IPS_32BIT (UL(0) << TCR_IPS_SHIFT)
+#define TCR_IPS_36BIT (UL(1) << TCR_IPS_SHIFT)
+#define TCR_IPS_40BIT (UL(2) << TCR_IPS_SHIFT)
+#define TCR_IPS_42BIT (UL(3) << TCR_IPS_SHIFT)
+#define TCR_IPS_44BIT (UL(4) << TCR_IPS_SHIFT)
+#define TCR_IPS_48BIT (UL(5) << TCR_IPS_SHIFT)
#define TCR_TG1_SHIFT 30
-#define TCR_TG1_MASK (3UL << TCR_TG1_SHIFT)
-#define TCR_TG1_16K (1UL << TCR_TG1_SHIFT)
-#define TCR_TG1_4K (2UL << TCR_TG1_SHIFT)
-#define TCR_TG1_64K (3UL << TCR_TG1_SHIFT)
+#define TCR_TG1_MASK (UL(3) << TCR_TG1_SHIFT)
+#define TCR_TG1_16K (UL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
#define TCR_SH1_SHIFT 28
-#define TCR_SH1_IS (3UL << TCR_SH1_SHIFT)
+#define TCR_SH1_IS (UL(3) << TCR_SH1_SHIFT)
#define TCR_ORGN1_SHIFT 26
-#define TCR_ORGN1_WBWA (1UL << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WBWA (UL(1) << TCR_ORGN1_SHIFT)
#define TCR_IRGN1_SHIFT 24
-#define TCR_IRGN1_WBWA (1UL << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WBWA (UL(1) << TCR_IRGN1_SHIFT)
#define TCR_EPD1_SHIFT 23
-#define TCR_EPD1 (1UL << TCR_EPD1_SHIFT)
+#define TCR_EPD1 (UL(1) << TCR_EPD1_SHIFT)
#define TCR_A1_SHIFT 22
-#define TCR_A1 (0x1UL << TCR_A1_SHIFT)
+#define TCR_A1 (UL(1) << TCR_A1_SHIFT)
#define TCR_T1SZ_SHIFT 16
-#define TCR_T1SZ_MASK (0x3fUL << TCR_T1SZ_SHIFT)
+#define TCR_T1SZ_MASK (UL(0x3f) << TCR_T1SZ_SHIFT)
#define TCR_T1SZ(x) ((x) << TCR_T1SZ_SHIFT)
#define TCR_TG0_SHIFT 14
-#define TCR_TG0_MASK (3UL << TCR_TG0_SHIFT)
-#define TCR_TG0_4K (0UL << TCR_TG0_SHIFT)
-#define TCR_TG0_64K (1UL << TCR_TG0_SHIFT)
-#define TCR_TG0_16K (2UL << TCR_TG0_SHIFT)
+#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)
+#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)
#define TCR_SH0_SHIFT 12
-#define TCR_SH0_IS (3UL << TCR_SH0_SHIFT)
+#define TCR_SH0_IS (UL(3) << TCR_SH0_SHIFT)
#define TCR_ORGN0_SHIFT 10
-#define TCR_ORGN0_WBWA (1UL << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)
#define TCR_IRGN0_SHIFT 8
-#define TCR_IRGN0_WBWA (1UL << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)
#define TCR_EPD0_SHIFT 7
-#define TCR_EPD0 (1UL << TCR_EPD0_SHIFT)
+#define TCR_EPD0 (UL(1) << TCR_EPD0_SHIFT)
/* Bit 6 is reserved */
#define TCR_T0SZ_SHIFT 0
-#define TCR_T0SZ_MASK (0x3fUL << TCR_T0SZ_SHIFT)
+#define TCR_T0SZ_MASK (UL(0x3f) << TCR_T0SZ_SHIFT)
#define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT)
#define TCR_TxSZ(x) (TCR_T1SZ(x) | TCR_T0SZ(x))
-#define TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\
- (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
-#ifdef SMP
-#define TCR_SMP_ATTRS (TCR_SH0_IS | TCR_SH1_IS)
-#else
-#define TCR_SMP_ATTRS 0
-#endif
+/* TCR_EL12 */
+#define TCR_EL12_REG MRS_REG_ALT_NAME(TCR_EL12)
+#define TCR_EL12_op0 3
+#define TCR_EL12_op1 5
+#define TCR_EL12_CRn 2
+#define TCR_EL12_CRm 0
+#define TCR_EL12_op2 2
/* TTBR0_EL1 & TTBR1_EL1 - Translation Table Base Register 0 & 1 */
#define TTBR_ASID_SHIFT 48
@@ -2224,7 +2806,61 @@
#define TTBR_CnP_SHIFT 0
#define TTBR_CnP (1ul << TTBR_CnP_SHIFT)
+/* TTBR0_EL1 */
+#define TTBR0_EL1_REG MRS_REG_ALT_NAME(TTBR0_EL1)
+#define TTBR0_EL1_op0 3
+#define TTBR0_EL1_op1 0
+#define TTBR0_EL1_CRn 2
+#define TTBR0_EL1_CRm 0
+#define TTBR0_EL1_op2 0
+
+/* TTBR0_EL12 */
+#define TTBR0_EL12_REG MRS_REG_ALT_NAME(TTBR0_EL12)
+#define TTBR0_EL12_op0 3
+#define TTBR0_EL12_op1 5
+#define TTBR0_EL12_CRn 2
+#define TTBR0_EL12_CRm 0
+#define TTBR0_EL12_op2 0
+
+/* TTBR1_EL1 */
+#define TTBR1_EL1_REG MRS_REG_ALT_NAME(TTBR1_EL1)
+#define TTBR1_EL1_op0 3
+#define TTBR1_EL1_op1 0
+#define TTBR1_EL1_CRn 2
+#define TTBR1_EL1_CRm 0
+#define TTBR1_EL1_op2 1
+
+/* TTBR1_EL12 */
+#define TTBR1_EL12_REG MRS_REG_ALT_NAME(TTBR1_EL12)
+#define TTBR1_EL12_op0 3
+#define TTBR1_EL12_op1 5
+#define TTBR1_EL12_CRn 2
+#define TTBR1_EL12_CRm 0
+#define TTBR1_EL12_op2 1
+
+/* VBAR_EL1 */
+#define VBAR_EL1_REG MRS_REG_ALT_NAME(VBAR_EL1)
+#define VBAR_EL1_op0 3
+#define VBAR_EL1_op1 0
+#define VBAR_EL1_CRn 12
+#define VBAR_EL1_CRm 0
+#define VBAR_EL1_op2 0
+
+/* VBAR_EL12 */
+#define VBAR_EL12_REG MRS_REG_ALT_NAME(VBAR_EL12)
+#define VBAR_EL12_op0 3
+#define VBAR_EL12_op1 5
+#define VBAR_EL12_CRn 12
+#define VBAR_EL12_CRm 0
+#define VBAR_EL12_op2 0
+
/* ZCR_EL1 - SVE Control Register */
+#define ZCR_EL1_REG MRS_REG_ALT_NAME(ZCR_EL1)
+#define ZCR_EL1_op0 3
+#define ZCR_EL1_op1 0
+#define ZCR_EL1_CRn 1
+#define ZCR_EL1_CRm 2
+#define ZCR_EL1_op2 0
#define ZCR_LEN_SHIFT 0
#define ZCR_LEN_MASK (0xf << ZCR_LEN_SHIFT)
#define ZCR_LEN_BYTES(x) ((((x) & ZCR_LEN_MASK) + 1) * 16)
diff --git a/sys/arm64/include/asm.h b/sys/arm64/include/asm.h
index 16be39b3eae4..4f373dc4b7e1 100644
--- a/sys/arm64/include/asm.h
+++ b/sys/arm64/include/asm.h
@@ -73,6 +73,16 @@
#define lr x30
/*
+ * Check whether a given CPU feature is present; if it is not, jump to the
+ * given label. The tmp register must be a register able to hold the
+ * temporary data.
+ */
+#define CHECK_CPU_FEAT(tmp, feat_reg, feat, label) \
+ mrs tmp, ##feat_reg##_el1; \
+ ubfx tmp, tmp, ##feat_reg##_##feat##_SHIFT, ##feat_reg##_##feat##_WIDTH; \
+ cbz tmp, label
+
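A minimal sketch of the intended use from assembly (the ID_AA64PFR1 BT field macros are assumed to follow armreg.h's usual <reg>_<field>_SHIFT/_WIDTH naming):

	/* Branch to 2f when FEAT_BTI is not implemented. */
	CHECK_CPU_FEAT(x2, ID_AA64PFR1, BT, 2f)
	/* ...FEAT_BTI-dependent setup... */
2: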
+/*
* Sets the trap fault handler. The exception handler will return to the
* address in the handler register on a data abort or the xzr register to
* clear the handler. The tmp parameter should be a register able to hold
@@ -87,19 +97,25 @@
ldr tmp, =has_pan; /* Get the addr of has_pan */ \
ldr reg, [tmp]; /* Read it */ \
cbz reg, 997f; /* If no PAN skip */ \
- .inst 0xd500409f | (0 << 8); /* Clear PAN */ \
+ .arch_extension pan; \
+ msr pan, #0; /* Disable PAN checks */ \
+ .arch_extension nopan; \
997:
#define EXIT_USER_ACCESS(reg) \
cbz reg, 998f; /* If no PAN skip */ \
- .inst 0xd500409f | (1 << 8); /* Set PAN */ \
+ .arch_extension pan; \
+ msr pan, #1; /* Enable PAN checks */ \
+ .arch_extension nopan; \
998:
#define EXIT_USER_ACCESS_CHECK(reg, tmp) \
ldr tmp, =has_pan; /* Get the addr of has_pan */ \
ldr reg, [tmp]; /* Read it */ \
cbz reg, 999f; /* If no PAN skip */ \
- .inst 0xd500409f | (1 << 8); /* Set PAN */ \
+ .arch_extension pan; \
+ msr pan, #1; /* Enable PAN checks */ \
+ .arch_extension nopan; \
999:
/*
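For reference, the retired .inst words hand-encode the same MSR (immediate) instruction the mnemonic now spells, per the standard A64 encoding of the PSTATE.PAN field (op1 0, op2 4, immediate in CRm bits 11:8):

	/* 0xd500409f | (imm << 8) == msr pan, #imm; e.g. 0xd500419f == msr pan, #1 */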
diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
index 76ca951678d4..998a49c02e60 100644
--- a/sys/arm64/include/atomic.h
+++ b/sys/arm64/include/atomic.h
@@ -65,8 +65,9 @@ extern _Bool lse_supported;
#include <sys/atomic_common.h>
-#ifdef _KERNEL
-
+#if defined(__ARM_FEATURE_ATOMICS)
+#define _ATOMIC_LSE_SUPPORTED 1
+#elif defined(_KERNEL)
#ifdef LSE_ATOMICS
#define _ATOMIC_LSE_SUPPORTED 1
#else
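With __ARM_FEATURE_ATOMICS checked first, userland code can opt into the LSE path at build time; a sketch of a compiler invocation that predefines the macro (the -march spelling follows the usual GCC/Clang ACLE convention, assumed here):

	cc -march=armv8-a+lse -O2 -c lock.c	# predefines __ARM_FEATURE_ATOMICS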
@@ -464,7 +465,7 @@ _ATOMIC_TEST_OP(set, orr, set)
#define _ATOMIC_LOAD_ACQ_IMPL(t, w, s) \
static __inline uint##t##_t \
-atomic_load_acq_##t(volatile uint##t##_t *p) \
+atomic_load_acq_##t(const volatile uint##t##_t *p) \
{ \
uint##t##_t ret; \
\
@@ -608,6 +609,8 @@ _ATOMIC_STORE_REL_IMPL(64, , )
#define atomic_set_ptr atomic_set_64
#define atomic_swap_ptr atomic_swap_64
#define atomic_subtract_ptr atomic_subtract_64
+#define atomic_testandclear_ptr atomic_testandclear_64
+#define atomic_testandset_ptr atomic_testandset_64
#define atomic_add_acq_long atomic_add_acq_64
#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
diff --git a/sys/arm64/include/bus.h b/sys/arm64/include/bus.h
index 196916de92e7..2e2ef2f6d008 100644
--- a/sys/arm64/include/bus.h
+++ b/sys/arm64/include/bus.h
@@ -76,6 +76,7 @@
#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFFUL
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFFUL
+#define BUS_SPACE_MAXADDR_36BIT 0xFFFFFFFFFUL
#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFUL
#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFFUL
#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFFUL
diff --git a/sys/arm64/include/bus_dma.h b/sys/arm64/include/bus_dma.h
index fb494be0bdc4..d9c37eb2641b 100644
--- a/sys/arm64/include/bus_dma.h
+++ b/sys/arm64/include/bus_dma.h
@@ -62,7 +62,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
/*
* Free a piece of memory and its associated dmamap that were allocated
- * via bus_dmamem_alloc. Make the same choice for free/contigfree.
+ * via bus_dmamem_alloc.
*/
static inline void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 8f5a9e3dbd3c..935e3754bf25 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -42,8 +42,10 @@
#ifndef _MACHINE_CPU_H_
#define _MACHINE_CPU_H_
+#if !defined(__ASSEMBLER__)
#include <machine/atomic.h>
#include <machine/frame.h>
+#endif
#include <machine/armreg.h>
#define TRAPF_PC(tfp) ((tfp)->tf_elr)
@@ -73,6 +75,7 @@
#define CPU_IMPL_CAVIUM 0x43
#define CPU_IMPL_DEC 0x44
#define CPU_IMPL_FUJITSU 0x46
+#define CPU_IMPL_HISILICON 0x48
#define CPU_IMPL_INFINEON 0x49
#define CPU_IMPL_FREESCALE 0x4D
#define CPU_IMPL_NVIDIA 0x4E
@@ -82,6 +85,7 @@
#define CPU_IMPL_APPLE 0x61
#define CPU_IMPL_INTEL 0x69
#define CPU_IMPL_AMPERE 0xC0
+#define CPU_IMPL_MICROSOFT 0x6D
/* ARM Part numbers */
#define CPU_PART_FOUNDATION 0xD00
@@ -101,6 +105,7 @@
#define CPU_PART_AEM_V8 0xD0F
#define CPU_PART_NEOVERSE_V1 0xD40
#define CPU_PART_CORTEX_A78 0xD41
+#define CPU_PART_CORTEX_A78AE 0xD42
#define CPU_PART_CORTEX_A65AE 0xD43
#define CPU_PART_CORTEX_X1 0xD44
#define CPU_PART_CORTEX_A510 0xD46
@@ -113,6 +118,14 @@
#define CPU_PART_CORTEX_A715 0xD4D
#define CPU_PART_CORTEX_X3 0xD4E
#define CPU_PART_NEOVERSE_V2 0xD4F
+#define CPU_PART_CORTEX_A520 0xD80
+#define CPU_PART_CORTEX_A720 0xD81
+#define CPU_PART_CORTEX_X4 0xD82
+#define CPU_PART_NEOVERSE_V3AE 0xD83
+#define CPU_PART_NEOVERSE_V3 0xD84
+#define CPU_PART_CORTEX_X925 0xD85
+#define CPU_PART_CORTEX_A725 0xD87
+#define CPU_PART_NEOVERSE_N3 0xD8E
/* Cavium Part numbers */
#define CPU_PART_THUNDERX 0x0A1
@@ -125,9 +138,16 @@
#define CPU_REV_THUNDERX2_0 0x00
-/* APM / Ampere Part Number */
+/* APM (now Ampere) Part number */
#define CPU_PART_EMAG8180 0x000
+/* Ampere Part numbers */
+#define CPU_PART_AMPERE1 0xAC3
+#define CPU_PART_AMPERE1A 0xAC4
+
+/* Microsoft Part numbers */
+#define CPU_PART_AZURE_COBALT_100 0xD49
+
/* Qualcomm */
#define CPU_PART_KRYO400_GOLD 0x804
#define CPU_PART_KRYO400_SILVER 0x805
@@ -198,6 +218,7 @@
#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 0
#endif
+#if !defined(__ASSEMBLER__)
extern char btext[];
extern char etext[];
@@ -229,9 +250,18 @@ void ptrauth_mp_start(uint64_t);
/* Functions to read the sanitised view of the special registers */
void update_special_regs(u_int);
-bool extract_user_id_field(u_int, u_int, uint8_t *);
-bool get_kernel_reg(u_int, uint64_t *);
-bool get_kernel_reg_masked(u_int, uint64_t *, uint64_t);
+void update_special_reg_iss(u_int, uint64_t, uint64_t);
+#define update_special_reg(reg, clear, set) \
+ update_special_reg_iss(reg ## _ISS, clear, set)
+bool get_kernel_reg_iss(u_int, uint64_t *);
+#define get_kernel_reg(reg, valp) \
+ get_kernel_reg_iss(reg ## _ISS, valp)
+bool get_kernel_reg_iss_masked(u_int, uint64_t *, uint64_t);
+#define get_kernel_reg_masked(reg, valp, mask) \
+ get_kernel_reg_iss_masked(reg ## _ISS, valp, mask)
+bool get_user_reg_iss(u_int, uint64_t *, bool);
+#define get_user_reg(reg, valp, fbsd) \
+ get_user_reg_iss(reg ## _ISS, valp, fbsd)
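A minimal sketch of the wrappers in use, kernel context assumed (ID_AA64PFR0_EL1_ISS and the SVE field helpers are taken to exist in armreg.h per the ISS_MSR_REG conversions above):

	uint64_t pfr0;

	if (get_kernel_reg(ID_AA64PFR0_EL1, &pfr0) &&
	    ID_AA64PFR0_SVE_VAL(pfr0) != ID_AA64PFR0_SVE_NONE) {
		/* FEAT_SVE is present in the sanitised view. */
	}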
void cpu_desc_init(void);
@@ -268,6 +298,7 @@ ADDRESS_TRANSLATE_FUNC(s1e0w)
ADDRESS_TRANSLATE_FUNC(s1e1r)
ADDRESS_TRANSLATE_FUNC(s1e1w)
+#endif /* !__ASSEMBLER__ */
#endif
#endif /* !_MACHINE_CPU_H_ */
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
new file mode 100644
index 000000000000..9fe6a9dd95d9
--- /dev/null
+++ b/sys/arm64/include/cpu_feat.h
@@ -0,0 +1,88 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_CPU_FEAT_H_
+#define _MACHINE_CPU_FEAT_H_
+
+#include <sys/linker_set.h>
+
+typedef enum {
+ ERRATA_UNKNOWN, /* Unknown erratum */
+ ERRATA_NONE, /* No errata for this feature on this system. */
+	ERRATA_AFFECTED,	/* There are errata on this system. */
+	ERRATA_FW_MITIGAION,	/* There are errata, and a firmware */
+ /* mitigation. The mitigation may need a */
+ /* kernel component. */
+} cpu_feat_errata;
+
+#define CPU_FEAT_STAGE_MASK 0x00000001
+#define CPU_FEAT_EARLY_BOOT 0x00000000
+#define CPU_FEAT_AFTER_DEV 0x00000001
+
+#define CPU_FEAT_SCOPE_MASK 0x00000010
+#define CPU_FEAT_PER_CPU 0x00000000
+#define CPU_FEAT_SYSTEM 0x00000010
+
+struct cpu_feat;
+
+typedef bool (cpu_feat_check)(const struct cpu_feat *, u_int);
+typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
+ u_int **, u_int *);
+typedef void (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
+ u_int *, u_int);
+
+struct cpu_feat {
+ const char *feat_name;
+ cpu_feat_check *feat_check;
+ cpu_feat_has_errata *feat_has_errata;
+ cpu_feat_enable *feat_enable;
+ uint32_t feat_flags;
+};
+SET_DECLARE(cpu_feat_set, struct cpu_feat);
+
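A hypothetical registration, sketched only to show how the pieces fit together (all feat_foo names are invented; DATA_SET comes from <sys/linker_set.h>):

	static bool
	feat_foo_check(const struct cpu_feat *feat, u_int midr)
	{
		return (false);		/* probe the ID registers here */
	}

	static void
	feat_foo_enable(const struct cpu_feat *feat, cpu_feat_errata errata_status,
	    u_int *errata_list, u_int errata_count)
	{
		/* set the relevant control bits on this CPU */
	}

	static struct cpu_feat feat_foo = {
		.feat_name	= "FEAT_FOO",
		.feat_check	= feat_foo_check,
		.feat_enable	= feat_foo_enable,
		.feat_flags	= CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU,
	};
	DATA_SET(cpu_feat_set, feat_foo);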
+/*
+ * Allow drivers to mark an erratum as worked around, e.g. the Errata
+ * Management ABI may know the workaround isn't needed on a given system.
+ */
+typedef cpu_feat_errata (*cpu_feat_errata_check_fn)(const struct cpu_feat *,
+ u_int);
+void cpu_feat_register_errata_check(cpu_feat_errata_check_fn);
+
+void enable_cpu_feat(uint32_t);
+
+/* Check if an erratum is in the list of errata */
+static inline bool
+cpu_feat_has_erratum(u_int *errata_list, u_int errata_count, u_int erratum)
+{
+ for (u_int i = 0; i < errata_count; i++)
+		if (errata_list[i] == erratum)
+ return (true);
+
+ return (false);
+}
+
+#endif /* _MACHINE_CPU_FEAT_H_ */
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
index ba712f48b262..e6e1f682794e 100644
--- a/sys/arm64/include/cpufunc.h
+++ b/sys/arm64/include/cpufunc.h
@@ -41,8 +41,6 @@ breakpoint(void)
#ifdef _KERNEL
#include <machine/armreg.h>
-void pan_enable(void);
-
static __inline register_t
dbg_disable(void)
{
@@ -160,6 +158,26 @@ invalidate_local_icache(void)
"isb \n");
}
+static __inline void
+wfet(uint64_t val)
+{
+ __asm __volatile(
+ "msr s0_3_c1_c0_0, %0\n"
+ :
+ : "r" ((val))
+ : "memory");
+}
+
+static __inline void
+wfit(uint64_t val)
+{
+ __asm __volatile(
+ "msr s0_3_c1_c0_1, %0\n"
+ :
+ : "r" ((val))
+ : "memory");
+}
+
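Both helpers spell the FEAT_WFxT instructions through their generic system-register encodings (s0_3_c1_c0_0 is WFET, s0_3_c1_c0_1 is WFIT), so they assemble on toolchains without the extension enabled. A usage sketch with an illustrative deadline (READ_SPECIALREG is the existing armreg.h accessor):

	/* Wait for an interrupt, or give up after ~1000 counter ticks. */
	wfit(READ_SPECIALREG(cntvct_el0) + 1000);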
extern bool icache_aliasing;
extern bool icache_vmid;
diff --git a/sys/arm64/include/efi.h b/sys/arm64/include/efi.h
index ed9e8d86b7bc..bfce872296a2 100644
--- a/sys/arm64/include/efi.h
+++ b/sys/arm64/include/efi.h
@@ -35,6 +35,8 @@
#ifndef __ARM64_INCLUDE_EFI_H_
#define __ARM64_INCLUDE_EFI_H_
+#include <sys/types.h>
+
#define EFIABI_ATTR
#ifdef _KERNEL
@@ -44,7 +46,7 @@
#define EFI_TIME_UNLOCK()
#define EFI_TIME_OWNED()
-#define EFI_RT_HANDLE_FAULTS_DEFAULT 0
+#define EFI_RT_HANDLE_FAULTS_DEFAULT 1
#endif
struct efirt_callinfo {
diff --git a/sys/arm64/include/elf.h b/sys/arm64/include/elf.h
index 9f9cd44ac9f7..d6328c143585 100644
--- a/sys/arm64/include/elf.h
+++ b/sys/arm64/include/elf.h
@@ -94,90 +94,110 @@ __ElfType(Auxinfo);
#endif
/* HWCAP */
-#define HWCAP_FP 0x00000001
-#define HWCAP_ASIMD 0x00000002
-#define HWCAP_EVTSTRM 0x00000004
-#define HWCAP_AES 0x00000008
-#define HWCAP_PMULL 0x00000010
-#define HWCAP_SHA1 0x00000020
-#define HWCAP_SHA2 0x00000040
-#define HWCAP_CRC32 0x00000080
-#define HWCAP_ATOMICS 0x00000100
-#define HWCAP_FPHP 0x00000200
-#define HWCAP_ASIMDHP 0x00000400
+#define HWCAP_FP (1 << 0)
+#define HWCAP_ASIMD (1 << 1)
+#define HWCAP_EVTSTRM (1 << 2)
+#define HWCAP_AES (1 << 3)
+#define HWCAP_PMULL (1 << 4)
+#define HWCAP_SHA1 (1 << 5)
+#define HWCAP_SHA2 (1 << 6)
+#define HWCAP_CRC32 (1 << 7)
+#define HWCAP_ATOMICS (1 << 8)
+#define HWCAP_FPHP (1 << 9)
+#define HWCAP_ASIMDHP (1 << 10)
/*
* XXX: The following bits (from CPUID to FLAGM) were originally incorrect,
* but later changed to match the Linux definitions. No compatibility code is
* provided, as the fix was expected to result in near-zero fallout.
*/
-#define HWCAP_CPUID 0x00000800
-#define HWCAP_ASIMDRDM 0x00001000
-#define HWCAP_JSCVT 0x00002000
-#define HWCAP_FCMA 0x00004000
-#define HWCAP_LRCPC 0x00008000
-#define HWCAP_DCPOP 0x00010000
-#define HWCAP_SHA3 0x00020000
-#define HWCAP_SM3 0x00040000
-#define HWCAP_SM4 0x00080000
-#define HWCAP_ASIMDDP 0x00100000
-#define HWCAP_SHA512 0x00200000
-#define HWCAP_SVE 0x00400000
-#define HWCAP_ASIMDFHM 0x00800000
-#define HWCAP_DIT 0x01000000
-#define HWCAP_USCAT 0x02000000
-#define HWCAP_ILRCPC 0x04000000
-#define HWCAP_FLAGM 0x08000000
-#define HWCAP_SSBS 0x10000000
-#define HWCAP_SB 0x20000000
-#define HWCAP_PACA 0x40000000
-#define HWCAP_PACG 0x80000000
+#define HWCAP_CPUID (1 << 11)
+#define HWCAP_ASIMDRDM (1 << 12)
+#define HWCAP_JSCVT (1 << 13)
+#define HWCAP_FCMA (1 << 14)
+#define HWCAP_LRCPC (1 << 15)
+#define HWCAP_DCPOP (1 << 16)
+#define HWCAP_SHA3 (1 << 17)
+#define HWCAP_SM3 (1 << 18)
+#define HWCAP_SM4 (1 << 19)
+#define HWCAP_ASIMDDP (1 << 20)
+#define HWCAP_SHA512 (1 << 21)
+#define HWCAP_SVE (1 << 22)
+#define HWCAP_ASIMDFHM (1 << 23)
+#define HWCAP_DIT (1 << 24)
+#define HWCAP_USCAT (1 << 25)
+#define HWCAP_ILRCPC (1 << 26)
+#define HWCAP_FLAGM (1 << 27)
+#define HWCAP_SSBS (1 << 28)
+#define HWCAP_SB (1 << 29)
+#define HWCAP_PACA (1 << 30)
+#define HWCAP_PACG (1UL << 31)
+#define HWCAP_GCS (1UL << 32)
/* HWCAP2 */
-#define HWCAP2_DCPODP 0x0000000000000001ul
-#define HWCAP2_SVE2 0x0000000000000002ul
-#define HWCAP2_SVEAES 0x0000000000000004ul
-#define HWCAP2_SVEPMULL 0x0000000000000008ul
-#define HWCAP2_SVEBITPERM 0x0000000000000010ul
-#define HWCAP2_SVESHA3 0x0000000000000020ul
-#define HWCAP2_SVESM4 0x0000000000000040ul
-#define HWCAP2_FLAGM2 0x0000000000000080ul
-#define HWCAP2_FRINT 0x0000000000000100ul
-#define HWCAP2_SVEI8MM 0x0000000000000200ul
-#define HWCAP2_SVEF32MM 0x0000000000000400ul
-#define HWCAP2_SVEF64MM 0x0000000000000800ul
-#define HWCAP2_SVEBF16 0x0000000000001000ul
-#define HWCAP2_I8MM 0x0000000000002000ul
-#define HWCAP2_BF16 0x0000000000004000ul
-#define HWCAP2_DGH 0x0000000000008000ul
-#define HWCAP2_RNG 0x0000000000010000ul
-#define HWCAP2_BTI 0x0000000000020000ul
-#define HWCAP2_MTE 0x0000000000040000ul
-#define HWCAP2_ECV 0x0000000000080000ul
-#define HWCAP2_AFP 0x0000000000100000ul
-#define HWCAP2_RPRES 0x0000000000200000ul
-#define HWCAP2_MTE3 0x0000000000400000ul
-#define HWCAP2_SME 0x0000000000800000ul
-#define HWCAP2_SME_I16I64 0x0000000001000000ul
-#define HWCAP2_SME_F64F64 0x0000000002000000ul
-#define HWCAP2_SME_I8I32 0x0000000004000000ul
-#define HWCAP2_SME_F16F32 0x0000000008000000ul
-#define HWCAP2_SME_B16F32 0x0000000010000000ul
-#define HWCAP2_SME_F32F32 0x0000000020000000ul
-#define HWCAP2_SME_FA64 0x0000000040000000ul
-#define HWCAP2_WFXT 0x0000000080000000ul
-#define HWCAP2_EBF16 0x0000000100000000ul
-#define HWCAP2_SVE_EBF16 0x0000000200000000ul
-#define HWCAP2_CSSC 0x0000000400000000ul
-#define HWCAP2_RPRFM 0x0000000800000000ul
-#define HWCAP2_SVE2P1 0x0000001000000000ul
-#define HWCAP2_SME2 0x0000002000000000ul
-#define HWCAP2_SME2P1 0x0000004000000000ul
-#define HWCAP2_SME_I16I32 0x0000008000000000ul
-#define HWCAP2_SME_BI32I32 0x0000010000000000ul
-#define HWCAP2_SME_B16B16 0x0000020000000000ul
-#define HWCAP2_SME_F16F16 0x0000040000000000ul
-#define HWCAP2_MOPS 0x0000080000000000ul
-#define HWCAP2_HBC 0x0000100000000000ul
+#define HWCAP2_DCPODP (1 << 0)
+#define HWCAP2_SVE2 (1 << 1)
+#define HWCAP2_SVEAES (1 << 2)
+#define HWCAP2_SVEPMULL (1 << 3)
+#define HWCAP2_SVEBITPERM (1 << 4)
+#define HWCAP2_SVESHA3 (1 << 5)
+#define HWCAP2_SVESM4 (1 << 6)
+#define HWCAP2_FLAGM2 (1 << 7)
+#define HWCAP2_FRINT (1 << 8)
+#define HWCAP2_SVEI8MM (1 << 9)
+#define HWCAP2_SVEF32MM (1 << 10)
+#define HWCAP2_SVEF64MM (1 << 11)
+#define HWCAP2_SVEBF16 (1 << 12)
+#define HWCAP2_I8MM (1 << 13)
+#define HWCAP2_BF16 (1 << 14)
+#define HWCAP2_DGH (1 << 15)
+#define HWCAP2_RNG (1 << 16)
+#define HWCAP2_BTI (1 << 17)
+#define HWCAP2_MTE (1 << 18)
+#define HWCAP2_ECV (1 << 19)
+#define HWCAP2_AFP (1 << 20)
+#define HWCAP2_RPRES (1 << 21)
+#define HWCAP2_MTE3 (1 << 22)
+#define HWCAP2_SME (1 << 23)
+#define HWCAP2_SME_I16I64 (1 << 24)
+#define HWCAP2_SME_F64F64 (1 << 25)
+#define HWCAP2_SME_I8I32 (1 << 26)
+#define HWCAP2_SME_F16F32 (1 << 27)
+#define HWCAP2_SME_B16F32 (1 << 28)
+#define HWCAP2_SME_F32F32 (1 << 29)
+#define HWCAP2_SME_FA64 (1 << 30)
+#define HWCAP2_WFXT (1UL << 31)
+#define HWCAP2_EBF16 (1UL << 32)
+#define HWCAP2_SVE_EBF16 (1UL << 33)
+#define HWCAP2_CSSC (1UL << 34)
+#define HWCAP2_RPRFM (1UL << 35)
+#define HWCAP2_SVE2P1 (1UL << 36)
+#define HWCAP2_SME2 (1UL << 37)
+#define HWCAP2_SME2P1 (1UL << 38)
+#define HWCAP2_SME_I16I32 (1UL << 39)
+#define HWCAP2_SME_BI32I32 (1UL << 40)
+#define HWCAP2_SME_B16B16 (1UL << 41)
+#define HWCAP2_SME_F16F16 (1UL << 42)
+#define HWCAP2_MOPS (1UL << 43)
+#define HWCAP2_HBC (1UL << 44)
+#define HWCAP2_SVE_B16B16 (1UL << 45)
+#define HWCAP2_LRCPC3 (1UL << 46)
+#define HWCAP2_LSE128 (1UL << 47)
+#define HWCAP2_FPMR (1UL << 48)
+#define HWCAP2_LUT (1UL << 49)
+#define HWCAP2_FAMINMAX (1UL << 50)
+#define HWCAP2_F8CVT (1UL << 51)
+#define HWCAP2_F8FMA (1UL << 52)
+#define HWCAP2_F8DP4 (1UL << 53)
+#define HWCAP2_F8DP2 (1UL << 54)
+#define HWCAP2_F8E4M3 (1UL << 55)
+#define HWCAP2_F8E5M2 (1UL << 56)
+#define HWCAP2_SME_LUTV2 (1UL << 57)
+#define HWCAP2_SME_F8F16 (1UL << 58)
+#define HWCAP2_SME_F8F32 (1UL << 59)
+#define HWCAP2_SME_SF8FMA (1UL << 60)
+#define HWCAP2_SME_SF8DP4 (1UL << 61)
+#define HWCAP2_SME_SF8DP2 (1UL << 62)
+#define HWCAP2_POE (1UL << 63)
#ifdef COMPAT_FREEBSD32
/* ARM HWCAP */
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index 85ac5cda7037..a32e1000d911 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -37,21 +37,35 @@
/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
+/* Valid if HCR_EL2.E2H == 0 */
+#define CNTHCTL_EL1PCTEN (1 << 0) /* Allow physical counter access */
+#define CNTHCTL_EL1PCEN (1 << 1) /* Allow physical timer access */
+/* Valid if HCR_EL2.E2H == 1 */
+#define CNTHCTL_E2H_EL0PCTEN (1 << 0) /* Allow EL0 physical counter access */
+#define CNTHCTL_E2H_EL0VCTEN (1 << 1) /* Allow EL0 virtual counter access */
+#define CNTHCTL_E2H_EL0VTEN (1 << 8) /* Allow EL0 virtual timer access */
+#define CNTHCTL_E2H_EL0PTEN (1 << 9) /* Allow EL0 physical timer access */
+#define CNTHCTL_E2H_EL1PCTEN (1 << 10) /* Allow physical counter access */
+#define CNTHCTL_E2H_EL1PTEN (1 << 11) /* Allow physical timer access */
+/* Unconditionally valid */
#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
-#define CNTHCTL_EL1PCEN (1 << 1) /* Allow EL0/1 physical timer access */
-#define CNTHCTL_EL1PCTEN (1 << 0) /*Allow EL0/1 physical counter access*/
/* CPTR_EL2 - Architecture feature trap register */
/* Valid if HCR_EL2.E2H == 0 */
-#define CPTR_RES0 0x7fefc800
-#define CPTR_RES1 0x000033ff
-#define CPTR_TFP 0x00000400
+#define CPTR_TRAP_ALL 0xc01037ff /* Enable all traps */
+#define CPTR_RES0 0x7fefc800
+#define CPTR_RES1 0x000032ff
+#define CPTR_TZ 0x00000100
+#define CPTR_TFP 0x00000400
+#define CPTR_TTA 0x00100000
/* Valid if HCR_EL2.E2H == 1 */
-#define CPTR_FPEN 0x00300000
+#define CPTR_E2H_TRAP_ALL 0xd0000000
+#define CPTR_E2H_ZPEN 0x00030000
+#define CPTR_E2H_FPEN 0x00300000
+#define CPTR_E2H_TTA 0x10000000
/* Unconditionally valid */
-#define CPTR_TTA 0x00100000
-#define CPTR_TCPAC 0x80000000
+#define CPTR_TCPAC 0x80000000
/* HCR_EL2 - Hypervisor Config Register */
#define HCR_VM (UL(0x1) << 0)
@@ -118,6 +132,41 @@
#define HCR_TWEDEn (UL(0x1) << 59)
#define HCR_TWEDEL_MASK (UL(0xf) << 60)
+/* HCRX_EL2 - Extended Hypervisor Configuration Register */
+#define HCRX_EL2_REG MRS_REG_ALT_NAME(HCRX_EL2)
+#define HCRX_EL2_op0 3
+#define HCRX_EL2_op1 4
+#define HCRX_EL2_CRn 1
+#define HCRX_EL2_CRm 2
+#define HCRX_EL2_op2 2
+
+#define HCRX_EnAS0 (UL(0x1) << 0)
+#define HCRX_EnALS (UL(0x1) << 1)
+#define HCRX_EnASR (UL(0x1) << 2)
+#define HCRX_FnXS (UL(0x1) << 3)
+#define HCRX_FGTnXS (UL(0x1) << 4)
+#define HCRX_SMPME (UL(0x1) << 5)
+#define HCRX_TALLINT (UL(0x1) << 6)
+#define HCRX_VINMI (UL(0x1) << 7)
+#define HCRX_VFNMI (UL(0x1) << 8)
+#define HCRX_CMOW (UL(0x1) << 9)
+#define HCRX_MCE2 (UL(0x1) << 10)
+#define HCRX_MSCEn (UL(0x1) << 11)
+/* Bits 12 & 13 are reserved */
+#define HCRX_TCR2En (UL(0x1) << 14)
+#define HCRX_SCTLR2En (UL(0x1) << 15)
+#define HCRX_PTTWI (UL(0x1) << 16)
+#define HCRX_D128En (UL(0x1) << 17)
+#define HCRX_EnSNERR (UL(0x1) << 18)
+#define HCRX_TMEA (UL(0x1) << 19)
+#define HCRX_EnSDERR (UL(0x1) << 20)
+#define HCRX_EnIDCP128 (UL(0x1) << 21)
+#define HCRX_GCSEn (UL(0x1) << 22)
+#define HCRX_EnFPM (UL(0x1) << 23)
+#define HCRX_PACMEn (UL(0x1) << 24)
+/* Bit 25 is reserved */
+#define HCRX_SRMASKEn (UL(0x1) << 26)
+
/* HPFAR_EL2 - Hypervisor IPA Fault Address Register */
#define HPFAR_EL2_FIPA_SHIFT 4
#define HPFAR_EL2_FIPA_MASK 0xfffffffff0
@@ -143,10 +192,14 @@
#define SCTLR_EL2_C (0x1UL << SCTLR_EL2_C_SHIFT)
#define SCTLR_EL2_SA_SHIFT 3
#define SCTLR_EL2_SA (0x1UL << SCTLR_EL2_SA_SHIFT)
+#define SCTLR_EL2_EOS_SHIFT 11
+#define SCTLR_EL2_EOS (0x1UL << SCTLR_EL2_EOS_SHIFT)
#define SCTLR_EL2_I_SHIFT 12
#define SCTLR_EL2_I (0x1UL << SCTLR_EL2_I_SHIFT)
#define SCTLR_EL2_WXN_SHIFT 19
#define SCTLR_EL2_WXN (0x1UL << SCTLR_EL2_WXN_SHIFT)
+#define SCTLR_EL2_EIS_SHIFT 22
+#define SCTLR_EL2_EIS (0x1UL << SCTLR_EL2_EIS_SHIFT)
#define SCTLR_EL2_EE_SHIFT 25
#define SCTLR_EL2_EE (0x1UL << SCTLR_EL2_EE_SHIFT)
@@ -228,6 +281,9 @@
#define VTCR_EL2_PS_42BIT (0x3UL << VTCR_EL2_PS_SHIFT)
#define VTCR_EL2_PS_44BIT (0x4UL << VTCR_EL2_PS_SHIFT)
#define VTCR_EL2_PS_48BIT (0x5UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_PS_52BIT (0x6UL << VTCR_EL2_PS_SHIFT)
+#define VTCR_EL2_DS_SHIFT 32
+#define VTCR_EL2_DS (0x1UL << VTCR_EL2_DS_SHIFT)
/* VTTBR_EL2 - Virtualization Translation Table Base Register */
#define VTTBR_VMID_MASK 0xffff000000000000
@@ -252,5 +308,35 @@
#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
#define MDCR_EL2_TDRA_SHIFT 11
#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
+#define MDCR_E2PB_SHIFT 12
+#define MDCR_E2PB_MASK (0x3UL << MDCR_E2PB_SHIFT)
+#define MDCR_TPMS_SHIFT 14
+#define MDCR_TPMS (0x1UL << MDCR_TPMS_SHIFT)
+#define MDCR_EnSPM_SHIFT 15
+#define MDCR_EnSPM (0x1UL << MDCR_EnSPM_SHIFT)
+#define MDCR_HPMD_SHIFT 17
+#define MDCR_HPMD (0x1UL << MDCR_HPMD_SHIFT)
+#define MDCR_TTRF_SHIFT 19
+#define MDCR_TTRF (0x1UL << MDCR_TTRF_SHIFT)
+#define MDCR_HCCD_SHIFT 23
+#define MDCR_HCCD (0x1UL << MDCR_HCCD_SHIFT)
+#define MDCR_E2TB_SHIFT 24
+#define MDCR_E2TB_MASK (0x3UL << MDCR_E2TB_SHIFT)
+#define MDCR_HLP_SHIFT 26
+#define MDCR_HLP (0x1UL << MDCR_HLP_SHIFT)
+#define MDCR_TDCC_SHIFT 27
+#define MDCR_TDCC (0x1UL << MDCR_TDCC_SHIFT)
+#define MDCR_MTPME_SHIFT 28
+#define MDCR_MTPME (0x1UL << MDCR_MTPME_SHIFT)
+#define MDCR_HPMFZO_SHIFT 29
+#define MDCR_HPMFZO (0x1UL << MDCR_HPMFZO_SHIFT)
+#define MDCR_PMSSE_SHIFT 30
+#define MDCR_PMSSE_MASK (0x3UL << MDCR_PMSSE_SHIFT)
+#define MDCR_HPMFZS_SHIFT 36
+#define MDCR_HPMFZS (0x1UL << MDCR_HPMFZS_SHIFT)
+#define MDCR_PMEE_SHIFT 40
+#define MDCR_PMEE_MASK (0x3UL << MDCR_PMEE_SHIFT)
+#define MDCR_EBWE_SHIFT 43
+#define MDCR_EBWE (0x1UL << MDCR_EBWE_SHIFT)
#endif /* !_MACHINE_HYPERVISOR_H_ */
diff --git a/sys/arm64/include/intr.h b/sys/arm64/include/intr.h
index 3cdbc83ff109..ef7fe56e3a13 100644
--- a/sys/arm64/include/intr.h
+++ b/sys/arm64/include/intr.h
@@ -27,20 +27,20 @@
#ifndef _MACHINE_INTR_H_
#define _MACHINE_INTR_H_
+#ifndef LOCORE
#ifdef FDT
#include <dev/ofw/openfirm.h>
#endif
-#include <sys/intr.h>
-
-#ifndef NIRQ
-#define NIRQ 16384 /* XXX - It should be an option. */
-#endif
-
static inline void
arm_irq_memory_barrier(uintptr_t irq)
{
}
+#endif /* !LOCORE */
+
+#ifndef NIRQ
+#define NIRQ 16384 /* XXX - It should be an option. */
+#endif
#ifdef DEV_ACPI
#define ACPI_INTR_XREF 1
@@ -48,4 +48,8 @@ arm_irq_memory_barrier(uintptr_t irq)
#define ACPI_GPIO_XREF 3
#endif
+#define INTR_ROOT_IRQ 0
+#define INTR_ROOT_FIQ 1
+#define INTR_ROOT_COUNT 2
+
#endif /* _MACHINE_INTR_H */
diff --git a/sys/arm64/include/machdep.h b/sys/arm64/include/machdep.h
index 2f2960ae39f2..4fa80219da42 100644
--- a/sys/arm64/include/machdep.h
+++ b/sys/arm64/include/machdep.h
@@ -33,7 +33,6 @@ struct arm64_bootparams {
vm_offset_t modulep;
vm_offset_t kern_stack;
vm_paddr_t kern_ttbr0;
- uint64_t hcr_el2;
int boot_el; /* EL the kernel booted from */
int pad;
};
diff --git a/sys/arm64/include/md_var.h b/sys/arm64/include/md_var.h
index f9aaaeba7306..da136ff091db 100644
--- a/sys/arm64/include/md_var.h
+++ b/sys/arm64/include/md_var.h
@@ -37,8 +37,12 @@ extern char sigcode[];
extern int szsigcode;
extern u_long elf_hwcap;
extern u_long elf_hwcap2;
+extern u_long elf_hwcap3;
+extern u_long elf_hwcap4;
extern u_long linux_elf_hwcap;
extern u_long linux_elf_hwcap2;
+extern u_long linux_elf_hwcap3;
+extern u_long linux_elf_hwcap4;
#ifdef COMPAT_FREEBSD32
extern u_long elf32_hwcap;
extern u_long elf32_hwcap2;
diff --git a/sys/arm64/include/metadata.h b/sys/arm64/include/metadata.h
index 7459aa90a6e2..30ec5115e670 100644
--- a/sys/arm64/include/metadata.h
+++ b/sys/arm64/include/metadata.h
@@ -31,10 +31,15 @@
#define MODINFOMD_DTBP 0x1002
#define MODINFOMD_EFI_FB 0x1003
+/*
+ * This is not the same as the UEFI standard EFI_MEMORY_ATTRIBUTES_TABLE, though
+ * memory_size / descriptor_size entries of EFI_MEMORY_DESCRIPTOR follow this table
+ * starting at a 16-byte alignment.
+ */
struct efi_map_header {
- size_t memory_size;
- size_t descriptor_size;
- uint32_t descriptor_version;
+	size_t memory_size;	/* Number of bytes that follow */
+ size_t descriptor_size; /* Size of each EFI_MEMORY_DESCRIPTOR */
+ uint32_t descriptor_version; /* Currently '1' */
};
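A sketch of the walk this comment implies (struct efi_md from sys/efi.h and the 16-byte round-up are assumptions based on the layout notes, not code from this change):

	static size_t
	efi_map_count(struct efi_map_header *hdr)
	{
		char *p = (char *)roundup2((uintptr_t)(hdr + 1), 16);
		size_t i, ndesc = hdr->memory_size / hdr->descriptor_size;

		for (i = 0; i < ndesc; i++, p += hdr->descriptor_size)
			;	/* p points at one EFI_MEMORY_DESCRIPTOR */
		return (ndesc);
	}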
struct efi_fb {
diff --git a/sys/arm64/include/param.h b/sys/arm64/include/param.h
index ca3fae11c515..753035b7775e 100644
--- a/sys/arm64/include/param.h
+++ b/sys/arm64/include/param.h
@@ -43,8 +43,6 @@
#define STACKALIGNBYTES (16 - 1)
#define STACKALIGN(p) ((uint64_t)(p) & ~STACKALIGNBYTES)
-#define __PCI_REROUTE_INTERRUPT
-
#ifndef MACHINE
#define MACHINE "arm64"
#endif
@@ -97,7 +95,7 @@
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (PAGE_SIZE - 1)
-#define MAXPAGESIZES 3 /* maximum number of supported page sizes */
+#define MAXPAGESIZES 4 /* maximum number of supported page sizes */
#ifndef KSTACK_PAGES
#if defined(KASAN) || defined(KMSAN)
@@ -119,17 +117,9 @@
/*
* Mach derived conversion macros
*/
-#define round_page(x) (((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)
-#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK)
-
-#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
-#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
-
#define arm64_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
#define arm64_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
-#define pgtok(x) ((unsigned long)(x) * (PAGE_SIZE / 1024))
-
#endif /* !_MACHINE_PARAM_H_ */
#endif /* !__arm__ */
diff --git a/sys/arm64/include/pcb.h b/sys/arm64/include/pcb.h
index d7392d5f2032..c0feb1149cf5 100644
--- a/sys/arm64/include/pcb.h
+++ b/sys/arm64/include/pcb.h
@@ -59,17 +59,19 @@ struct pcb {
u_int pcb_flags;
#define PCB_SINGLE_STEP_SHIFT 0
#define PCB_SINGLE_STEP (1 << PCB_SINGLE_STEP_SHIFT)
- uint32_t pcb_pad1;
+ u_int pcb_sve_len; /* The SVE vector length */
struct vfpstate *pcb_fpusaved;
int pcb_fpflags;
#define PCB_FP_STARTED 0x00000001
+#define PCB_FP_SVEVALID 0x00000002
#define PCB_FP_KERN 0x40000000
#define PCB_FP_NOSAVE 0x80000000
/* The bits passed to userspace in get_fpcontext */
-#define PCB_FP_USERMASK (PCB_FP_STARTED)
+#define PCB_FP_USERMASK (PCB_FP_STARTED | PCB_FP_SVEVALID)
u_int pcb_vfpcpu; /* Last cpu this thread ran VFP code */
- uint64_t pcb_reserved[5];
+ void *pcb_svesaved;
+ uint64_t pcb_reserved[4];
/*
* The userspace VFP state. The pcb_fpusaved pointer will point to
@@ -83,7 +85,7 @@ struct pcb {
#ifdef _KERNEL
void makectx(struct trapframe *tf, struct pcb *pcb);
-int savectx(struct pcb *pcb) __returns_twice;
+void savectx(struct pcb *pcb) __returns_twice;
#endif
#endif /* !LOCORE */
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index d69924080610..0f23f200f0f6 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -71,16 +71,6 @@ struct md_page {
vm_memattr_t pv_memattr;
};
-/*
- * This structure is used to hold a virtual<->physical address
- * association and is used mostly by bootstrap code
- */
-struct pv_addr {
- SLIST_ENTRY(pv_addr) pv_list;
- vm_offset_t pv_va;
- vm_paddr_t pv_pa;
-};
-
enum pmap_stage {
PM_INVALID,
PM_STAGE1,
@@ -111,6 +101,8 @@ extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
#define pmap_kernel() kernel_pmap
+extern bool pmap_lpa_enabled;
+
#define PMAP_ASSERT_LOCKED(pmap) \
mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
@@ -137,6 +129,8 @@ extern struct pmap kernel_pmap_store;
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
+extern pt_entry_t pmap_sh_attr;
+
/*
* Macros to test if a mapping is mappable with an L1 Section mapping
* or an L2 Large Page mapping.
@@ -147,7 +141,8 @@ extern vm_offset_t virtual_end;
#define pmap_vm_page_alloc_check(m)
void pmap_activate_vm(pmap_t);
-void pmap_bootstrap(vm_size_t);
+void pmap_bootstrap_dmap(vm_size_t);
+void pmap_bootstrap(void);
int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
int pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot);
void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
@@ -180,7 +175,6 @@ int pmap_fault(pmap_t, uint64_t, uint64_t);
struct pcb *pmap_switch(struct thread *);
extern void (*pmap_clean_stage2_tlbi)(void);
-extern void (*pmap_invalidate_vpipt_icache)(void);
extern void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t,
bool);
extern void (*pmap_stage2_invalidate_all)(uint64_t);
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
index dfd39faadd46..dc2fa2df654d 100644
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -65,7 +65,11 @@ struct mdthread {
struct ptrauth_key apia;
} md_ptrauth_kern;
- uint64_t md_reserved[4];
+ uint64_t md_efirt_tmp;
+ int md_efirt_dis_pf;
+
+ int md_reserved0;
+ uint64_t md_reserved[2];
};
struct mdproc {
diff --git a/sys/arm64/include/pte.h b/sys/arm64/include/pte.h
index 56eede01d776..464d8c941c56 100644
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -54,13 +54,6 @@ typedef uint64_t pt_entry_t; /* page table entry */
#define ATTR_MASK_L UINT64_C(0x0000000000000fff)
#define ATTR_MASK (ATTR_MASK_H | ATTR_MASK_L)
-#define BASE_MASK ~ATTR_MASK
-#define BASE_ADDR(x) ((x) & BASE_MASK)
-
-#define PTE_TO_PHYS(pte) BASE_ADDR(pte)
-/* Convert a phys addr to the output address field of a PTE */
-#define PHYS_TO_PTE(pa) (pa)
-
/* Bits 58:55 are reserved for software */
#define ATTR_SW_UNUSED1 (1UL << 58)
#define ATTR_SW_NO_PROMOTE (1UL << 57)
@@ -80,14 +73,37 @@ typedef uint64_t pt_entry_t; /* page table entry */
#define ATTR_CONTIGUOUS (1UL << 52)
#define ATTR_DBM (1UL << 51)
-#define ATTR_S1_GP (1UL << 50)
+#define ATTR_S1_GP_SHIFT 50
+#define ATTR_S1_GP (1UL << ATTR_S1_GP_SHIFT)
+
+/*
+ * Largest possible output address field for a level 3 page. Block
+ * entries will use fewer low address bits, but these are res0 so
+ * should be safe to include.
+ *
+ * This is also safe to use for the next-level table address for
+ * table entries as they encode a physical address in the same way.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4K
+#define ATTR_ADDR UINT64_C(0x0003fffffffff000)
+#elif PAGE_SIZE == PAGE_SIZE_16K
+#define ATTR_ADDR UINT64_C(0x0003ffffffffc000)
+#else
+#error Unsupported page size
+#endif
+
#define ATTR_S1_nG (1 << 11)
#define ATTR_AF (1 << 10)
+/* When TCR_EL1.DS == 0 */
#define ATTR_SH(x) ((x) << 8)
#define ATTR_SH_MASK ATTR_SH(3)
#define ATTR_SH_NS 0 /* Non-shareable */
#define ATTR_SH_OS 2 /* Outer-shareable */
#define ATTR_SH_IS 3 /* Inner-shareable */
+/* When TCR_EL1.DS == 1 */
+#define ATTR_OA_51_50_SHIFT 8
+#define ATTR_OA_51_50_MASK (3 << ATTR_OA_51_50_SHIFT)
+#define ATTR_OA_51_50_DELTA (50 - 8) /* Delta from address to pte */
#define ATTR_S1_AP_RW_BIT (1 << 7)
#define ATTR_S1_AP(x) ((x) << 6)
@@ -111,8 +127,6 @@ typedef uint64_t pt_entry_t; /* page table entry */
#define ATTR_S2_MEMATTR_WT 0xa
#define ATTR_S2_MEMATTR_WB 0xf
-#define ATTR_DEFAULT (ATTR_AF | ATTR_SH(ATTR_SH_IS))
-
#define ATTR_DESCR_MASK 3
#define ATTR_DESCR_VALID 1
#define ATTR_DESCR_TYPE_MASK 2
@@ -126,6 +140,29 @@ typedef uint64_t pt_entry_t; /* page table entry */
*/
#define ATTR_PROMOTE (ATTR_MASK & ~(ATTR_CONTIGUOUS | ATTR_AF))
+/* Read the output address or next-level table address from a PTE */
+#define PTE_TO_PHYS(x) ({ \
+ pt_entry_t _pte = (x); \
+ vm_paddr_t _pa; \
+ _pa = _pte & ATTR_ADDR; \
+ if (pmap_lpa_enabled) \
+ _pa |= (_pte & ATTR_OA_51_50_MASK) << ATTR_OA_51_50_DELTA; \
+ _pa; \
+})
+
+/*
+ * Convert a physical address to an output address or next-level
+ * table address in a PTE
+ */
+#define PHYS_TO_PTE(x) ({ \
+ vm_paddr_t _pa = (x); \
+ pt_entry_t _pte; \
+ _pte = _pa & ATTR_ADDR; \
+ if (pmap_lpa_enabled) \
+ _pte |= (_pa >> ATTR_OA_51_50_DELTA) & ATTR_OA_51_50_MASK; \
+ _pte; \
+})
+
#if PAGE_SIZE == PAGE_SIZE_4K
#define L0_SHIFT 39
#define L1_SHIFT 30
@@ -198,13 +235,18 @@ typedef uint64_t pt_entry_t; /* page table entry */
* can be coalesced into a single TLB entry
*/
#if PAGE_SIZE == PAGE_SIZE_4K
+#define L2C_ENTRIES 16
#define L3C_ENTRIES 16
#elif PAGE_SIZE == PAGE_SIZE_16K
+#define L2C_ENTRIES 32
#define L3C_ENTRIES 128
#else
#error Unsupported page size
#endif
+#define L2C_SIZE (L2C_ENTRIES * L2_SIZE)
+#define L2C_OFFSET (L2C_SIZE - 1)
+
#define L3C_SIZE (L3C_ENTRIES * L3_SIZE)
#define L3C_OFFSET (L3C_SIZE - 1)
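
The new PTE_TO_PHYS/PHYS_TO_PTE only touch bits 51:50 when pmap_lpa_enabled is set; with TCR_EL1.DS == 1 those output-address bits live in PTE bits 9:8, where the shareability field sits otherwise. A self-contained round-trip sketch using the 4K-page constants from this hunk (the sample address is made up):

	#include <stdint.h>
	#include <stdio.h>

	#define ATTR_ADDR		UINT64_C(0x0003fffffffff000)
	#define ATTR_OA_51_50_SHIFT	8
	#define ATTR_OA_51_50_MASK	(UINT64_C(3) << ATTR_OA_51_50_SHIFT)
	#define ATTR_OA_51_50_DELTA	(50 - 8)

	int
	main(void)
	{
		/* A made-up 52-bit physical address with bits 51:50 set. */
		uint64_t pa = (UINT64_C(3) << 50) | UINT64_C(0x12345000);
		uint64_t pte, back;

		/* PHYS_TO_PTE: bits 49:12 stay put, bits 51:50 move to 9:8. */
		pte = (pa & ATTR_ADDR) |
		    ((pa >> ATTR_OA_51_50_DELTA) & ATTR_OA_51_50_MASK);

		/* PTE_TO_PHYS: the inverse. */
		back = (pte & ATTR_ADDR) |
		    ((pte & ATTR_OA_51_50_MASK) << ATTR_OA_51_50_DELTA);

		printf("pa %#jx pte %#jx back %#jx\n", (uintmax_t)pa,
		    (uintmax_t)pte, (uintmax_t)back);
		return (back == pa ? 0 : 1);
	}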
diff --git a/sys/arm64/include/reg.h b/sys/arm64/include/reg.h
index c699752197a8..4226385480e8 100644
--- a/sys/arm64/include/reg.h
+++ b/sys/arm64/include/reg.h
@@ -63,6 +63,19 @@ struct fpreg32 {
int dummy;
};
+#define SVEREG_FLAG_REGS_MASK 0x0001
+#define SVEREG_FLAG_FP 0x0000
+#define SVEREG_FLAG_SVE 0x0001
+
+struct svereg_header {
+ __uint32_t sve_size;
+ __uint32_t sve_maxsize;
+ __uint16_t sve_vec_len;
+ __uint16_t sve_max_vec_len;
+ __uint16_t sve_flags;
+ __uint16_t sve_reserved;
+};
+
struct dbreg {
__uint8_t db_debug_ver;
__uint8_t db_nbkpts;
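
A consumer of struct svereg_header checks sve_flags to learn whether the register payload that follows uses the FP or the SVE layout. Only the flag test below is taken from this diff; the surrounding use is a sketch:

	/* Sketch: pick the payload layout advertised by the header. */
	static int
	svereg_payload_is_sve(const struct svereg_header *hdr)
	{
		return ((hdr->sve_flags & SVEREG_FLAG_REGS_MASK) ==
		    SVEREG_FLAG_SVE);
	}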
diff --git a/sys/arm64/include/resource.h b/sys/arm64/include/resource.h
index d4cffb1ae854..336fc11a435a 100644
--- a/sys/arm64/include/resource.h
+++ b/sys/arm64/include/resource.h
@@ -44,9 +44,7 @@
#define SYS_RES_MEMORY 3 /* i/o memory */
#define SYS_RES_IOPORT 4 /* i/o ports */
#define SYS_RES_GPIO 5 /* general purpose i/o */
-#ifdef NEW_PCIB
#define PCI_RES_BUS 6 /* PCI bus numbers */
-#endif
#endif /* !_MACHINE_RESOURCE_H_ */
diff --git a/sys/arm64/include/sdt_machdep.h b/sys/arm64/include/sdt_machdep.h
new file mode 100644
index 000000000000..738d246832a2
--- /dev/null
+++ b/sys/arm64/include/sdt_machdep.h
@@ -0,0 +1,12 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Mark Johnston <markj@FreeBSD.org>
+ */
+
+#ifndef _SYS_SDT_MACHDEP_H_
+#define _SYS_SDT_MACHDEP_H_
+
+#define _SDT_ASM_PATCH_INSTR "nop"
+
+#endif /* _SYS_SDT_MACHDEP_H_ */
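
_SDT_ASM_PATCH_INSTR names the instruction assembled at each SDT probe site; on arm64 that is a single nop, which can later be overwritten with a branch when the probe is enabled. A hypothetical illustration of how a probe site might use the macro (the real expansion lives in the MI SDT headers):

	/* Hypothetical: one patchable nop per probe site. */
	__asm __volatile(_SDT_ASM_PATCH_INSTR);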
diff --git a/sys/arm64/include/sysarch.h b/sys/arm64/include/sysarch.h
index 83094943423a..498e26f6d47e 100644
--- a/sys/arm64/include/sysarch.h
+++ b/sys/arm64/include/sysarch.h
@@ -39,6 +39,8 @@
#ifndef _MACHINE_SYSARCH_H_
#define _MACHINE_SYSARCH_H_
+#include <sys/cdefs.h>
+
#define ARM64_GUARD_PAGE 0x100
struct arm64_guard_page_args {
@@ -46,6 +48,9 @@ struct arm64_guard_page_args {
__size_t len;
};
+#define ARM64_GET_SVE_VL 0x200
+/* Reserved ARM64_SET_SVE_VL 0x201 */
+
#ifndef _KERNEL
__BEGIN_DECLS
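
ARM64_GET_SVE_VL is the first SVE sysarch(2) operation; ARM64_SET_SVE_VL is reserved but not implemented here. The argument convention is not part of this diff, so the copyout-through-pointer form in the sketch below is an assumption:

	#include <machine/sysarch.h>
	#include <stdio.h>

	int
	main(void)
	{
		int vl = 0;	/* assumed out-parameter type */

		if (sysarch(ARM64_GET_SVE_VL, &vl) != 0) {
			perror("sysarch");
			return (1);
		}
		printf("SVE vector length: %d bytes\n", vl);
		return (0);
	}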
diff --git a/sys/arm64/include/ucontext.h b/sys/arm64/include/ucontext.h
index dedbd061ec6b..a4f0ee243b3a 100644
--- a/sys/arm64/include/ucontext.h
+++ b/sys/arm64/include/ucontext.h
@@ -62,6 +62,14 @@ struct arm64_reg_context {
};
#define ARM64_CTX_END 0xa5a5a5a5
+#define ARM64_CTX_SVE 0x00657673
+
+struct sve_context {
+ struct arm64_reg_context sve_ctx;
+ __uint16_t sve_vector_len;
+ __uint16_t sve_flags;
+ __uint16_t sve_reserved[2];
+};
struct __mcontext {
struct gpregs mc_gpregs;
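
struct sve_context travels in the mcontext's chain of arm64_reg_context records and is located by its ARM64_CTX_SVE id. A hypothetical walker, assuming the record header exposes ctx_id/ctx_size fields and the chain ends with ARM64_CTX_END (neither detail is shown in this hunk):

	/* Hypothetical: ctx_id/ctx_size field names are assumptions. */
	static struct sve_context *
	find_sve_context(struct arm64_reg_context *ctx)
	{
		while (ctx->ctx_id != ARM64_CTX_END) {
			if (ctx->ctx_id == ARM64_CTX_SVE)
				return ((struct sve_context *)ctx);
			ctx = (struct arm64_reg_context *)
			    ((char *)ctx + ctx->ctx_size);
		}
		return (NULL);
	}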
diff --git a/sys/arm64/include/undefined.h b/sys/arm64/include/undefined.h
index db5d0523e711..71b2eed22a84 100644
--- a/sys/arm64/include/undefined.h
+++ b/sys/arm64/include/undefined.h
@@ -35,31 +35,17 @@
typedef int (*undef_handler_t)(vm_offset_t, uint32_t, struct trapframe *,
uint32_t);
-
-static inline int
-mrs_Op0(uint32_t insn)
-{
-
- /* op0 is encoded without the top bit in a mrs instruction */
- return (2 | ((insn & MRS_Op0_MASK) >> MRS_Op0_SHIFT));
-}
-
-#define MRS_GET(op) \
-static inline int \
-mrs_##op(uint32_t insn) \
-{ \
- \
- return ((insn & MRS_##op##_MASK) >> MRS_##op##_SHIFT); \
-}
-MRS_GET(Op1)
-MRS_GET(CRn)
-MRS_GET(CRm)
-MRS_GET(Op2)
+typedef bool (*undef_sys_handler_t)(uint64_t, struct trapframe *);
void undef_init(void);
-void *install_undef_handler(bool, undef_handler_t);
+void install_sys_handler(undef_sys_handler_t);
+void *install_undef_handler(undef_handler_t);
+#ifdef COMPAT_FREEBSD32
+void *install_undef32_handler(undef_handler_t);
+#endif
void remove_undef_handler(void *);
-int undef_insn(u_int, struct trapframe *);
+bool undef_sys(uint64_t, struct trapframe *);
+int undef_insn(struct trapframe *);
#endif /* _KERNEL */
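
The reworked interface splits system-register traps (undef_sys_handler_t, returning bool) from general undefined-instruction handlers, and install_undef_handler() loses its old user/kernel flag. A sketch of registering a handler under the new API; the meaning of the uint64_t argument (the encoded system-register access) is an assumption:

	/* Sketch: return true only if the trapped MRS/MSR was handled. */
	static bool
	example_sysreg_handler(uint64_t iss, struct trapframe *frame)
	{
		return (false);		/* not ours; try the next handler */
	}

	static void
	example_init(void)
	{
		install_sys_handler(example_sysreg_handler);
	}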
diff --git a/sys/arm64/include/vfp.h b/sys/arm64/include/vfp.h
index 7f4c86e7737d..fc93908add0b 100644
--- a/sys/arm64/include/vfp.h
+++ b/sys/arm64/include/vfp.h
@@ -79,6 +79,13 @@ void vfp_reset_state(struct thread *, struct pcb *);
void vfp_restore_state(void);
void vfp_save_state(struct thread *, struct pcb *);
void vfp_save_state_savectx(struct pcb *);
+void vfp_save_state_switch(struct thread *);
+void vfp_to_sve_sync(struct thread *);
+void sve_to_vfp_sync(struct thread *);
+
+size_t sve_max_buf_size(void);
+size_t sve_buf_size(struct thread *);
+bool sve_restore_state(struct thread *);
struct fpu_kern_ctx;
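
sve_buf_size() presumably reports the per-thread SVE save-area size for the thread's current vector length, with sve_max_buf_size() as the system-wide upper bound. A hedged allocation sketch (M_TEMP and the exact semantics are assumptions):

	/* Hedged sketch: size a save buffer with the new helpers. */
	static void *
	alloc_sve_buf(struct thread *td)
	{
		return (malloc(sve_buf_size(td), M_TEMP, M_WAITOK | M_ZERO));
	}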
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 8e2c9c868635..1d783cdacb0d 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -102,14 +102,30 @@ enum vm_reg_name {
#define VM_INTINFO_HWEXCEPTION (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
-#define VM_MAX_SUFFIXLEN 15
-
#define VM_GUEST_BASE_IPA 0x80000000UL /* Guest kernel start ipa */
-#ifdef _KERNEL
-
-#define VM_MAX_NAMELEN 32
+/*
+ * The VM name has to fit into the pathname length constraints of devfs,
+ * governed primarily by SPECNAMELEN. The length is the total number of
+ * characters in the full path, relative to the mount point and not
+ * including any leading '/' characters.
+ * A prefix and a suffix are added to the name specified by the user.
+ * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
+ * longer for future use.
+ * The suffix is a string that identifies a bootrom image or some similar
+ * image that is attached to the VM. A separator character gets added to
+ * the suffix automatically when generating the full path, so it must be
+ * accounted for, reducing the effective length by 1.
+ * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
+ * bytes for FreeBSD 12. A minimum length is set for safety and supports
+ * a SPECNAMELEN as small as 32 on old systems.
+ */
+#define VM_MAX_PREFIXLEN 10
+#define VM_MAX_SUFFIXLEN 15
+#define VM_MAX_NAMELEN \
+ (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
+#ifdef _KERNEL
struct vm;
struct vm_exception;
struct vm_exit;
@@ -127,44 +143,13 @@ struct vm_eventinfo {
int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
+void vm_disable_vcpu_creation(struct vm *vm);
void vm_slock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
-/*
- * APIs that modify the guest memory map require all vcpus to be frozen.
- */
-void vm_slock_memsegs(struct vm *vm);
-void vm_xlock_memsegs(struct vm *vm);
-void vm_unlock_memsegs(struct vm *vm);
-int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
- size_t len, int prot, int flags);
-int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
-int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
-void vm_free_memseg(struct vm *vm, int ident);
-
-/*
- * APIs that inspect the guest memory map require only a *single* vcpu to
- * be frozen. This acts like a read lock on the guest memory map since any
- * modification requires *all* vcpus to be frozen.
- */
-int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
- vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
-int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
- struct vm_object **objptr);
-vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
-void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
- int prot, void **cookie);
-void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
- int prot, void **cookie);
-void vm_gpa_release(void *cookie);
-bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
-
-int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
- uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
-
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
uint16_t *threads, uint16_t *maxcpus);
@@ -200,13 +185,6 @@ cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
-static __inline bool
-virt_enabled(void)
-{
-
- return (has_hyp());
-}
-
static __inline int
vcpu_rendezvous_pending(struct vm_eventinfo *info)
{
@@ -252,6 +230,8 @@ vcpu_should_yield(struct vcpu *vcpu)
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu);
+struct vmspace *vm_vmspace(struct vm *vm);
+struct vm_mem *vm_mem(struct vm *vm);
enum vm_reg_name vm_segment_name(int seg_encoding);
@@ -295,9 +275,11 @@ struct vre {
*/
enum vm_cap_type {
VM_CAP_HALT_EXIT,
- VM_CAP_MTRAP_EXIT,
VM_CAP_PAUSE_EXIT,
VM_CAP_UNRESTRICTED_GUEST,
+ VM_CAP_BRK_EXIT,
+ VM_CAP_SS_EXIT,
+ VM_CAP_MASK_HWINTR,
VM_CAP_MAX
};
@@ -312,6 +294,8 @@ enum vm_exitcode {
VM_EXITCODE_PAGING,
VM_EXITCODE_SMCCC,
VM_EXITCODE_DEBUG,
+ VM_EXITCODE_BRK,
+ VM_EXITCODE_SS,
VM_EXITCODE_MAX
};
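
The numbers in the comment check out: FreeBSD 13 defines SPECNAMELEN as 255, giving 255 - 10 - 15 - 1 = 229, and FreeBSD 12's SPECNAMELEN of 63 gives 37. A compile-time restatement of the 13.x case:

	#include <sys/param.h>
	#include <machine/vmm.h>

	/* Holds wherever SPECNAMELEN is 255 (FreeBSD 13 and later). */
	_Static_assert(SPECNAMELEN != 255 || VM_MAX_NAMELEN == 229,
	    "VM name length budget changed");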
diff --git a/sys/arm64/include/vmm_dev.h b/sys/arm64/include/vmm_dev.h
index 9e229665a71e..938bea47c7f8 100644
--- a/sys/arm64/include/vmm_dev.h
+++ b/sys/arm64/include/vmm_dev.h
@@ -27,10 +27,7 @@
#ifndef _VMM_DEV_H_
#define _VMM_DEV_H_
-#ifdef _KERNEL
-void vmmdev_init(void);
-int vmmdev_cleanup(void);
-#endif
+#include <machine/vmm.h>
struct vm_memmap {
vm_paddr_t gpa;
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index 83c55913f56e..349849845e73 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -73,14 +73,16 @@
#define VM_PHYSSEG_MAX 64
/*
- * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
- * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
- * the pool from which physical pages for small UMA objects are
- * allocated.
+ * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool from
+ * which physical pages are allocated and VM_FREEPOOL_DIRECT is the pool from
+ * which physical pages for page tables and small UMA objects are allocated.
+ * VM_FREEPOOL_LAZYINIT is a special-purpose pool that is populated only during
+ * boot and is used to implement deferred initialization of page structures.
*/
-#define VM_NFREEPOOL 2
-#define VM_FREEPOOL_DEFAULT 0
-#define VM_FREEPOOL_DIRECT 1
+#define VM_NFREEPOOL 3
+#define VM_FREEPOOL_LAZYINIT 0
+#define VM_FREEPOOL_DEFAULT 1
+#define VM_FREEPOOL_DIRECT 2
/*
* Create two free page lists: VM_FREELIST_DMA32 is for physical pages that have
@@ -112,25 +114,34 @@
#endif
/*
- * Enable superpage reservations: 1 level.
+ * Enable superpage reservations: 2 levels.
*/
#ifndef VM_NRESERVLEVEL
-#define VM_NRESERVLEVEL 1
+#define VM_NRESERVLEVEL 2
#endif
/*
- * Level 0 reservations consist of 512 pages when PAGE_SIZE is 4KB, and
- * 2048 pages when PAGE_SIZE is 16KB.
+ * Level 0 reservations consist of 16 pages when PAGE_SIZE is 4KB, and 128
+ * pages when PAGE_SIZE is 16KB. Level 1 reservations consist of 32 64KB
+ * pages when PAGE_SIZE is 4KB, and 16 2M pages when PAGE_SIZE is 16KB.
*/
-#ifndef VM_LEVEL_0_ORDER
#if PAGE_SIZE == PAGE_SIZE_4K
-#define VM_LEVEL_0_ORDER 9
+#ifndef VM_LEVEL_0_ORDER
+#define VM_LEVEL_0_ORDER 4
+#endif
+#ifndef VM_LEVEL_1_ORDER
+#define VM_LEVEL_1_ORDER 5
+#endif
#elif PAGE_SIZE == PAGE_SIZE_16K
-#define VM_LEVEL_0_ORDER 11
+#ifndef VM_LEVEL_0_ORDER
+#define VM_LEVEL_0_ORDER 7
+#endif
+#ifndef VM_LEVEL_1_ORDER
+#define VM_LEVEL_1_ORDER 4
+#endif
#else
#error Unsupported page size
#endif
-#endif
/**
* Address space layout.
@@ -293,7 +304,7 @@
#endif
#if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#endif
#ifndef LOCORE
@@ -301,7 +312,6 @@
extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;
-extern vm_offset_t vm_max_kernel_address;
#endif
@@ -318,6 +328,7 @@ extern vm_offset_t vm_max_kernel_address;
* Need a page dump array for minidump.
*/
#define MINIDUMP_PAGE_TRACKING 1
+#define MINIDUMP_STARTUP_PAGE_TRACKING 1
#endif /* !_MACHINE_VMPARAM_H_ */
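
The new reservation orders line up with the arm64 block sizes: at 4KB pages, level 0 is 1 << 4 = 16 pages (64KB, the L3C contiguous size) and level 1 is 1 << 5 = 32 of those (2MB, the L2 block size). A quick arithmetic check:

	#include <assert.h>

	int
	main(void)
	{
		const unsigned long page = 4096;
		const unsigned long l0 = page << 4;	/* VM_LEVEL_0_ORDER */
		const unsigned long l1 = l0 << 5;	/* VM_LEVEL_1_ORDER */

		assert(l0 == 64UL * 1024);		/* 64KB */
		assert(l1 == 2UL * 1024 * 1024);	/* 2MB */
		return (0);
	}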
diff --git a/sys/arm64/iommu/iommu.c b/sys/arm64/iommu/iommu.c
index cb2b86c9dc41..5db48b3bc276 100644
--- a/sys/arm64/iommu/iommu.c
+++ b/sys/arm64/iommu/iommu.c
@@ -80,22 +80,21 @@ struct iommu_entry {
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);
static int
-iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
- iommu_gaddr_t size, int flags)
+iommu_domain_unmap_buf(struct iommu_domain *iodom,
+ struct iommu_map_entry *entry, int flags)
{
struct iommu_unit *iommu;
int error;
iommu = iodom->iommu;
-
- error = IOMMU_UNMAP(iommu->dev, iodom, base, size);
-
+ error = IOMMU_UNMAP(iommu->dev, iodom, entry->start, entry->end -
+ entry->start);
return (error);
}
static int
-iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
- iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
+iommu_domain_map_buf(struct iommu_domain *iodom, struct iommu_map_entry *entry,
+ vm_page_t *ma, uint64_t eflags, int flags)
{
struct iommu_unit *iommu;
vm_prot_t prot;
@@ -110,12 +109,10 @@ iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
if (eflags & IOMMU_MAP_ENTRY_WRITE)
prot |= VM_PROT_WRITE;
- va = base;
-
+ va = entry->start;
iommu = iodom->iommu;
-
- error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);
-
+ error = IOMMU_MAP(iommu->dev, iodom, va, ma, entry->end -
+ entry->start, prot);
return (error);
}
@@ -172,7 +169,6 @@ iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);
- t->common.ref_count = 0;
t->common.impl = &bus_dma_iommu_impl;
t->common.alignment = 1;
t->common.boundary = 0;
@@ -377,32 +373,19 @@ void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
struct bus_dma_tag_iommu *tag;
+ int error;
IOMMU_ASSERT_LOCKED(iommu);
tag = ioctx->tag;
IOMMU_CTX_FREE(iommu->dev, ioctx);
+ IOMMU_UNLOCK(iommu);
free(tag, M_IOMMU);
-}
-
-void
-iommu_free_ctx(struct iommu_ctx *ioctx)
-{
- struct iommu_unit *iommu;
- struct iommu_domain *iodom;
- int error;
-
- iodom = ioctx->domain;
- iommu = iodom->iommu;
-
- IOMMU_LOCK(iommu);
- iommu_free_ctx_locked(iommu, ioctx);
- IOMMU_UNLOCK(iommu);
/* Since we have a domain per each ctx, remove the domain too. */
- error = iommu_domain_free(iodom);
+ error = iommu_domain_free(ioctx->domain);
if (error)
device_printf(iommu->dev, "Could not free a domain\n");
}
@@ -428,8 +411,8 @@ iommu_domain_unload(struct iommu_domain *iodom,
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
("not mapped entry %p %p", iodom, entry));
- error = iodom->ops->unmap(iodom, entry->start, entry->end -
- entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
+ error = iodom->ops->unmap(iodom, entry,
+ cansleep ? IOMMU_PGF_WAITOK : 0);
KASSERT(error == 0, ("unmap %p error %d", iodom, error));
TAILQ_REMOVE(entries, entry, dmamap_link);
iommu_domain_free_entry(entry, true);
@@ -455,6 +438,7 @@ iommu_register(struct iommu_unit *iommu)
LIST_INSERT_HEAD(&iommu_list, entry, next);
IOMMU_LIST_UNLOCK();
+ sysctl_ctx_init(&iommu->sysctl_ctx);
iommu_init_busdma(iommu);
return (0);
@@ -475,6 +459,7 @@ iommu_unregister(struct iommu_unit *iommu)
IOMMU_LIST_UNLOCK();
iommu_fini_busdma(iommu);
+ sysctl_ctx_free(&iommu->sysctl_ctx);
mtx_destroy(&iommu->lock);
@@ -503,6 +488,11 @@ iommu_find(device_t dev, bool verbose)
}
void
+iommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
+{
+}
+
+void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
bool cansleep __unused)
{
diff --git a/sys/arm64/iommu/iommu_pmap.c b/sys/arm64/iommu/iommu_pmap.c
index d356a92c4d66..dc5c09239c04 100644
--- a/sys/arm64/iommu/iommu_pmap.c
+++ b/sys/arm64/iommu/iommu_pmap.c
@@ -632,8 +632,8 @@ retry:
l1p = smmu_pmap_l1(pmap, va);
l2p = smmu_pmap_l2(pmap, va);
- cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
- cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));
+ cpu_dcache_wb_range(l1p, sizeof(pd_entry_t));
+ cpu_dcache_wb_range(l2p, sizeof(pd_entry_t));
goto retry;
}
@@ -644,7 +644,7 @@ retry:
/* New mapping */
smmu_pmap_store(l3, new_l3);
- cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));
+ cpu_dcache_wb_range(l3, sizeof(pt_entry_t));
smmu_pmap_resident_count_inc(pmap, 1);
dsb(ishst);
@@ -681,7 +681,7 @@ pmap_gpu_remove(struct smmu_pmap *pmap, vm_offset_t va)
smmu_pmap_resident_count_dec(pmap, 1);
smmu_pmap_clear(pte);
- cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
+ cpu_dcache_wb_range(pte, sizeof(pt_entry_t));
rc = KERN_SUCCESS;
out:
@@ -708,7 +708,7 @@ smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
va = trunc_page(va);
- new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
+ new_l3 = (pt_entry_t)(pa | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
if ((prot & VM_PROT_WRITE) == 0)
new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
diff --git a/sys/arm64/iommu/smmu.c b/sys/arm64/iommu/smmu.c
index 1d1996a69027..ec8e04ce117b 100644
--- a/sys/arm64/iommu/smmu.c
+++ b/sys/arm64/iommu/smmu.c
@@ -309,15 +309,6 @@ smmu_write_ack(struct smmu_softc *sc, uint32_t reg,
return (0);
}
-static inline int
-ilog2(long x)
-{
-
- KASSERT(x > 0 && powerof2(x), ("%s: invalid arg %ld", __func__, x));
-
- return (flsl(x) - 1);
-}
-
static int
smmu_init_queue(struct smmu_softc *sc, struct smmu_queue *q,
uint32_t prod_off, uint32_t cons_off, uint32_t dwords)
@@ -856,7 +847,6 @@ smmu_init_cd(struct smmu_softc *sc, struct smmu_domain *domain)
return (ENXIO);
}
- cd->size = size;
cd->paddr = vtophys(cd->vaddr);
ptr = cd->vaddr;
@@ -970,10 +960,6 @@ smmu_init_strtab_2lvl(struct smmu_softc *sc)
sz = strtab->num_l1_entries * sizeof(struct l1_desc);
strtab->l1 = malloc(sz, M_SMMU, M_WAITOK | M_ZERO);
- if (strtab->l1 == NULL) {
- contigfree(strtab->vaddr, l1size, M_SMMU);
- return (ENOMEM);
- }
reg = STRTAB_BASE_CFG_FMT_2LVL;
reg |= size << STRTAB_BASE_CFG_LOG2SIZE_S;
@@ -1023,7 +1009,6 @@ smmu_init_l1_entry(struct smmu_softc *sc, int sid)
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
l1_desc->span = STRTAB_SPLIT + 1;
- l1_desc->size = size;
l1_desc->va = contigmalloc(size, M_SMMU,
M_WAITOK | M_ZERO, /* flags */
0, /* low */
@@ -1066,7 +1051,7 @@ smmu_deinit_l1_entry(struct smmu_softc *sc, int sid)
*addr = 0;
l1_desc = &strtab->l1[sid >> STRTAB_SPLIT];
- contigfree(l1_desc->va, l1_desc->size, M_SMMU);
+ free(l1_desc->va, M_SMMU);
}
static int
@@ -1774,7 +1759,7 @@ smmu_domain_free(device_t dev, struct iommu_domain *iodom)
smmu_tlbi_asid(sc, domain->asid);
smmu_asid_free(sc, domain->asid);
- contigfree(cd->vaddr, cd->size, M_SMMU);
+ free(cd->vaddr, M_SMMU);
free(cd, M_SMMU);
free(domain, M_SMMU);
@@ -1796,7 +1781,7 @@ smmu_set_buswide(device_t dev, struct smmu_domain *domain,
}
static int
-smmu_pci_get_sid(device_t child, u_int *xref0, u_int *sid0)
+smmu_pci_get_sid(device_t child, uintptr_t *xref0, u_int *sid0)
{
struct pci_id_ofw_iommu pi;
int err;
@@ -1960,7 +1945,7 @@ static int
smmu_find(device_t dev, device_t child)
{
struct smmu_softc *sc;
- u_int xref;
+ uintptr_t xref;
int err;
sc = device_get_softc(dev);
diff --git a/sys/arm64/iommu/smmuvar.h b/sys/arm64/iommu/smmuvar.h
index c32b17abc9bc..60fad8f87531 100644
--- a/sys/arm64/iommu/smmuvar.h
+++ b/sys/arm64/iommu/smmuvar.h
@@ -82,7 +82,6 @@ struct smmu_queue_local_copy {
struct smmu_cd {
vm_paddr_t paddr;
- vm_size_t size;
void *vaddr;
};
@@ -121,7 +120,6 @@ struct smmu_cmdq_entry {
struct l1_desc {
uint8_t span;
- size_t size;
void *va;
vm_paddr_t pa;
};
diff --git a/sys/arm64/linux/linux.h b/sys/arm64/linux/linux.h
index d612ba8e5d9e..00a70fabc54f 100644
--- a/sys/arm64/linux/linux.h
+++ b/sys/arm64/linux/linux.h
@@ -77,7 +77,7 @@ typedef struct {
#define l_fd_set fd_set
/* Miscellaneous */
-#define LINUX_AT_COUNT 21 /* Count of used aux entry types.
+#define LINUX_AT_COUNT 23 /* Count of used aux entry types.
* Keep this synchronized with
* linux_copyout_auxargs() code.
*/
diff --git a/sys/arm64/linux/linux_dummy_machdep.c b/sys/arm64/linux/linux_dummy_machdep.c
index a7a7795f573d..5ff6bfafe2d6 100644
--- a/sys/arm64/linux/linux_dummy_machdep.c
+++ b/sys/arm64/linux/linux_dummy_machdep.c
@@ -42,10 +42,4 @@ LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);
* Before adding new stubs to this file, please check if a stub can be added to
* the machine-independent code in sys/compat/linux/linux_dummy.c.
*/
-DUMMY(mq_open);
-DUMMY(mq_unlink);
-DUMMY(mq_timedsend);
-DUMMY(mq_timedreceive);
-DUMMY(mq_notify);
-DUMMY(mq_getsetattr);
DUMMY(kexec_file_load);
diff --git a/sys/arm64/linux/linux_machdep.c b/sys/arm64/linux/linux_machdep.c
index 3bc2923b9d4d..9f370f04b5c5 100644
--- a/sys/arm64/linux/linux_machdep.c
+++ b/sys/arm64/linux/linux_machdep.c
@@ -64,7 +64,7 @@ linux_set_cloned_tls(struct thread *td, void *desc)
if ((uint64_t)desc >= VM_MAXUSER_ADDRESS)
return (EPERM);
- return (cpu_set_user_tls(td, desc));
+ return (cpu_set_user_tls(td, desc, 0));
}
void
diff --git a/sys/arm64/linux/linux_proto.h b/sys/arm64/linux/linux_proto.h
index 1a5335af05b1..ae3d8569df58 100644
--- a/sys/arm64/linux/linux_proto.h
+++ b/sys/arm64/linux/linux_proto.h
@@ -459,7 +459,7 @@ struct linux_delete_module_args {
};
struct linux_timer_create_args {
char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)];
- char evp_l_[PADL_(struct sigevent *)]; struct sigevent * evp; char evp_r_[PADR_(struct sigevent *)];
+ char evp_l_[PADL_(struct l_sigevent *)]; struct l_sigevent * evp; char evp_r_[PADR_(struct l_sigevent *)];
char timerid_l_[PADL_(l_timer_t *)]; l_timer_t * timerid; char timerid_r_[PADR_(l_timer_t *)];
};
struct linux_timer_gettime_args {
@@ -701,7 +701,7 @@ struct linux_mq_timedreceive_args {
};
struct linux_mq_notify_args {
char mqd_l_[PADL_(l_mqd_t)]; l_mqd_t mqd; char mqd_r_[PADR_(l_mqd_t)];
- char abs_timeout_l_[PADL_(const struct l_timespec *)]; const struct l_timespec * abs_timeout; char abs_timeout_r_[PADR_(const struct l_timespec *)];
+ char sevp_l_[PADL_(const struct l_sigevent *)]; const struct l_sigevent * sevp; char sevp_r_[PADR_(const struct l_sigevent *)];
};
struct linux_mq_getsetattr_args {
char mqd_l_[PADL_(l_mqd_t)]; l_mqd_t mqd; char mqd_r_[PADR_(l_mqd_t)];
diff --git a/sys/arm64/linux/linux_support.S b/sys/arm64/linux/linux_support.S
index 151ede7e1c19..3b16583e9d54 100644
--- a/sys/arm64/linux/linux_support.S
+++ b/sys/arm64/linux/linux_support.S
@@ -26,6 +26,8 @@
* SUCH DAMAGE.
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
#include <machine/param.h>
#include <machine/vmparam.h>
@@ -172,3 +174,5 @@ ENTRY(futex_xorl)
str w4, [x2]
ret
END(futex_xorl)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/linux/linux_sysent.c b/sys/arm64/linux/linux_sysent.c
index ac93f9bb3a54..722ada465730 100644
--- a/sys/arm64/linux/linux_sysent.c
+++ b/sys/arm64/linux/linux_sysent.c
@@ -14,11 +14,11 @@
/* The casts are bogus but will do for now. */
struct sysent linux_sysent[] = {
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 0 = linux_io_setup */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 1 = linux_io_destroy */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 2 = linux_io_submit */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 3 = linux_io_cancel */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 4 = linux_io_getevents */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 0 = linux_io_setup */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 1 = linux_io_destroy */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 2 = linux_io_submit */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 3 = linux_io_cancel */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 4 = linux_io_getevents */
{ .sy_narg = AS(linux_setxattr_args), .sy_call = (sy_call_t *)linux_setxattr, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 5 = linux_setxattr */
{ .sy_narg = AS(linux_lsetxattr_args), .sy_call = (sy_call_t *)linux_lsetxattr, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 6 = linux_lsetxattr */
{ .sy_narg = AS(linux_fsetxattr_args), .sy_call = (sy_call_t *)linux_fsetxattr, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 7 = linux_fsetxattr */
@@ -53,10 +53,10 @@ struct sysent linux_sysent[] = {
{ .sy_narg = AS(linux_symlinkat_args), .sy_call = (sy_call_t *)linux_symlinkat, .sy_auevent = AUE_SYMLINKAT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 36 = linux_symlinkat */
{ .sy_narg = AS(linux_linkat_args), .sy_call = (sy_call_t *)linux_linkat, .sy_auevent = AUE_LINKAT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 37 = linux_linkat */
{ .sy_narg = AS(linux_renameat_args), .sy_call = (sy_call_t *)linux_renameat, .sy_auevent = AUE_RENAMEAT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 38 = linux_renameat */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 39 = linux_umount2 */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 39 = linux_umount2 */
{ .sy_narg = AS(linux_mount_args), .sy_call = (sy_call_t *)linux_mount, .sy_auevent = AUE_MOUNT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 40 = linux_mount */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_pivot_root, .sy_auevent = AUE_PIVOT_ROOT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 41 = linux_pivot_root */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 42 = nfsservctl */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 42 = nfsservctl */
{ .sy_narg = AS(linux_statfs_args), .sy_call = (sy_call_t *)linux_statfs, .sy_auevent = AUE_STATFS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 43 = linux_statfs */
{ .sy_narg = AS(linux_fstatfs_args), .sy_call = (sy_call_t *)linux_fstatfs, .sy_auevent = AUE_FSTATFS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 44 = linux_fstatfs */
{ .sy_narg = AS(linux_truncate_args), .sy_call = (sy_call_t *)linux_truncate, .sy_auevent = AUE_TRUNCATE, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 45 = linux_truncate */
@@ -74,7 +74,7 @@ struct sysent linux_sysent[] = {
{ .sy_narg = AS(close_args), .sy_call = (sy_call_t *)sys_close, .sy_auevent = AUE_CLOSE, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 57 = close */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_vhangup, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 58 = linux_vhangup */
{ .sy_narg = AS(linux_pipe2_args), .sy_call = (sy_call_t *)linux_pipe2, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 59 = linux_pipe2 */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 60 = linux_quotactl */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 60 = linux_quotactl */
{ .sy_narg = AS(linux_getdents64_args), .sy_call = (sy_call_t *)linux_getdents64, .sy_auevent = AUE_GETDIRENTRIES, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 61 = linux_getdents64 */
{ .sy_narg = AS(linux_lseek_args), .sy_call = (sy_call_t *)linux_lseek, .sy_auevent = AUE_LSEEK, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 62 = linux_lseek */
{ .sy_narg = AS(read_args), .sy_call = (sy_call_t *)sys_read, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 63 = read */
@@ -91,11 +91,11 @@ struct sysent linux_sysent[] = {
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_signalfd4, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 74 = linux_signalfd4 */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_vmsplice, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 75 = linux_vmsplice */
{ .sy_narg = AS(linux_splice_args), .sy_call = (sy_call_t *)linux_splice, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 76 = linux_splice */
- { .sy_narg = 0, .sy_call = (sy_call_t *)linux_tee, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 77 = linux_tee */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)linux_tee, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 77 = linux_tee */
{ .sy_narg = AS(linux_readlinkat_args), .sy_call = (sy_call_t *)linux_readlinkat, .sy_auevent = AUE_READLINKAT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 78 = linux_readlinkat */
{ .sy_narg = AS(linux_newfstatat_args), .sy_call = (sy_call_t *)linux_newfstatat, .sy_auevent = AUE_FSTATAT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 79 = linux_newfstatat */
{ .sy_narg = AS(linux_newfstat_args), .sy_call = (sy_call_t *)linux_newfstat, .sy_auevent = AUE_FSTAT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 80 = linux_newfstat */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 81 = linux_sync */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 81 = linux_sync */
{ .sy_narg = AS(fsync_args), .sy_call = (sy_call_t *)sys_fsync, .sy_auevent = AUE_FSYNC, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 82 = fsync */
{ .sy_narg = AS(linux_fdatasync_args), .sy_call = (sy_call_t *)linux_fdatasync, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 83 = linux_fdatasync */
{ .sy_narg = AS(linux_sync_file_range_args), .sy_call = (sy_call_t *)linux_sync_file_range, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 84 = linux_sync_file_range */
@@ -142,7 +142,7 @@ struct sysent linux_sysent[] = {
{ .sy_narg = AS(linux_sched_get_priority_max_args), .sy_call = (sy_call_t *)linux_sched_get_priority_max, .sy_auevent = AUE_SCHED_GET_PRIORITY_MAX, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 125 = linux_sched_get_priority_max */
{ .sy_narg = AS(linux_sched_get_priority_min_args), .sy_call = (sy_call_t *)linux_sched_get_priority_min, .sy_auevent = AUE_SCHED_GET_PRIORITY_MIN, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 126 = linux_sched_get_priority_min */
{ .sy_narg = AS(linux_sched_rr_get_interval_args), .sy_call = (sy_call_t *)linux_sched_rr_get_interval, .sy_auevent = AUE_SCHED_RR_GET_INTERVAL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 127 = linux_sched_rr_get_interval */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 128 = restart_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 128 = restart_syscall */
{ .sy_narg = AS(linux_kill_args), .sy_call = (sy_call_t *)linux_kill, .sy_auevent = AUE_KILL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 129 = linux_kill */
{ .sy_narg = AS(linux_tkill_args), .sy_call = (sy_call_t *)linux_tkill, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 130 = linux_tkill */
{ .sy_narg = AS(linux_tgkill_args), .sy_call = (sy_call_t *)linux_tgkill, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 131 = linux_tgkill */
@@ -227,7 +227,7 @@ struct sysent linux_sysent[] = {
{ .sy_narg = AS(linux_shutdown_args), .sy_call = (sy_call_t *)linux_shutdown, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 210 = linux_shutdown */
{ .sy_narg = AS(linux_sendmsg_args), .sy_call = (sy_call_t *)linux_sendmsg, .sy_auevent = AUE_SENDMSG, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 211 = linux_sendmsg */
{ .sy_narg = AS(linux_recvmsg_args), .sy_call = (sy_call_t *)linux_recvmsg, .sy_auevent = AUE_RECVMSG, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 212 = linux_recvmsg */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 213 = linux_readahead */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 213 = linux_readahead */
{ .sy_narg = AS(linux_brk_args), .sy_call = (sy_call_t *)linux_brk, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 214 = linux_brk */
{ .sy_narg = AS(munmap_args), .sy_call = (sy_call_t *)sys_munmap, .sy_auevent = AUE_MUNMAP, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 215 = munmap */
{ .sy_narg = AS(linux_mremap_args), .sy_call = (sy_call_t *)linux_mremap, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 216 = linux_mremap */
@@ -258,22 +258,22 @@ struct sysent linux_sysent[] = {
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_perf_event_open, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 241 = linux_perf_event_open */
{ .sy_narg = AS(linux_accept4_args), .sy_call = (sy_call_t *)linux_accept4, .sy_auevent = AUE_ACCEPT, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 242 = linux_accept4 */
{ .sy_narg = AS(linux_recvmmsg_args), .sy_call = (sy_call_t *)linux_recvmmsg, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 243 = linux_recvmmsg */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 244 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 245 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 246 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 247 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 248 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 249 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 250 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 251 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 252 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 253 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 254 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 255 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 256 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 257 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 258 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 259 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 244 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 245 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 246 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 247 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 248 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 249 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 250 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 251 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 252 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 253 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 254 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 255 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 256 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 257 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 258 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 259 = unimpl_md_syscall */
{ .sy_narg = AS(linux_wait4_args), .sy_call = (sy_call_t *)linux_wait4, .sy_auevent = AUE_WAIT4, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 260 = linux_wait4 */
{ .sy_narg = AS(linux_prlimit64_args), .sy_call = (sy_call_t *)linux_prlimit64, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 261 = linux_prlimit64 */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_fanotify_init, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 262 = linux_fanotify_init */
@@ -309,135 +309,135 @@ struct sysent linux_sysent[] = {
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_io_pgetevents, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 292 = linux_io_pgetevents */
{ .sy_narg = AS(linux_rseq_args), .sy_call = (sy_call_t *)linux_rseq, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 293 = linux_rseq */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_kexec_file_load, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 294 = linux_kexec_file_load */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 295 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 296 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 297 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 298 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 299 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 300 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 301 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 302 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 303 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 304 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 305 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 306 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 307 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 308 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 309 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 310 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 311 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 312 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 313 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 314 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 315 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 316 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 317 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 318 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 319 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 320 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 321 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 322 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 323 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 324 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 325 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 326 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 327 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 328 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 329 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 330 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 331 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 332 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 333 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 334 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 335 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 336 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 337 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 338 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 339 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 340 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 341 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 342 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 343 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 344 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 345 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 346 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 347 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 348 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 349 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 350 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 351 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 352 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 353 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 354 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 355 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 356 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 357 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 358 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 359 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 360 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 361 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 362 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 363 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 364 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 365 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 366 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 367 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 368 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 369 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 370 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 371 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 372 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 373 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 374 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 375 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 376 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 377 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 378 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 379 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 380 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 381 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 382 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 383 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 384 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 385 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 386 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 387 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 388 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 389 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 390 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 391 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 392 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 393 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 394 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 395 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 396 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 397 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 398 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 399 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 400 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 401 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 402 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 403 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 404 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 405 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 406 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 407 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 408 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 409 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 410 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 411 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 412 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 413 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 414 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 415 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 416 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 417 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 418 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 419 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 420 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 421 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 422 = unimpl_md_syscall */
- { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 423 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 295 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 296 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 297 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 298 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 299 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 300 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 301 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 302 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 303 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 304 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 305 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 306 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 307 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 308 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 309 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 310 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 311 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 312 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 313 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 314 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 315 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 316 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 317 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 318 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 319 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 320 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 321 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 322 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 323 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 324 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 325 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 326 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 327 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 328 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 329 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 330 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 331 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 332 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 333 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 334 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 335 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 336 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 337 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 338 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 339 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 340 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 341 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 342 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 343 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 344 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 345 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 346 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 347 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 348 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 349 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 350 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 351 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 352 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 353 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 354 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 355 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 356 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 357 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 358 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 359 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 360 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 361 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 362 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 363 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 364 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 365 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 366 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 367 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 368 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 369 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 370 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 371 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 372 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 373 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 374 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 375 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 376 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 377 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 378 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 379 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 380 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 381 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 382 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 383 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 384 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 385 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 386 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 387 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 388 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 389 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 390 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 391 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 392 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 393 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 394 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 395 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 396 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 397 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 398 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 399 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 400 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 401 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 402 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 403 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 404 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 405 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 406 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 407 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 408 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 409 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 410 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 411 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 412 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 413 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 414 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 415 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 416 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 417 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 418 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 419 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 420 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 421 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 422 = unimpl_md_syscall */
+ { .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 423 = unimpl_md_syscall */
{ .sy_narg = AS(linux_pidfd_send_signal_args), .sy_call = (sy_call_t *)linux_pidfd_send_signal, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 424 = linux_pidfd_send_signal */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_io_uring_setup, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 425 = linux_io_uring_setup */
{ .sy_narg = 0, .sy_call = (sy_call_t *)linux_io_uring_enter, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 426 = linux_io_uring_enter */
diff --git a/sys/arm64/linux/linux_systrace_args.c b/sys/arm64/linux/linux_systrace_args.c
index 151d46238d1c..54e4dd82355d 100644
--- a/sys/arm64/linux/linux_systrace_args.c
+++ b/sys/arm64/linux/linux_systrace_args.c
@@ -1,8 +1,9 @@
/*
- * System call argument to DTrace register array converstion.
+ * System call argument to DTrace register array conversion.
*
- * DO NOT EDIT-- this file is automatically @generated.
* This file is part of the DTrace syscall provider.
+ *
+ * DO NOT EDIT-- this file is automatically @generated.
*/
static void
@@ -848,7 +849,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 107: {
struct linux_timer_create_args *p = params;
iarg[a++] = p->clock_id; /* clockid_t */
- uarg[a++] = (intptr_t)p->evp; /* struct sigevent * */
+ uarg[a++] = (intptr_t)p->evp; /* struct l_sigevent * */
uarg[a++] = (intptr_t)p->timerid; /* l_timer_t * */
*n_args = 3;
break;
@@ -1445,7 +1446,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 184: {
struct linux_mq_notify_args *p = params;
iarg[a++] = p->mqd; /* l_mqd_t */
- uarg[a++] = (intptr_t)p->abs_timeout; /* const struct l_timespec * */
+ uarg[a++] = (intptr_t)p->sevp; /* const struct l_sigevent * */
*n_args = 2;
break;
}
@@ -3848,7 +3849,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "clockid_t";
break;
case 1:
- p = "userland struct sigevent *";
+ p = "userland struct l_sigevent *";
break;
case 2:
p = "userland l_timer_t *";
@@ -4792,7 +4793,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "l_mqd_t";
break;
case 1:
- p = "userland const struct l_timespec *";
+ p = "userland const struct l_sigevent *";
break;
default:
break;
diff --git a/sys/arm64/linux/linux_sysvec.c b/sys/arm64/linux/linux_sysvec.c
index 185c4f3044b1..084b7a11b01f 100644
--- a/sys/arm64/linux/linux_sysvec.c
+++ b/sys/arm64/linux/linux_sysvec.c
@@ -156,6 +156,8 @@ linux64_arch_copyout_auxargs(struct image_params *imgp, Elf_Auxinfo **pos)
AUXARGS_ENTRY((*pos), LINUX_AT_SYSINFO_EHDR, linux_vdso_base);
AUXARGS_ENTRY((*pos), LINUX_AT_HWCAP, *imgp->sysent->sv_hwcap);
AUXARGS_ENTRY((*pos), LINUX_AT_HWCAP2, *imgp->sysent->sv_hwcap2);
+ AUXARGS_ENTRY((*pos), LINUX_AT_HWCAP3, *imgp->sysent->sv_hwcap3);
+ AUXARGS_ENTRY((*pos), LINUX_AT_HWCAP4, *imgp->sysent->sv_hwcap4);
AUXARGS_ENTRY((*pos), LINUX_AT_PLATFORM, PTROUT(linux_platform));
}
@@ -458,6 +460,8 @@ struct sysentvec elf_linux_sysvec = {
.sv_trap = NULL,
.sv_hwcap = &linux_elf_hwcap,
.sv_hwcap2 = &linux_elf_hwcap2,
+ .sv_hwcap3 = &linux_elf_hwcap3,
+ .sv_hwcap4 = &linux_elf_hwcap4,
.sv_onexec = linux_on_exec_vmspace,
.sv_onexit = linux_on_exit,
.sv_ontdexit = linux_thread_dtor,
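With LINUX_AT_HWCAP3/LINUX_AT_HWCAP4 wired into the aux vector, a Linux binary running under the compat layer can observe the new hwcap words the usual way. A minimal userland sketch, assuming a Linux libc that exposes getauxval() and the Linux uapi values AT_HWCAP3 == 29 and AT_HWCAP4 == 30:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP3
#define AT_HWCAP3	29	/* assumption: Linux uapi value */
#endif
#ifndef AT_HWCAP4
#define AT_HWCAP4	30	/* assumption: Linux uapi value */
#endif

int
main(void)
{
	/* getauxval() returns 0 if the kernel did not provide the entry. */
	printf("AT_HWCAP3 %#lx, AT_HWCAP4 %#lx\n",
	    getauxval(AT_HWCAP3), getauxval(AT_HWCAP4));
	return (0);
}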
diff --git a/sys/arm64/linux/linux_vdso_gtod.c b/sys/arm64/linux/linux_vdso_gtod.c
index f7def68d88c4..203c76b6e3a9 100644
--- a/sys/arm64/linux/linux_vdso_gtod.c
+++ b/sys/arm64/linux/linux_vdso_gtod.c
@@ -29,6 +29,7 @@
#include <sys/elf.h>
#include <sys/errno.h>
#include <sys/proc.h>
+#include <sys/stdarg.h>
#include <sys/stddef.h>
#define _KERNEL
#include <sys/vdso.h>
@@ -36,7 +37,6 @@
#include <stdbool.h>
#include <machine/atomic.h>
-#include <machine/stdarg.h>
#include <arm64/linux/linux.h>
#include <arm64/linux/linux_syscall.h>
diff --git a/sys/arm64/linux/syscalls.master b/sys/arm64/linux/syscalls.master
index 61c7499d57b3..79c04c398e00 100644
--- a/sys/arm64/linux/syscalls.master
+++ b/sys/arm64/linux/syscalls.master
@@ -661,7 +661,7 @@
107 AUE_NULL STD {
int linux_timer_create(
clockid_t clock_id,
- struct sigevent *evp,
+ struct l_sigevent *evp,
l_timer_t *timerid
);
}
@@ -1107,7 +1107,7 @@
184 AUE_NULL STD {
int linux_mq_notify(
l_mqd_t mqd,
- const struct l_timespec *abs_timeout
+ const struct l_sigevent *sevp
);
}
185 AUE_NULL STD {
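Both master-file fixes align the prototypes with the actual Linux ABI: timer_create() and mq_notify() take a Linux-layout sigevent (struct l_sigevent), not the native struct or a timespec. The systrace argument strings in the hunks above follow mechanically, since linux_systrace_args.c is regenerated from this file.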
diff --git a/sys/arm64/nvidia/tegra210/max77620.c b/sys/arm64/nvidia/tegra210/max77620.c
index b33d73e71f90..d9c7736bd4c3 100644
--- a/sys/arm64/nvidia/tegra210/max77620.c
+++ b/sys/arm64/nvidia/tegra210/max77620.c
@@ -435,7 +435,8 @@ max77620_attach(device_t dev)
goto fail;
}
#endif
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
if (sc->irq_h != NULL)
@@ -450,6 +451,11 @@ static int
max77620_detach(device_t dev)
{
struct max77620_softc *sc;
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
if (sc->irq_h != NULL)
@@ -458,7 +464,7 @@ max77620_detach(device_t dev)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
LOCK_DESTROY(sc);
- return (bus_generic_detach(dev));
+ return (0);
}
static phandle_t
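This hunk and the similar ones below converge on the newer newbus idiom: bus_attach_children() (which returns void) replaces the bus_generic_attach() tail call, and detach drains the children before any driver state is torn down. A condensed sketch, with illustrative names:

static int
foo_attach(device_t dev)
{
	/* ... allocate resources, set up the softc ... */
	bus_attach_children(dev);	/* attach children; no return value */
	return (0);
}

static int
foo_detach(device_t dev)
{
	int error;

	/* Detach children first; keep the softc intact if that fails. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);
	/* ... now it is safe to release interrupts, resources, locks ... */
	return (0);
}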
diff --git a/sys/arm64/nvidia/tegra210/max77620_rtc.c b/sys/arm64/nvidia/tegra210/max77620_rtc.c
index dc82d4b387b9..77d38c347d14 100644
--- a/sys/arm64/nvidia/tegra210/max77620_rtc.c
+++ b/sys/arm64/nvidia/tegra210/max77620_rtc.c
@@ -355,7 +355,8 @@ max77620_rtc_attach(device_t dev)
clock_register(sc->dev, 1000000);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
LOCK_DESTROY(sc);
@@ -366,11 +367,16 @@ static int
max77620_rtc_detach(device_t dev)
{
struct max77620_softc *sc;
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
LOCK_DESTROY(sc);
- return (bus_generic_detach(dev));
+ return (0);
}
/*
@@ -385,7 +391,7 @@ max77620_rtc_create(struct max77620_softc *sc, phandle_t node)
parent = device_get_parent(sc->dev);
- child = BUS_ADD_CHILD(parent, 0, NULL, -1);
+ child = BUS_ADD_CHILD(parent, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "Cannot create MAX77620 RTC device.\n");
return (ENXIO);
diff --git a/sys/arm64/nvidia/tegra210/tegra210_coretemp.c b/sys/arm64/nvidia/tegra210/tegra210_coretemp.c
index ac037d4ac385..973cbc4759fb 100644
--- a/sys/arm64/nvidia/tegra210/tegra210_coretemp.c
+++ b/sys/arm64/nvidia/tegra210/tegra210_coretemp.c
@@ -181,9 +181,9 @@ tegra210_coretemp_identify(driver_t *driver, device_t parent)
root = OF_finddevice("/");
if (!ofw_bus_node_is_compatible(root, "nvidia,tegra210"))
return;
- if (device_find_child(parent, "tegra210_coretemp", -1) != NULL)
+ if (device_find_child(parent, "tegra210_coretemp", DEVICE_UNIT_ANY) != NULL)
return;
- if (BUS_ADD_CHILD(parent, 0, "tegra210_coretemp", -1) == NULL)
+ if (BUS_ADD_CHILD(parent, 0, "tegra210_coretemp", DEVICE_UNIT_ANY) == NULL)
device_printf(parent, "add child failed\n");
}
diff --git a/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c b/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c
index 9b248a09bd58..56dfc1b32500 100644
--- a/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c
+++ b/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c
@@ -393,9 +393,9 @@ tegra210_cpufreq_identify(driver_t *driver, device_t parent)
if (device_get_unit(parent) != 0)
return;
- if (device_find_child(parent, "tegra210_cpufreq", -1) != NULL)
+ if (device_find_child(parent, "tegra210_cpufreq", DEVICE_UNIT_ANY) != NULL)
return;
- if (BUS_ADD_CHILD(parent, 0, "tegra210_cpufreq", -1) == NULL)
+ if (BUS_ADD_CHILD(parent, 0, "tegra210_cpufreq", DEVICE_UNIT_ANY) == NULL)
device_printf(parent, "add child failed\n");
}
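For context, the identify-method idiom these hunks update, sketched with an illustrative driver name: identify can run on every bus pass, so the find-before-add guard keeps the child unique, and DEVICE_UNIT_ANY now spells the old -1 wildcard unit.

static void
mydrv_identify(driver_t *driver, device_t parent)
{
	/* Runs on each bus rescan: only add the child once. */
	if (device_find_child(parent, "mydrv", DEVICE_UNIT_ANY) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "mydrv", DEVICE_UNIT_ANY) == NULL)
		device_printf(parent, "add child failed\n");
}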
diff --git a/sys/arm64/nvidia/tegra210/tegra210_pmc.c b/sys/arm64/nvidia/tegra210/tegra210_pmc.c
index 0f0343a317ce..b8b95ca32c40 100644
--- a/sys/arm64/nvidia/tegra210/tegra210_pmc.c
+++ b/sys/arm64/nvidia/tegra210/tegra210_pmc.c
@@ -190,7 +190,7 @@ WR4(struct tegra210_pmc_softc *sc, bus_size_t r, uint32_t v)
struct arm_smccc_res res;
if (sc->secure_access) {
- arm_smccc_smc(PMC_SMC, PMC_SMC_WRITE, r, v, 0, 0, 0, 0, &res);
+ arm_smccc_invoke_smc(PMC_SMC, PMC_SMC_WRITE, r, v, &res);
if (res.a0 != 0)
device_printf(sc->dev," PMC SMC write failed: %lu\n",
res.a0);
@@ -205,7 +205,7 @@ RD4(struct tegra210_pmc_softc *sc, bus_size_t r)
struct arm_smccc_res res;
if (sc->secure_access) {
- arm_smccc_smc(PMC_SMC, PMC_SMC_READ, r, 0, 0, 0, 0, 0, &res);
+ arm_smccc_invoke_smc(PMC_SMC, PMC_SMC_READ, r, &res);
if (res.a0 != 0)
device_printf(sc->dev," PMC SMC write failed: %lu\n",
res.a0);
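The arm_smccc_invoke_smc() form takes only the arguments actually used and zero-fills the remaining SMC argument registers, which is what the dropped trailing zeros were doing by hand. A hedged sketch of a read-style call site (header path and the a1 result register are assumptions, not taken from this patch):

#include <sys/errno.h>
#include <dev/psci/smccc.h>

static int
pmc_smc_read(uint32_t fid, bus_size_t reg, uint64_t *valp)
{
	struct arm_smccc_res res;

	/* Unused argument registers are implicitly zero in this form. */
	arm_smccc_invoke_smc(fid, PMC_SMC_READ, reg, &res);
	if (res.a0 != 0)
		return (EIO);	/* firmware reported failure */
	*valp = res.a1;		/* assumption: value returned in a1 */
	return (0);
}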
diff --git a/sys/arm64/qoriq/qoriq_dw_pci.c b/sys/arm64/qoriq/qoriq_dw_pci.c
index 2d7de343564a..b6eecba48c39 100644
--- a/sys/arm64/qoriq/qoriq_dw_pci.c
+++ b/sys/arm64/qoriq/qoriq_dw_pci.c
@@ -31,7 +31,6 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
-#include <sys/devmap.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@@ -241,7 +240,8 @@ qorif_dw_pci_attach(device_t dev)
goto out;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
out:
/* XXX Cleanup */
return (rv);
diff --git a/sys/arm64/qoriq/qoriq_gpio_pic.c b/sys/arm64/qoriq/qoriq_gpio_pic.c
index ec84d2a14ae7..aca8b9daea13 100644
--- a/sys/arm64/qoriq/qoriq_gpio_pic.c
+++ b/sys/arm64/qoriq/qoriq_gpio_pic.c
@@ -34,11 +34,11 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/gpio.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/gpio/gpiobusvar.h>
#include <dev/gpio/qoriq_gpio.h>
diff --git a/sys/arm64/qoriq/qoriq_therm.c b/sys/arm64/qoriq/qoriq_therm.c
index decd55fad6e4..72c0e6ff0fa2 100644
--- a/sys/arm64/qoriq/qoriq_therm.c
+++ b/sys/arm64/qoriq/qoriq_therm.c
@@ -464,7 +464,8 @@ qoriq_therm_attach(device_t dev)
}
OF_device_register_xref(OF_xref_from_node(node), dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
if (sc->irq_ih != NULL)
diff --git a/sys/arm64/rockchip/rk3328_codec.c b/sys/arm64/rockchip/rk3328_codec.c
index 480a1087ea2d..22e3cde9093e 100644
--- a/sys/arm64/rockchip/rk3328_codec.c
+++ b/sys/arm64/rockchip/rk3328_codec.c
@@ -568,6 +568,12 @@ rkcodec_dai_setup_mixer(device_t dev, device_t pcmdev)
return (0);
}
+static int
+rkcodec_dai_set_sysclk(device_t dev, unsigned int rate, int dai_dir)
+{
+ return (0);
+}
+
static device_method_t rkcodec_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, rkcodec_probe),
@@ -577,6 +583,7 @@ static device_method_t rkcodec_methods[] = {
DEVMETHOD(audio_dai_init, rkcodec_dai_init),
DEVMETHOD(audio_dai_setup_mixer, rkcodec_dai_setup_mixer),
DEVMETHOD(audio_dai_trigger, rkcodec_dai_trigger),
+ DEVMETHOD(audio_dai_set_sysclk, rkcodec_dai_set_sysclk),
DEVMETHOD_END
};
diff --git a/sys/arm64/rockchip/rk3568_pcie.c b/sys/arm64/rockchip/rk3568_pcie.c
index d55bfb1bcc73..ac9d0a713886 100644
--- a/sys/arm64/rockchip/rk3568_pcie.c
+++ b/sys/arm64/rockchip/rk3568_pcie.c
@@ -139,21 +139,43 @@ rk3568_pcie_init_soc(device_t dev)
int err, count;
bool status;
+ /* Assert PCIe reset */
+ if (sc->reset_gpio != NULL) {
+ if (gpio_pin_setflags(sc->reset_gpio, GPIO_PIN_OUTPUT)) {
+ device_printf(dev, "Could not setup PCIe reset\n");
+ return (ENXIO);
+ }
+ if (gpio_pin_set_active(sc->reset_gpio, true)) {
+ device_printf(dev, "Could not set PCIe reset\n");
+ return (ENXIO);
+ }
+ }
+
/* Assert reset */
- if (hwreset_assert(sc->hwreset))
+ if (hwreset_assert(sc->hwreset)) {
device_printf(dev, "Could not assert reset\n");
+ return (ENXIO);
+ }
/* Powerup PCIe */
- if (regulator_enable(sc->regulator))
- device_printf(dev, "Cannot enable regulator\n");
+ if (sc->regulator != NULL) {
+ if (regulator_enable(sc->regulator)) {
+ device_printf(dev, "Cannot enable regulator\n");
+ return (ENXIO);
+ }
+ }
/* Enable PHY */
- if (phy_enable(sc->phy))
+ if (phy_enable(sc->phy)) {
device_printf(dev, "Cannot enable phy\n");
+ return (ENXIO);
+ }
/* Deassert reset */
- if (hwreset_deassert(sc->hwreset))
+ if (hwreset_deassert(sc->hwreset)) {
device_printf(dev, "Could not deassert reset\n");
+ return (ENXIO);
+ }
/* Enable clocks */
if ((err = clk_enable(sc->aclk_mst))) {
@@ -183,7 +205,7 @@ rk3568_pcie_init_soc(device_t dev)
bus_write_4(sc->apb_res, PCIE_CLIENT_GENERAL_CON,
(DEVICE_TYPE_MASK << 16) | DEVICE_TYPE_RC);
- /* Assert reset PCIe */
+ /* Deassert PCIe reset */
if ((err = gpio_pin_set_active(sc->reset_gpio, false)))
device_printf(dev, "reset_gpio set failed\n");
@@ -193,9 +215,13 @@ rk3568_pcie_init_soc(device_t dev)
(LINK_REQ_RST_GRT | LTSSM_ENABLE));
DELAY(100000);
- /* Release reset */
- if ((err = gpio_pin_set_active(sc->reset_gpio, true)))
- device_printf(dev, "reset_gpio release failed\n");
+ /* Release PCIe reset */
+ if (sc->reset_gpio != NULL) {
+ if (gpio_pin_set_active(sc->reset_gpio, true)) {
+ device_printf(dev, "Could not release PCIe reset");
+ return (ENXIO);
+ }
+ }
/* Wait for link up/stable */
for (count = 20; count; count--) {
@@ -301,8 +327,9 @@ rk3568_pcie_attach(device_t dev)
}
/* Get regulator if present */
- if (regulator_get_by_ofw_property(dev, 0, "vpcie3v3-supply",
- &sc->regulator)) {
+ error = regulator_get_by_ofw_property(dev, 0, "vpcie3v3-supply",
+ &sc->regulator);
+ if (error != 0 && error != ENOENT) {
device_printf(dev, "Cannot get regulator\n");
goto fail;
}
@@ -314,14 +341,11 @@ rk3568_pcie_attach(device_t dev)
}
/* Get GPIO reset */
- if (OF_hasprop(sc->node, "reset-gpios")) {
- if (gpio_pin_get_by_ofw_property(dev, sc->node, "reset-gpios",
- &sc->reset_gpio)) {
- device_printf(dev, "Cannot get reset-gpios\n");
- goto fail;
- }
- gpio_pin_setflags(sc->reset_gpio, GPIO_PIN_OUTPUT);
- gpio_pin_set_active(sc->reset_gpio, true);
+ error = gpio_pin_get_by_ofw_property(dev, sc->node, "reset-gpios",
+ &sc->reset_gpio);
+ if (error != 0 && error != ENOENT) {
+ device_printf(dev, "Cannot get reset-gpios\n");
+ goto fail;
}
/* Get clocks */
@@ -362,7 +386,8 @@ rk3568_pcie_attach(device_t dev)
goto fail;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
rk3568_pcie_detach(dev);
return (ENXIO);
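The attach changes above adopt a common optional-property pattern: ENOENT from the lookup means the devicetree property simply is not present, which is legal for optional regulators and reset GPIOs; only other errors are fatal. The handle stays NULL when the property is absent, which is what the NULL checks added to rk3568_pcie_init_soc() rely on. Sketch:

error = gpio_pin_get_by_ofw_property(dev, node, "reset-gpios",
    &sc->reset_gpio);
if (error != 0 && error != ENOENT) {
	device_printf(dev, "Cannot get reset-gpios\n");
	return (error);		/* a real failure, not a missing property */
}
/* sc->reset_gpio remains NULL when the property is absent. */
if (sc->reset_gpio != NULL)
	gpio_pin_setflags(sc->reset_gpio, GPIO_PIN_OUTPUT);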
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
index 6a7ae95d4e8a..a86392f16624 100644
--- a/sys/arm64/rockchip/rk_gpio.c
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -76,7 +76,7 @@ enum gpio_regs {
GPIO_INTR_EDGE_RISING | GPIO_INTR_EDGE_FALLING | \
GPIO_INTR_LEVEL_HIGH | GPIO_INTR_LEVEL_LOW)
-#define GPIO_FLAGS_PINCTRL GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN
+#define GPIO_FLAGS_PINCTRL (GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN)
#define RK_GPIO_MAX_PINS 32
struct pin_cached {
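The added parentheses matter because & binds tighter than |: unparenthesized, a test such as (flags & GPIO_FLAGS_PINCTRL) expands to (flags & GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN), which groups as ((flags & GPIO_PIN_PULLUP) | GPIO_PIN_PULLDOWN) and is therefore never zero.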
diff --git a/sys/arm64/rockchip/rk_grf_gpio.c b/sys/arm64/rockchip/rk_grf_gpio.c
new file mode 100644
index 000000000000..6818bd85bb95
--- /dev/null
+++ b/sys/arm64/rockchip/rk_grf_gpio.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2025 Stephen Hurd <shurd@FreeBSD.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/syscon/syscon.h>
+
+#include "syscon_if.h"
+
+#define GRF_SOC_CON10 0x0428
+#define SOC_CON10_GPIOMUT (1 << 1)
+#define SOC_CON10_GPIOMUT_MASK ((1 << 1) << 16)
+#define SOC_CON10_GPIOMUT_EN (1 << 0)
+#define SOC_CON10_GPIOMUT_EN_MASK ((1 << 0) << 16)
+
+struct rk_grf_gpio_softc {
+ device_t sc_dev;
+ device_t sc_busdev;
+ struct syscon *sc_grf;
+ bool active_high;
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"rockchip,rk3328-grf-gpio", 1},
+ {NULL, 0}
+};
+
+static device_t
+rk_grf_gpio_get_bus(device_t dev)
+{
+ struct rk_grf_gpio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (sc->sc_busdev);
+}
+
+static int
+rk_grf_gpio_pin_max(device_t dev, int *maxpin)
+{
+ *maxpin = 1;
+ return (0);
+}
+
+static int
+rk_grf_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
+{
+ if (pin)
+ return (EINVAL);
+
+ snprintf(name, GPIOMAXNAME, "GPIO_MUTE");
+
+ return (0);
+}
+
+static int
+rk_grf_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
+{
+ if (pin)
+ return (EINVAL);
+ *flags = GPIO_PIN_OUTPUT;
+ return (0);
+}
+
+static int
+rk_grf_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
+{
+ if (pin)
+ return (EINVAL);
+ if (flags != GPIO_PIN_OUTPUT)
+ return (EINVAL);
+
+ return (0);
+}
+
+static int
+rk_grf_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
+{
+ if (pin)
+ return (EINVAL);
+
+ *caps = GPIO_PIN_OUTPUT;
+ return (0);
+}
+
+static int
+rk_grf_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
+{
+ struct rk_grf_gpio_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+
+ if (pin)
+ return (EINVAL);
+
+ reg = SYSCON_READ_4(sc->sc_grf, GRF_SOC_CON10);
+ if (reg & SOC_CON10_GPIOMUT)
+ *val = 1;
+ else
+ *val = 0;
+
+ return (0);
+}
+
+static int
+rk_grf_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
+{
+ struct rk_grf_gpio_softc *sc;
+ uint32_t val;
+
+ sc = device_get_softc(dev);
+
+ if (pin)
+ return (EINVAL);
+
+ val = SOC_CON10_GPIOMUT_MASK;
+ if (value)
+ val |= SOC_CON10_GPIOMUT;
+ SYSCON_WRITE_4(sc->sc_grf, GRF_SOC_CON10, val);
+
+ return (0);
+}
+
+static int
+rk_grf_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells,
+ pcell_t *gpios, uint32_t *pin, uint32_t *flags)
+{
+ if (gpios[0])
+ return (EINVAL);
+
+ /* The gpios are mapped as <pin flags> */
+ *pin = 0;
+ /* TODO: The only valid flags are active low or active high */
+ *flags = GPIO_PIN_OUTPUT;
+ return (0);
+}
+
+static int
+rk_grf_gpio_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "RockChip General Register File GPIO (GPIO_MUTE)");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+rk_grf_gpio_attach(device_t dev)
+{
+ struct rk_grf_gpio_softc *sc;
+ phandle_t parent_node, node;
+ device_t pdev;
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+
+ node = ofw_bus_get_node(sc->sc_dev);
+ if (!OF_hasprop(node, "gpio-controller"))
+ return (ENXIO);
+ pdev = device_get_parent(dev);
+ parent_node = ofw_bus_get_node(pdev);
+ if (syscon_get_by_ofw_node(dev, parent_node, &sc->sc_grf) != 0) {
+ device_printf(dev, "cannot get parent syscon handle\n");
+ return (ENXIO);
+ }
+
+ sc->sc_busdev = gpiobus_attach_bus(dev);
+ if (sc->sc_busdev == NULL) {
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+rk_grf_gpio_detach(device_t dev)
+{
+ struct rk_grf_gpio_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ if (sc->sc_busdev)
+ gpiobus_detach_bus(dev);
+
+ return(0);
+}
+
+static device_method_t rk_grf_gpio_methods[] = {
+ DEVMETHOD(device_probe, rk_grf_gpio_probe),
+ DEVMETHOD(device_attach, rk_grf_gpio_attach),
+ DEVMETHOD(device_detach, rk_grf_gpio_detach),
+
+ /* GPIO protocol */
+ DEVMETHOD(gpio_get_bus, rk_grf_gpio_get_bus),
+ DEVMETHOD(gpio_pin_max, rk_grf_gpio_pin_max),
+ DEVMETHOD(gpio_pin_getname, rk_grf_gpio_pin_getname),
+ DEVMETHOD(gpio_pin_getflags, rk_grf_gpio_pin_getflags),
+ DEVMETHOD(gpio_pin_setflags, rk_grf_gpio_pin_setflags),
+ DEVMETHOD(gpio_pin_getcaps, rk_grf_gpio_pin_getcaps),
+ DEVMETHOD(gpio_pin_get, rk_grf_gpio_pin_get),
+ DEVMETHOD(gpio_pin_set, rk_grf_gpio_pin_set),
+ DEVMETHOD(gpio_map_gpios, rk_grf_gpio_map_gpios),
+
+ DEVMETHOD_END
+};
+
+static driver_t rk_grf_gpio_driver = {
+ "gpio",
+ rk_grf_gpio_methods,
+ sizeof(struct rk_grf_gpio_softc),
+};
+
+/*
+ * The GPIO driver is always a child of the rk_grf driver and should be
+ * probed and attached from the rk_grf attach function. Because of this,
+ * its bus pass order must match that of the rk_grf driver.
+ */
+EARLY_DRIVER_MODULE(rk_grf_gpio, simplebus, rk_grf_gpio_driver, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
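A note on the SYSCON_WRITE_4() in rk_grf_gpio_pin_set(): Rockchip GRF registers carry a write-enable mask in their upper 16 bits, so a single write of (mask << 16) | bits updates only the masked bits without a read-modify-write; the SOC_CON10_GPIOMUT_MASK define above encodes exactly that. Restated as a generic helper (a sketch assuming the usual GRF convention):

/* Update only the bits selected by mask; other bits are untouched. */
static inline void
grf_update(struct syscon *grf, bus_size_t reg, uint16_t mask, uint16_t bits)
{
	SYSCON_WRITE_4(grf, reg, ((uint32_t)mask << 16) | (bits & mask));
}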
diff --git a/sys/arm64/rockchip/rk_i2s.c b/sys/arm64/rockchip/rk_i2s.c
index 3d6aaf523c4a..5f1b6bbdeabf 100644
--- a/sys/arm64/rockchip/rk_i2s.c
+++ b/sys/arm64/rockchip/rk_i2s.c
@@ -91,16 +91,20 @@
#define TXFIFO0LR_MASK 0x3f
#define I2S_DMACR 0x0010
#define I2S_DMACR_RDE_ENABLE (1 << 24)
-#define I2S_DMACR_RDL(n) ((n) << 16)
+#define I2S_DMACR_RDL(n) (((n) - 1) << 16)
#define I2S_DMACR_TDE_ENABLE (1 << 8)
#define I2S_DMACR_TDL(n) ((n) << 0)
#define I2S_INTCR 0x0014
#define I2S_INTCR_RFT(n) (((n) - 1) << 20)
-#define I2S_INTCR_TFT(n) (((n) - 1) << 4)
+#define I2S_INTCR_TFT(n) ((n) << 4)
+#define I2S_INTCR_RXOIC (1 << 18)
+#define I2S_INTCR_RXOIE (1 << 17)
#define I2S_INTCR_RXFIE (1 << 16)
#define I2S_INTCR_TXUIC (1 << 2)
+#define I2S_INTCR_TXUIE (1 << 1)
#define I2S_INTCR_TXEIE (1 << 0)
#define I2S_INTSR 0x0018
+#define I2S_INTSR_RXOI (1 << 17)
#define I2S_INTSR_RXFI (1 << 16)
#define I2S_INTSR_TXUI (1 << 1)
#define I2S_INTSR_TXEI (1 << 0)
diff --git a/sys/arm64/rockchip/rk_iodomain.c b/sys/arm64/rockchip/rk_iodomain.c
index ed61b99481f2..7b4006fc9aed 100644
--- a/sys/arm64/rockchip/rk_iodomain.c
+++ b/sys/arm64/rockchip/rk_iodomain.c
@@ -163,11 +163,16 @@ rk_iodomain_set(struct rk_iodomain_softc *sc)
regulator_t supply;
uint32_t reg = 0;
uint32_t mask = 0;
- int uvolt, i;
+ int uvolt, i, rv;
for (i = 0; i < sc->conf->nsupply; i++) {
- if (regulator_get_by_ofw_property(sc->dev, sc->node,
- sc->conf->supply[i].name, &supply) != 0) {
+ rv = regulator_get_by_ofw_property(sc->dev, sc->node,
+ sc->conf->supply[i].name, &supply);
+
+ if (rv == ENOENT)
+ continue;
+
+ if (rv != 0) {
device_printf(sc->dev,
"Cannot get property for regulator %s\n",
sc->conf->supply[i].name);
diff --git a/sys/arm64/rockchip/rk_pcie.c b/sys/arm64/rockchip/rk_pcie.c
index 69ae4254a235..f22cfcf19def 100644
--- a/sys/arm64/rockchip/rk_pcie.c
+++ b/sys/arm64/rockchip/rk_pcie.c
@@ -1353,8 +1353,9 @@ rk_pcie_attach(device_t dev)
APB_WR4(sc, PCIE_RC_CONFIG_LCS, val);
DELAY(250000);
- device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
out_full:
bus_teardown_intr(dev, sc->sys_irq_res, sc->sys_irq_cookie);
diff --git a/sys/arm64/rockchip/rk_pinctrl.c b/sys/arm64/rockchip/rk_pinctrl.c
index 4892b0bf236c..f38601742142 100644
--- a/sys/arm64/rockchip/rk_pinctrl.c
+++ b/sys/arm64/rockchip/rk_pinctrl.c
@@ -1141,9 +1141,9 @@ rk_pinctrl_configure_pin(struct rk_pinctrl_softc *sc, uint32_t *pindata)
{
phandle_t pin_conf;
struct syscon *syscon;
- uint32_t bank, subbank, pin, function, bias;
+ uint32_t bank, subbank, pin, function;
uint32_t bit, mask, reg, drive;
- int i, rv;
+ int i, rv, bias;
bank = pindata[0];
pin = pindata[1];
@@ -1191,7 +1191,7 @@ rk_pinctrl_configure_pin(struct rk_pinctrl_softc *sc, uint32_t *pindata)
drive = ((1 << (value + 1)) - 1) << (pin % 2);
- mask = 0x3f << (pin % 2);;
+ mask = 0x3f << (pin % 2);
SYSCON_WRITE_4(syscon, reg, drive | (mask << 16));
}
@@ -1511,7 +1511,7 @@ rk_pinctrl_attach(device_t dev)
simplebus_init(dev, node);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/* Attach child devices */
for (node = OF_child(node), gpio_unit = 0; node > 0;
@@ -1537,7 +1537,8 @@ rk_pinctrl_attach(device_t dev)
fdt_pinctrl_configure_tree(dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c
index a8a64b69d7d3..e6cbad36f697 100644
--- a/sys/arm64/rockchip/rk_tsadc.c
+++ b/sys/arm64/rockchip/rk_tsadc.c
@@ -818,7 +818,8 @@ tsadc_attach(device_t dev)
}
OF_device_register_xref(OF_xref_from_node(node), dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail_sysctl:
sysctl_ctx_free(&tsadc_sysctl_ctx);
diff --git a/sys/arm64/rockchip/rk_usbphy.c b/sys/arm64/rockchip/rk_usbphy.c
index f9acbdff6d79..5db6cbb827dd 100644
--- a/sys/arm64/rockchip/rk_usbphy.c
+++ b/sys/arm64/rockchip/rk_usbphy.c
@@ -274,7 +274,8 @@ rk_usbphy_attach(device_t dev)
if (rv != 0)
goto fail;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
return (ENXIO);
diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
index 43459d14e143..6a0c4c78e568 100644
--- a/sys/arm64/vmm/arm64.h
+++ b/sys/arm64/vmm/arm64.h
@@ -39,6 +39,9 @@
struct vgic_v3;
struct vgic_v3_cpu;
+/*
+ * Per-vCPU hypervisor state.
+ */
struct hypctx {
struct trapframe tf;
@@ -91,6 +94,7 @@ struct hypctx {
/* EL2 control registers */
uint64_t cptr_el2; /* Architectural Feature Trap Register */
uint64_t hcr_el2; /* Hypervisor Configuration Register */
+ uint64_t hcrx_el2; /* Extended Hypervisor Configuration Register */
uint64_t mdcr_el2; /* Monitor Debug Configuration Register */
uint64_t vpidr_el2; /* Virtualization Processor ID Register */
uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
@@ -104,6 +108,12 @@ struct hypctx {
struct vtimer_cpu vtimer_cpu;
+ uint64_t setcaps; /* Currently enabled capabilities. */
+
+ /* vCPU state used to handle guest debugging. */
+ uint64_t debug_spsr; /* Saved guest SPSR */
+ uint64_t debug_mdscr; /* Saved guest MDSCR */
+
struct vgic_v3_regs vgic_v3_regs;
struct vgic_v3_cpu *vgic_cpu;
bool has_exception;
diff --git a/sys/arm64/vmm/io/vgic_v3.c b/sys/arm64/vmm/io/vgic_v3.c
index 7ed591c409ba..67afb3374815 100644
--- a/sys/arm64/vmm/io/vgic_v3.c
+++ b/sys/arm64/vmm/io/vgic_v3.c
@@ -68,6 +68,7 @@
#include <arm64/vmm/hyp.h>
#include <arm64/vmm/mmu.h>
#include <arm64/vmm/arm64.h>
+#include <arm64/vmm/vmm_handlers.h>
#include "vgic.h"
#include "vgic_v3.h"
@@ -2252,7 +2253,7 @@ vgic_v3_init(device_t dev)
uint64_t ich_vtr_el2;
uint32_t pribits, prebits;
- ich_vtr_el2 = vmm_call_hyp(HYP_READ_REGISTER, HYP_REG_ICH_VTR);
+ ich_vtr_el2 = vmm_read_reg(HYP_REG_ICH_VTR);
/* TODO: These fields are common with the vgicv2 driver */
pribits = ICH_VTR_EL2_PRIBITS(ich_vtr_el2);
diff --git a/sys/arm64/vmm/io/vtimer.c b/sys/arm64/vmm/io/vtimer.c
index aa0b3ff1588e..f59d7ebc1ad4 100644
--- a/sys/arm64/vmm/io/vtimer.c
+++ b/sys/arm64/vmm/io/vtimer.c
@@ -129,14 +129,42 @@ vtimer_vminit(struct hyp *hyp)
{
uint64_t now;
+ hyp->vtimer.cnthctl_el2 = cnthctl_el2_reg;
+
/*
* Configure the Counter-timer Hypervisor Control Register for the VM.
- *
- * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0 from EL1
- * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
*/
- hyp->vtimer.cnthctl_el2 = cnthctl_el2_reg & ~CNTHCTL_EL1PCEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCTEN;
+ if (in_vhe()) {
+ /*
+ * CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
+ * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
+ * CNTV{CT,CTSS}_EL0
+ * CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
+ * CNTV_{CTL,CVAL,TVAL}_EL0
+ * CNTHCTL_E2H_EL0PTEN: trap EL0 access to
+ * CNTP_{CTL,CVAL,TVAL}_EL0
+ * CNTHCTL_E2H_EL1PTEN: trap EL1 access to
+ * CNTP_{CTL,CVAL,TVAL}_EL0
+ * CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
+ *
+ * TODO: Don't trap when FEAT_ECV is present
+ */
+ hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PCTEN;
+ hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VCTEN;
+ hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VTEN;
+ hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PTEN;
+
+ hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PTEN;
+ hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PCTEN;
+ } else {
+ /*
+ * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
+ * from EL1
+ * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
+ */
+ hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCEN;
+ hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCTEN;
+ }
now = READ_SPECIALREG(cntpct_el0);
hyp->vtimer.cntvoff_el2 = now;
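For reference, the guest's virtual counter reads as the physical counter minus the offset:

/* guest CNTVCT_EL0 == host CNTPCT_EL0 - CNTVOFF_EL2, so seeding
 * cntvoff_el2 with the current count starts guest virtual time at ~0. */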
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index a2cc63448f19..3082d2941221 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -60,13 +60,14 @@
#include <machine/vm.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <dev/pci/pcireg.h>
+#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_ktr.h>
+#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_stat.h>
-#include "vmm_ktr.h"
-#include "vmm_stat.h"
#include "arm64.h"
#include "mmu.h"
@@ -94,25 +95,6 @@ struct vcpu {
#define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
-struct mem_seg {
- uint64_t gpa;
- size_t len;
- bool wired;
- bool sysmem;
- vm_object_t object;
-};
-#define VM_MAX_MEMSEGS 3
-
-struct mem_map {
- vm_paddr_t gpa;
- size_t len;
- vm_ooffset_t segoff;
- int segid;
- int prot;
- int flags;
-};
-#define VM_MAX_MEMMAPS 4
-
struct vmm_mmio_region {
uint64_t start;
uint64_t end;
@@ -141,11 +123,11 @@ struct vm {
volatile cpuset_t active_cpus; /* (i) active vcpus */
volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
int suspend; /* (i) stop VM execution */
+ bool dying; /* (o) is dying */
volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
- struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
struct vmspace *vmspace; /* (o) guest's address space */
+ struct vm_mem mem; /* (i) guest memory */
char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
struct vcpu **vcpu; /* (i) guest vcpus */
struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS];
@@ -156,7 +138,6 @@ struct vm {
uint16_t cores; /* (o) num of cores/socket */
uint16_t threads; /* (o) num of threads/core */
uint16_t maxcpus; /* (o) max pluggable cpus */
- struct sx mem_segs_lock; /* (o) */
struct sx vcpus_init_lock; /* (o) */
};
@@ -234,10 +215,25 @@ u_int vm_maxcpu;
SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&vm_maxcpu, 0, "Maximum number of vCPUs");
-static void vm_free_memmap(struct vm *vm, int ident);
-static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu);
+/* global statistics */
+VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
+VMM_STAT(VMEXIT_UNKNOWN, "number of vmexits for the unknown exception");
+VMM_STAT(VMEXIT_WFI, "number of times wfi was intercepted");
+VMM_STAT(VMEXIT_WFE, "number of times wfe was intercepted");
+VMM_STAT(VMEXIT_HVC, "number of times hvc was intercepted");
+VMM_STAT(VMEXIT_MSR, "number of times msr/mrs was intercepted");
+VMM_STAT(VMEXIT_DATA_ABORT, "number of vmexits for a data abort");
+VMM_STAT(VMEXIT_INSN_ABORT, "number of vmexits for an instruction abort");
+VMM_STAT(VMEXIT_UNHANDLED_SYNC, "number of vmexits for an unhandled synchronous exception");
+VMM_STAT(VMEXIT_IRQ, "number of vmexits for an irq");
+VMM_STAT(VMEXIT_FIQ, "number of vmexits for an interrupt");
+VMM_STAT(VMEXIT_BRK, "number of vmexits for a breakpoint exception");
+VMM_STAT(VMEXIT_SS, "number of vmexits for a single-step exception");
+VMM_STAT(VMEXIT_UNHANDLED_EL2, "number of vmexits for an unhandled EL2 exception");
+VMM_STAT(VMEXIT_UNHANDLED, "number of vmexits for an unhandled exception");
+
/*
* Upper limit on vm_maxcpu. We could increase this to 28 bits, but this
* is a safe value for now.
@@ -249,7 +245,8 @@ vmm_regs_init(struct vmm_regs *regs, const struct vmm_regs *masks)
{
#define _FETCH_KERN_REG(reg, field) do { \
regs->field = vmm_arch_regs_masks.field; \
- if (!get_kernel_reg_masked(reg, &regs->field, masks->field)) \
+ if (!get_kernel_reg_iss_masked(reg ## _ISS, &regs->field, \
+ masks->field)) \
regs->field = 0; \
} while (0)
_FETCH_KERN_REG(ID_AA64AFR0_EL1, id_aa64afr0);
@@ -315,6 +312,20 @@ vm_exitinfo(struct vcpu *vcpu)
}
static int
+vmm_unsupported_quirk(void)
+{
+ /*
+ * Known to not load on Ampere eMAG
+ * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=285051
+ */
+ if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_APM,
+ CPU_PART_EMAG8180, 0, 0))
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
vmm_init(void)
{
int error;
@@ -343,19 +354,29 @@ vmm_handler(module_t mod, int what, void *arg)
switch (what) {
case MOD_LOAD:
- /* TODO: if (vmm_is_hw_supported()) { */
- vmmdev_init();
+ error = vmm_unsupported_quirk();
+ if (error != 0)
+ break;
+ error = vmmdev_init();
+ if (error != 0)
+ break;
error = vmm_init();
if (error == 0)
vmm_initialized = true;
+ else
+ (void)vmmdev_cleanup();
break;
case MOD_UNLOAD:
- /* TODO: if (vmm_is_hw_supported()) { */
error = vmmdev_cleanup();
if (error == 0 && vmm_initialized) {
error = vmmops_modcleanup();
- if (error)
+ if (error) {
+ /*
+ * Something bad happened - prevent new
+ * VMs from being created
+ */
vmm_initialized = false;
+ }
}
break;
default:
@@ -376,8 +397,9 @@ static moduledata_t vmm_kmod = {
*
* - HYP initialization requires smp_rendezvous() and therefore must happen
* after SMP is fully functional (after SI_SUB_SMP).
+ * - vmm device initialization requires an initialized devfs.
*/
-DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
+DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
static void
@@ -405,6 +427,14 @@ vm_init(struct vm *vm, bool create)
}
}
+void
+vm_disable_vcpu_creation(struct vm *vm)
+{
+ sx_xlock(&vm->vcpus_init_lock);
+ vm->dying = true;
+ sx_xunlock(&vm->vcpus_init_lock);
+}
+
struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)
{
@@ -417,13 +447,14 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
if (vcpuid >= vgic_max_cpu_count(vm->cookie))
return (NULL);
- vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]);
+ vcpu = (struct vcpu *)
+ atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
if (__predict_true(vcpu != NULL))
return (vcpu);
sx_xlock(&vm->vcpus_init_lock);
vcpu = vm->vcpu[vcpuid];
- if (vcpu == NULL/* && !vm->dying*/) {
+ if (vcpu == NULL && !vm->dying) {
vcpu = vcpu_alloc(vm, vcpuid);
vcpu_init(vcpu);
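The lookup above is double-checked locking: a lock-free fast path whose acquire load pairs with the store that publishes the vcpu, with the sx lock taken only on the allocation path. The generic shape, with illustrative names (publication must use a matching release store):

static struct obj *
obj_lookup(int i)
{
	struct obj *o;

	/* Lock-free fast path; pairs with the release store below. */
	o = (struct obj *)atomic_load_acq_ptr((uintptr_t *)&table[i]);
	if (__predict_true(o != NULL))
		return (o);
	sx_xlock(&init_lock);
	o = table[i];
	if (o == NULL && !dying) {
		o = obj_alloc();
		/* Publish only after the object is fully initialized. */
		atomic_store_rel_ptr((uintptr_t *)&table[i], (uintptr_t)o);
	}
	sx_xunlock(&init_lock);
	return (o);
}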
@@ -473,7 +504,7 @@ vm_create(const char *name, struct vm **retvm)
vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
strcpy(vm->name, name);
vm->vmspace = vmspace;
- sx_init(&vm->mem_segs_lock, "vm mem_segs");
+ vm_mem_init(&vm->mem);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->sockets = 1;
@@ -522,11 +553,11 @@ vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
static void
vm_cleanup(struct vm *vm, bool destroy)
{
- struct mem_map *mm;
pmap_t pmap __diagused;
int i;
if (destroy) {
+ vm_xlock_memsegs(vm);
pmap = vmspace_pmap(vm->vmspace);
sched_pin();
PCPU_SET(curvmpmap, NULL);
@@ -534,7 +565,9 @@ vm_cleanup(struct vm *vm, bool destroy)
CPU_FOREACH(i) {
MPASS(cpuid_to_pcpu[i]->pc_curvmpmap != pmap);
}
- }
+ } else
+ vm_assert_memseg_xlocked(vm);
+
vgic_detach_from_vm(vm->cookie);
@@ -545,25 +578,9 @@ vm_cleanup(struct vm *vm, bool destroy)
vmmops_cleanup(vm->cookie);
- /*
- * System memory is removed from the guest address space only when
- * the VM is destroyed. This is because the mapping remains the same
- * across VM reset.
- *
- * Device memory can be relocated by the guest (e.g. using PCI BARs)
- * so those mappings are removed on a VM reset.
- */
- if (!destroy) {
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- mm = &vm->mem_maps[i];
- if (destroy || !sysmem_mapping(vm, mm))
- vm_free_memmap(vm, i);
- }
- }
-
+ vm_mem_cleanup(vm);
if (destroy) {
- for (i = 0; i < VM_MAX_MEMSEGS; i++)
- vm_free_memseg(vm, i);
+ vm_mem_destroy(vm);
vmmops_vmspace_free(vm->vmspace);
vm->vmspace = NULL;
@@ -572,7 +589,6 @@ vm_cleanup(struct vm *vm, bool destroy)
free(vm->vcpu[i], M_VMM);
free(vm->vcpu, M_VMM);
sx_destroy(&vm->vcpus_init_lock);
- sx_destroy(&vm->mem_segs_lock);
}
}
@@ -608,290 +624,11 @@ vm_name(struct vm *vm)
return (vm->name);
}
-void
-vm_slock_memsegs(struct vm *vm)
-{
- sx_slock(&vm->mem_segs_lock);
-}
-
-void
-vm_xlock_memsegs(struct vm *vm)
-{
- sx_xlock(&vm->mem_segs_lock);
-}
-
-void
-vm_unlock_memsegs(struct vm *vm)
-{
- sx_unlock(&vm->mem_segs_lock);
-}
-
-/*
- * Return 'true' if 'gpa' is allocated in the guest address space.
- *
- * This function is called in the context of a running vcpu which acts as
- * an implicit lock on 'vm->mem_maps[]'.
- */
-bool
-vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
-{
- struct vm *vm = vcpu->vm;
- struct mem_map *mm;
- int i;
-
-#ifdef INVARIANTS
- int hostcpu, state;
- state = vcpu_get_state(vcpu, &hostcpu);
- KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
- ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
-#endif
-
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- mm = &vm->mem_maps[i];
- if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
- return (true); /* 'gpa' is sysmem or devmem */
- }
-
- return (false);
-}
-
-int
-vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
-{
- struct mem_seg *seg;
- vm_object_t obj;
-
- sx_assert(&vm->mem_segs_lock, SX_XLOCKED);
-
- if (ident < 0 || ident >= VM_MAX_MEMSEGS)
- return (EINVAL);
-
- if (len == 0 || (len & PAGE_MASK))
- return (EINVAL);
-
- seg = &vm->mem_segs[ident];
- if (seg->object != NULL) {
- if (seg->len == len && seg->sysmem == sysmem)
- return (EEXIST);
- else
- return (EINVAL);
- }
-
- obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
- if (obj == NULL)
- return (ENOMEM);
-
- seg->len = len;
- seg->object = obj;
- seg->sysmem = sysmem;
- return (0);
-}
-
-int
-vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
- vm_object_t *objptr)
-{
- struct mem_seg *seg;
-
- sx_assert(&vm->mem_segs_lock, SX_LOCKED);
-
- if (ident < 0 || ident >= VM_MAX_MEMSEGS)
- return (EINVAL);
-
- seg = &vm->mem_segs[ident];
- if (len)
- *len = seg->len;
- if (sysmem)
- *sysmem = seg->sysmem;
- if (objptr)
- *objptr = seg->object;
- return (0);
-}
-
-void
-vm_free_memseg(struct vm *vm, int ident)
-{
- struct mem_seg *seg;
-
- KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
- ("%s: invalid memseg ident %d", __func__, ident));
-
- seg = &vm->mem_segs[ident];
- if (seg->object != NULL) {
- vm_object_deallocate(seg->object);
- bzero(seg, sizeof(struct mem_seg));
- }
-}
-
-int
-vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
- size_t len, int prot, int flags)
-{
- struct mem_seg *seg;
- struct mem_map *m, *map;
- vm_ooffset_t last;
- int i, error;
-
- if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
- return (EINVAL);
-
- if (flags & ~VM_MEMMAP_F_WIRED)
- return (EINVAL);
-
- if (segid < 0 || segid >= VM_MAX_MEMSEGS)
- return (EINVAL);
-
- seg = &vm->mem_segs[segid];
- if (seg->object == NULL)
- return (EINVAL);
-
- last = first + len;
- if (first < 0 || first >= last || last > seg->len)
- return (EINVAL);
-
- if ((gpa | first | last) & PAGE_MASK)
- return (EINVAL);
-
- map = NULL;
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- m = &vm->mem_maps[i];
- if (m->len == 0) {
- map = m;
- break;
- }
- }
-
- if (map == NULL)
- return (ENOSPC);
-
- error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
- len, 0, VMFS_NO_SPACE, prot, prot, 0);
- if (error != KERN_SUCCESS)
- return (EFAULT);
-
- vm_object_reference(seg->object);
-
- if (flags & VM_MEMMAP_F_WIRED) {
- error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
- VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
- if (error != KERN_SUCCESS) {
- vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
- return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
- EFAULT);
- }
- }
-
- map->gpa = gpa;
- map->len = len;
- map->segoff = first;
- map->segid = segid;
- map->prot = prot;
- map->flags = flags;
- return (0);
-}
-
-int
-vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
-{
- struct mem_map *m;
- int i;
-
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- m = &vm->mem_maps[i];
- if (m->gpa == gpa && m->len == len) {
- vm_free_memmap(vm, i);
- return (0);
- }
- }
-
- return (EINVAL);
-}
-
-int
-vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
- vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
-{
- struct mem_map *mm, *mmnext;
- int i;
-
- mmnext = NULL;
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- mm = &vm->mem_maps[i];
- if (mm->len == 0 || mm->gpa < *gpa)
- continue;
- if (mmnext == NULL || mm->gpa < mmnext->gpa)
- mmnext = mm;
- }
-
- if (mmnext != NULL) {
- *gpa = mmnext->gpa;
- if (segid)
- *segid = mmnext->segid;
- if (segoff)
- *segoff = mmnext->segoff;
- if (len)
- *len = mmnext->len;
- if (prot)
- *prot = mmnext->prot;
- if (flags)
- *flags = mmnext->flags;
- return (0);
- } else {
- return (ENOENT);
- }
-}
-
-static void
-vm_free_memmap(struct vm *vm, int ident)
-{
- struct mem_map *mm;
- int error __diagused;
-
- mm = &vm->mem_maps[ident];
- if (mm->len) {
- error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
- mm->gpa + mm->len);
- KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
- __func__, error));
- bzero(mm, sizeof(struct mem_map));
- }
-}
-
-static __inline bool
-sysmem_mapping(struct vm *vm, struct mem_map *mm)
-{
-
- if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
- return (true);
- else
- return (false);
-}
-
-vm_paddr_t
-vmm_sysmem_maxaddr(struct vm *vm)
-{
- struct mem_map *mm;
- vm_paddr_t maxaddr;
- int i;
-
- maxaddr = 0;
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- mm = &vm->mem_maps[i];
- if (sysmem_mapping(vm, mm)) {
- if (maxaddr < mm->gpa + mm->len)
- maxaddr = mm->gpa + mm->len;
- }
- }
- return (maxaddr);
-}
-
int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *is_fault)
{
-
- vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault);
- return (0);
+ return (vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault));
}
static int
@@ -1319,6 +1056,18 @@ vcpu_notify_event(struct vcpu *vcpu)
vcpu_unlock(vcpu);
}
+struct vmspace *
+vm_vmspace(struct vm *vm)
+{
+ return (vm->vmspace);
+}
+
+struct vm_mem *
+vm_mem(struct vm *vm)
+{
+ return (&vm->mem);
+}
+
static void
restore_guest_fpustate(struct vcpu *vcpu)
{
@@ -1506,70 +1255,6 @@ vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
return (state);
}
-static void *
-_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
- void **cookie)
-{
- int i, count, pageoff;
- struct mem_map *mm;
- vm_page_t m;
-
- pageoff = gpa & PAGE_MASK;
- if (len > PAGE_SIZE - pageoff)
- panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
-
- count = 0;
- for (i = 0; i < VM_MAX_MEMMAPS; i++) {
- mm = &vm->mem_maps[i];
- if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
- gpa < mm->gpa + mm->len) {
- count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
- trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
- break;
- }
- }
-
- if (count == 1) {
- *cookie = m;
- return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
- } else {
- *cookie = NULL;
- return (NULL);
- }
-}
-
-void *
-vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
- void **cookie)
-{
-#ifdef INVARIANTS
- /*
- * The current vcpu should be frozen to ensure 'vm_memmap[]'
- * stability.
- */
- int state = vcpu_get_state(vcpu, NULL);
- KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
- __func__, state));
-#endif
- return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
-}
-
-void *
-vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
- void **cookie)
-{
- sx_assert(&vm->mem_segs_lock, SX_LOCKED);
- return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
-}
-
-void
-vm_gpa_release(void *cookie)
-{
- vm_page_t m = cookie;
-
- vm_page_unwire(m, PQ_ACTIVE);
-}
-
int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index e71761f9ccef..de2425aae0a1 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -58,6 +58,8 @@
#include <machine/hypervisor.h>
#include <machine/pmap.h>
+#include <dev/vmm/vmm_mem.h>
+
#include "mmu.h"
#include "arm64.h"
#include "hyp.h"
@@ -65,6 +67,7 @@
#include "io/vgic.h"
#include "io/vgic_v3.h"
#include "io/vtimer.h"
+#include "vmm_handlers.h"
#include "vmm_stat.h"
#define HANDLED 1
@@ -101,9 +104,6 @@ static vm_offset_t stack_hyp_va[MAXCPU];
static vmem_t *el2_mem_alloc;
static void arm_setup_vectors(void *arg);
-static void vmm_pmap_clean_stage2_tlbi(void);
-static void vmm_pmap_invalidate_range(uint64_t, vm_offset_t, vm_offset_t, bool);
-static void vmm_pmap_invalidate_all(uint64_t);
DPCPU_DEFINE_STATIC(struct hypctx *, vcpu);
@@ -130,20 +130,6 @@ arm_setup_vectors(void *arg)
el2_regs = arg;
arm64_set_active_vcpu(NULL);
- daif = intr_disable();
-
- /*
- * Install the temporary vectors which will be responsible for
- * initializing the VMM when we next trap into EL2.
- *
- * x0: the exception vector table responsible for hypervisor
- * initialization on the next call.
- */
- vmm_call_hyp(vtophys(&vmm_hyp_code));
-
- /* Create and map the hypervisor stack */
- stack_top = stack_hyp_va[PCPU_GET(cpuid)] + VMM_STACK_SIZE;
-
/*
* Configure the system control register for EL2:
*
@@ -161,9 +147,27 @@ arm_setup_vectors(void *arg)
sctlr_el2 |= SCTLR_EL2_WXN;
sctlr_el2 &= ~SCTLR_EL2_EE;
- /* Special call to initialize EL2 */
- vmm_call_hyp(vmmpmap_to_ttbr0(), stack_top, el2_regs->tcr_el2,
- sctlr_el2, el2_regs->vtcr_el2);
+ daif = intr_disable();
+
+ if (in_vhe()) {
+ WRITE_SPECIALREG(vtcr_el2, el2_regs->vtcr_el2);
+ } else {
+ /*
+ * Install the temporary vectors which will be responsible for
+ * initializing the VMM when we next trap into EL2.
+ *
+ * x0: the exception vector table responsible for hypervisor
+ * initialization on the next call.
+ */
+ vmm_call_hyp(vtophys(&vmm_hyp_code));
+
+ /* Create and map the hypervisor stack */
+ stack_top = stack_hyp_va[PCPU_GET(cpuid)] + VMM_STACK_SIZE;
+
+ /* Special call to initialize EL2 */
+ vmm_call_hyp(vmmpmap_to_ttbr0(), stack_top, el2_regs->tcr_el2,
+ sctlr_el2, el2_regs->vtcr_el2);
+ }
intr_restore(daif);
}
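
On VHE hardware the kernel already runs at EL2, so arm_setup_vectors() can program vtcr_el2 directly; the nVHE path still round-trips through vmm_call_hyp(), which is a bare hvc #0. A sketch of a one-argument HVC trampoline of that shape (the real convention passes several arguments with a command in x0):

    #include <stdint.h>

    static inline uint64_t
    hyp_call1(uint64_t cmd)
    {
            register uint64_t x0 __asm__("x0") = cmd;

            /* Trap to EL2; the hypervisor returns its result in x0. */
            __asm__ __volatile__("hvc #0" : "+r"(x0) : : "memory");
            return (x0);
    }
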
@@ -235,22 +239,15 @@ vmmops_modinit(int ipinum)
vm_paddr_t vmm_base;
uint64_t id_aa64mmfr0_el1, pa_range_bits, pa_range_field;
uint64_t cnthctl_el2;
- register_t daif;
int cpu, i;
bool rv __diagused;
- if (!virt_enabled()) {
+ if (!has_hyp()) {
printf(
"vmm: Processor doesn't have support for virtualization\n");
return (ENXIO);
}
- /* TODO: Support VHE */
- if (in_vhe()) {
- printf("vmm: VHE is unsupported\n");
- return (ENXIO);
- }
-
if (!vgic_present()) {
printf("vmm: No vgic found\n");
return (ENODEV);
@@ -283,67 +280,72 @@ vmmops_modinit(int ipinum)
}
pa_range_bits = pa_range_field >> ID_AA64MMFR0_PARange_SHIFT;
- /* Initialise the EL2 MMU */
- if (!vmmpmap_init()) {
- printf("vmm: Failed to init the EL2 MMU\n");
- return (ENOMEM);
+ if (!in_vhe()) {
+ /* Initialise the EL2 MMU */
+ if (!vmmpmap_init()) {
+ printf("vmm: Failed to init the EL2 MMU\n");
+ return (ENOMEM);
+ }
}
/* Set up the stage 2 pmap callbacks */
MPASS(pmap_clean_stage2_tlbi == NULL);
- pmap_clean_stage2_tlbi = vmm_pmap_clean_stage2_tlbi;
- pmap_stage2_invalidate_range = vmm_pmap_invalidate_range;
- pmap_stage2_invalidate_all = vmm_pmap_invalidate_all;
-
- /*
- * Create an allocator for the virtual address space used by EL2.
- * EL2 code is identity-mapped; the allocator is used to find space for
- * VM structures.
- */
- el2_mem_alloc = vmem_create("VMM EL2", 0, 0, PAGE_SIZE, 0, M_WAITOK);
-
- /* Create the mappings for the hypervisor translation table. */
- hyp_code_len = round_page(&vmm_hyp_code_end - &vmm_hyp_code);
-
- /* We need an physical identity mapping for when we activate the MMU */
- hyp_code_base = vmm_base = vtophys(&vmm_hyp_code);
- rv = vmmpmap_enter(vmm_base, hyp_code_len, vmm_base,
- VM_PROT_READ | VM_PROT_EXECUTE);
- MPASS(rv);
+ pmap_clean_stage2_tlbi = vmm_clean_s2_tlbi;
+ pmap_stage2_invalidate_range = vmm_s2_tlbi_range;
+ pmap_stage2_invalidate_all = vmm_s2_tlbi_all;
- next_hyp_va = roundup2(vmm_base + hyp_code_len, L2_SIZE);
-
- /* Create a per-CPU hypervisor stack */
- CPU_FOREACH(cpu) {
- stack[cpu] = malloc(VMM_STACK_SIZE, M_HYP, M_WAITOK | M_ZERO);
- stack_hyp_va[cpu] = next_hyp_va;
-
- for (i = 0; i < VMM_STACK_PAGES; i++) {
- rv = vmmpmap_enter(stack_hyp_va[cpu] + ptoa(i),
- PAGE_SIZE, vtophys(stack[cpu] + ptoa(i)),
- VM_PROT_READ | VM_PROT_WRITE);
- MPASS(rv);
+ if (!in_vhe()) {
+ /*
+ * Create an allocator for the virtual address space used by
+ * EL2. EL2 code is identity-mapped; the allocator is used to
+ * find space for VM structures.
+ */
+ el2_mem_alloc = vmem_create("VMM EL2", 0, 0, PAGE_SIZE, 0,
+ M_WAITOK);
+
+ /* Create the mappings for the hypervisor translation table. */
+ hyp_code_len = round_page(&vmm_hyp_code_end - &vmm_hyp_code);
+
+ /* We need a physical identity mapping for when we activate the MMU */
+ hyp_code_base = vmm_base = vtophys(&vmm_hyp_code);
+ rv = vmmpmap_enter(vmm_base, hyp_code_len, vmm_base,
+ VM_PROT_READ | VM_PROT_EXECUTE);
+ MPASS(rv);
+
+ next_hyp_va = roundup2(vmm_base + hyp_code_len, L2_SIZE);
+
+ /* Create a per-CPU hypervisor stack */
+ CPU_FOREACH(cpu) {
+ stack[cpu] = malloc(VMM_STACK_SIZE, M_HYP, M_WAITOK | M_ZERO);
+ stack_hyp_va[cpu] = next_hyp_va;
+
+ for (i = 0; i < VMM_STACK_PAGES; i++) {
+ rv = vmmpmap_enter(stack_hyp_va[cpu] + ptoa(i),
+ PAGE_SIZE, vtophys(stack[cpu] + ptoa(i)),
+ VM_PROT_READ | VM_PROT_WRITE);
+ MPASS(rv);
+ }
+ next_hyp_va += L2_SIZE;
}
- next_hyp_va += L2_SIZE;
- }
- el2_regs.tcr_el2 = TCR_EL2_RES1;
- el2_regs.tcr_el2 |= min(pa_range_bits << TCR_EL2_PS_SHIFT,
- TCR_EL2_PS_52BITS);
- el2_regs.tcr_el2 |= TCR_EL2_T0SZ(64 - EL2_VIRT_BITS);
- el2_regs.tcr_el2 |= TCR_EL2_IRGN0_WBWA | TCR_EL2_ORGN0_WBWA;
+ el2_regs.tcr_el2 = TCR_EL2_RES1;
+ el2_regs.tcr_el2 |= min(pa_range_bits << TCR_EL2_PS_SHIFT,
+ TCR_EL2_PS_52BITS);
+ el2_regs.tcr_el2 |= TCR_EL2_T0SZ(64 - EL2_VIRT_BITS);
+ el2_regs.tcr_el2 |= TCR_EL2_IRGN0_WBWA | TCR_EL2_ORGN0_WBWA;
#if PAGE_SIZE == PAGE_SIZE_4K
- el2_regs.tcr_el2 |= TCR_EL2_TG0_4K;
+ el2_regs.tcr_el2 |= TCR_EL2_TG0_4K;
#elif PAGE_SIZE == PAGE_SIZE_16K
- el2_regs.tcr_el2 |= TCR_EL2_TG0_16K;
+ el2_regs.tcr_el2 |= TCR_EL2_TG0_16K;
#else
#error Unsupported page size
#endif
#ifdef SMP
- el2_regs.tcr_el2 |= TCR_EL2_SH0_IS;
+ el2_regs.tcr_el2 |= TCR_EL2_SH0_IS;
#endif
+ }
- switch (el2_regs.tcr_el2 & TCR_EL2_PS_MASK) {
+ switch (pa_range_bits << TCR_EL2_PS_SHIFT) {
case TCR_EL2_PS_32BITS:
vmm_max_ipa_bits = 32;
break;
@@ -381,8 +383,6 @@ vmmops_modinit(int ipinum)
* shareable
*/
el2_regs.vtcr_el2 = VTCR_EL2_RES1;
- el2_regs.vtcr_el2 |=
- min(pa_range_bits << VTCR_EL2_PS_SHIFT, VTCR_EL2_PS_48BIT);
el2_regs.vtcr_el2 |= VTCR_EL2_IRGN0_WBWA | VTCR_EL2_ORGN0_WBWA;
el2_regs.vtcr_el2 |= VTCR_EL2_T0SZ(64 - vmm_virt_bits);
el2_regs.vtcr_el2 |= vmm_vtcr_el2_sl(vmm_pmap_levels);
@@ -396,42 +396,55 @@ vmmops_modinit(int ipinum)
#ifdef SMP
el2_regs.vtcr_el2 |= VTCR_EL2_SH0_IS;
#endif
+ /*
+ * If FEAT_LPA2 is enabled in the host then we need to enable it here
+ * so the page tables created by pmap.c are correct. When it is set,
+ * the shareability field is repurposed to carry address bits.
+ */
+ if ((READ_SPECIALREG(tcr_el1) & TCR_DS) != 0) {
+ el2_regs.vtcr_el2 |= VTCR_EL2_DS;
+ el2_regs.vtcr_el2 |=
+ min(pa_range_bits << VTCR_EL2_PS_SHIFT, VTCR_EL2_PS_52BIT);
+ } else {
+ el2_regs.vtcr_el2 |=
+ min(pa_range_bits << VTCR_EL2_PS_SHIFT, VTCR_EL2_PS_48BIT);
+ }
smp_rendezvous(NULL, arm_setup_vectors, NULL, &el2_regs);
- /* Add memory to the vmem allocator (checking there is space) */
- if (vmm_base > (L2_SIZE + PAGE_SIZE)) {
- /*
- * Ensure there is an L2 block before the vmm code to check
- * for buffer overflows on earlier data. Include the PAGE_SIZE
- * of the minimum we can allocate.
- */
- vmm_base -= L2_SIZE + PAGE_SIZE;
- vmm_base = rounddown2(vmm_base, L2_SIZE);
+ if (!in_vhe()) {
+ /* Add memory to the vmem allocator (checking there is space) */
+ if (vmm_base > (L2_SIZE + PAGE_SIZE)) {
+ /*
+ * Ensure there is an L2 block before the vmm code to check
+ * for buffer overflows on earlier data. Include the PAGE_SIZE
+ * of the minimum we can allocate.
+ */
+ vmm_base -= L2_SIZE + PAGE_SIZE;
+ vmm_base = rounddown2(vmm_base, L2_SIZE);
+
+ /*
+ * Check there is memory before the vmm code to add.
+ *
+ * Reserve the L2 block at address 0 so NULL dereference will
+ * raise an exception.
+ */
+ if (vmm_base > L2_SIZE)
+ vmem_add(el2_mem_alloc, L2_SIZE, vmm_base - L2_SIZE,
+ M_WAITOK);
+ }
/*
- * Check there is memory before the vmm code to add.
- *
- * Reserve the L2 block at address 0 so NULL dereference will
- * raise an exception.
+ * Add the memory after the stacks. There is most of an L2 block
+ * between the last stack and the first allocation so this should
+ * be safe without adding more padding.
*/
- if (vmm_base > L2_SIZE)
- vmem_add(el2_mem_alloc, L2_SIZE, vmm_base - L2_SIZE,
- M_WAITOK);
+ if (next_hyp_va < HYP_VM_MAX_ADDRESS - PAGE_SIZE)
+ vmem_add(el2_mem_alloc, next_hyp_va,
+ HYP_VM_MAX_ADDRESS - next_hyp_va, M_WAITOK);
}
-
- /*
- * Add the memory after the stacks. There is most of an L2 block
- * between the last stack and the first allocation so this should
- * be safe without adding more padding.
- */
- if (next_hyp_va < HYP_VM_MAX_ADDRESS - PAGE_SIZE)
- vmem_add(el2_mem_alloc, next_hyp_va,
- HYP_VM_MAX_ADDRESS - next_hyp_va, M_WAITOK);
-
- daif = intr_disable();
- cnthctl_el2 = vmm_call_hyp(HYP_READ_REGISTER, HYP_REG_CNTHCTL);
- intr_restore(daif);
+ cnthctl_el2 = vmm_read_reg(HYP_REG_CNTHCTL);
vgic_init();
vtimer_init(cnthctl_el2);
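
In the hunk above, setting VTCR_EL2.DS to match the host's TCR_DS keeps the stage-2 page-table format in step with what pmap.c generates, and the min() clamp caps the CPU's reported PARange at the largest PS encoding the configuration supports (the 52-bit encoding with LPA2, 48-bit otherwise). As a stand-alone sketch, with ps_shift and ps_cap standing in for the VTCR_EL2 field definitions:

    #include <stdint.h>

    static inline uint64_t
    vtcr_ps(uint64_t pa_range_bits, unsigned ps_shift, uint64_t ps_cap)
    {
            uint64_t ps = pa_range_bits << ps_shift;

            /* Never encode more PA bits than this configuration allows. */
            return (ps < ps_cap ? ps : ps_cap);
    }
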
@@ -444,21 +457,25 @@ vmmops_modcleanup(void)
{
int cpu;
- smp_rendezvous(NULL, arm_teardown_vectors, NULL, NULL);
+ if (!in_vhe()) {
+ smp_rendezvous(NULL, arm_teardown_vectors, NULL, NULL);
- CPU_FOREACH(cpu) {
- vmmpmap_remove(stack_hyp_va[cpu], VMM_STACK_PAGES * PAGE_SIZE,
- false);
- }
+ CPU_FOREACH(cpu) {
+ vmmpmap_remove(stack_hyp_va[cpu],
+ VMM_STACK_PAGES * PAGE_SIZE, false);
+ }
- vmmpmap_remove(hyp_code_base, hyp_code_len, false);
+ vmmpmap_remove(hyp_code_base, hyp_code_len, false);
+ }
vtimer_cleanup();
- vmmpmap_fini();
+ if (!in_vhe()) {
+ vmmpmap_fini();
- CPU_FOREACH(cpu)
- free(stack[cpu], M_HYP);
+ CPU_FOREACH(cpu)
+ free(stack[cpu], M_HYP);
+ }
pmap_clean_stage2_tlbi = NULL;
pmap_stage2_invalidate_range = NULL;
@@ -510,8 +527,9 @@ vmmops_init(struct vm *vm, pmap_t pmap)
vtimer_vminit(hyp);
vgic_vminit(hyp);
- hyp->el2_addr = el2_map_enter((vm_offset_t)hyp, size,
- VM_PROT_READ | VM_PROT_WRITE);
+ if (!in_vhe())
+ hyp->el2_addr = el2_map_enter((vm_offset_t)hyp, size,
+ VM_PROT_READ | VM_PROT_WRITE);
return (hyp);
}
@@ -539,8 +557,9 @@ vmmops_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
vtimer_cpuinit(hypctx);
vgic_cpuinit(hypctx);
- hypctx->el2_addr = el2_map_enter((vm_offset_t)hypctx, size,
- VM_PROT_READ | VM_PROT_WRITE);
+ if (!in_vhe())
+ hypctx->el2_addr = el2_map_enter((vm_offset_t)hypctx, size,
+ VM_PROT_READ | VM_PROT_WRITE);
return (hypctx);
}
@@ -567,26 +586,6 @@ vmmops_vmspace_free(struct vmspace *vmspace)
vmspace_free(vmspace);
}
-static void
-vmm_pmap_clean_stage2_tlbi(void)
-{
- vmm_call_hyp(HYP_CLEAN_S2_TLBI);
-}
-
-static void
-vmm_pmap_invalidate_range(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
- bool final_only)
-{
- MPASS(eva > sva);
- vmm_call_hyp(HYP_S2_TLBI_RANGE, vttbr, sva, eva, final_only);
-}
-
-static void
-vmm_pmap_invalidate_all(uint64_t vttbr)
-{
- vmm_call_hyp(HYP_S2_TLBI_ALL, vttbr);
-}
-
static inline void
arm64_print_hyp_regs(struct vm_exit *vme)
{
@@ -700,7 +699,14 @@ handle_el1_sync_excp(struct hypctx *hypctx, struct vm_exit *vme_ret,
arm64_gen_reg_emul_data(esr_iss, vme_ret);
vme_ret->exitcode = VM_EXITCODE_REG_EMUL;
break;
-
+ case EXCP_BRK:
+ vmm_stat_incr(hypctx->vcpu, VMEXIT_BRK, 1);
+ vme_ret->exitcode = VM_EXITCODE_BRK;
+ break;
+ case EXCP_SOFTSTP_EL0:
+ vmm_stat_incr(hypctx->vcpu, VMEXIT_SS, 1);
+ vme_ret->exitcode = VM_EXITCODE_SS;
+ break;
case EXCP_INSN_ABORT_L:
case EXCP_DATA_ABORT_L:
vmm_stat_incr(hypctx->vcpu, esr_ec == EXCP_DATA_ABORT_L ?
@@ -1101,7 +1107,7 @@ vmmops_run(void *vcpui, register_t pc, pmap_t pmap, struct vm_eventinfo *evinfo)
* Update fields that may change on exception entry
* based on how sctlr_el1 is configured.
*/
- if ((hypctx->sctlr_el1 & SCTLR_SPAN) != 0)
+ if ((hypctx->sctlr_el1 & SCTLR_SPAN) == 0)
hypctx->tf.tf_spsr |= PSR_PAN;
if ((hypctx->sctlr_el1 & SCTLR_DSSBS) == 0)
hypctx->tf.tf_spsr &= ~PSR_SSBS;
@@ -1136,16 +1142,13 @@ vmmops_run(void *vcpui, register_t pc, pmap_t pmap, struct vm_eventinfo *evinfo)
vgic_flush_hwstate(hypctx);
/* Call into EL2 to switch to the guest */
- excp_type = vmm_call_hyp(HYP_ENTER_GUEST,
- hyp->el2_addr, hypctx->el2_addr);
+ excp_type = vmm_enter_guest(hyp, hypctx);
vgic_sync_hwstate(hypctx);
vtimer_sync_hwstate(hypctx);
/*
- * Deactivate the stage2 pmap. vmm_pmap_clean_stage2_tlbi
- * depends on this meaning we activate the VM before entering
- * the vm again
+ * Deactivate the stage2 pmap.
*/
PCPU_SET(curvmpmap, NULL);
intr_restore(daif);
@@ -1198,7 +1201,8 @@ vmmops_vcpu_cleanup(void *vcpui)
vtimer_cpucleanup(hypctx);
vgic_cpucleanup(hypctx);
- vmmpmap_remove(hypctx->el2_addr, el2_hypctx_size(), true);
+ if (!in_vhe())
+ vmmpmap_remove(hypctx->el2_addr, el2_hypctx_size(), true);
free(hypctx, M_HYP);
}
@@ -1213,7 +1217,8 @@ vmmops_cleanup(void *vmi)
smp_rendezvous(NULL, arm_pcpu_vmcleanup, NULL, hyp);
- vmmpmap_remove(hyp->el2_addr, el2_hyp_size(hyp->vm), true);
+ if (!in_vhe())
+ vmmpmap_remove(hyp->el2_addr, el2_hyp_size(hyp->vm), true);
free(hyp, M_HYP);
}
@@ -1313,6 +1318,7 @@ vmmops_exception(void *vcpui, uint64_t esr, uint64_t far)
int
vmmops_getcap(void *vcpui, int num, int *retval)
{
+ struct hypctx *hypctx = vcpui;
int ret;
ret = ENOENT;
@@ -1322,6 +1328,11 @@ vmmops_getcap(void *vcpui, int num, int *retval)
*retval = 1;
ret = 0;
break;
+ case VM_CAP_BRK_EXIT:
+ case VM_CAP_SS_EXIT:
+ case VM_CAP_MASK_HWINTR:
+ *retval = (hypctx->setcaps & (1ul << num)) != 0;
+ break;
default:
break;
}
@@ -1332,6 +1343,68 @@ vmmops_getcap(void *vcpui, int num, int *retval)
int
vmmops_setcap(void *vcpui, int num, int val)
{
+ struct hypctx *hypctx = vcpui;
+ int ret;
+
+ ret = 0;
- return (ENOENT);
+ switch (num) {
+ case VM_CAP_BRK_EXIT:
+ if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0))
+ break;
+ if (val != 0)
+ hypctx->mdcr_el2 |= MDCR_EL2_TDE;
+ else
+ hypctx->mdcr_el2 &= ~MDCR_EL2_TDE;
+ break;
+ case VM_CAP_SS_EXIT:
+ if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0))
+ break;
+
+ if (val != 0) {
+ hypctx->debug_spsr |= (hypctx->tf.tf_spsr & PSR_SS);
+ hypctx->debug_mdscr |= hypctx->mdscr_el1 &
+ (MDSCR_SS | MDSCR_KDE);
+
+ hypctx->tf.tf_spsr |= PSR_SS;
+ hypctx->mdscr_el1 |= MDSCR_SS | MDSCR_KDE;
+ hypctx->mdcr_el2 |= MDCR_EL2_TDE;
+ } else {
+ hypctx->tf.tf_spsr &= ~PSR_SS;
+ hypctx->tf.tf_spsr |= hypctx->debug_spsr;
+ hypctx->debug_spsr &= ~PSR_SS;
+ hypctx->mdscr_el1 &= ~(MDSCR_SS | MDSCR_KDE);
+ hypctx->mdscr_el1 |= hypctx->debug_mdscr;
+ hypctx->debug_mdscr &= ~(MDSCR_SS | MDSCR_KDE);
+ hypctx->mdcr_el2 &= ~MDCR_EL2_TDE;
+ }
+ break;
+ case VM_CAP_MASK_HWINTR:
+ if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0))
+ break;
+
+ if (val != 0) {
+ hypctx->debug_spsr |= (hypctx->tf.tf_spsr &
+ (PSR_I | PSR_F));
+ hypctx->tf.tf_spsr |= PSR_I | PSR_F;
+ } else {
+ hypctx->tf.tf_spsr &= ~(PSR_I | PSR_F);
+ hypctx->tf.tf_spsr |= (hypctx->debug_spsr &
+ (PSR_I | PSR_F));
+ hypctx->debug_spsr &= ~(PSR_I | PSR_F);
+ }
+ break;
+ default:
+ ret = ENOENT;
+ break;
+ }
+
+ if (ret == 0) {
+ if (val == 0)
+ hypctx->setcaps &= ~(1ul << num);
+ else
+ hypctx->setcaps |= (1ul << num);
+ }
+
+ return (ret);
}
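
Both VM_CAP_SS_EXIT and VM_CAP_MASK_HWINTR above follow the same save/force/restore discipline: stash the guest's own copy of the forced bits before overriding them, then fold the saved bits back when the capability is cleared. A minimal sketch with an illustrative bit constant in place of PSR_SS:

    #include <stdint.h>

    #define FORCED  (UINT64_C(1) << 21)     /* stands in for PSR_SS */

    static void
    cap_set(uint64_t *live, uint64_t *saved, int enable)
    {
            if (enable) {
                    *saved |= (*live & FORCED);     /* remember guest bits */
                    *live |= FORCED;                /* force the behaviour */
            } else {
                    *live &= ~FORCED;
                    *live |= *saved;                /* restore guest bits */
                    *saved &= ~FORCED;
            }
    }
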
diff --git a/sys/arm64/vmm/vmm_call.S b/sys/arm64/vmm/vmm_call.S
index fc28e3f173eb..8caf0465f938 100644
--- a/sys/arm64/vmm/vmm_call.S
+++ b/sys/arm64/vmm/vmm_call.S
@@ -28,6 +28,7 @@
* SUCH DAMAGE.
*/
+#include <sys/elf_common.h>
#include <machine/asm.h>
@@ -37,3 +38,5 @@ ENTRY(vmm_call_hyp)
hvc #0
ret
END(vmm_call_hyp)
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/vmm/vmm_dev.c b/sys/arm64/vmm/vmm_dev.c
deleted file mode 100644
index 9f405384f2b3..000000000000
--- a/sys/arm64/vmm/vmm_dev.c
+++ /dev/null
@@ -1,1054 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2011 NetApp, Inc.
- * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/kernel.h>
-#include <sys/jail.h>
-#include <sys/queue.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/malloc.h>
-#include <sys/conf.h>
-#include <sys/sysctl.h>
-#include <sys/libkern.h>
-#include <sys/ioccom.h>
-#include <sys/mman.h>
-#include <sys/uio.h>
-#include <sys/proc.h>
-
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <vm/vm_map.h>
-#include <vm/vm_object.h>
-
-#include <machine/machdep.h>
-#include <machine/vmparam.h>
-#include <machine/vmm.h>
-#include <machine/vmm_dev.h>
-
-#include "vmm_stat.h"
-
-#include "io/vgic.h"
-
-struct devmem_softc {
- int segid;
- char *name;
- struct cdev *cdev;
- struct vmmdev_softc *sc;
- SLIST_ENTRY(devmem_softc) link;
-};
-
-struct vmmdev_softc {
- struct vm *vm; /* vm instance cookie */
- struct cdev *cdev;
- struct ucred *ucred;
- SLIST_ENTRY(vmmdev_softc) link;
- SLIST_HEAD(, devmem_softc) devmem;
- int flags;
-};
-#define VSC_LINKED 0x01
-
-static SLIST_HEAD(, vmmdev_softc) head;
-
-static unsigned pr_allow_flag;
-static struct mtx vmmdev_mtx;
-MTX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex", MTX_DEF);
-
-static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");
-
-SYSCTL_DECL(_hw_vmm);
-
-static int vmm_priv_check(struct ucred *ucred);
-static int devmem_create_cdev(const char *vmname, int id, char *devmem);
-static void devmem_destroy(void *arg);
-
-static int
-vmm_priv_check(struct ucred *ucred)
-{
-
- if (jailed(ucred) &&
- !(ucred->cr_prison->pr_allow & pr_allow_flag))
- return (EPERM);
-
- return (0);
-}
-
-static int
-vcpu_lock_one(struct vcpu *vcpu)
-{
- int error;
-
- error = vcpu_set_state(vcpu, VCPU_FROZEN, true);
- return (error);
-}
-
-static void
-vcpu_unlock_one(struct vcpu *vcpu)
-{
- enum vcpu_state state;
-
- state = vcpu_get_state(vcpu, NULL);
- if (state != VCPU_FROZEN) {
- panic("vcpu %s(%d) has invalid state %d",
- vm_name(vcpu_vm(vcpu)), vcpu_vcpuid(vcpu), state);
- }
-
- vcpu_set_state(vcpu, VCPU_IDLE, false);
-}
-
-static int
-vcpu_lock_all(struct vmmdev_softc *sc)
-{
- struct vcpu *vcpu;
- int error;
- uint16_t i, j, maxcpus;
-
- error = 0;
- vm_slock_vcpus(sc->vm);
- maxcpus = vm_get_maxcpus(sc->vm);
- for (i = 0; i < maxcpus; i++) {
- vcpu = vm_vcpu(sc->vm, i);
- if (vcpu == NULL)
- continue;
- error = vcpu_lock_one(vcpu);
- if (error)
- break;
- }
-
- if (error) {
- for (j = 0; j < i; j++) {
- vcpu = vm_vcpu(sc->vm, j);
- if (vcpu == NULL)
- continue;
- vcpu_unlock_one(vcpu);
- }
- vm_unlock_vcpus(sc->vm);
- }
-
- return (error);
-}
-
-static void
-vcpu_unlock_all(struct vmmdev_softc *sc)
-{
- struct vcpu *vcpu;
- uint16_t i, maxcpus;
-
- maxcpus = vm_get_maxcpus(sc->vm);
- for (i = 0; i < maxcpus; i++) {
- vcpu = vm_vcpu(sc->vm, i);
- if (vcpu == NULL)
- continue;
- vcpu_unlock_one(vcpu);
- }
- vm_unlock_vcpus(sc->vm);
-}
-
-static struct vmmdev_softc *
-vmmdev_lookup(const char *name)
-{
- struct vmmdev_softc *sc;
-
-#ifdef notyet /* XXX kernel is not compiled with invariants */
- mtx_assert(&vmmdev_mtx, MA_OWNED);
-#endif
-
- SLIST_FOREACH(sc, &head, link) {
- if (strcmp(name, vm_name(sc->vm)) == 0)
- break;
- }
-
- if (sc == NULL)
- return (NULL);
-
- if (cr_cansee(curthread->td_ucred, sc->ucred))
- return (NULL);
-
- return (sc);
-}
-
-static struct vmmdev_softc *
-vmmdev_lookup2(struct cdev *cdev)
-{
-
- return (cdev->si_drv1);
-}
-
-static int
-vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
-{
- int error, off, c, prot;
- vm_paddr_t gpa, maxaddr;
- void *hpa, *cookie;
- struct vmmdev_softc *sc;
-
- error = vmm_priv_check(curthread->td_ucred);
- if (error)
- return (error);
-
- sc = vmmdev_lookup2(cdev);
- if (sc == NULL)
- return (ENXIO);
-
- /*
- * Get a read lock on the guest memory map.
- */
- vm_slock_memsegs(sc->vm);
-
- prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
- maxaddr = vmm_sysmem_maxaddr(sc->vm);
- while (uio->uio_resid > 0 && error == 0) {
- gpa = uio->uio_offset;
- off = gpa & PAGE_MASK;
- c = min(uio->uio_resid, PAGE_SIZE - off);
-
- /*
- * The VM has a hole in its physical memory map. If we want to
- * use 'dd' to inspect memory beyond the hole we need to
- * provide bogus data for memory that lies in the hole.
- *
- * Since this device does not support lseek(2), dd(1) will
- * read(2) blocks of data to simulate the lseek(2).
- */
- hpa = vm_gpa_hold_global(sc->vm, gpa, c, prot, &cookie);
- if (hpa == NULL) {
- if (uio->uio_rw == UIO_READ && gpa < maxaddr)
- error = uiomove(__DECONST(void *, zero_region),
- c, uio);
- else
- error = EFAULT;
- } else {
- error = uiomove(hpa, c, uio);
- vm_gpa_release(cookie);
- }
- }
- vm_unlock_memsegs(sc->vm);
- return (error);
-}
-
-static int
-get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
-{
- struct devmem_softc *dsc;
- int error;
- bool sysmem;
-
- error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL);
- if (error || mseg->len == 0)
- return (error);
-
- if (!sysmem) {
- SLIST_FOREACH(dsc, &sc->devmem, link) {
- if (dsc->segid == mseg->segid)
- break;
- }
- KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
- __func__, mseg->segid));
- error = copystr(dsc->name, mseg->name, sizeof(mseg->name),
- NULL);
- } else {
- bzero(mseg->name, sizeof(mseg->name));
- }
-
- return (error);
-}
-
-static int
-alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
-{
- char *name;
- int error;
- bool sysmem;
-
- error = 0;
- name = NULL;
- sysmem = true;
-
- /*
- * The allocation is lengthened by 1 to hold a terminating NUL. It'll
- * by stripped off when devfs processes the full string.
- */
- if (VM_MEMSEG_NAME(mseg)) {
- sysmem = false;
- name = malloc(sizeof(mseg->name), M_VMMDEV, M_WAITOK);
- error = copystr(mseg->name, name, sizeof(mseg->name), NULL);
- if (error)
- goto done;
- }
-
- error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem);
- if (error)
- goto done;
-
- if (VM_MEMSEG_NAME(mseg)) {
- error = devmem_create_cdev(vm_name(sc->vm), mseg->segid, name);
- if (error)
- vm_free_memseg(sc->vm, mseg->segid);
- else
- name = NULL; /* freed when 'cdev' is destroyed */
- }
-done:
- free(name, M_VMMDEV);
- return (error);
-}
-
-static int
-vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
- uint64_t *regval)
-{
- int error, i;
-
- error = 0;
- for (i = 0; i < count; i++) {
- error = vm_get_register(vcpu, regnum[i], &regval[i]);
- if (error)
- break;
- }
- return (error);
-}
-
-static int
-vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
- uint64_t *regval)
-{
- int error, i;
-
- error = 0;
- for (i = 0; i < count; i++) {
- error = vm_set_register(vcpu, regnum[i], regval[i]);
- if (error)
- break;
- }
- return (error);
-}
-
-static int
-vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
- struct thread *td)
-{
- int error, vcpuid, size;
- cpuset_t *cpuset;
- struct vmmdev_softc *sc;
- struct vcpu *vcpu;
- struct vm_register *vmreg;
- struct vm_register_set *vmregset;
- struct vm_run *vmrun;
- struct vm_vgic_version *vgv;
- struct vm_vgic_descr *vgic;
- struct vm_cpuset *vm_cpuset;
- struct vm_irq *vi;
- struct vm_capability *vmcap;
- struct vm_stats *vmstats;
- struct vm_stat_desc *statdesc;
- struct vm_suspend *vmsuspend;
- struct vm_exception *vmexc;
- struct vm_gla2gpa *gg;
- struct vm_memmap *mm;
- struct vm_munmap *mu;
- struct vm_msi *vmsi;
- struct vm_cpu_topology *topology;
- uint64_t *regvals;
- int *regnums;
- enum { NONE, SINGLE, ALL } vcpus_locked;
- bool memsegs_locked;
-
- error = vmm_priv_check(curthread->td_ucred);
- if (error)
- return (error);
-
- sc = vmmdev_lookup2(cdev);
- if (sc == NULL)
- return (ENXIO);
-
- error = 0;
- vcpuid = -1;
- vcpu = NULL;
- vcpus_locked = NONE;
- memsegs_locked = false;
-
- /*
- * Some VMM ioctls can operate only on vcpus that are not running.
- */
- switch (cmd) {
- case VM_RUN:
- case VM_GET_REGISTER:
- case VM_SET_REGISTER:
- case VM_GET_REGISTER_SET:
- case VM_SET_REGISTER_SET:
- case VM_INJECT_EXCEPTION:
- case VM_GET_CAPABILITY:
- case VM_SET_CAPABILITY:
- case VM_GLA2GPA_NOFAULT:
- case VM_ACTIVATE_CPU:
- /*
- * ioctls that can operate only on vcpus that are not running.
- */
- vcpuid = *(int *)data;
- vcpu = vm_alloc_vcpu(sc->vm, vcpuid);
- if (vcpu == NULL) {
- error = EINVAL;
- goto done;
- }
- error = vcpu_lock_one(vcpu);
- if (error)
- goto done;
- vcpus_locked = SINGLE;
- break;
-
- case VM_ALLOC_MEMSEG:
- case VM_MMAP_MEMSEG:
- case VM_MUNMAP_MEMSEG:
- case VM_REINIT:
- case VM_ATTACH_VGIC:
- /*
- * ioctls that modify the memory map must lock memory
- * segments exclusively.
- */
- vm_xlock_memsegs(sc->vm);
- memsegs_locked = true;
-
- /*
- * ioctls that operate on the entire virtual machine must
- * prevent all vcpus from running.
- */
- error = vcpu_lock_all(sc);
- if (error)
- goto done;
- vcpus_locked = ALL;
- break;
- case VM_GET_MEMSEG:
- case VM_MMAP_GETNEXT:
- /*
- * Lock the memory map while it is being inspected.
- */
- vm_slock_memsegs(sc->vm);
- memsegs_locked = true;
- break;
-
- case VM_STATS:
- /*
- * These do not need the vCPU locked but do operate on
- * a specific vCPU.
- */
- vcpuid = *(int *)data;
- vcpu = vm_alloc_vcpu(sc->vm, vcpuid);
- if (vcpu == NULL) {
- error = EINVAL;
- goto done;
- }
- break;
-
- case VM_SUSPEND_CPU:
- case VM_RESUME_CPU:
- /*
- * These can either operate on all CPUs via a vcpuid of
- * -1 or on a specific vCPU.
- */
- vcpuid = *(int *)data;
- if (vcpuid == -1)
- break;
- vcpu = vm_alloc_vcpu(sc->vm, vcpuid);
- if (vcpu == NULL) {
- error = EINVAL;
- goto done;
- }
- break;
-
- case VM_ASSERT_IRQ:
- vi = (struct vm_irq *)data;
- error = vm_assert_irq(sc->vm, vi->irq);
- break;
- case VM_DEASSERT_IRQ:
- vi = (struct vm_irq *)data;
- error = vm_deassert_irq(sc->vm, vi->irq);
- break;
- default:
- break;
- }
-
- switch (cmd) {
- case VM_RUN: {
- struct vm_exit *vme;
-
- vmrun = (struct vm_run *)data;
- vme = vm_exitinfo(vcpu);
-
- error = vm_run(vcpu);
- if (error != 0)
- break;
-
- error = copyout(vme, vmrun->vm_exit, sizeof(*vme));
- if (error != 0)
- break;
- break;
- }
- case VM_SUSPEND:
- vmsuspend = (struct vm_suspend *)data;
- error = vm_suspend(sc->vm, vmsuspend->how);
- break;
- case VM_REINIT:
- error = vm_reinit(sc->vm);
- break;
- case VM_STAT_DESC: {
- statdesc = (struct vm_stat_desc *)data;
- error = vmm_stat_desc_copy(statdesc->index,
- statdesc->desc, sizeof(statdesc->desc));
- break;
- }
- case VM_STATS: {
- CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_ELEMS);
- vmstats = (struct vm_stats *)data;
- getmicrotime(&vmstats->tv);
- error = vmm_stat_copy(vcpu, vmstats->index,
- nitems(vmstats->statbuf),
- &vmstats->num_entries, vmstats->statbuf);
- break;
- }
- case VM_MMAP_GETNEXT:
- mm = (struct vm_memmap *)data;
- error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid,
- &mm->segoff, &mm->len, &mm->prot, &mm->flags);
- break;
- case VM_MMAP_MEMSEG:
- mm = (struct vm_memmap *)data;
- error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
- mm->len, mm->prot, mm->flags);
- break;
- case VM_MUNMAP_MEMSEG:
- mu = (struct vm_munmap *)data;
- error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len);
- break;
- case VM_ALLOC_MEMSEG:
- error = alloc_memseg(sc, (struct vm_memseg *)data);
- break;
- case VM_GET_MEMSEG:
- error = get_memseg(sc, (struct vm_memseg *)data);
- break;
- case VM_GET_REGISTER:
- vmreg = (struct vm_register *)data;
- error = vm_get_register(vcpu, vmreg->regnum, &vmreg->regval);
- break;
- case VM_SET_REGISTER:
- vmreg = (struct vm_register *)data;
- error = vm_set_register(vcpu, vmreg->regnum, vmreg->regval);
- break;
- case VM_GET_REGISTER_SET:
- vmregset = (struct vm_register_set *)data;
- if (vmregset->count > VM_REG_LAST) {
- error = EINVAL;
- break;
- }
- regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
- M_WAITOK);
- regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
- M_WAITOK);
- error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
- vmregset->count);
- if (error == 0)
- error = vm_get_register_set(vcpu, vmregset->count,
- regnums, regvals);
- if (error == 0)
- error = copyout(regvals, vmregset->regvals,
- sizeof(regvals[0]) * vmregset->count);
- free(regvals, M_VMMDEV);
- free(regnums, M_VMMDEV);
- break;
- case VM_SET_REGISTER_SET:
- vmregset = (struct vm_register_set *)data;
- if (vmregset->count > VM_REG_LAST) {
- error = EINVAL;
- break;
- }
- regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
- M_WAITOK);
- regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
- M_WAITOK);
- error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
- vmregset->count);
- if (error == 0)
- error = copyin(vmregset->regvals, regvals,
- sizeof(regvals[0]) * vmregset->count);
- if (error == 0)
- error = vm_set_register_set(vcpu, vmregset->count,
- regnums, regvals);
- free(regvals, M_VMMDEV);
- free(regnums, M_VMMDEV);
- break;
- case VM_GET_CAPABILITY:
- vmcap = (struct vm_capability *)data;
- error = vm_get_capability(vcpu,
- vmcap->captype,
- &vmcap->capval);
- break;
- case VM_SET_CAPABILITY:
- vmcap = (struct vm_capability *)data;
- error = vm_set_capability(vcpu,
- vmcap->captype,
- vmcap->capval);
- break;
- case VM_INJECT_EXCEPTION:
- vmexc = (struct vm_exception *)data;
- error = vm_inject_exception(vcpu, vmexc->esr, vmexc->far);
- break;
- case VM_GLA2GPA_NOFAULT:
- gg = (struct vm_gla2gpa *)data;
- error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
- gg->prot, &gg->gpa, &gg->fault);
- KASSERT(error == 0 || error == EFAULT,
- ("%s: vm_gla2gpa unknown error %d", __func__, error));
- break;
- case VM_ACTIVATE_CPU:
- error = vm_activate_cpu(vcpu);
- break;
- case VM_GET_CPUS:
- error = 0;
- vm_cpuset = (struct vm_cpuset *)data;
- size = vm_cpuset->cpusetsize;
- if (size < sizeof(cpuset_t) || size > CPU_MAXSIZE / NBBY) {
- error = ERANGE;
- break;
- }
- cpuset = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
- if (vm_cpuset->which == VM_ACTIVE_CPUS)
- *cpuset = vm_active_cpus(sc->vm);
- else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
- *cpuset = vm_suspended_cpus(sc->vm);
- else if (vm_cpuset->which == VM_DEBUG_CPUS)
- *cpuset = vm_debug_cpus(sc->vm);
- else
- error = EINVAL;
- if (error == 0)
- error = copyout(cpuset, vm_cpuset->cpus, size);
- free(cpuset, M_TEMP);
- break;
- case VM_SUSPEND_CPU:
- error = vm_suspend_cpu(sc->vm, vcpu);
- break;
- case VM_RESUME_CPU:
- error = vm_resume_cpu(sc->vm, vcpu);
- break;
- case VM_GET_VGIC_VERSION:
- vgv = (struct vm_vgic_version *)data;
- /* TODO: Query the vgic driver for this */
- vgv->version = 3;
- vgv->flags = 0;
- error = 0;
- break;
- case VM_ATTACH_VGIC:
- vgic = (struct vm_vgic_descr *)data;
- error = vm_attach_vgic(sc->vm, vgic);
- break;
- case VM_RAISE_MSI:
- vmsi = (struct vm_msi *)data;
- error = vm_raise_msi(sc->vm, vmsi->msg, vmsi->addr, vmsi->bus,
- vmsi->slot, vmsi->func);
- break;
- case VM_SET_TOPOLOGY:
- topology = (struct vm_cpu_topology *)data;
- error = vm_set_topology(sc->vm, topology->sockets,
- topology->cores, topology->threads, topology->maxcpus);
- break;
- case VM_GET_TOPOLOGY:
- topology = (struct vm_cpu_topology *)data;
- vm_get_topology(sc->vm, &topology->sockets, &topology->cores,
- &topology->threads, &topology->maxcpus);
- error = 0;
- break;
- default:
- error = ENOTTY;
- break;
- }
-
-done:
- if (vcpus_locked == SINGLE)
- vcpu_unlock_one(vcpu);
- else if (vcpus_locked == ALL)
- vcpu_unlock_all(sc);
- if (memsegs_locked)
- vm_unlock_memsegs(sc->vm);
-
- /*
- * Make sure that no handler returns a kernel-internal
- * error value to userspace.
- */
- KASSERT(error == ERESTART || error >= 0,
- ("vmmdev_ioctl: invalid error return %d", error));
- return (error);
-}
-
-static int
-vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
- struct vm_object **objp, int nprot)
-{
- struct vmmdev_softc *sc;
- vm_paddr_t gpa;
- size_t len;
- vm_ooffset_t segoff, first, last;
- int error, found, segid;
- bool sysmem;
-
- error = vmm_priv_check(curthread->td_ucred);
- if (error)
- return (error);
-
- first = *offset;
- last = first + mapsize;
- if ((nprot & PROT_EXEC) || first < 0 || first >= last)
- return (EINVAL);
-
- sc = vmmdev_lookup2(cdev);
- if (sc == NULL) {
- /* virtual machine is in the process of being created */
- return (EINVAL);
- }
-
- /*
- * Get a read lock on the guest memory map.
- */
- vm_slock_memsegs(sc->vm);
-
- gpa = 0;
- found = 0;
- while (!found) {
- error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len,
- NULL, NULL);
- if (error)
- break;
-
- if (first >= gpa && last <= gpa + len)
- found = 1;
- else
- gpa += len;
- }
-
- if (found) {
- error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp);
- KASSERT(error == 0 && *objp != NULL,
- ("%s: invalid memory segment %d", __func__, segid));
- if (sysmem) {
- vm_object_reference(*objp);
- *offset = segoff + (first - gpa);
- } else {
- error = EINVAL;
- }
- }
- vm_unlock_memsegs(sc->vm);
- return (error);
-}
-
-static void
-vmmdev_destroy(void *arg)
-{
- struct vmmdev_softc *sc = arg;
- struct devmem_softc *dsc;
- int error __diagused;
-
- error = vcpu_lock_all(sc);
- KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));
- vm_unlock_vcpus(sc->vm);
-
- while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
- KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__));
- SLIST_REMOVE_HEAD(&sc->devmem, link);
- free(dsc->name, M_VMMDEV);
- free(dsc, M_VMMDEV);
- }
-
- if (sc->cdev != NULL)
- destroy_dev(sc->cdev);
-
- if (sc->vm != NULL)
- vm_destroy(sc->vm);
-
- if (sc->ucred != NULL)
- crfree(sc->ucred);
-
- if ((sc->flags & VSC_LINKED) != 0) {
- mtx_lock(&vmmdev_mtx);
- SLIST_REMOVE(&head, sc, vmmdev_softc, link);
- mtx_unlock(&vmmdev_mtx);
- }
-
- free(sc, M_VMMDEV);
-}
-
-static int
-sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
-{
- struct devmem_softc *dsc;
- struct vmmdev_softc *sc;
- struct cdev *cdev;
- char *buf;
- int error, buflen;
-
- error = vmm_priv_check(req->td->td_ucred);
- if (error)
- return (error);
-
- buflen = VM_MAX_NAMELEN + 1;
- buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
- strlcpy(buf, "beavis", buflen);
- error = sysctl_handle_string(oidp, buf, buflen, req);
- if (error != 0 || req->newptr == NULL)
- goto out;
-
- mtx_lock(&vmmdev_mtx);
- sc = vmmdev_lookup(buf);
- if (sc == NULL || sc->cdev == NULL) {
- mtx_unlock(&vmmdev_mtx);
- error = EINVAL;
- goto out;
- }
-
- /*
- * Setting 'sc->cdev' to NULL is used to indicate that the VM
- * is scheduled for destruction.
- */
- cdev = sc->cdev;
- sc->cdev = NULL;
- mtx_unlock(&vmmdev_mtx);
-
- /*
- * Destroy all cdevs:
- *
- * - any new operations on the 'cdev' will return an error (ENXIO).
- *
- * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev'
- */
- SLIST_FOREACH(dsc, &sc->devmem, link) {
- KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
- destroy_dev(dsc->cdev);
- devmem_destroy(dsc);
- }
- destroy_dev(cdev);
- vmmdev_destroy(sc);
- error = 0;
-
-out:
- free(buf, M_VMMDEV);
- return (error);
-}
-SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
- CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
- NULL, 0, sysctl_vmm_destroy, "A",
- NULL);
-
-static struct cdevsw vmmdevsw = {
- .d_name = "vmmdev",
- .d_version = D_VERSION,
- .d_ioctl = vmmdev_ioctl,
- .d_mmap_single = vmmdev_mmap_single,
- .d_read = vmmdev_rw,
- .d_write = vmmdev_rw,
-};
-
-static int
-sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
-{
- struct vm *vm;
- struct cdev *cdev;
- struct vmmdev_softc *sc, *sc2;
- char *buf;
- int error, buflen;
-
- error = vmm_priv_check(req->td->td_ucred);
- if (error)
- return (error);
-
- buflen = VM_MAX_NAMELEN + 1;
- buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
- strlcpy(buf, "beavis", buflen);
- error = sysctl_handle_string(oidp, buf, buflen, req);
- if (error != 0 || req->newptr == NULL)
- goto out;
-
- mtx_lock(&vmmdev_mtx);
- sc = vmmdev_lookup(buf);
- mtx_unlock(&vmmdev_mtx);
- if (sc != NULL) {
- error = EEXIST;
- goto out;
- }
-
- error = vm_create(buf, &vm);
- if (error != 0)
- goto out;
-
- sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
- sc->ucred = crhold(curthread->td_ucred);
- sc->vm = vm;
- SLIST_INIT(&sc->devmem);
-
- /*
- * Lookup the name again just in case somebody sneaked in when we
- * dropped the lock.
- */
- mtx_lock(&vmmdev_mtx);
- sc2 = vmmdev_lookup(buf);
- if (sc2 == NULL) {
- SLIST_INSERT_HEAD(&head, sc, link);
- sc->flags |= VSC_LINKED;
- }
- mtx_unlock(&vmmdev_mtx);
-
- if (sc2 != NULL) {
- vmmdev_destroy(sc);
- error = EEXIST;
- goto out;
- }
-
- error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &vmmdevsw, sc->ucred,
- UID_ROOT, GID_WHEEL, 0600, "vmm/%s", buf);
- if (error != 0) {
- vmmdev_destroy(sc);
- goto out;
- }
-
- mtx_lock(&vmmdev_mtx);
- sc->cdev = cdev;
- sc->cdev->si_drv1 = sc;
- mtx_unlock(&vmmdev_mtx);
-
-out:
- free(buf, M_VMMDEV);
- return (error);
-}
-SYSCTL_PROC(_hw_vmm, OID_AUTO, create,
- CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
- NULL, 0, sysctl_vmm_create, "A",
- NULL);
-
-void
-vmmdev_init(void)
-{
- pr_allow_flag = prison_add_allow(NULL, "vmm", NULL,
- "Allow use of vmm in a jail.");
-}
-
-int
-vmmdev_cleanup(void)
-{
- int error;
-
- if (SLIST_EMPTY(&head))
- error = 0;
- else
- error = EBUSY;
-
- return (error);
-}
-
-static int
-devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
- struct vm_object **objp, int nprot)
-{
- struct devmem_softc *dsc;
- vm_ooffset_t first, last;
- size_t seglen;
- int error;
- bool sysmem;
-
- dsc = cdev->si_drv1;
- if (dsc == NULL) {
- /* 'cdev' has been created but is not ready for use */
- return (ENXIO);
- }
-
- first = *offset;
- last = *offset + len;
- if ((nprot & PROT_EXEC) || first < 0 || first >= last)
- return (EINVAL);
-
- vm_slock_memsegs(dsc->sc->vm);
-
- error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp);
- KASSERT(error == 0 && !sysmem && *objp != NULL,
- ("%s: invalid devmem segment %d", __func__, dsc->segid));
-
- if (seglen >= last)
- vm_object_reference(*objp);
- else
- error = 0;
- vm_unlock_memsegs(dsc->sc->vm);
- return (error);
-}
-
-static struct cdevsw devmemsw = {
- .d_name = "devmem",
- .d_version = D_VERSION,
- .d_mmap_single = devmem_mmap_single,
-};
-
-static int
-devmem_create_cdev(const char *vmname, int segid, char *devname)
-{
- struct devmem_softc *dsc;
- struct vmmdev_softc *sc;
- struct cdev *cdev;
- int error;
-
- error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &devmemsw, NULL,
- UID_ROOT, GID_WHEEL, 0600, "vmm.io/%s.%s", vmname, devname);
- if (error)
- return (error);
-
- dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);
-
- mtx_lock(&vmmdev_mtx);
- sc = vmmdev_lookup(vmname);
- KASSERT(sc != NULL, ("%s: vm %s softc not found", __func__, vmname));
- if (sc->cdev == NULL) {
- /* virtual machine is being created or destroyed */
- mtx_unlock(&vmmdev_mtx);
- free(dsc, M_VMMDEV);
- destroy_dev_sched_cb(cdev, NULL, 0);
- return (ENODEV);
- }
-
- dsc->segid = segid;
- dsc->name = devname;
- dsc->cdev = cdev;
- dsc->sc = sc;
- SLIST_INSERT_HEAD(&sc->devmem, dsc, link);
- mtx_unlock(&vmmdev_mtx);
-
- /* The 'cdev' is ready for use after 'si_drv1' is initialized */
- cdev->si_drv1 = dsc;
- return (0);
-}
-
-static void
-devmem_destroy(void *arg)
-{
- struct devmem_softc *dsc = arg;
-
- KASSERT(dsc->cdev, ("%s: devmem cdev already destroyed", __func__));
- dsc->cdev = NULL;
- dsc->sc = NULL;
-}
diff --git a/sys/arm64/vmm/vmm_dev_machdep.c b/sys/arm64/vmm/vmm_dev_machdep.c
new file mode 100644
index 000000000000..926a74fa528b
--- /dev/null
+++ b/sys/arm64/vmm/vmm_dev_machdep.c
@@ -0,0 +1,138 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/libkern.h>
+#include <sys/ioccom.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+
+#include <machine/machdep.h>
+#include <machine/vmparam.h>
+#include <machine/vmm.h>
+
+#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_mem.h>
+
+#include "io/vgic.h"
+
+const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
+ VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_GLA2GPA_NOFAULT, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+
+ VMMDEV_IOCTL(VM_ATTACH_VGIC,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+
+ VMMDEV_IOCTL(VM_GET_VGIC_VERSION, 0),
+ VMMDEV_IOCTL(VM_RAISE_MSI, 0),
+ VMMDEV_IOCTL(VM_ASSERT_IRQ, 0),
+ VMMDEV_IOCTL(VM_DEASSERT_IRQ, 0),
+};
+const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls);
+
+int
+vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
+ int fflag, struct thread *td)
+{
+ struct vm_run *vmrun;
+ struct vm_vgic_version *vgv;
+ struct vm_vgic_descr *vgic;
+ struct vm_irq *vi;
+ struct vm_exception *vmexc;
+ struct vm_gla2gpa *gg;
+ struct vm_msi *vmsi;
+ int error;
+
+ error = 0;
+ switch (cmd) {
+ case VM_RUN: {
+ struct vm_exit *vme;
+
+ vmrun = (struct vm_run *)data;
+ vme = vm_exitinfo(vcpu);
+
+ error = vm_run(vcpu);
+ if (error != 0)
+ break;
+
+ error = copyout(vme, vmrun->vm_exit, sizeof(*vme));
+ if (error != 0)
+ break;
+ break;
+ }
+ case VM_INJECT_EXCEPTION:
+ vmexc = (struct vm_exception *)data;
+ error = vm_inject_exception(vcpu, vmexc->esr, vmexc->far);
+ break;
+ case VM_GLA2GPA_NOFAULT:
+ gg = (struct vm_gla2gpa *)data;
+ error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
+ gg->prot, &gg->gpa, &gg->fault);
+ KASSERT(error == 0 || error == EFAULT,
+ ("%s: vm_gla2gpa unknown error %d", __func__, error));
+ break;
+ case VM_GET_VGIC_VERSION:
+ vgv = (struct vm_vgic_version *)data;
+ /* TODO: Query the vgic driver for this */
+ vgv->version = 3;
+ vgv->flags = 0;
+ error = 0;
+ break;
+ case VM_ATTACH_VGIC:
+ vgic = (struct vm_vgic_descr *)data;
+ error = vm_attach_vgic(vm, vgic);
+ break;
+ case VM_RAISE_MSI:
+ vmsi = (struct vm_msi *)data;
+ error = vm_raise_msi(vm, vmsi->msg, vmsi->addr, vmsi->bus,
+ vmsi->slot, vmsi->func);
+ break;
+ case VM_ASSERT_IRQ:
+ vi = (struct vm_irq *)data;
+ error = vm_assert_irq(vm, vi->irq);
+ break;
+ case VM_DEASSERT_IRQ:
+ vi = (struct vm_irq *)data;
+ error = vm_deassert_irq(vm, vi->irq);
+ break;
+ default:
+ error = ENOTTY;
+ break;
+ }
+
+ return (error);
+}
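
The vmmdev_machdep_ioctls[] table at the top of this new file lets the machine-independent dev/vmm code take the right locks (one vCPU, all vCPUs, or an exclusive memseg lock) before dispatching into vmmdev_machdep_ioctl(), replacing the hand-rolled locking switch in the deleted vmm_dev.c. A sketch of such a flag-driven lookup, with invented command and flag values:

    #include <stddef.h>

    #define LOCK_ONE_VCPU   0x01
    #define LOCK_ALL_VCPUS  0x02

    struct ioctl_desc {
            unsigned long   cmd;
            int             flags;
    };

    static const struct ioctl_desc descs[] = {
            { 0x1001UL, LOCK_ONE_VCPU },
            { 0x1002UL, LOCK_ALL_VCPUS },
    };

    static const struct ioctl_desc *
    ioctl_lookup(unsigned long cmd)
    {
            for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
                    if (descs[i].cmd == cmd)
                            return (&descs[i]);
            }
            return (NULL);  /* unknown command: caller rejects it */
    }
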
diff --git a/sys/arm64/vmm/vmm_handlers.c b/sys/arm64/vmm/vmm_handlers.c
new file mode 100644
index 000000000000..c567b585eb06
--- /dev/null
+++ b/sys/arm64/vmm/vmm_handlers.c
@@ -0,0 +1,113 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+#include <machine/ifunc.h>
+
+#include "arm64.h"
+#include "vmm_handlers.h"
+
+/* Read an EL2 register */
+static uint64_t
+vmm_nvhe_read_reg(uint64_t reg)
+{
+ return (vmm_call_hyp(HYP_READ_REGISTER, reg));
+}
+
+DEFINE_IFUNC(, uint64_t, vmm_read_reg, (uint64_t reg))
+{
+ if (in_vhe())
+ return (vmm_vhe_read_reg);
+ return (vmm_nvhe_read_reg);
+}
+
+/* Enter the guest */
+static uint64_t
+vmm_nvhe_enter_guest(struct hyp *hyp, struct hypctx *hypctx)
+{
+ return (vmm_call_hyp(HYP_ENTER_GUEST, hyp->el2_addr, hypctx->el2_addr));
+}
+
+DEFINE_IFUNC(, uint64_t, vmm_enter_guest,
+ (struct hyp *hyp, struct hypctx *hypctx))
+{
+ if (in_vhe())
+ return (vmm_vhe_enter_guest);
+ return (vmm_nvhe_enter_guest);
+}
+
+/* Clean the TLB for all guests */
+static void
+vmm_nvhe_clean_s2_tlbi(void)
+{
+ vmm_call_hyp(HYP_CLEAN_S2_TLBI);
+}
+
+DEFINE_IFUNC(, void, vmm_clean_s2_tlbi, (void))
+{
+ if (in_vhe())
+ return (vmm_vhe_clean_s2_tlbi);
+ return (vmm_nvhe_clean_s2_tlbi);
+}
+
+/*
+ * Switch to a guest vttbr and clean the TLB for a range of guest
+ * virtual address space.
+ */
+static void
+vmm_nvhe_s2_tlbi_range(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
+ bool final_only)
+{
+ vmm_call_hyp(HYP_S2_TLBI_RANGE, vttbr, sva, eva, final_only);
+}
+
+DEFINE_IFUNC(, void, vmm_s2_tlbi_range,
+ (uint64_t vttbr, vm_offset_t sva, vm_offset_t eva, bool final_only))
+{
+ if (in_vhe())
+ return (vmm_vhe_s2_tlbi_range);
+ return (vmm_nvhe_s2_tlbi_range);
+}
+
+/*
+ * Switch to a guest vttbr and clean the TLB for all the guest
+ * virtual address space.
+ */
+static void
+vmm_nvhe_s2_tlbi_all(uint64_t vttbr)
+{
+ vmm_call_hyp(HYP_S2_TLBI_ALL, vttbr);
+}
+
+DEFINE_IFUNC(, void, vmm_s2_tlbi_all, (uint64_t vttbr))
+{
+ if (in_vhe())
+ return (vmm_vhe_s2_tlbi_all);
+ return (vmm_nvhe_s2_tlbi_all);
+}
diff --git a/sys/arm64/arm64/uma_machdep.c b/sys/arm64/vmm/vmm_handlers.h
index f942248d4dcd..f651fce6f32d 100644
--- a/sys/arm64/arm64/uma_machdep.c
+++ b/sys/arm64/vmm/vmm_handlers.h
@@ -1,6 +1,7 @@
/*-
- * Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,7 +15,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -24,46 +25,24 @@
* SUCH DAMAGE.
*/
-#include <sys/param.h>
-#include <sys/malloc.h>
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
-#include <vm/vm_dumpset.h>
-#include <vm/uma.h>
-#include <vm/uma_int.h>
-#include <machine/machdep.h>
+#ifndef _VMM_VMM_HANDLERS_H_
+#define _VMM_VMM_HANDLERS_H_
+
+#include <sys/types.h>
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
- int wait)
-{
- vm_page_t m;
- vm_paddr_t pa;
- void *va;
+struct hyp;
+struct hypctx;
- *flags = UMA_SLAB_PRIV;
- m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
- VM_ALLOC_WIRED);
- if (m == NULL)
- return (NULL);
- pa = m->phys_addr;
- if ((wait & M_NODUMP) == 0)
- dump_add_page(pa);
- va = (void *)PHYS_TO_DMAP(pa);
- return (va);
-}
+void vmm_clean_s2_tlbi(void);
+uint64_t vmm_enter_guest(struct hyp *, struct hypctx *);
+uint64_t vmm_read_reg(uint64_t);
+void vmm_s2_tlbi_range(uint64_t, vm_offset_t, vm_offset_t, bool);
+void vmm_s2_tlbi_all(uint64_t);
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
- vm_page_t m;
- vm_paddr_t pa;
+void vmm_vhe_clean_s2_tlbi(void);
+uint64_t vmm_vhe_enter_guest(struct hyp *, struct hypctx *);
+uint64_t vmm_vhe_read_reg(uint64_t);
+void vmm_vhe_s2_tlbi_range(uint64_t, vm_offset_t, vm_offset_t, bool);
+void vmm_vhe_s2_tlbi_all(uint64_t);
- pa = DMAP_TO_PHYS((vm_offset_t)mem);
- dump_drop_page(pa);
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_unwire_noq(m);
- vm_page_free(m);
-}
+#endif /* _VMM_VMM_HANDLERS_H_ */
diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index 9ff250e798e7..d61885c15871 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -39,9 +39,7 @@
struct hypctx;
-uint64_t vmm_hyp_enter(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
- uint64_t, uint64_t, uint64_t);
-uint64_t vmm_enter_guest(struct hypctx *);
+uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
@@ -51,11 +49,12 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
/* Store the guest VFP registers */
if (guest) {
/* Store the timer registers */
- hypctx->vtimer_cpu.cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
+ hypctx->vtimer_cpu.cntkctl_el1 =
+ READ_SPECIALREG(EL1_REG(CNTKCTL));
hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
- READ_SPECIALREG(cntv_cval_el0);
+ READ_SPECIALREG(EL0_REG(CNTV_CVAL));
hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
- READ_SPECIALREG(cntv_ctl_el0);
+ READ_SPECIALREG(EL0_REG(CNTV_CTL));
/* Store the GICv3 registers */
hypctx->vgic_v3_regs.ich_eisr_el2 =
@@ -221,41 +220,53 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
if (guest) {
hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
+ hypctx->par_el1 = READ_SPECIALREG(par_el1);
}
/* Store the guest special registers */
- hypctx->elr_el1 = READ_SPECIALREG(elr_el1);
hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);
- hypctx->vbar_el1 = READ_SPECIALREG(vbar_el1);
hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
- hypctx->afsr0_el1 = READ_SPECIALREG(afsr0_el1);
- hypctx->afsr1_el1 = READ_SPECIALREG(afsr1_el1);
- hypctx->amair_el1 = READ_SPECIALREG(amair_el1);
- hypctx->contextidr_el1 = READ_SPECIALREG(contextidr_el1);
- hypctx->cpacr_el1 = READ_SPECIALREG(cpacr_el1);
hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
- hypctx->esr_el1 = READ_SPECIALREG(esr_el1);
- hypctx->far_el1 = READ_SPECIALREG(far_el1);
- hypctx->mair_el1 = READ_SPECIALREG(mair_el1);
hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);
- hypctx->par_el1 = READ_SPECIALREG(par_el1);
- hypctx->sctlr_el1 = READ_SPECIALREG(sctlr_el1);
- hypctx->spsr_el1 = READ_SPECIALREG(spsr_el1);
- hypctx->tcr_el1 = READ_SPECIALREG(tcr_el1);
- /* TODO: Support when this is not res0 */
- hypctx->tcr2_el1 = 0;
- hypctx->ttbr0_el1 = READ_SPECIALREG(ttbr0_el1);
- hypctx->ttbr1_el1 = READ_SPECIALREG(ttbr1_el1);
+
+ if (guest_or_nonvhe(guest)) {
+ hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
+ hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));
+
+ hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
+ hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
+ hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
+ hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
+ hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
+ hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
+ hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
+ hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
+ hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
+ hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
+ hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
+ /* TODO: Support when this is not res0 */
+ hypctx->tcr2_el1 = 0;
+ hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
+ hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
+ }
hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
+
+#ifndef VMM_VHE
+ /* hcrx_el2 depends on feat_hcx */
+ uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+ if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
+ hypctx->hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
+ }
+#endif
}
static void
@@ -264,35 +275,52 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
uint64_t dfr0;
/* Restore the special registers */
- WRITE_SPECIALREG(elr_el1, hypctx->elr_el1);
+ WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
+
+ if (guest_or_nonvhe(guest)) {
+ uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
+ if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
+ WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hypctx->hcrx_el2);
+ }
+ }
+ isb();
+
WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);
- WRITE_SPECIALREG(vbar_el1, hypctx->vbar_el1);
WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
- WRITE_SPECIALREG(afsr0_el1, hypctx->afsr0_el1);
- WRITE_SPECIALREG(afsr1_el1, hypctx->afsr1_el1);
- WRITE_SPECIALREG(amair_el1, hypctx->amair_el1);
- WRITE_SPECIALREG(contextidr_el1, hypctx->contextidr_el1);
- WRITE_SPECIALREG(cpacr_el1, hypctx->cpacr_el1);
WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
- WRITE_SPECIALREG(esr_el1, hypctx->esr_el1);
- WRITE_SPECIALREG(far_el1, hypctx->far_el1);
WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);
- WRITE_SPECIALREG(mair_el1, hypctx->mair_el1);
- WRITE_SPECIALREG(par_el1, hypctx->par_el1);
- WRITE_SPECIALREG(sctlr_el1, hypctx->sctlr_el1);
- WRITE_SPECIALREG(tcr_el1, hypctx->tcr_el1);
- /* TODO: tcr2_el1 */
- WRITE_SPECIALREG(ttbr0_el1, hypctx->ttbr0_el1);
- WRITE_SPECIALREG(ttbr1_el1, hypctx->ttbr1_el1);
- WRITE_SPECIALREG(spsr_el1, hypctx->spsr_el1);
+
+ if (guest_or_nonvhe(guest)) {
+ WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
+ WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);
+
+ WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
+ WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
+ WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
+ WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
+ WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
+ WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
+ WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
+ WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
+
+ WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
+ WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
+ WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
+ /* TODO: tcr2_el1 */
+ WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
+ WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
+ }
+
+ if (guest) {
+ WRITE_SPECIALREG(par_el1, hypctx->par_el1);
+ }
WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
- WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);
@@ -413,10 +441,11 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
if (guest) {
/* Load the timer registers */
- WRITE_SPECIALREG(cntkctl_el1, hypctx->vtimer_cpu.cntkctl_el1);
- WRITE_SPECIALREG(cntv_cval_el0,
+ WRITE_SPECIALREG(EL1_REG(CNTKCTL),
+ hypctx->vtimer_cpu.cntkctl_el1);
+ WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
- WRITE_SPECIALREG(cntv_ctl_el0,
+ WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);
@@ -496,7 +525,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);
/* Call into the guest */
- ret = vmm_enter_guest(hypctx);
+ ret = VMM_HYP_FUNC(do_call_guest)(hypctx);
WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
isb();
@@ -566,8 +595,20 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
return (ret);
}
-static uint64_t
-vmm_hyp_read_reg(uint64_t reg)
+VMM_STATIC uint64_t
+VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
+{
+ uint64_t ret;
+
+ do {
+ ret = vmm_hyp_call_guest(hyp, hypctx);
+ } while (ret == EXCP_TYPE_REENTER);
+
+ return (ret);
+}
+
+VMM_STATIC uint64_t
+VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
switch (reg) {
case HYP_REG_ICH_VTR:
@@ -579,22 +620,27 @@ vmm_hyp_read_reg(uint64_t reg)
return (0);
}
-static int
-vmm_clean_s2_tlbi(void)
+VMM_STATIC void
+VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
dsb(ishst);
__asm __volatile("tlbi alle1is");
dsb(ish);
-
- return (0);
}
-static int
-vm_s2_tlbi_range(uint64_t vttbr, vm_offset_t sva, vm_size_t eva,
+VMM_STATIC void
+VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
bool final_only)
{
uint64_t end, r, start;
uint64_t host_vttbr;
+#ifdef VMM_VHE
+ uint64_t host_tcr;
+#endif
+
+#ifdef VMM_VHE
+ dsb(ishst);
+#endif
#define TLBI_VA_SHIFT 12
#define TLBI_VA_MASK ((1ul << 44) - 1)
@@ -607,6 +653,12 @@ vm_s2_tlbi_range(uint64_t vttbr, vm_offset_t sva, vm_size_t eva,
WRITE_SPECIALREG(vttbr_el2, vttbr);
isb();
+#ifdef VMM_VHE
+ host_tcr = READ_SPECIALREG(tcr_el2);
+ WRITE_SPECIALREG(tcr_el2, host_tcr & ~HCR_TGE);
+ isb();
+#endif
+
/*
* The CPU can cache the stage 1 + 2 combination so we need to ensure
* the stage 2 is invalidated first, then when this has completed we
@@ -631,18 +683,25 @@ vm_s2_tlbi_range(uint64_t vttbr, vm_offset_t sva, vm_size_t eva,
dsb(ish);
isb();
- /* Switch back t othe host vttbr */
- WRITE_SPECIALREG(vttbr_el2, host_vttbr);
+#ifdef VMM_VHE
+ WRITE_SPECIALREG(tcr_el2, host_tcr);
isb();
+#endif
- return (0);
+ /* Switch back to the host vttbr */
+ WRITE_SPECIALREG(vttbr_el2, host_vttbr);
+ isb();
}
-static int
-vm_s2_tlbi_all(uint64_t vttbr)
+VMM_STATIC void
+VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
uint64_t host_vttbr;
+#ifdef VMM_VHE
+ dsb(ishst);
+#endif
+
/* Switch to the guest vttbr */
/* TODO: Handle Cortex-A57/A72 erratum 131936 */
host_vttbr = READ_SPECIALREG(vttbr_el2);
@@ -656,80 +715,4 @@ vm_s2_tlbi_all(uint64_t vttbr)
 /* Switch back to the host vttbr */
WRITE_SPECIALREG(vttbr_el2, host_vttbr);
isb();
-
- return (0);
-}
-
-static int
-vmm_dc_civac(uint64_t start, uint64_t len)
-{
- size_t line_size, end;
- uint64_t ctr;
-
- ctr = READ_SPECIALREG(ctr_el0);
- line_size = sizeof(int) << CTR_DLINE_SIZE(ctr);
- end = start + len;
- dsb(ishst);
- /* Clean and Invalidate the D-cache */
- for (; start < end; start += line_size)
- __asm __volatile("dc civac, %0" :: "r" (start) : "memory");
- dsb(ish);
- return (0);
-}
-
-static int
-vmm_el2_tlbi(uint64_t type, uint64_t start, uint64_t len)
-{
- uint64_t end, r;
-
- dsb(ishst);
- switch (type) {
- default:
- case HYP_EL2_TLBI_ALL:
- __asm __volatile("tlbi alle2" ::: "memory");
- break;
- case HYP_EL2_TLBI_VA:
- end = TLBI_VA(start + len);
- start = TLBI_VA(start);
- for (r = start; r < end; r += TLBI_VA_L3_INCR) {
- __asm __volatile("tlbi vae2is, %0" :: "r"(r));
- }
- break;
- }
- dsb(ish);
-
- return (0);
-}
-
-uint64_t
-vmm_hyp_enter(uint64_t handle, uint64_t x1, uint64_t x2, uint64_t x3,
- uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7)
-{
- uint64_t ret;
-
- switch (handle) {
- case HYP_ENTER_GUEST:
- do {
- ret = vmm_hyp_call_guest((struct hyp *)x1,
- (struct hypctx *)x2);
- } while (ret == EXCP_TYPE_REENTER);
- return (ret);
- case HYP_READ_REGISTER:
- return (vmm_hyp_read_reg(x1));
- case HYP_CLEAN_S2_TLBI:
- return (vmm_clean_s2_tlbi());
- case HYP_DC_CIVAC:
- return (vmm_dc_civac(x1, x2));
- case HYP_EL2_TLBI:
- return (vmm_el2_tlbi(x1, x2, x3));
- case HYP_S2_TLBI_RANGE:
- return (vm_s2_tlbi_range(x1, x2, x3, x4));
- case HYP_S2_TLBI_ALL:
- return (vm_s2_tlbi_all(x1));
- case HYP_CLEANUP: /* Handled in vmm_hyp_exception.S */
- default:
- break;
- }
-
- return (0);
}
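
For reference, the range invalidation above encodes each page into the low 44 bits of the TLBI operand; only TLBI_VA_SHIFT and TLBI_VA_MASK appear verbatim in this hunk, so the companion macros and loop below are a sketch under the assumption that they follow the usual pattern:

    #define TLBI_VA(addr)    (((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
    #define TLBI_VA_L3_INCR  (L3_SIZE >> TLBI_VA_SHIFT)

    static void
    s2_tlbi_range_sketch(vm_offset_t sva, vm_offset_t eva)
    {
            uint64_t r;

            /* Invalidate one stage 2 leaf entry at a time. */
            for (r = TLBI_VA(sva); r < TLBI_VA(eva); r += TLBI_VA_L3_INCR)
                    __asm __volatile("tlbi ipas2e1is, %0" :: "r"(r));
    }
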
diff --git a/sys/arm64/vmm/vmm_hyp_el2.S b/sys/arm64/vmm/vmm_hyp_el2.S
index 7b49d3144dff..0ba040ee7bad 100644
--- a/sys/arm64/vmm/vmm_hyp_el2.S
+++ b/sys/arm64/vmm/vmm_hyp_el2.S
@@ -28,12 +28,17 @@
* SUCH DAMAGE.
*/
+#include <sys/elf_common.h>
+
+#include <machine/asm.h>
#include <machine/param.h>
- .rodata
+ .section .rodata
.align PAGE_SHIFT
.globl vmm_hyp_code
vmm_hyp_code:
.incbin "vmm_hyp_blob.bin"
.globl vmm_hyp_code_end
vmm_hyp_code_end:
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/vmm/vmm_hyp_exception.S b/sys/arm64/vmm/vmm_hyp_exception.S
index 0e8b31ae8b12..50c2490f37bf 100644
--- a/sys/arm64/vmm/vmm_hyp_exception.S
+++ b/sys/arm64/vmm/vmm_hyp_exception.S
@@ -30,6 +30,7 @@
*/
+#include <sys/elf_common.h>
#include <machine/asm.h>
#include <machine/hypervisor.h>
@@ -145,29 +146,6 @@
b handle_\name
.endm
- .section ".vmm_vectors","ax"
- .align 11
-hyp_init_vectors:
- vempty /* Synchronous EL2t */
- vempty /* IRQ EL2t */
- vempty /* FIQ EL2t */
- vempty /* Error EL2t */
-
- vempty /* Synchronous EL2h */
- vempty /* IRQ EL2h */
- vempty /* FIQ EL2h */
- vempty /* Error EL2h */
-
- vector hyp_init /* Synchronous 64-bit EL1 */
- vempty /* IRQ 64-bit EL1 */
- vempty /* FIQ 64-bit EL1 */
- vempty /* Error 64-bit EL1 */
-
- vempty /* Synchronous 32-bit EL1 */
- vempty /* IRQ 32-bit EL1 */
- vempty /* FIQ 32-bit EL1 */
- vempty /* Error 32-bit EL1 */
-
.text
.align 11
hyp_vectors:
@@ -191,50 +169,6 @@ hyp_vectors:
vempty /* FIQ 32-bit EL1 */
vempty /* Error 32-bit EL1 */
-/*
- * Initialize the hypervisor mode with a new exception vector table, translation
- * table and stack.
- *
- * Expecting:
- * x0 - translation tables physical address
- * x1 - stack top virtual address
- * x2 - TCR_EL2 value
- * x3 - SCTLR_EL2 value
- * x4 - VTCR_EL2 value
- */
-LENTRY(handle_hyp_init)
- /* Install the new exception vectors */
- adrp x6, hyp_vectors
- add x6, x6, :lo12:hyp_vectors
- msr vbar_el2, x6
- /* Set the stack top address */
- mov sp, x1
- /* Use the host VTTBR_EL2 to tell the host and the guests apart */
- mov x9, #VTTBR_HOST
- msr vttbr_el2, x9
- /* Load the base address for the translation tables */
- msr ttbr0_el2, x0
- /* Invalidate the TLB */
- dsb ish
- tlbi alle2
- dsb ishst
- isb
- /* Use the same memory attributes as EL1 */
- mrs x9, mair_el1
- msr mair_el2, x9
- /* Configure address translation */
- msr tcr_el2, x2
- isb
- /* Set the system control register for EL2 */
- msr sctlr_el2, x3
- /* Set the Stage 2 translation control register */
- msr vtcr_el2, x4
- /* Return success */
- mov x0, #0
- /* MMU is up and running */
- ERET
-LEND(handle_hyp_init)
-
.macro do_world_switch_to_host
save_guest_registers
restore_host_registers
@@ -242,10 +176,15 @@ LEND(handle_hyp_init)
/* Restore host VTTBR */
mov x9, #VTTBR_HOST
msr vttbr_el2, x9
+
+#ifdef VMM_VHE
+ msr vbar_el1, x1
+#endif
.endm
.macro handle_el2_excp type
+#ifndef VMM_VHE
/* Save registers before modifying so we can restore them */
str x9, [sp, #-16]!
@@ -256,15 +195,18 @@ LEND(handle_hyp_init)
/* We got the exception while the guest was running */
ldr x9, [sp], #16
+#endif /* !VMM_VHE */
do_world_switch_to_host
mov x0, \type
ret
+#ifndef VMM_VHE
1:
/* We got the exception while the host was running */
ldr x9, [sp], #16
mov x0, \type
ERET
+#endif /* !VMM_VHE */
.endm
@@ -286,6 +228,7 @@ LEND(handle_el2_el2h_error)
LENTRY(handle_el2_el1_sync64)
+#ifndef VMM_VHE
/* Save registers before modifying so we can restore them */
str x9, [sp, #-16]!
@@ -308,7 +251,9 @@ LENTRY(handle_el2_el1_sync64)
ldr lr, [sp], #16
ERET
-1: /* Guest exception taken to EL2 */
+1:
+#endif
+ /* Guest exception taken to EL2 */
do_world_switch_to_host
mov x0, #EXCP_TYPE_EL1_SYNC
ret
@@ -332,7 +277,7 @@ LENTRY(handle_el2_el1_irq64)
2:
ldr x9, [sp], #16
ret
-LEND(handle_el2_el1_irq)
+LEND(handle_el2_el1_irq64)
LENTRY(handle_el2_el1_fiq64)
do_world_switch_to_host
@@ -349,12 +294,20 @@ LEND(handle_el2_el1_error64)
/*
* Usage:
- * uint64_t vmm_enter_guest(struct hypctx *hypctx)
+ * uint64_t vmm_do_call_guest(struct hypctx *hypctx)
*
* Expecting:
* x0 - hypctx address
*/
-ENTRY(vmm_enter_guest)
+ENTRY(VMM_HYP_FUNC(do_call_guest))
+#ifdef VMM_VHE
+ mrs x1, vbar_el1
+ adrp x2, hyp_vectors
+ add x2, x2, :lo12:hyp_vectors
+ msr vbar_el1, x2
+ isb
+#endif
+
/* Save hypctx address */
msr tpidr_el2, x0
@@ -363,25 +316,6 @@ ENTRY(vmm_enter_guest)
/* Enter guest */
ERET
-END(vmm_enter_guest)
-
-/*
- * Usage:
- * void vmm_cleanup(uint64_t handle, void *hyp_stub_vectors)
- *
- * Expecting:
- * x1 - physical address of hyp_stub_vectors
- */
-LENTRY(vmm_cleanup)
- /* Restore the stub vectors */
- msr vbar_el2, x1
-
- /* Disable the MMU */
- dsb sy
- mrs x2, sctlr_el2
- bic x2, x2, #SCTLR_EL2_M
- msr sctlr_el2, x2
- isb
+END(VMM_HYP_FUNC(do_call_guest))
- ERET
-LEND(vmm_cleanup)
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/arm64/vmm/vmm_ktr.h b/sys/arm64/vmm/vmm_ktr.h
deleted file mode 100644
index 965f440ae874..000000000000
--- a/sys/arm64/vmm/vmm_ktr.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
- *
- * Copyright (c) 2011 NetApp, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _VMM_KTR_H_
-#define _VMM_KTR_H_
-
-#include <sys/ktr.h>
-#include <sys/pcpu.h>
-
-#ifndef KTR_VMM
-#define KTR_VMM KTR_GEN
-#endif
-
-#define VCPU_CTR0(vm, vcpuid, format) \
-CTR2(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid))
-
-#define VCPU_CTR1(vm, vcpuid, format, p1) \
-CTR3(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1))
-
-#define VCPU_CTR2(vm, vcpuid, format, p1, p2) \
-CTR4(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2))
-
-#define VCPU_CTR3(vm, vcpuid, format, p1, p2, p3) \
-CTR5(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2), (p3))
-
-#define VCPU_CTR4(vm, vcpuid, format, p1, p2, p3, p4) \
-CTR6(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), \
- (p1), (p2), (p3), (p4))
-
-#define VM_CTR0(vm, format) \
-CTR1(KTR_VMM, "vm %s: " format, vm_name((vm)))
-
-#define VM_CTR1(vm, format, p1) \
-CTR2(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1))
-
-#define VM_CTR2(vm, format, p1, p2) \
-CTR3(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2))
-
-#define VM_CTR3(vm, format, p1, p2, p3) \
-CTR4(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3))
-
-#define VM_CTR4(vm, format, p1, p2, p3, p4) \
-CTR5(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3), (p4))
-#endif
diff --git a/sys/arm64/vmm/vmm_mmu.c b/sys/arm64/vmm/vmm_mmu.c
index 1f2d248a743b..42537254e27b 100644
--- a/sys/arm64/vmm/vmm_mmu.c
+++ b/sys/arm64/vmm/vmm_mmu.c
@@ -294,7 +294,7 @@ vmmpmap_enter(vm_offset_t va, vm_size_t size, vm_paddr_t pa, vm_prot_t prot)
KASSERT((size & PAGE_MASK) == 0,
("%s: Mapping is not page-sized", __func__));
- l3e = ATTR_DEFAULT | L3_PAGE;
+ l3e = ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
/* This bit is res1 at EL2 */
l3e |= ATTR_S1_AP(ATTR_S1_AP_USER);
/* Only normal memory is used at EL2 */
diff --git a/sys/arm64/vmm/vmm_nvhe.c b/sys/arm64/vmm/vmm_nvhe.c
new file mode 100644
index 000000000000..025b1308ce68
--- /dev/null
+++ b/sys/arm64/vmm/vmm_nvhe.c
@@ -0,0 +1,118 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021 Andrew Turner
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define VMM_STATIC static
+#define VMM_HYP_FUNC(func) vmm_nvhe_ ## func
+
+#define guest_or_nonvhe(guest) (true)
+#define EL1_REG(reg) MRS_REG_ALT_NAME(reg ## _EL1)
+#define EL0_REG(reg) MRS_REG_ALT_NAME(reg ## _EL0)
+
+#include "vmm_hyp.c"
+
+uint64_t vmm_hyp_enter(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
+ uint64_t, uint64_t, uint64_t);
+
+/*
+ * Handlers for the EL2 address space. These are only needed by non-VHE code:
+ * with VHE the kernel runs in EL2, so pmap manages the address space.
+ */
+static int
+vmm_dc_civac(uint64_t start, uint64_t len)
+{
+ size_t line_size, end;
+ uint64_t ctr;
+
+ ctr = READ_SPECIALREG(ctr_el0);
+ line_size = sizeof(int) << CTR_DLINE_SIZE(ctr);
+ end = start + len;
+ dsb(ishst);
+ /* Clean and Invalidate the D-cache */
+ for (; start < end; start += line_size)
+ __asm __volatile("dc civac, %0" :: "r" (start) : "memory");
+ dsb(ish);
+ return (0);
+}
+
+static int
+vmm_el2_tlbi(uint64_t type, uint64_t start, uint64_t len)
+{
+ uint64_t end, r;
+
+ dsb(ishst);
+ switch (type) {
+ default:
+ case HYP_EL2_TLBI_ALL:
+ __asm __volatile("tlbi alle2" ::: "memory");
+ break;
+ case HYP_EL2_TLBI_VA:
+ end = TLBI_VA(start + len);
+ start = TLBI_VA(start);
+ for (r = start; r < end; r += TLBI_VA_L3_INCR) {
+ __asm __volatile("tlbi vae2is, %0" :: "r"(r));
+ }
+ break;
+ }
+ dsb(ish);
+
+ return (0);
+}
+
+uint64_t
+vmm_hyp_enter(uint64_t handle, uint64_t x1, uint64_t x2, uint64_t x3,
+ uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7)
+{
+ switch (handle) {
+ case HYP_ENTER_GUEST:
+ return (VMM_HYP_FUNC(enter_guest)((struct hyp *)x1,
+ (struct hypctx *)x2));
+ case HYP_READ_REGISTER:
+ return (VMM_HYP_FUNC(read_reg)(x1));
+ case HYP_CLEAN_S2_TLBI:
+ VMM_HYP_FUNC(clean_s2_tlbi)();
+ return (0);
+ case HYP_DC_CIVAC:
+ return (vmm_dc_civac(x1, x2));
+ case HYP_EL2_TLBI:
+ return (vmm_el2_tlbi(x1, x2, x3));
+ case HYP_S2_TLBI_RANGE:
+ VMM_HYP_FUNC(s2_tlbi_range)(x1, x2, x3, x4);
+ return (0);
+ case HYP_S2_TLBI_ALL:
+ VMM_HYP_FUNC(s2_tlbi_all)(x1);
+ return (0);
+ case HYP_CLEANUP: /* Handled in vmm_hyp_exception.S */
+ default:
+ break;
+ }
+
+ return (0);
+}
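
vmm_hyp_enter() is the single non-VHE entry point. The kernel-side vmm_call_hyp() used by the handlers earlier in this patch is not shown here; under the assumed convention it simply leaves the handle and arguments in x0-x7 and traps to EL2 with an hvc, returning the handler's result in x0. A sketch of that convention:

    /* Sketch only: the real shim lives outside this patch. */
    static inline uint64_t
    hyp_call_sketch(uint64_t handle, uint64_t a1, uint64_t a2)
    {
            register uint64_t x0 __asm("x0") = handle;
            register uint64_t x1 __asm("x1") = a1;
            register uint64_t x2 __asm("x2") = a2;

            /* Trap to the EL2 vector, which dispatches to vmm_hyp_enter(). */
            __asm __volatile("hvc #0"
                : "+r"(x0) : "r"(x1), "r"(x2) : "memory");
            return (x0);
    }
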
diff --git a/sys/arm64/vmm/vmm_nvhe_exception.S b/sys/arm64/vmm/vmm_nvhe_exception.S
new file mode 100644
index 000000000000..17bc4cb70366
--- /dev/null
+++ b/sys/arm64/vmm/vmm_nvhe_exception.S
@@ -0,0 +1,120 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define VMM_HYP_FUNC(func) vmm_nvhe_ ## func
+
+#include "vmm_hyp_exception.S"
+
+ .section ".vmm_vectors","ax"
+ .align 11
+hyp_init_vectors:
+ vempty /* Synchronous EL2t */
+ vempty /* IRQ EL2t */
+ vempty /* FIQ EL2t */
+ vempty /* Error EL2t */
+
+ vempty /* Synchronous EL2h */
+ vempty /* IRQ EL2h */
+ vempty /* FIQ EL2h */
+ vempty /* Error EL2h */
+
+ vector hyp_init /* Synchronous 64-bit EL1 */
+ vempty /* IRQ 64-bit EL1 */
+ vempty /* FIQ 64-bit EL1 */
+ vempty /* Error 64-bit EL1 */
+
+ vempty /* Synchronous 32-bit EL1 */
+ vempty /* IRQ 32-bit EL1 */
+ vempty /* FIQ 32-bit EL1 */
+ vempty /* Error 32-bit EL1 */
+
+ .text
+
+/*
+ * Initialize the hypervisor mode with a new exception vector table, translation
+ * table and stack.
+ *
+ * Expecting:
+ * x0 - translation tables physical address
+ * x1 - stack top virtual address
+ * x2 - TCR_EL2 value
+ * x3 - SCTLR_EL2 value
+ * x4 - VTCR_EL2 value
+ */
+LENTRY(handle_hyp_init)
+ /* Install the new exception vectors */
+ adrp x6, hyp_vectors
+ add x6, x6, :lo12:hyp_vectors
+ msr vbar_el2, x6
+ /* Set the stack top address */
+ mov sp, x1
+ /* Use the host VTTBR_EL2 to tell the host and the guests apart */
+ mov x9, #VTTBR_HOST
+ msr vttbr_el2, x9
+ /* Load the base address for the translation tables */
+ msr ttbr0_el2, x0
+ /* Invalidate the TLB */
+ dsb ish
+ tlbi alle2
+ dsb ishst
+ isb
+ /* Use the same memory attributes as EL1 */
+ mrs x9, mair_el1
+ msr mair_el2, x9
+ /* Configure address translation */
+ msr tcr_el2, x2
+ isb
+ /* Set the system control register for EL2 */
+ msr sctlr_el2, x3
+ /* Set the Stage 2 translation control register */
+ msr vtcr_el2, x4
+ /* Return success */
+ mov x0, #0
+ /* MMU is up and running */
+ ERET
+LEND(handle_hyp_init)
+
+/*
+ * Usage:
+ * void vmm_cleanup(uint64_t handle, void *hyp_stub_vectors)
+ *
+ * Expecting:
+ * x1 - physical address of hyp_stub_vectors
+ */
+LENTRY(vmm_cleanup)
+ /* Restore the stub vectors */
+ msr vbar_el2, x1
+
+ /* Disable the MMU */
+ dsb sy
+ mrs x2, sctlr_el2
+ bic x2, x2, #SCTLR_EL2_M
+ msr sctlr_el2, x2
+ isb
+
+ ERET
+LEND(vmm_cleanup)
diff --git a/sys/arm64/vmm/vmm_reset.c b/sys/arm64/vmm/vmm_reset.c
index a929a60c9474..79d022cf33e8 100644
--- a/sys/arm64/vmm/vmm_reset.c
+++ b/sys/arm64/vmm/vmm_reset.c
@@ -136,7 +136,12 @@ reset_vm_el2_regs(void *vcpu)
*/
el2ctx->hcr_el2 = HCR_RW | HCR_TID3 | HCR_TWI | HCR_BSU_IS | HCR_FB |
HCR_AMO | HCR_IMO | HCR_FMO | HCR_SWIO | HCR_VM;
+ if (in_vhe()) {
+ el2ctx->hcr_el2 |= HCR_E2H;
+ }
+ /* Set the Extended Hypervisor Configuration Register */
+ el2ctx->hcrx_el2 = 0;
/* TODO: Trap all extensions we don't support */
el2ctx->mdcr_el2 = 0;
/* PMCR_EL0.N is read from MDCR_EL2.HPMN */
@@ -166,7 +171,11 @@ reset_vm_el2_regs(void *vcpu)
* Don't trap accesses to CPACR_EL1, trace, SVE, Advanced SIMD
* and floating point functionality to EL2.
*/
- el2ctx->cptr_el2 = CPTR_RES1;
+ if (in_vhe())
+ el2ctx->cptr_el2 = CPTR_E2H_TRAP_ALL | CPTR_E2H_FPEN;
+ else
+ el2ctx->cptr_el2 = CPTR_TRAP_ALL & ~CPTR_TFP;
+ el2ctx->cptr_el2 &= ~CPTR_TCPAC;
/*
* Disable interrupts in the guest. The guest OS will re-enable
* them.
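
With HCR_EL2.E2H set, CPTR_EL2 switches to a CPACR_EL1-like layout, so floating point is enabled through the FPEN field rather than by clearing a trap bit; that is why the VHE and non-VHE branches above use different constants. A sketch of the distinction, assuming the architectural bit positions:

    /* Assumed architectural encodings, for illustration only. */
    #define CPTR_E2H_FPEN_SKETCH  (0x3UL << 20)  /* E2H=1: FPEN field, FP untrapped */
    #define CPTR_TFP_SKETCH       (0x1UL << 10)  /* E2H=0: trap FP when set */
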
diff --git a/sys/arm64/vmm/vmm_stat.c b/sys/arm64/vmm/vmm_stat.c
deleted file mode 100644
index 858ce980843a..000000000000
--- a/sys/arm64/vmm/vmm_stat.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2011 NetApp, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-
-#include <sys/param.h>
-#include <sys/kernel.h>
-#include <sys/systm.h>
-#include <sys/malloc.h>
-
-#include <machine/machdep.h>
-#include <machine/vmm.h>
-#include "vmm_stat.h"
-
-/*
- * 'vst_num_elems' is the total number of addressable statistic elements
- * 'vst_num_types' is the number of unique statistic types
- *
- * It is always true that 'vst_num_elems' is greater than or equal to
- * 'vst_num_types'. This is because a stat type may represent more than
- * one element (for e.g. VMM_STAT_ARRAY).
- */
-static int vst_num_elems, vst_num_types;
-static struct vmm_stat_type *vsttab[MAX_VMM_STAT_ELEMS];
-
-static MALLOC_DEFINE(M_VMM_STAT, "vmm stat", "vmm stat");
-
-#define vst_size ((size_t)vst_num_elems * sizeof(uint64_t))
-
-void
-vmm_stat_register(void *arg)
-{
- struct vmm_stat_type *vst = arg;
-
- /* We require all stats to identify themselves with a description */
- if (vst->desc == NULL)
- return;
-
- if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {
- printf("Cannot accommodate vmm stat type \"%s\"!\n", vst->desc);
- return;
- }
-
- vst->index = vst_num_elems;
- vst_num_elems += vst->nelems;
-
- vsttab[vst_num_types++] = vst;
-}
-
-int
-vmm_stat_copy(struct vcpu *vcpu, int index, int count, int *num_stats,
- uint64_t *buf)
-{
- struct vmm_stat_type *vst;
- uint64_t *stats;
- int i, tocopy;
-
- if (index < 0 || count < 0)
- return (EINVAL);
-
- if (index > vst_num_elems)
- return (ENOENT);
-
- if (index == vst_num_elems) {
- *num_stats = 0;
- return (0);
- }
-
- tocopy = min(vst_num_elems - index, count);
-
- /* Let stats functions update their counters */
- for (i = 0; i < vst_num_types; i++) {
- vst = vsttab[i];
- if (vst->func != NULL)
- (*vst->func)(vcpu, vst);
- }
-
- /* Copy over the stats */
- stats = vcpu_stats(vcpu);
- memcpy(buf, stats + index, tocopy * sizeof(stats[0]));
- *num_stats = tocopy;
- return (0);
-}
-
-void *
-vmm_stat_alloc(void)
-{
-
- return (malloc(vst_size, M_VMM_STAT, M_WAITOK));
-}
-
-void
-vmm_stat_init(void *vp)
-{
-
- bzero(vp, vst_size);
-}
-
-void
-vmm_stat_free(void *vp)
-{
- free(vp, M_VMM_STAT);
-}
-
-int
-vmm_stat_desc_copy(int index, char *buf, int bufsize)
-{
- int i;
- struct vmm_stat_type *vst;
-
- for (i = 0; i < vst_num_types; i++) {
- vst = vsttab[i];
- if (index >= vst->index && index < vst->index + vst->nelems) {
- if (vst->nelems > 1) {
- snprintf(buf, bufsize, "%s[%d]",
- vst->desc, index - vst->index);
- } else {
- strlcpy(buf, vst->desc, bufsize);
- }
- return (0); /* found it */
- }
- }
-
- return (EINVAL);
-}
-
-/* global statistics */
-VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
-VMM_STAT(VMEXIT_UNKNOWN, "number of vmexits for the unknown exception");
-VMM_STAT(VMEXIT_WFI, "number of times wfi was intercepted");
-VMM_STAT(VMEXIT_WFE, "number of times wfe was intercepted");
-VMM_STAT(VMEXIT_HVC, "number of times hvc was intercepted");
-VMM_STAT(VMEXIT_MSR, "number of times msr/mrs was intercepted");
-VMM_STAT(VMEXIT_DATA_ABORT, "number of vmexits for a data abort");
-VMM_STAT(VMEXIT_INSN_ABORT, "number of vmexits for an instruction abort");
-VMM_STAT(VMEXIT_UNHANDLED_SYNC, "number of vmexits for an unhandled synchronous exception");
-VMM_STAT(VMEXIT_IRQ, "number of vmexits for an irq");
-VMM_STAT(VMEXIT_FIQ, "number of vmexits for an interrupt");
-VMM_STAT(VMEXIT_UNHANDLED_EL2, "number of vmexits for an unhandled EL2 exception");
-VMM_STAT(VMEXIT_UNHANDLED, "number of vmexits for an unhandled exception");
diff --git a/sys/arm64/vmm/vmm_stat.h b/sys/arm64/vmm/vmm_stat.h
index b0a06ef79253..0dc3eeced603 100644
--- a/sys/arm64/vmm/vmm_stat.h
+++ b/sys/arm64/vmm/vmm_stat.h
@@ -32,102 +32,7 @@
#ifndef _VMM_STAT_H_
#define _VMM_STAT_H_
-struct vm;
-
-#define MAX_VMM_STAT_ELEMS 64 /* arbitrary */
-
-enum vmm_stat_scope {
- VMM_STAT_SCOPE_ANY,
-};
-
-struct vmm_stat_type;
-typedef void (*vmm_stat_func_t)(struct vcpu *vcpu,
- struct vmm_stat_type *stat);
-
-struct vmm_stat_type {
- int index; /* position in the stats buffer */
- int nelems; /* standalone or array */
- const char *desc; /* description of statistic */
- vmm_stat_func_t func;
- enum vmm_stat_scope scope;
-};
-
-void vmm_stat_register(void *arg);
-
-#define VMM_STAT_FDEFINE(type, nelems, desc, func, scope) \
- struct vmm_stat_type type[1] = { \
- { -1, nelems, desc, func, scope } \
- }; \
- SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
-
-#define VMM_STAT_DEFINE(type, nelems, desc, scope) \
- VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
-
-#define VMM_STAT_DECLARE(type) \
- extern struct vmm_stat_type type[1]
-
-#define VMM_STAT(type, desc) \
- VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
-
-#define VMM_STAT_FUNC(type, desc, func) \
- VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)
-
-#define VMM_STAT_ARRAY(type, nelems, desc) \
- VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
-
-void *vmm_stat_alloc(void);
-void vmm_stat_init(void *vp);
-void vmm_stat_free(void *vp);
-
-int vmm_stat_copy(struct vcpu *vcpu, int index, int count,
- int *num_stats, uint64_t *buf);
-int vmm_stat_desc_copy(int index, char *buf, int buflen);
-
-static void __inline
-vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
- uint64_t x)
-{
-#ifdef VMM_KEEP_STATS
- uint64_t *stats;
-
- stats = vcpu_stats(vcpu);
-
- if (vst->index >= 0 && statidx < vst->nelems)
- stats[vst->index + statidx] += x;
-#endif
-}
-
-static void __inline
-vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
- uint64_t val)
-{
-#ifdef VMM_KEEP_STATS
- uint64_t *stats;
-
- stats = vcpu_stats(vcpu);
-
- if (vst->index >= 0 && statidx < vst->nelems)
- stats[vst->index + statidx] = val;
-#endif
-}
-
-static void __inline
-vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
-{
-
-#ifdef VMM_KEEP_STATS
- vmm_stat_array_incr(vcpu, vst, 0, x);
-#endif
-}
-
-static void __inline
-vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
-{
-
-#ifdef VMM_KEEP_STATS
- vmm_stat_array_set(vcpu, vst, 0, val);
-#endif
-}
+#include <dev/vmm/vmm_stat.h>
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
@@ -140,6 +45,9 @@ VMM_STAT_DECLARE(VMEXIT_INSN_ABORT);
VMM_STAT_DECLARE(VMEXIT_UNHANDLED_SYNC);
VMM_STAT_DECLARE(VMEXIT_IRQ);
VMM_STAT_DECLARE(VMEXIT_FIQ);
+VMM_STAT_DECLARE(VMEXIT_BRK);
+VMM_STAT_DECLARE(VMEXIT_SS);
VMM_STAT_DECLARE(VMEXIT_UNHANDLED_EL2);
VMM_STAT_DECLARE(VMEXIT_UNHANDLED);
+
#endif
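
The macro definitions removed above now come from dev/vmm/vmm_stat.h. Based on those removed definitions, a minimal usage sketch (the stat name is hypothetical):

    /* Hypothetical counter, shaped like the VMEXIT_* stats above. */
    VMM_STAT(VMEXIT_EXAMPLE, "number of example vmexits");

    static void
    count_example_exit(struct vcpu *vcpu)
    {
            /* Compiles to a no-op unless built with VMM_KEEP_STATS. */
            vmm_stat_incr(vcpu, VMEXIT_EXAMPLE, 1);
    }
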
diff --git a/sys/arm64/include/runq.h b/sys/arm64/vmm/vmm_vhe.c
index 5076bd9169df..8a12852e2a7a 100644
--- a/sys/arm64/include/runq.h
+++ b/sys/arm64/vmm/vmm_vhe.c
@@ -1,6 +1,7 @@
/*-
- * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,7 +15,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -24,27 +25,15 @@
* SUCH DAMAGE.
*/
-#ifdef __arm__
-#include <arm/runq.h>
-#else /* !__arm__ */
-
-#ifndef _MACHINE_RUNQ_H_
-#define _MACHINE_RUNQ_H_
-
-#define RQB_LEN (1) /* Number of priority status words. */
-#define RQB_L2BPW (6) /* Log2(sizeof(rqb_word_t) * NBBY)). */
-#define RQB_BPW (1<<RQB_L2BPW) /* Bits in an rqb_word_t. */
+#include "vmm_handlers.h"
-#define RQB_BIT(pri) (1ul << ((pri) & (RQB_BPW - 1)))
-#define RQB_WORD(pri) ((pri) >> RQB_L2BPW)
+#define VMM_VHE
-#define RQB_FFS(word) (ffsl(word) - 1)
-
-/*
- * Type of run queue status word.
- */
-typedef unsigned long rqb_word_t;
+#define VMM_STATIC
+#define VMM_HYP_FUNC(func) vmm_vhe_ ## func
-#endif
+#define guest_or_nonvhe(guest) (guest)
+#define EL1_REG(reg) MRS_REG_ALT_NAME(reg ## _EL12)
+#define EL0_REG(reg) MRS_REG_ALT_NAME(reg ## _EL02)
-#endif /* !__arm__ */
+#include "vmm_hyp.c"
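
vmm_hyp.c is thus compiled twice: here with the *_EL12/*_EL02 aliases and in vmm_nvhe.c with the plain *_EL1/*_EL0 names, so each shared register access expands to the form the current build needs. For example (expansion shown for illustration; MRS_REG_ALT_NAME resolves the alias in the real headers):

    /* In vmm_vhe.c:  EL1_REG(SCTLR) -> MRS_REG_ALT_NAME(SCTLR_EL12) */
    /* In vmm_nvhe.c: EL1_REG(SCTLR) -> MRS_REG_ALT_NAME(SCTLR_EL1)  */
    hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
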
diff --git a/sys/arm64/vmm/vmm_vhe_exception.S b/sys/arm64/vmm/vmm_vhe_exception.S
new file mode 100644
index 000000000000..286f5df03707
--- /dev/null
+++ b/sys/arm64/vmm/vmm_vhe_exception.S
@@ -0,0 +1,31 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define VMM_VHE
+#define VMM_HYP_FUNC(func) vmm_vhe_ ## func
+
+#include "vmm_hyp_exception.S"