Diffstat (limited to 'sys/arm64')
-rw-r--r--  sys/arm64/arm64/cpu_errata.c                     |  96
-rw-r--r--  sys/arm64/arm64/db_disasm.c                      |   1
-rw-r--r--  sys/arm64/arm64/elf32_machdep.c                  |   2
-rw-r--r--  sys/arm64/arm64/elf_machdep.c                    |   7
-rw-r--r--  sys/arm64/arm64/kexec_support.c                  | 188
-rw-r--r--  sys/arm64/arm64/locore.S                         |  44
-rw-r--r--  sys/arm64/arm64/mp_machdep.c                     |  78
-rw-r--r--  sys/arm64/arm64/spec_workaround.c                | 166
-rw-r--r--  sys/arm64/conf/std.arm                           |   3
-rw-r--r--  sys/arm64/coresight/coresight.c                  |   2
-rw-r--r--  sys/arm64/include/_armreg.h                      |  57
-rw-r--r--  sys/arm64/include/armreg.h                       |  34
-rw-r--r--  sys/arm64/include/cpu.h                          |   2
-rw-r--r--  sys/arm64/include/cpufunc.h                      |   7
-rw-r--r--  sys/arm64/include/db_machdep.h                   |   1
-rw-r--r--  sys/arm64/include/hypervisor.h                   |   2
-rw-r--r--  sys/arm64/include/kexec.h                        |  33
-rw-r--r--  sys/arm64/include/pcpu.h                         |   3
-rw-r--r--  sys/arm64/include/smp.h                          |   1
-rw-r--r--  sys/arm64/include/vmm.h                          |  34
-rw-r--r--  sys/arm64/linux/linux_sysvec.c                   |  10
-rw-r--r--  sys/arm64/nvidia/tegra210/max77620_regulators.c  |   4
-rw-r--r--  sys/arm64/vmm/arm64.h                            |  31
-rw-r--r--  sys/arm64/vmm/io/vgic_v3.c                       |   1
-rw-r--r--  sys/arm64/vmm/io/vtimer.c                        |   1
-rw-r--r--  sys/arm64/vmm/vmm.c                              |  47
-rw-r--r--  sys/arm64/vmm/vmm_arm64.c                        |   1
-rw-r--r--  sys/arm64/vmm/vmm_dev_machdep.c                  |  43
-rw-r--r--  sys/arm64/vmm/vmm_hyp.c                          |   1
-rw-r--r--  sys/arm64/vmm/vmm_reset.c                        |   1
30 files changed, 688 insertions, 213 deletions
diff --git a/sys/arm64/arm64/cpu_errata.c b/sys/arm64/arm64/cpu_errata.c
index 989924bc0567..b876703a2a15 100644
--- a/sys/arm64/arm64/cpu_errata.c
+++ b/sys/arm64/arm64/cpu_errata.c
@@ -52,56 +52,11 @@ struct cpu_quirks {
u_int flags;
};
-static enum {
- SSBD_FORCE_ON,
- SSBD_FORCE_OFF,
- SSBD_KERNEL,
-} ssbd_method = SSBD_KERNEL;
-
-static cpu_quirk_install install_psci_bp_hardening;
-static cpu_quirk_install install_ssbd_workaround;
static cpu_quirk_install install_thunderx_bcast_tlbi_workaround;
static struct cpu_quirks cpu_quirks[] = {
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value =
- CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = 0,
- .midr_value = 0,
- .quirk_install = install_ssbd_workaround,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value =
CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0),
.quirk_install = install_thunderx_bcast_tlbi_workaround,
@@ -114,57 +69,6 @@ static struct cpu_quirks cpu_quirks[] = {
},
};
-static void
-install_psci_bp_hardening(void)
-{
- /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
- if (!psci_present)
- return;
-
- if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) != SMCCC_RET_SUCCESS)
- return;
-
- PCPU_SET(bp_harden, smccc_arch_workaround_1);
-}
-
-static void
-install_ssbd_workaround(void)
-{
- char *env;
-
- if (PCPU_GET(cpuid) == 0) {
- env = kern_getenv("kern.cfg.ssbd");
- if (env != NULL) {
- if (strcmp(env, "force-on") == 0) {
- ssbd_method = SSBD_FORCE_ON;
- } else if (strcmp(env, "force-off") == 0) {
- ssbd_method = SSBD_FORCE_OFF;
- }
- }
- }
-
- /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
- if (!psci_present)
- return;
-
- /* Enable the workaround on this CPU if it's enabled in the firmware */
- if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
- return;
-
- switch(ssbd_method) {
- case SSBD_FORCE_ON:
- smccc_arch_workaround_2(1);
- break;
- case SSBD_FORCE_OFF:
- smccc_arch_workaround_2(0);
- break;
- case SSBD_KERNEL:
- default:
- PCPU_SET(ssbd, smccc_arch_workaround_2);
- break;
- }
-}
-
/*
* Workaround Cavium erratum 27456.
*
diff --git a/sys/arm64/arm64/db_disasm.c b/sys/arm64/arm64/db_disasm.c
index ab1002560b20..14ae2acc2ce6 100644
--- a/sys/arm64/arm64/db_disasm.c
+++ b/sys/arm64/arm64/db_disasm.c
@@ -31,6 +31,7 @@
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
+#include <machine/armreg.h>
#include <machine/disassem.h>
static u_int db_disasm_read_word(vm_offset_t);
diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c
index 8f8a934ad520..4cb8ee5f57ef 100644
--- a/sys/arm64/arm64/elf32_machdep.c
+++ b/sys/arm64/arm64/elf32_machdep.c
@@ -210,7 +210,7 @@ freebsd32_fetch_syscall_args(struct thread *td)
sa->code = *ap++;
nap--;
} else if (sa->code == SYS___syscall) {
- sa->code = ap[1];
+ sa->code = ap[_QUAD_LOWWORD];
nap -= 2;
ap += 2;
}
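
For context: the 32-bit __syscall(2) entry passes the syscall number as a 64-bit
quantity spread across two 32-bit argument slots, so the half that holds the
number depends on byte order. FreeBSD's machine-dependent endian header captures
that as _QUAD_LOWWORD (0 on little-endian, 1 on big-endian), which a hard-coded
ap[1] does not. A minimal stand-alone sketch of the idea (the macro below is a
stand-in for illustration, not the kernel header):

    #include <stdint.h>

    /* Illustrative stand-in for FreeBSD's _QUAD_LOWWORD. */
    #if defined(__AARCH64EB__) || defined(__ARMEB__)
    #define _QUAD_LOWWORD 1    /* big-endian: low word is second in memory */
    #else
    #define _QUAD_LOWWORD 0    /* little-endian: low word comes first */
    #endif

    /* Fetch the low half of a 64-bit value stored in two 32-bit slots. */
    static uint32_t
    quad_low(const uint32_t *ap)
    {
        return (ap[_QUAD_LOWWORD]);
    }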
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
index 13af5c5065d6..207b37180a26 100644
--- a/sys/arm64/arm64/elf_machdep.c
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -121,7 +121,7 @@ static struct sysentvec elf64_freebsd_sysvec = {
};
INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec);
-static Elf64_Brandinfo freebsd_brand_info = {
+static const Elf64_Brandinfo freebsd_brand_info = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_AARCH64,
.compat_3_brand = "FreeBSD",
@@ -131,8 +131,7 @@ static Elf64_Brandinfo freebsd_brand_info = {
.brand_note = &elf64_freebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-
-SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+C_SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
(sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info);
static bool
@@ -336,7 +335,7 @@ elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused)
return (0);
}
-static Elf_Note gnu_property_note = {
+static const Elf_Note gnu_property_note = {
.n_namesz = sizeof(GNU_ABI_VENDOR),
.n_descsz = 16,
.n_type = NT_GNU_PROPERTY_TYPE_0,
diff --git a/sys/arm64/arm64/kexec_support.c b/sys/arm64/arm64/kexec_support.c
new file mode 100644
index 000000000000..8b9719c05b67
--- /dev/null
+++ b/sys/arm64/arm64/kexec_support.c
@@ -0,0 +1,188 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Juniper Networks, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/kexec.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_radix.h>
+#include <vm/pmap.h>
+#include <vm/vm_page.h>
+
+#include <machine/armreg.h>
+#include <machine/pmap.h>
+#include <machine/pte.h>
+
+/*
+ * Idea behind this:
+ *
+ * kexec_load_md():
+ * - Update boot page tables (identity map) to include all pages needed before
+ * disabling MMU.
+ *
+ * kexec_reboot_md():
+ * - Copy pages into target(s)
+ * - Do "other stuff"
+ * - Does not return
+ */
+
+extern pt_entry_t pagetable_l0_ttbr0_bootstrap[];
+extern unsigned long initstack_end[];
+void switch_stack(void *, void (*)(void *, void *, struct kexec_image *), void *);
+
+#define SCTLR_EL1_NO_MMU (SCTLR_RES1 | SCTLR_LSMAOE | SCTLR_nTLSMD | \
+ SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
+#define vm_page_offset(m) ((vm_offset_t)(m) - vm_page_base)
+static inline vm_page_t
+phys_vm_page(vm_page_t m, vm_offset_t vm_page_v, vm_paddr_t vm_page_p)
+{
+ return ((vm_page_t)((vm_offset_t)m - vm_page_v + vm_page_p));
+}
+
+/* First 2 args are filler for switch_stack() */
+static void __aligned(16) __dead2
+kexec_reboot_bottom(void *arg1 __unused, void *arg2 __unused,
+ struct kexec_image *image)
+{
+ void (*e)(void) = (void *)image->entry;
+ vm_offset_t vm_page_base = (vm_offset_t)vm_page_array;
+ vm_paddr_t vm_page_phys = pmap_kextract((vm_offset_t)vm_page_array);
+ struct kexec_segment_stage *phys_segs =
+ (void *)pmap_kextract((vm_offset_t)&image->segments);
+ vm_paddr_t from_pa, to_pa;
+ vm_size_t size;
+ vm_page_t first, m, mp;
+ struct pctrie_iter pct_i;
+
+ /*
+ * Create a linked list of all pages in the object before we disable the
+ * MMU. Once the MMU is disabled we can't use the vm_radix iterators,
+ * as they rely on virtual address pointers.
+ */
+ first = NULL;
+ vm_radix_iter_init(&pct_i, &image->map_obj->rtree);
+ VM_RADIX_FORALL(m, &pct_i) {
+ if (first == NULL)
+ first = m;
+ else
+ SLIST_INSERT_AFTER(mp, m, plinks.s.ss);
+ mp = m;
+ }
+
+ /*
+ * We're running out of the identity map now, disable the MMU before we
+ * continue. It's possible page tables can be overwritten, which would
+ * be very bad if we were running with the MMU enabled.
+ */
+ WRITE_SPECIALREG(sctlr_el1, SCTLR_EL1_NO_MMU);
+ isb();
+ for (int i = 0; i < KEXEC_SEGMENT_MAX; i++) {
+ if (phys_segs[i].size == 0)
+ break;
+ to_pa = phys_segs[i].target;
+ /* Copy the segment here... */
+ for (vm_page_t p = phys_segs[i].first_page;
+ p != NULL && to_pa - phys_segs[i].target < phys_segs[i].size;
+ p = SLIST_NEXT(p, plinks.s.ss)) {
+ p = phys_vm_page(p, vm_page_base, vm_page_phys);
+ from_pa = p->phys_addr;
+ if (p->phys_addr == to_pa) {
+ to_pa += PAGE_SIZE;
+ continue;
+ }
+ for (size = PAGE_SIZE / sizeof(register_t);
+ size > 0; --size) {
+ *(register_t *)to_pa = *(register_t *)from_pa;
+ to_pa += sizeof(register_t);
+ from_pa += sizeof(register_t);
+ }
+ }
+ }
+ invalidate_icache();
+ e();
+ while (1)
+ ;
+}
+
+void
+kexec_reboot_md(struct kexec_image *image)
+{
+ uintptr_t ptr;
+ register_t reg;
+
+ for (int i = 0; i < KEXEC_SEGMENT_MAX; i++) {
+ if (image->segments[i].size > 0)
+ cpu_dcache_inv_range((void *)PHYS_TO_DMAP(image->segments[i].target),
+ image->segments[i].size);
+ }
+ ptr = pmap_kextract((vm_offset_t)kexec_reboot_bottom);
+ serror_disable();
+
+ reg = pmap_kextract((vm_offset_t)pagetable_l0_ttbr0_bootstrap);
+ set_ttbr0(reg);
+ cpu_tlb_flushID();
+
+ typeof(kexec_reboot_bottom) *p = (void *)ptr;
+ switch_stack((void *)pmap_kextract((vm_offset_t)initstack_end),
+ p, image);
+ while (1)
+ ;
+}
+
+int
+kexec_load_md(struct kexec_image *image)
+{
+ vm_paddr_t tmp;
+ pt_entry_t *pte;
+
+ /* Create L2 page blocks for the trampoline. L0/L1 are from the startup. */
+
+ /*
+	 * There are exactly 2 pages of L2 entries immediately before
+	 * pagetable_l0_ttbr0_bootstrap, so step back to their start.
+	 */
+ pte = pagetable_l0_ttbr0_bootstrap;
+ pte -= (Ln_ENTRIES * 2); /* move to start of L2 pages */
+
+ /*
+ * Populate the identity map with symbols we know we'll need before we
+ * turn off the MMU.
+ */
+ tmp = pmap_kextract((vm_offset_t)kexec_reboot_bottom);
+ pte[pmap_l2_index(tmp)] = (tmp | L2_BLOCK | ATTR_AF | ATTR_S1_UXN);
+ tmp = pmap_kextract((vm_offset_t)initstack_end);
+ pte[pmap_l2_index(tmp)] = (tmp | L2_BLOCK | ATTR_AF | ATTR_S1_UXN);
+ /* We'll need vm_page_array for doing offset calculations. */
+ tmp = pmap_kextract((vm_offset_t)&vm_page_array);
+ pte[pmap_l2_index(tmp)] = (tmp | L2_BLOCK | ATTR_AF | ATTR_S1_UXN);
+
+ return (0);
+}
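
kexec_load_md() above installs 2 MiB identity-mapped L2 blocks for the few
symbols the trampoline touches once the MMU is off. The index arithmetic it
leans on looks roughly like this (constants are the usual 4 KiB-granule values;
a sketch, not the pmap implementation):

    #include <stdint.h>

    #define L2_SHIFT     21                 /* one L2 block maps 2 MiB */
    #define Ln_ENTRIES   512
    #define Ln_ADDR_MASK (Ln_ENTRIES - 1)

    /* Slot within an L2 table covering a given identity-mapped address. */
    static unsigned
    l2_index(uint64_t pa)
    {
        return ((pa >> L2_SHIFT) & Ln_ADDR_MASK);
    }

kexec_load_md() then ORs the physical address with L2_BLOCK | ATTR_AF |
ATTR_S1_UXN and stores it at that slot, as seen above.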
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index d35e334905a7..3ec12140f139 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -325,6 +325,19 @@ mp_virtdone:
b init_secondary
LEND(mpentry_common)
+
+ENTRY(mp_cpu_spinloop)
+0:
+ wfe
+ ldr x0, mp_cpu_spin_table_release_addr
+ cbz x0, 0b
+ blr x0
+ .globl mp_cpu_spin_table_release_addr
+mp_cpu_spin_table_release_addr:
+ .quad 0
+ .globl mp_cpu_spinloop_end
+mp_cpu_spinloop_end:
+END(mp_cpu_spinloop)
#endif
/*
@@ -475,6 +488,29 @@ LENTRY(enter_kernel_el)
eret
LEND(enter_kernel_el)
+/*
+ * Turn off the MMU: install ttbr0 from the bootstrap page table and continue
+ * from its identity mapping.  Does not return.
+ * - x0 - target address to jump to after stopping the MMU
+ * - x1 - kernel load address
+ */
+ENTRY(stop_mmu)
+ mov x16, x0 /* Save target. */
+ ldr x2, =(1f - KERNBASE)
+ add x17, x1, x2
+ ldr x3, =(pagetable_l0_ttbr0_bootstrap - KERNBASE)
+ add x1, x1, x3
+ msr ttbr0_el1, x1
+ isb
+ br x17
+1:
+ BTI_J
+ mrs x0, sctlr_el1
+ bic x0, x0, SCTLR_M
+ bic x0, x0, SCTLR_C
+ msr sctlr_el1, x0
+ isb
+ br x16
+END(stop_mmu)
/*
* Get the physical address the kernel was loaded at.
*/
@@ -1094,12 +1130,19 @@ tcr:
TCR_SH0_IS | TCR_ORGN0_WBWA | TCR_IRGN0_WBWA)
LEND(start_mmu)
+ENTRY(switch_stack)
+ mov sp, x0
+ mov x16, x1
+ br x16
+END(switch_stack)
+
ENTRY(abort)
b abort
END(abort)
.bss
.align PAGE_SHIFT
+ .globl initstack_end
initstack:
.space BOOT_STACK_SIZE
initstack_end:
@@ -1116,6 +1159,7 @@ initstack_end:
* L0 for user
*/
.globl pagetable_l0_ttbr1
+ .globl pagetable_l0_ttbr0_bootstrap
pagetable:
pagetable_l3_ttbr1:
.space (PAGE_SIZE * L3_PAGE_COUNT)
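
For readers who don't speak A64, mp_cpu_spinloop above is the stub a parked
secondary CPU executes; in rough C it amounts to the following (illustrative
only, the authoritative version is the assembly):

    #include <stdint.h>

    extern volatile uint64_t mp_cpu_spin_table_release_addr; /* the .quad slot */

    static void
    spinloop(void)
    {
        uint64_t entry;

        for (;;) {
            __asm __volatile("wfe");        /* doze until an event (sev) */
            entry = mp_cpu_spin_table_release_addr;
            if (entry != 0)                 /* cbz x0, 0b */
                ((void (*)(void))entry)();  /* blr x0 */
        }
    }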
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
index e4d011df3a06..0bdd2ecfd8a7 100644
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -60,6 +60,7 @@
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
+#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
@@ -103,6 +104,7 @@ static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);
+static void ipi_off(void *);
#ifdef FDT
static u_int fdt_cpuid;
@@ -193,6 +195,7 @@ release_aps(void *dummy __unused)
intr_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
intr_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
+ intr_ipi_setup(IPI_OFF, "off", ipi_off, NULL);
atomic_store_int(&aps_started, 0);
atomic_store_rel_int(&aps_ready, 1);
@@ -390,6 +393,34 @@ ipi_stop(void *dummy __unused)
CTR0(KTR_SMP, "IPI_STOP (restart)");
}
+void stop_mmu(vm_paddr_t, vm_paddr_t) __dead2;
+extern uint32_t mp_cpu_spinloop[];
+extern uint32_t mp_cpu_spinloop_end[];
+extern uint64_t mp_cpu_spin_table_release_addr;
+static void
+ipi_off(void *dummy __unused)
+{
+ CTR0(KTR_SMP, "IPI_OFF");
+ if (psci_present)
+ psci_cpu_off();
+ else {
+ uint64_t release_addr;
+ vm_size_t size;
+
+ size = (vm_offset_t)&mp_cpu_spin_table_release_addr -
+ (vm_offset_t)mp_cpu_spinloop;
+ release_addr = PCPU_GET(release_addr) - size;
+ isb();
+ invalidate_icache();
+ /* Go catatonic, don't take any interrupts. */
+ intr_disable();
+ stop_mmu(release_addr, pmap_kextract(KERNBASE));
+ }
+ CTR0(KTR_SMP, "IPI_OFF failed");
+}
+
struct cpu_group *
cpu_topo(void)
{
@@ -511,6 +542,7 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
bootpcpu = pcpup;
+ pcpup->pc_release_addr = release_addr;
dpcpu[cpuid - 1] = (void *)(pcpup + 1);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
@@ -752,6 +784,52 @@ cpu_mp_start(void)
}
}
+void
+cpu_mp_stop(void)
+{
+
+ /* Short-circuit for single-CPU */
+ if (CPU_COUNT(&all_cpus) == 1)
+ return;
+
+ KASSERT(PCPU_GET(cpuid) == CPU_FIRST(), ("Not on the first CPU!\n"));
+
+ /*
+ * If we use spin-table, assume U-boot method for now (single address
+ * shared by all CPUs).
+ */
+ if (!psci_present) {
+ int cpu;
+ vm_paddr_t release_addr;
+ void *release_vaddr;
+ vm_size_t size;
+
+ /* Find the shared release address. */
+ CPU_FOREACH(cpu) {
+ release_addr = pcpu_find(cpu)->pc_release_addr;
+ if (release_addr != 0)
+ break;
+ }
+ /* No release address? No way of notifying other CPUs. */
+ if (release_addr == 0)
+ return;
+
+ size = (vm_offset_t)&mp_cpu_spinloop_end -
+ (vm_offset_t)&mp_cpu_spinloop;
+
+ release_addr -= (vm_offset_t)&mp_cpu_spin_table_release_addr -
+ (vm_offset_t)mp_cpu_spinloop;
+
+ release_vaddr = pmap_mapdev(release_addr, size);
+ bcopy(mp_cpu_spinloop, release_vaddr, size);
+ cpu_dcache_wbinv_range(release_vaddr, size);
+ pmap_unmapdev(release_vaddr, size);
+ invalidate_icache();
+ }
+ ipi_all_but_self(IPI_OFF);
+ DELAY(1000000);
+}
+
/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
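
The pointer arithmetic in ipi_off() and cpu_mp_stop() is the subtle part: the
firmware-published release address must coincide with the .quad slot inside the
copied stub, not with its first instruction, so both sides subtract the slot's
offset. Schematically (slot_offset and stub_base are names invented here):

    size_t slot_offset = (uintptr_t)&mp_cpu_spin_table_release_addr -
        (uintptr_t)mp_cpu_spinloop;
    uint64_t stub_base = release_addr - slot_offset;

    /* cpu_mp_stop(): bcopy the stub to stub_base, so its .quad lands   */
    /*                exactly at release_addr.                          */
    /* ipi_off():     each AP turns off its MMU via stop_mmu() and      */
    /*                jumps to stub_base to spin in the relocated loop. */

Per the usual spin-table contract, the next kernel then writes its entry point
to release_addr and issues sev to wake the parked CPUs.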
diff --git a/sys/arm64/arm64/spec_workaround.c b/sys/arm64/arm64/spec_workaround.c
new file mode 100644
index 000000000000..7f4f86cdb48c
--- /dev/null
+++ b/sys/arm64/arm64/spec_workaround.c
@@ -0,0 +1,166 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Arm Ltd
+ * Copyright (c) 2018 Andrew Turner
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
+
+#include <dev/psci/psci.h>
+#include <dev/psci/smccc.h>
+
+static enum {
+ SSBD_FORCE_ON,
+ SSBD_FORCE_OFF,
+ SSBD_KERNEL,
+} ssbd_method = SSBD_KERNEL;
+
+struct psci_bp_hardening_impl {
+ u_int midr_mask;
+ u_int midr_value;
+};
+
+static struct psci_bp_hardening_impl psci_bp_hardening_impl[] = {
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value =
+ CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
+ }
+};
+
+static cpu_feat_en
+psci_bp_hardening_check(const struct cpu_feat *feat __unused, u_int midr)
+{
+ size_t i;
+
+ for (i = 0; i < nitems(psci_bp_hardening_impl); i++) {
+ if ((midr & psci_bp_hardening_impl[i].midr_mask) ==
+ psci_bp_hardening_impl[i].midr_value) {
+ /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
+ if (!psci_present)
+ return (FEAT_ALWAYS_DISABLE);
+
+ if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) !=
+ SMCCC_RET_SUCCESS)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
+ }
+ }
+
+ return (FEAT_ALWAYS_DISABLE);
+}
+
+static bool
+psci_bp_hardening_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ PCPU_SET(bp_harden, smccc_arch_workaround_1);
+
+ return (true);
+}
+
+CPU_FEAT(feat_csv2_missing, "Branch Predictor Hardening",
+ psci_bp_hardening_check, NULL, psci_bp_hardening_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+
+static cpu_feat_en
+ssbd_workaround_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+{
+ char *env;
+
+ if (PCPU_GET(cpuid) == 0) {
+ env = kern_getenv("kern.cfg.ssbd");
+ if (env != NULL) {
+ if (strcmp(env, "force-on") == 0) {
+ ssbd_method = SSBD_FORCE_ON;
+ } else if (strcmp(env, "force-off") == 0) {
+ ssbd_method = SSBD_FORCE_OFF;
+ }
+ }
+ }
+
+ /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
+ if (!psci_present)
+ return (FEAT_ALWAYS_DISABLE);
+
+ /* Enable the workaround on this CPU if it's enabled in the firmware */
+ if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
+}
+
+static bool
+ssbd_workaround_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ switch(ssbd_method) {
+ case SSBD_FORCE_ON:
+ smccc_arch_workaround_2(1);
+ break;
+ case SSBD_FORCE_OFF:
+ smccc_arch_workaround_2(0);
+ break;
+ case SSBD_KERNEL:
+ default:
+ PCPU_SET(ssbd, smccc_arch_workaround_2);
+ break;
+ }
+
+ return (true);
+}
+
+CPU_FEAT(feat_ssbs_missing, "Speculative Store Bypass Disable Workaround",
+ ssbd_workaround_check, NULL, ssbd_workaround_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
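
Policy selection is unchanged from the old cpu_errata.c code: the same
kern.cfg.ssbd tunable is consulted on the boot CPU, e.g. in /boot/loader.conf:

    kern.cfg.ssbd="force-on"    # or "force-off"; unset leaves the default
                                # SSBD_KERNEL (kernel-managed) behaviour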
diff --git a/sys/arm64/conf/std.arm b/sys/arm64/conf/std.arm
index fb5561506531..309059a096eb 100644
--- a/sys/arm64/conf/std.arm
+++ b/sys/arm64/conf/std.arm
@@ -21,3 +21,6 @@ device arm_doorbell # ARM Message Handling Unit (MHU)
options FDT
device acpi
+
+# DTBs
+makeoptions MODULES_EXTRA+="dtb/arm"
diff --git a/sys/arm64/coresight/coresight.c b/sys/arm64/coresight/coresight.c
index 5928c153f4ae..9b9d3c65ecc9 100644
--- a/sys/arm64/coresight/coresight.c
+++ b/sys/arm64/coresight/coresight.c
@@ -113,7 +113,7 @@ coresight_get_output_device(struct endpoint *endp, struct endpoint **out_endp)
}
static void
-coresight_init(void)
+coresight_init(void *dummy __unused)
{
mtx_init(&cs_mtx, "ARM Coresight", NULL, MTX_DEF);
diff --git a/sys/arm64/include/_armreg.h b/sys/arm64/include/_armreg.h
new file mode 100644
index 000000000000..0f5134e5a978
--- /dev/null
+++ b/sys/arm64/include/_armreg.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2015,2021 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if !defined(_MACHINE_ARMREG_H_) && \
+ !defined(_MACHINE_CPU_H_) && \
+ !defined(_MACHINE_HYPERVISOR_H_)
+#error Do not include this file directly
+#endif
+
+#ifndef _MACHINE__ARMREG_H_
+#define _MACHINE__ARMREG_H_
+
+#define __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
+ S##op0##_##op1##_C##crn##_C##crm##_##op2
+#define _MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
+ __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2)
+#define MRS_REG_ALT_NAME(reg) \
+ _MRS_REG_ALT_NAME(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
+
+
+#define READ_SPECIALREG(reg) \
+({ uint64_t _val; \
+ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (_val)); \
+ _val; \
+})
+#define WRITE_SPECIALREG(reg, _val) \
+ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)_val))
+
+#define UL(x) UINT64_C(x)
+
+#endif /* !_MACHINE__ARMREG_H_ */
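
The macros are moved verbatim from armreg.h so that cpu.h and hypervisor.h can
use them without pulling in the full register list. For reference:

    /* Read/write a system register by its architectural name: */
    uint64_t midr = READ_SPECIALREG(midr_el1);
    WRITE_SPECIALREG(tpidr_el1, midr);

    /*
     * MRS_REG_ALT_NAME() pastes the generic S<op0>_<op1>_C<CRn>_C<CRm>_<op2>
     * spelling for registers the assembler may not know by name; e.g. an
     * encoding of op0=3, op1=0, CRn=5, CRm=1, op2=0 yields S3_0_C5_C1_0.
     */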
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 393d6d89da0c..aa9b672ad85a 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -34,25 +34,9 @@
#ifndef _MACHINE_ARMREG_H_
#define _MACHINE_ARMREG_H_
-#define INSN_SIZE 4
-
-#define __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
- S##op0##_##op1##_C##crn##_C##crm##_##op2
-#define _MRS_REG_ALT_NAME(op0, op1, crn, crm, op2) \
- __MRS_REG_ALT_NAME(op0, op1, crn, crm, op2)
-#define MRS_REG_ALT_NAME(reg) \
- _MRS_REG_ALT_NAME(reg##_op0, reg##_op1, reg##_CRn, reg##_CRm, reg##_op2)
-
+#include <machine/_armreg.h>
-#define READ_SPECIALREG(reg) \
-({ uint64_t _val; \
- __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (_val)); \
- _val; \
-})
-#define WRITE_SPECIALREG(reg, _val) \
- __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)_val))
-
-#define UL(x) UINT64_C(x)
+#define INSN_SIZE 4
/* AFSR0_EL1 - Auxiliary Fault Status Register 0 */
#define AFSR0_EL1_REG MRS_REG_ALT_NAME(AFSR0_EL1)
@@ -2278,6 +2262,11 @@
#define PMBSR_DL (UL(0x1) << PMBSR_DL_SHIFT)
#define PMBSR_EC_SHIFT 26
#define PMBSR_EC_MASK (UL(0x3f) << PMBSR_EC_SHIFT)
+#define PMBSR_EC_VAL(x) (((x) & PMBSR_EC_MASK) >> PMBSR_EC_SHIFT)
+#define PMBSR_EC_OTHER_BUF_MGMT 0x00
+#define PMBSR_EC_GRAN_PROT_CHK 0x1e
+#define PMBSR_EC_STAGE1_DA 0x24
+#define PMBSR_EC_STAGE2_DA 0x25
/* PMCCFILTR_EL0 */
#define PMCCFILTR_EL0_op0 3
@@ -2513,6 +2502,15 @@
#define PMSIDR_FnE (UL(0x1) << PMSIDR_FnE_SHIFT)
#define PMSIDR_Interval_SHIFT 8
#define PMSIDR_Interval_MASK (UL(0xf) << PMSIDR_Interval_SHIFT)
+#define PMSIDR_Interval_VAL(x) (((x) & PMSIDR_Interval_MASK) >> PMSIDR_Interval_SHIFT)
+#define PMSIDR_Interval_256 0
+#define PMSIDR_Interval_512 2
+#define PMSIDR_Interval_768 3
+#define PMSIDR_Interval_1024 4
+#define PMSIDR_Interval_1536 5
+#define PMSIDR_Interval_2048 6
+#define PMSIDR_Interval_3072 7
+#define PMSIDR_Interval_4096 8
#define PMSIDR_MaxSize_SHIFT 12
#define PMSIDR_MaxSize_MASK (UL(0xf) << PMSIDR_MaxSize_SHIFT)
#define PMSIDR_CountSize_SHIFT 16
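
The new _VAL() extractors make Statistical Profiling Extension (SPE) decode
sites read naturally. A hedged sketch of how a buffer-management interrupt
handler might classify the event, assuming armreg.h's alt-name encoding for
PMBSR_EL1:

    uint64_t pmbsr = READ_SPECIALREG(MRS_REG_ALT_NAME(PMBSR_EL1));

    switch (PMBSR_EC_VAL(pmbsr)) {
    case PMBSR_EC_STAGE1_DA:        /* stage 1 data abort on buffer write */
    case PMBSR_EC_STAGE2_DA:        /* stage 2 data abort */
        /* handle/report the faulting buffer address here */
        break;
    case PMBSR_EC_OTHER_BUF_MGMT:   /* buffer filled or other management */
        break;
    }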
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 124da8c215ed..b15210633d37 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -43,10 +43,10 @@
#define _MACHINE_CPU_H_
#if !defined(__ASSEMBLER__)
+#include <machine/_armreg.h>
#include <machine/atomic.h>
#include <machine/frame.h>
#endif
-#include <machine/armreg.h>
#define TRAPF_PC(tfp) ((tfp)->tf_elr)
#define TRAPF_USERMODE(tfp) (((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
index e6e1f682794e..e9eee643216b 100644
--- a/sys/arm64/include/cpufunc.h
+++ b/sys/arm64/include/cpufunc.h
@@ -96,6 +96,13 @@ serror_enable(void)
__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}
+static __inline void
+serror_disable(void)
+{
+
+ __asm __volatile("msr daifset, #(" __XSTRING(DAIF_A) ")");
+}
+
static __inline register_t
get_midr(void)
{
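
serror_disable() is the counterpart of the existing serror_enable(): it sets
DAIF.A so a pending SError cannot be delivered. The kexec path above masks it
just before the point of no return, roughly (bootstrap_l0_pa is a placeholder
name for the bootstrap table's physical address):

    serror_disable();           /* msr daifset, #(DAIF_A) */
    set_ttbr0(bootstrap_l0_pa); /* swap in the bootstrap tables... */
    cpu_tlb_flushID();          /* ...flush, then jump and never return */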
diff --git a/sys/arm64/include/db_machdep.h b/sys/arm64/include/db_machdep.h
index 5dc496ca851d..3ef95f7802ea 100644
--- a/sys/arm64/include/db_machdep.h
+++ b/sys/arm64/include/db_machdep.h
@@ -31,7 +31,6 @@
#ifndef _MACHINE_DB_MACHDEP_H_
#define _MACHINE_DB_MACHDEP_H_
-#include <machine/armreg.h>
#include <machine/frame.h>
#include <machine/trap.h>
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index 8feabd2b981b..7d405e63cd8d 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -30,6 +30,8 @@
#ifndef _MACHINE_HYPERVISOR_H_
#define _MACHINE_HYPERVISOR_H_
+#include <machine/_armreg.h>
+
/*
* These registers are only useful when in hypervisor context,
* e.g. specific to EL2, or controlling the hypervisor.
diff --git a/sys/arm64/include/kexec.h b/sys/arm64/include/kexec.h
new file mode 100644
index 000000000000..0a8c7a053331
--- /dev/null
+++ b/sys/arm64/include/kexec.h
@@ -0,0 +1,33 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Juniper Networks, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM64_KEXEC_H_
+#define _ARM64_KEXEC_H_
+
+#define KEXEC_MD_PAGES(x) 0
+
+#endif /* _ARM64_KEXEC_H_ */
diff --git a/sys/arm64/include/pcpu.h b/sys/arm64/include/pcpu.h
index 09bd8fa8a966..73399d2c3f8c 100644
--- a/sys/arm64/include/pcpu.h
+++ b/sys/arm64/include/pcpu.h
@@ -50,7 +50,8 @@ struct debug_monitor_state;
struct pmap *pc_curvmpmap; \
uint64_t pc_mpidr; \
u_int pc_bcast_tlbi_workaround; \
- char __pad[197]
+ uint64_t pc_release_addr; \
+ char __pad[189]
#ifdef _KERNEL
diff --git a/sys/arm64/include/smp.h b/sys/arm64/include/smp.h
index 500cd1ef4f02..4a5bfda3ac1c 100644
--- a/sys/arm64/include/smp.h
+++ b/sys/arm64/include/smp.h
@@ -40,6 +40,7 @@ enum {
IPI_STOP,
IPI_STOP_HARD,
IPI_HARDCLOCK,
+ IPI_OFF,
INTR_IPI_COUNT,
};
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index e839b5dd92c9..696a69669a2a 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -143,10 +143,41 @@ struct vm_eventinfo {
int *iptr; /* reqidle cookie */
};
+#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
+ ret_type vmmops_##opname args
+
+DECLARE_VMMOPS_FUNC(int, modinit, (int ipinum));
+DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
+DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
+DECLARE_VMMOPS_FUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
+ uint64_t gla, int prot, uint64_t *gpa, int *is_fault));
+DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
+ struct vm_eventinfo *info));
+DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
+DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+ int vcpu_id));
+DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
+DECLARE_VMMOPS_FUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far));
+DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
+DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
+DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
+DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
+DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
+ vm_offset_t max));
+DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
+#ifdef notyet
+#ifdef BHYVE_SNAPSHOT
+DECLARE_VMMOPS_FUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, vcpu_snapshot, (void *vcpui,
+ struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, restore_tsc, (void *vcpui, uint64_t now));
+#endif
+#endif
+
int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
-void vm_slock_vcpus(struct vm *vm);
+void vm_lock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
@@ -232,7 +263,6 @@ vcpu_should_yield(struct vcpu *vcpu)
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
struct vm_mem *vm_mem(struct vm *vm);
enum vm_reg_name vm_segment_name(int seg_encoding);
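
Unlike the old DEFINE_VMMOPS_IFUNC (removed from vmm/arm64.h below), the
semicolon now sits at the use site, and each invocation declares a plain
prototype; for example:

    /* DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num,
     *     uint64_t *retval));
     * expands to:
     */
    int vmmops_getreg(void *vcpui, int num, uint64_t *retval);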
diff --git a/sys/arm64/linux/linux_sysvec.c b/sys/arm64/linux/linux_sysvec.c
index 084b7a11b01f..ac05820f89bc 100644
--- a/sys/arm64/linux/linux_sysvec.c
+++ b/sys/arm64/linux/linux_sysvec.c
@@ -584,7 +584,7 @@ linux_vdso_reloc(char *mapping, Elf_Addr offset)
}
}
-static Elf_Brandnote linux64_brandnote = {
+static const Elf_Brandnote linux64_brandnote = {
.hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
.hdr.n_descsz = 16,
.hdr.n_type = 1,
@@ -593,7 +593,7 @@ static Elf_Brandnote linux64_brandnote = {
.trans_osrel = linux_trans_osrel
};
-static Elf64_Brandinfo linux_glibc2brand = {
+static const Elf64_Brandinfo linux_glibc2brand = {
.brand = ELFOSABI_LINUX,
.machine = EM_AARCH64,
.compat_3_brand = "Linux",
@@ -604,7 +604,7 @@ static Elf64_Brandinfo linux_glibc2brand = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-Elf64_Brandinfo *linux_brandlist[] = {
+const Elf64_Brandinfo *linux_brandlist[] = {
&linux_glibc2brand,
NULL
};
@@ -612,8 +612,8 @@ Elf64_Brandinfo *linux_brandlist[] = {
static int
linux64_elf_modevent(module_t mod, int type, void *data)
{
- Elf64_Brandinfo **brandinfo;
- struct linux_ioctl_handler**lihp;
+ const Elf64_Brandinfo **brandinfo;
+ struct linux_ioctl_handler **lihp;
int error;
error = 0;
diff --git a/sys/arm64/nvidia/tegra210/max77620_regulators.c b/sys/arm64/nvidia/tegra210/max77620_regulators.c
index af1a5af20ec3..d52aeaef1287 100644
--- a/sys/arm64/nvidia/tegra210/max77620_regulators.c
+++ b/sys/arm64/nvidia/tegra210/max77620_regulators.c
@@ -364,7 +364,7 @@ max77620_get_sel(struct max77620_reg_sc *sc, uint8_t *sel)
rv = RD1(sc->base_sc, sc->def->volt_reg, sel);
if (rv != 0) {
- printf("%s: cannot read volatge selector: %d\n",
+ printf("%s: cannot read voltage selector: %d\n",
regnode_get_name(sc->regnode), rv);
return (rv);
}
@@ -384,7 +384,7 @@ max77620_set_sel(struct max77620_reg_sc *sc, uint8_t sel)
rv = RM1(sc->base_sc, sc->def->volt_reg,
sc->def->volt_vsel_mask, sel);
if (rv != 0) {
- printf("%s: cannot set volatge selector: %d\n",
+ printf("%s: cannot set voltage selector: %d\n",
regnode_get_name(sc->regnode), rv);
return (rv);
}
diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
index f9b74aef7188..f530dab05331 100644
--- a/sys/arm64/vmm/arm64.h
+++ b/sys/arm64/vmm/arm64.h
@@ -136,37 +136,6 @@ struct hyp {
struct hypctx *ctx[];
};
-#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
- ret_type vmmops_##opname args;
-
-DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
-DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
-DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
- uint64_t gla, int prot, uint64_t *gpa, int *is_fault))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
- struct vm_eventinfo *info))
-DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
-DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
- int vcpu_id))
-DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
-DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
-DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
- vm_offset_t max))
-DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-#ifdef notyet
-#ifdef BHYVE_SNAPSHOT
-DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
-DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
- struct vm_snapshot_meta *meta))
-DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
-#endif
-#endif
-
uint64_t vmm_call_hyp(uint64_t, ...);
#if 0
diff --git a/sys/arm64/vmm/io/vgic_v3.c b/sys/arm64/vmm/io/vgic_v3.c
index 67afb3374815..023406c64182 100644
--- a/sys/arm64/vmm/io/vgic_v3.c
+++ b/sys/arm64/vmm/io/vgic_v3.c
@@ -47,7 +47,6 @@
#include <dev/ofw/openfirm.h>
-#include <machine/armreg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
diff --git a/sys/arm64/vmm/io/vtimer.c b/sys/arm64/vmm/io/vtimer.c
index da0f0d96c431..7c7fbb49e691 100644
--- a/sys/arm64/vmm/io/vtimer.c
+++ b/sys/arm64/vmm/io/vtimer.c
@@ -44,7 +44,6 @@
#include <machine/bus.h>
#include <machine/machdep.h>
#include <machine/vmm.h>
-#include <machine/armreg.h>
#include <arm64/vmm/arm64.h>
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index a551a2807183..e7b2b5d8c360 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -51,7 +51,6 @@
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
-#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/machdep.h>
@@ -88,7 +87,6 @@ struct vcpu {
struct vfpstate *guestfpu; /* (a,i) guest fpu state */
};
-#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
@@ -126,7 +124,6 @@ struct vm {
bool dying; /* (o) is dying */
volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- struct vmspace *vmspace; /* (o) guest's address space */
struct vm_mem mem; /* (i) guest memory */
char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
struct vcpu **vcpu; /* (i) guest vcpus */
@@ -274,6 +271,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy)
vmm_stat_free(vcpu->stats);
fpu_save_area_free(vcpu->guestfpu);
vcpu_lock_destroy(vcpu);
+ free(vcpu, M_VMM);
}
}
@@ -407,7 +405,7 @@ vm_init(struct vm *vm, bool create)
{
int i;
- vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+ vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
MPASS(vm->cookie != NULL);
CPU_ZERO(&vm->active_cpus);
@@ -470,9 +468,9 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
}
void
-vm_slock_vcpus(struct vm *vm)
+vm_lock_vcpus(struct vm *vm)
{
- sx_slock(&vm->vcpus_init_lock);
+ sx_xlock(&vm->vcpus_init_lock);
}
void
@@ -485,7 +483,7 @@ int
vm_create(const char *name, struct vm **retvm)
{
struct vm *vm;
- struct vmspace *vmspace;
+ int error;
/*
* If vmm.ko could not be successfully initialized then don't attempt
@@ -497,14 +495,13 @@ vm_create(const char *name, struct vm **retvm)
if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
return (EINVAL);
- vmspace = vmmops_vmspace_alloc(0, 1ul << 39);
- if (vmspace == NULL)
- return (ENOMEM);
-
vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
+ error = vm_mem_init(&vm->mem, 0, 1ul << 39);
+ if (error != 0) {
+ free(vm, M_VMM);
+ return (error);
+ }
strcpy(vm->name, name);
- vm->vmspace = vmspace;
- vm_mem_init(&vm->mem);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->sockets = 1;
@@ -558,7 +555,7 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_xlock_memsegs(vm);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
sched_pin();
PCPU_SET(curvmpmap, NULL);
sched_unpin();
@@ -582,11 +579,6 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_mem_destroy(vm);
- vmmops_vmspace_free(vm->vmspace);
- vm->vmspace = NULL;
-
- for (i = 0; i < vm->maxcpus; i++)
- free(vm->vcpu[i], M_VMM);
free(vm->vcpu, M_VMM);
sx_destroy(&vm->vcpus_init_lock);
}
@@ -1090,12 +1082,6 @@ vcpu_notify_event(struct vcpu *vcpu)
vcpu_unlock(vcpu);
}
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
- return (vm->vmspace);
-}
-
struct vm_mem *
vm_mem(struct vm *vm)
{
@@ -1292,8 +1278,7 @@ vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
-
- if (reg >= VM_REG_LAST)
+ if (reg < 0 || reg >= VM_REG_LAST)
return (EINVAL);
return (vmmops_getreg(vcpu->cookie, reg, retval));
@@ -1304,7 +1289,7 @@ vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
int error;
- if (reg >= VM_REG_LAST)
+ if (reg < 0 || reg >= VM_REG_LAST)
return (EINVAL);
error = vmmops_setreg(vcpu->cookie, reg, val);
if (error || reg != VM_REG_GUEST_PC)
@@ -1416,7 +1401,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
vme = &vcpu->exitinfo;
- pmap = vmspace_pmap(vcpu->vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vcpu->vm));
addr = vme->u.paging.gpa;
esr = vme->u.paging.esr;
@@ -1433,7 +1418,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
panic("%s: Invalid exception (esr = %lx)", __func__, esr);
}
- map = &vm->vmspace->vm_map;
+ map = &vm_vmspace(vm)->vm_map;
rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
if (rv != KERN_SUCCESS)
return (EFAULT);
@@ -1507,7 +1492,7 @@ vm_run(struct vcpu *vcpu)
if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
return (EINVAL);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
vme = &vcpu->exitinfo;
evinfo.rptr = NULL;
evinfo.sptr = &vm->suspend;
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index 618f4afaf8ee..006239431f29 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -47,7 +47,6 @@
#include <vm/vm_page.h>
#include <vm/vm_param.h>
-#include <machine/armreg.h>
#include <machine/vm.h>
#include <machine/cpufunc.h>
#include <machine/cpu.h>
diff --git a/sys/arm64/vmm/vmm_dev_machdep.c b/sys/arm64/vmm/vmm_dev_machdep.c
index 926a74fa528b..29d14e1ba952 100644
--- a/sys/arm64/vmm/vmm_dev_machdep.c
+++ b/sys/arm64/vmm/vmm_dev_machdep.c
@@ -68,19 +68,13 @@ int
vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
int fflag, struct thread *td)
{
- struct vm_run *vmrun;
- struct vm_vgic_version *vgv;
- struct vm_vgic_descr *vgic;
- struct vm_irq *vi;
- struct vm_exception *vmexc;
- struct vm_gla2gpa *gg;
- struct vm_msi *vmsi;
int error;
error = 0;
switch (cmd) {
case VM_RUN: {
struct vm_exit *vme;
+ struct vm_run *vmrun;
vmrun = (struct vm_run *)data;
vme = vm_exitinfo(vcpu);
@@ -94,41 +88,62 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
break;
break;
}
- case VM_INJECT_EXCEPTION:
+ case VM_INJECT_EXCEPTION: {
+ struct vm_exception *vmexc;
+
vmexc = (struct vm_exception *)data;
error = vm_inject_exception(vcpu, vmexc->esr, vmexc->far);
break;
- case VM_GLA2GPA_NOFAULT:
+ }
+ case VM_GLA2GPA_NOFAULT: {
+ struct vm_gla2gpa *gg;
+
gg = (struct vm_gla2gpa *)data;
error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
gg->prot, &gg->gpa, &gg->fault);
KASSERT(error == 0 || error == EFAULT,
("%s: vm_gla2gpa unknown error %d", __func__, error));
break;
- case VM_GET_VGIC_VERSION:
+ }
+ case VM_GET_VGIC_VERSION: {
+ struct vm_vgic_version *vgv;
+
vgv = (struct vm_vgic_version *)data;
/* TODO: Query the vgic driver for this */
vgv->version = 3;
vgv->flags = 0;
error = 0;
break;
- case VM_ATTACH_VGIC:
+ }
+ case VM_ATTACH_VGIC: {
+ struct vm_vgic_descr *vgic;
+
vgic = (struct vm_vgic_descr *)data;
error = vm_attach_vgic(vm, vgic);
break;
- case VM_RAISE_MSI:
+ }
+ case VM_RAISE_MSI: {
+ struct vm_msi *vmsi;
+
vmsi = (struct vm_msi *)data;
error = vm_raise_msi(vm, vmsi->msg, vmsi->addr, vmsi->bus,
vmsi->slot, vmsi->func);
break;
- case VM_ASSERT_IRQ:
+ }
+ case VM_ASSERT_IRQ: {
+ struct vm_irq *vi;
+
vi = (struct vm_irq *)data;
error = vm_assert_irq(vm, vi->irq);
break;
- case VM_DEASSERT_IRQ:
+ }
+ case VM_DEASSERT_IRQ: {
+ struct vm_irq *vi;
+
vi = (struct vm_irq *)data;
error = vm_deassert_irq(vm, vi->irq);
break;
+ }
default:
error = ENOTTY;
break;
diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index b8c6d2ab7a9a..0ad7930e9a87 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -32,7 +32,6 @@
#include <sys/types.h>
#include <sys/proc.h>
-#include <machine/armreg.h>
#include "arm64.h"
#include "hyp.h"
diff --git a/sys/arm64/vmm/vmm_reset.c b/sys/arm64/vmm/vmm_reset.c
index 1240c3ed16ec..0e4910ea87b4 100644
--- a/sys/arm64/vmm/vmm_reset.c
+++ b/sys/arm64/vmm/vmm_reset.c
@@ -31,7 +31,6 @@
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>