summaryrefslogtreecommitdiff
path: root/sys/arm64
diff options
context:
space:
mode:
Diffstat (limited to 'sys/arm64')
-rw-r--r--sys/arm64/acpica/acpi_iort.c34
-rw-r--r--sys/arm64/arm64/freebsd32_machdep.c10
-rw-r--r--sys/arm64/arm64/gic_v3.c4
-rw-r--r--sys/arm64/arm64/gic_v3_var.h3
-rw-r--r--sys/arm64/arm64/gicv3_its.c4
-rw-r--r--sys/arm64/arm64/machdep.c31
-rw-r--r--sys/arm64/arm64/pmap.c129
-rw-r--r--sys/arm64/conf/GENERIC1
8 files changed, 160 insertions, 56 deletions
diff --git a/sys/arm64/acpica/acpi_iort.c b/sys/arm64/acpica/acpi_iort.c
index edd87474c9c9..10a501254bc6 100644
--- a/sys/arm64/acpica/acpi_iort.c
+++ b/sys/arm64/acpica/acpi_iort.c
@@ -370,19 +370,44 @@ srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
struct iort_node *its_node;
struct iort_its_entry *its_entry;
- int i, matches;
+ int *map_counts;
+ int i, matches, dom;
if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
return;
matches = 0;
+ map_counts = arg;
gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
+ dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);
+
+ /*
+ * Catch firmware and config errors. map_counts keeps a
+ * count of ProximityDomain values mapping to a domain ID
+ */
+#if MAXMEMDOM > 1
+ if (dom == -1)
+ printf("Firmware Error: Proximity Domain %d could not be"
+ " mapped for GIC ITS ID %d!\n",
+ gicits->ProximityDomain, gicits->ItsId);
+#endif
+ /* use dom + 1 as index to handle the case where dom == -1 */
+ i = ++map_counts[dom + 1];
+ if (i > 1) {
+#ifdef NUMA
+ if (dom != -1)
+ printf("ERROR: Multiple Proximity Domains map to the"
+ " same NUMA domain %d!\n", dom);
+#else
+ printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
+ " NOT enabled!\n");
+#endif
+ }
TAILQ_FOREACH(its_node, &its_groups, next) {
its_entry = its_node->entries.its;
for (i = 0; i < its_node->nentries; i++, its_entry++) {
if (its_entry->its_id == gicits->ItsId) {
- its_entry->pxm = acpi_map_pxm_to_vm_domainid(
- gicits->ProximityDomain);
+ its_entry->pxm = dom;
matches++;
}
}
@@ -401,6 +426,7 @@ iort_post_process_its(void)
ACPI_TABLE_MADT *madt;
ACPI_TABLE_SRAT *srat;
vm_paddr_t madt_pa, srat_pa;
+ int map_counts[MAXMEMDOM + 1] = { 0 };
/* Check ITS block in MADT */
madt_pa = acpi_find_table(ACPI_SIG_MADT);
@@ -417,7 +443,7 @@ iort_post_process_its(void)
srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
KASSERT(srat != NULL, ("can't map SRAT!"));
acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
- srat_resolve_its_pxm, NULL);
+ srat_resolve_its_pxm, map_counts);
acpi_unmap_table(srat);
}
return (0);
diff --git a/sys/arm64/arm64/freebsd32_machdep.c b/sys/arm64/arm64/freebsd32_machdep.c
index aeac4605f2f5..2e25fe062b19 100644
--- a/sys/arm64/arm64/freebsd32_machdep.c
+++ b/sys/arm64/arm64/freebsd32_machdep.c
@@ -122,6 +122,7 @@ static void
get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp)
{
struct pcb *curpcb;
+ int i;
critical_enter();
curpcb = curthread->td_pcb;
@@ -137,8 +138,8 @@ get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp)
("Called get_fpcontext while the kernel is using the VFP"));
KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
("Non-userspace FPU flags set in get_fpcontext"));
- memcpy(mcp->mcv_reg, curpcb->pcb_fpustate.vfp_regs,
- sizeof(mcp->mcv_reg));
+ for (i = 0; i < 32; i++)
+ mcp->mcv_reg[i] = (uint64_t)curpcb->pcb_fpustate.vfp_regs[i];
mcp->mcv_fpscr = VFP_FPSCR_FROM_SRCR(curpcb->pcb_fpustate.vfp_fpcr,
curpcb->pcb_fpustate.vfp_fpsr);
}
@@ -149,13 +150,14 @@ static void
set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp)
{
struct pcb *pcb;
+ int i;
critical_enter();
pcb = td->td_pcb;
if (td == curthread)
vfp_discard(td);
- memcpy(pcb->pcb_fpustate.vfp_regs, mcp->mcv_reg,
- sizeof(pcb->pcb_fpustate.vfp_regs));
+ for (i = 0; i < 32; i++)
+ pcb->pcb_fpustate.vfp_regs[i] = mcp->mcv_reg[i];
pcb->pcb_fpustate.vfp_fpsr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr);
pcb->pcb_fpustate.vfp_fpcr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr);
critical_exit();
diff --git a/sys/arm64/arm64/gic_v3.c b/sys/arm64/arm64/gic_v3.c
index af08ee992bb6..a83ef576e30e 100644
--- a/sys/arm64/arm64/gic_v3.c
+++ b/sys/arm64/arm64/gic_v3.c
@@ -390,10 +390,6 @@ gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
case GICV3_IVAR_NIRQS:
*result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
return (0);
- case GICV3_IVAR_REDIST_VADDR:
- *result = (uintptr_t)rman_get_virtual(
- &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res);
- return (0);
case GICV3_IVAR_REDIST:
*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
return (0);
diff --git a/sys/arm64/arm64/gic_v3_var.h b/sys/arm64/arm64/gic_v3_var.h
index 27dec4d72190..1257484a5e57 100644
--- a/sys/arm64/arm64/gic_v3_var.h
+++ b/sys/arm64/arm64/gic_v3_var.h
@@ -94,11 +94,10 @@ MALLOC_DECLARE(M_GIC_V3);
/* ivars */
#define GICV3_IVAR_NIRQS 1000
-#define GICV3_IVAR_REDIST_VADDR 1001
+/* 1001 was GICV3_IVAR_REDIST_VADDR */
#define GICV3_IVAR_REDIST 1002
__BUS_ACCESSOR(gicv3, nirqs, GICV3, NIRQS, u_int);
-__BUS_ACCESSOR(gicv3, redist_vaddr, GICV3, REDIST_VADDR, void *);
__BUS_ACCESSOR(gicv3, redist, GICV3, REDIST, void *);
/* Device methods */
diff --git a/sys/arm64/arm64/gicv3_its.c b/sys/arm64/arm64/gicv3_its.c
index f347a36c12cc..2701a7e8df0a 100644
--- a/sys/arm64/arm64/gicv3_its.c
+++ b/sys/arm64/arm64/gicv3_its.c
@@ -747,9 +747,7 @@ gicv3_its_attach(device_t dev)
if (domain < MAXMEMDOM)
CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
} else {
- /* XXX : cannot handle more than one ITS per cpu */
- if (device_get_unit(dev) == 0)
- CPU_COPY(&all_cpus, &sc->sc_cpus);
+ CPU_COPY(&all_cpus, &sc->sc_cpus);
}
/* Allocate the command circular buffer */
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index b8ecfc08e676..4356add12aa0 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -194,6 +194,16 @@ fill_regs(struct thread *td, struct reg *regs)
memcpy(regs->x, frame->tf_x, sizeof(regs->x));
+#ifdef COMPAT_FREEBSD32
+ /*
+ * We may be called here for a 32-bit process, if we're using a
+ * 64-bit debugger. If so, put PC and SPSR where it expects them.
+ */
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ regs->x[15] = frame->tf_elr;
+ regs->x[16] = frame->tf_spsr;
+ }
+#endif
return (0);
}
@@ -211,6 +221,17 @@ set_regs(struct thread *td, struct reg *regs)
memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ /*
+ * We may be called for a 32-bit process if we're using
+ * a 64-bit debugger. If so, get PC and SPSR from where
+ * it put them.
+ */
+ frame->tf_elr = regs->x[15];
+ frame->tf_spsr = regs->x[16] & PSR_FLAGS;
+ }
+#endif
return (0);
}
@@ -283,8 +304,9 @@ fill_regs32(struct thread *td, struct reg32 *regs)
tf = td->td_frame;
for (i = 0; i < 13; i++)
regs->r[i] = tf->tf_x[i];
- regs->r_sp = tf->tf_sp;
- regs->r_lr = tf->tf_lr;
+ /* For arm32, SP is r13 and LR is r14 */
+ regs->r_sp = tf->tf_x[13];
+ regs->r_lr = tf->tf_x[14];
regs->r_pc = tf->tf_elr;
regs->r_cpsr = tf->tf_spsr;
@@ -300,8 +322,9 @@ set_regs32(struct thread *td, struct reg32 *regs)
tf = td->td_frame;
for (i = 0; i < 13; i++)
tf->tf_x[i] = regs->r[i];
- tf->tf_sp = regs->r_sp;
- tf->tf_lr = regs->r_lr;
+ /* For arm32, SP is r13 and LR is r14 */
+ tf->tf_x[13] = regs->r_sp;
+ tf->tf_x[14] = regs->r_lr;
tf->tf_elr = regs->r_pc;
tf->tf_spsr = regs->r_cpsr;
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index df34873025dd..bf74bb55f038 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -2510,6 +2510,82 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
}
/*
+ * Remove the specified range of addresses from the L3 page table that is
+ * identified by the given L2 entry.
+ */
+static void
+pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
+ vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ struct rwlock *new_lock;
+ pt_entry_t *l3, old_l3;
+ vm_offset_t va;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
+ ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
+ va = eva;
+ for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
+ if (!pmap_l3_valid(pmap_load(l3))) {
+ if (va != eva) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = eva;
+ }
+ continue;
+ }
+ old_l3 = pmap_load_clear(l3);
+ if ((old_l3 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count--;
+ pmap_resident_count_dec(pmap, 1);
+ if ((old_l3 & ATTR_SW_MANAGED) != 0) {
+ m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
+ if (pmap_page_dirty(old_l3))
+ vm_page_dirty(m);
+ if ((old_l3 & ATTR_AF) != 0)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
+ if (new_lock != *lockp) {
+ if (*lockp != NULL) {
+ /*
+ * Pending TLB invalidations must be
+ * performed before the PV list lock is
+ * released. Otherwise, a concurrent
+ * pmap_remove_all() on a physical page
+ * could return while a stale TLB entry
+ * still provides access to that page.
+ */
+ if (va != eva) {
+ pmap_invalidate_range(pmap, va,
+ sva);
+ va = eva;
+ }
+ rw_wunlock(*lockp);
+ }
+ *lockp = new_lock;
+ rw_wlock(*lockp);
+ }
+ pmap_pvh_free(&m->md, pmap, sva);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ }
+ if (va == eva)
+ va = sva;
+ if (pmap_unuse_pt(pmap, sva, l2e, free)) {
+ sva += L3_SIZE;
+ break;
+ }
+ }
+ if (va != eva)
+ pmap_invalidate_range(pmap, va, sva);
+}
+
+/*
* Remove the given range of addresses from the specified map.
*
* It is assumed that the start and end are properly
@@ -2519,9 +2595,9 @@ void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
struct rwlock *lock;
- vm_offset_t va, va_next;
+ vm_offset_t va_next;
pd_entry_t *l0, *l1, *l2;
- pt_entry_t l3_paddr, *l3;
+ pt_entry_t l3_paddr;
struct spglist free;
/*
@@ -2594,28 +2670,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
if (va_next > eva)
va_next = eva;
- va = va_next;
- for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
- sva += L3_SIZE) {
- if (l3 == NULL)
- panic("l3 == NULL");
- if (pmap_load(l3) == 0) {
- if (va != va_next) {
- pmap_invalidate_range(pmap, va, sva);
- va = va_next;
- }
- continue;
- }
- if (va == va_next)
- va = sva;
- if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free,
- &lock)) {
- sva += L3_SIZE;
- break;
- }
- }
- if (va != va_next)
- pmap_invalidate_range(pmap, va, sva);
+ pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
+ &lock);
}
if (lock != NULL)
rw_wunlock(lock);
@@ -3352,7 +3408,7 @@ validate:
__func__, pmap, va, new_l3);
}
} else {
- /* New mappig */
+ /* New mapping */
pmap_load_store(l3, new_l3);
dsb(ishst);
}
@@ -3419,8 +3475,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
vm_page_t m, struct rwlock **lockp)
{
struct spglist free;
- pd_entry_t *l2, *l3, old_l2;
- vm_offset_t sva;
+ pd_entry_t *l2, old_l2;
vm_page_t l2pg, mt;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3449,13 +3504,8 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
(void)pmap_remove_l2(pmap, l2, va,
pmap_load(pmap_l1(pmap, va)), &free, lockp);
else
- for (sva = va; sva < va + L2_SIZE; sva += PAGE_SIZE) {
- l3 = pmap_l2_to_l3(l2, sva);
- if (pmap_l3_valid(pmap_load(l3)) &&
- pmap_remove_l3(pmap, l3, sva, old_l2, &free,
- lockp) != 0)
- break;
- }
+ pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
+ &free, lockp);
vm_page_free_pages_toq(&free, true);
if (va >= VM_MAXUSER_ADDRESS) {
/*
@@ -3656,6 +3706,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
l3 = pmap_l2_to_l3(pde, va);
}
+ /*
+ * Abort if a mapping already exists.
+ */
if (pmap_load(l3) != 0) {
if (mpte != NULL) {
mpte->wire_count--;
@@ -3705,7 +3758,15 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
pmap_load_store(l3, l3_val);
+
+ /*
+ * XXX In principle, because this L3 entry was invalid, we should not
+ * need to perform a TLB invalidation here. However, in practice,
+ * when simply performing a "dsb ishst" here, processes are being
+ * terminated due to bus errors and segmentation violations.
+ */
pmap_invalidate_page(pmap, va);
+
return (mpte);
}
diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC
index 8e3f04fe1db1..2adee9db99d4 100644
--- a/sys/arm64/conf/GENERIC
+++ b/sys/arm64/conf/GENERIC
@@ -293,7 +293,6 @@ device aw_cir
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
-device random # Entropy device
device ether # Ethernet support
device vlan # 802.1Q VLAN support
device tuntap # Packet tunnel.