Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/mem.c | 4
-rw-r--r--  sys/amd64/amd64/minidump_machdep.c | 10
-rw-r--r--  sys/amd64/amd64/pmap.c | 238
-rw-r--r--  sys/amd64/amd64/trap.c | 2
-rw-r--r--  sys/amd64/include/param.h | 5
-rw-r--r--  sys/amd64/include/pmap.h | 20
-rw-r--r--  sys/amd64/include/vmparam.h | 41
-rw-r--r--  sys/arm/arm/pmap-v6.c | 32
-rw-r--r--  sys/arm64/arm64/pmap.c | 86
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/slab.h | 2
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_page.c | 5
-rw-r--r--  sys/dev/usb/controller/xhci_pci.c | 7
-rw-r--r--  sys/fs/fdescfs/fdesc_vnops.c | 9
-rw-r--r--  sys/fs/msdosfs/msdosfs_vnops.c | 21
-rw-r--r--  sys/fs/p9fs/p9fs_vnops.c | 8
-rw-r--r--  sys/i386/conf/GENERIC | 2
-rw-r--r--  sys/i386/conf/GENERIC-NODEBUG | 2
-rw-r--r--  sys/i386/conf/LINT | 1
-rw-r--r--  sys/i386/conf/MINIMAL | 2
-rw-r--r--  sys/i386/conf/PAE | 2
-rw-r--r--  sys/i386/i386/pmap.c | 12
-rw-r--r--  sys/kern/kern_descrip.c | 2
-rw-r--r--  sys/kern/subr_asan.c | 3
-rw-r--r--  sys/kern/subr_trap.c | 5
-rw-r--r--  sys/net/if_lagg.c | 1
-rw-r--r--  sys/net/pfvar.h | 6
-rw-r--r--  sys/netinet6/raw_ip6.c | 3
-rw-r--r--  sys/netlink/netlink_message_parser.h | 3
-rw-r--r--  sys/netpfil/ipfilter/netinet/ip_fil_freebsd.c | 5
-rw-r--r--  sys/netpfil/pf/if_pflog.c | 4
-rw-r--r--  sys/netpfil/pf/if_pfsync.c | 11
-rw-r--r--  sys/netpfil/pf/pf.c | 28
-rw-r--r--  sys/netpfil/pf/pf.h | 3
-rw-r--r--  sys/netpfil/pf/pf_ioctl.c | 52
-rw-r--r--  sys/netpfil/pf/pf_lb.c | 165
-rw-r--r--  sys/netpfil/pf/pf_table.c | 74
-rw-r--r--  sys/powerpc/aim/mmu_oea.c | 3
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 3
-rw-r--r--  sys/powerpc/aim/mmu_radix.c | 4
-rw-r--r--  sys/powerpc/include/pcb.h | 10
-rw-r--r--  sys/powerpc/include/ucontext.h | 2
-rw-r--r--  sys/powerpc/powerpc/exec_machdep.c | 39
-rw-r--r--  sys/powerpc/powerpc/fpu.c | 30
-rw-r--r--  sys/riscv/riscv/pmap.c | 2
-rw-r--r--  sys/vm/vm_domainset.c | 16
-rw-r--r--  sys/vm/vm_kern.c | 9
46 files changed, 599 insertions, 395 deletions
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index 413b7c74890e..851f2df0e6e1 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -105,8 +105,8 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
* PAGE_SIZE, the uiomove() call does not
* access past the end of the direct map.
*/
- if (v >= DMAP_MIN_ADDRESS &&
- v < DMAP_MIN_ADDRESS + dmaplimit) {
+ if (v >= kva_layout.dmap_low &&
+ v < kva_layout.dmap_high) {
error = uiomove((void *)v, c, uio);
break;
}
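
This small hunk sets the pattern for the whole series: checks against the
compile-time DMAP_MIN_ADDRESS/DMAP_MAX_ADDRESS (and VM_MIN/MAX_KERNEL_ADDRESS)
constants become loads from the new kva_layout global, so a single kernel
binary can serve both the 4-level (LA48) and 5-level (LA57) layouts chosen at
boot. As a sketch (the helper is ours; the diff open-codes the comparison):

	static inline bool
	va_in_dmap(vm_offset_t va)
	{
		return (va >= kva_layout.dmap_low && va < kva_layout.dmap_high);
	}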
diff --git a/sys/amd64/amd64/minidump_machdep.c b/sys/amd64/amd64/minidump_machdep.c
index 6d0917e16099..43bf81a991bf 100644
--- a/sys/amd64/amd64/minidump_machdep.c
+++ b/sys/amd64/amd64/minidump_machdep.c
@@ -186,7 +186,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
* tables, so care must be taken to read each entry only once.
*/
pmapsize = 0;
- for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; ) {
+ for (va = kva_layout.km_low; va < kva_end; ) {
/*
* We always write a page, even if it is zero. Each
* page written corresponds to 1GB of space
@@ -279,9 +279,9 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
mdhdr.msgbufsize = mbp->msg_size;
mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
mdhdr.pmapsize = pmapsize;
- mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
- mdhdr.dmapbase = DMAP_MIN_ADDRESS;
- mdhdr.dmapend = DMAP_MAX_ADDRESS;
+ mdhdr.kernbase = kva_layout.km_low;
+ mdhdr.dmapbase = kva_layout.dmap_low;
+ mdhdr.dmapend = kva_layout.dmap_high;
mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
@@ -323,7 +323,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
/* Dump kernel page directory pages */
bzero(fakepd, sizeof(fakepd));
- for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += NBPDP) {
+ for (va = kva_layout.km_low; va < kva_end; va += NBPDP) {
ii = pmap_pml4e_index(va);
pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 9c985df13ddf..2c7777e608b9 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -415,7 +415,7 @@ SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
static int ndmpdp;
vm_paddr_t dmaplimit;
-vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
+vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS_LA48;
pt_entry_t pg_nx;
static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
@@ -475,11 +475,36 @@ _Static_assert(DMPML4I + NDMPML4E <= KMSANSHADPML4I, "direct map overflow");
static pml4_entry_t *kernel_pml4;
static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
+static u_int64_t DMPML4phys; /* ... level 4, for la57 */
static int ndmpdpphys; /* number of DMPDPphys pages */
vm_paddr_t kernphys; /* phys addr of start of bootstrap data */
vm_paddr_t KERNend; /* and the end */
+struct kva_layout_s kva_layout = {
+ .kva_min = KV4ADDR(PML4PML4I, 0, 0, 0),
+ .dmap_low = KV4ADDR(DMPML4I, 0, 0, 0),
+ .dmap_high = KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0),
+ .lm_low = KV4ADDR(LMSPML4I, 0, 0, 0),
+ .lm_high = KV4ADDR(LMEPML4I + 1, 0, 0, 0),
+ .km_low = KV4ADDR(KPML4BASE, 0, 0, 0),
+ .km_high = KV4ADDR(KPML4BASE + NKPML4E - 1, NPDPEPG - 1,
+ NPDEPG - 1, NPTEPG - 1),
+ .rec_pt = KV4ADDR(PML4PML4I, 0, 0, 0),
+};
+
+struct kva_layout_s kva_layout_la57 = {
+ .kva_min = KV5ADDR(NPML5EPG / 2, 0, 0, 0, 0), /* == rec_pt */
+ .dmap_low = KV5ADDR(DMPML5I, 0, 0, 0, 0),
+ .dmap_high = KV5ADDR(DMPML5I + NDMPML5E, 0, 0, 0, 0),
+ .lm_low = KV4ADDR(LMSPML4I, 0, 0, 0),
+ .lm_high = KV4ADDR(LMEPML4I + 1, 0, 0, 0),
+ .km_low = KV4ADDR(KPML4BASE, 0, 0, 0),
+ .km_high = KV4ADDR(KPML4BASE + NKPML4E - 1, NPDPEPG - 1,
+ NPDEPG - 1, NPTEPG - 1),
+ .rec_pt = KV5ADDR(PML5PML5I, 0, 0, 0, 0),
+};
+
/*
* pmap_mapdev support pre initialization (i.e. console)
*/
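
The two layout tables are the core of the series: pmap_bootstrap() copies the
LA57 table over the LA48 default when la57 is detected. Note that only
dmap_low/dmap_high and rec_pt actually move; the large map and kernel map keep
their KV4ADDR positions even under LA57. For orientation, a paraphrase of how
the KV4ADDR()/KV5ADDR() constructors in amd64/include/vmparam.h build
canonical addresses (sign-extend from bit 47 or 56, then place the per-level
indexes); treat the exact spelling as approximate:

	#define KV4ADDR(l4, l3, l2, l1) ( \
		((unsigned long)-1 << 47) | \
		((unsigned long)(l4) << PML4SHIFT) | \
		((unsigned long)(l3) << PDPSHIFT) | \
		((unsigned long)(l2) << PDRSHIFT) | \
		((unsigned long)(l1) << PAGE_SHIFT))

	#define KV5ADDR(l5, l4, l3, l2, l1) ( \
		((unsigned long)-1 << 56) | \
		((unsigned long)(l5) << PML5SHIFT) | \
		((unsigned long)(l4) << PML4SHIFT) | \
		((unsigned long)(l3) << PDPSHIFT) | \
		((unsigned long)(l2) << PDRSHIFT) | \
		((unsigned long)(l1) << PAGE_SHIFT))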
@@ -549,8 +574,8 @@ static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
static vmem_t *large_vmem;
static u_int lm_ents;
-#define PMAP_ADDRESS_IN_LARGEMAP(va) ((va) >= LARGEMAP_MIN_ADDRESS && \
- (va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
+#define PMAP_ADDRESS_IN_LARGEMAP(va) ((va) >= kva_layout.lm_low && \
+ (va) < kva_layout.lm_high)
int pmap_pcid_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
@@ -1336,7 +1361,7 @@ static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
static pd_entry_t *pmap_pti_pde(vm_offset_t va);
static void pmap_pti_wire_pte(void *pte);
static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- bool remove_pt, struct spglist *free, struct rwlock **lockp);
+ bool demote_kpde, struct spglist *free, struct rwlock **lockp);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
@@ -1722,7 +1747,7 @@ create_pagetables(vm_paddr_t *firstaddr)
{
pd_entry_t *pd_p;
pdp_entry_t *pdp_p;
- pml4_entry_t *p4_p;
+ pml4_entry_t *p4_p, *p4d_p;
pml5_entry_t *p5_p;
uint64_t DMPDkernphys;
vm_paddr_t pax;
@@ -1732,7 +1757,7 @@ create_pagetables(vm_paddr_t *firstaddr)
vm_offset_t kasankernbase;
int kasankpdpi, kasankpdi, nkasanpte;
#endif
- int i, j, ndm1g, nkpdpe, nkdmpde;
+ int i, j, ndm1g, nkpdpe, nkdmpde, ndmpml4phys;
TSENTER();
/* Allocate page table pages for the direct map */
@@ -1740,15 +1765,30 @@ create_pagetables(vm_paddr_t *firstaddr)
if (ndmpdp < 4) /* Minimum 4GB of dirmap */
ndmpdp = 4;
ndmpdpphys = howmany(ndmpdp, NPDPEPG);
- if (ndmpdpphys > NDMPML4E) {
- /*
- * Each NDMPML4E allows 512 GB, so limit to that,
- * and then readjust ndmpdp and ndmpdpphys.
- */
- printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
- Maxmem = atop(NDMPML4E * NBPML4);
- ndmpdpphys = NDMPML4E;
- ndmpdp = NDMPML4E * NPDEPG;
+ if (la57) {
+ ndmpml4phys = howmany(ndmpdpphys, NPML4EPG);
+ if (ndmpml4phys > NDMPML5E) {
+ printf("NDMPML5E limits system to %ld GB\n",
+ (u_long)NDMPML5E * NBPML5 / 1024 / 1024 / 1024);
+ Maxmem = atop(NDMPML5E * NBPML5);
+ ndmpml4phys = NDMPML5E;
+ ndmpdpphys = ndmpml4phys * NPML4EPG;
+ ndmpdp = ndmpdpphys * NPDEPG;
+ }
+ DMPML4phys = allocpages(firstaddr, ndmpml4phys);
+ } else {
+ if (ndmpdpphys > NDMPML4E) {
+ /*
+ * Each NDMPML4E allows 512 GB, so limit to
+ * that, and then readjust ndmpdp and
+ * ndmpdpphys.
+ */
+ printf("NDMPML4E limits system to %d GB\n",
+ NDMPML4E * 512);
+ Maxmem = atop(NDMPML4E * NBPML4);
+ ndmpdpphys = NDMPML4E;
+ ndmpdp = NDMPML4E * NPDEPG;
+ }
}
DMPDPphys = allocpages(firstaddr, ndmpdpphys);
ndm1g = 0;
@@ -1773,7 +1813,13 @@ create_pagetables(vm_paddr_t *firstaddr)
dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
/* Allocate pages. */
+ if (la57) {
+ KPML5phys = allocpages(firstaddr, 1);
+ p5_p = (pml5_entry_t *)KPML5phys;
+ }
KPML4phys = allocpages(firstaddr, 1);
+ p4_p = (pml4_entry_t *)KPML4phys;
+
KPDPphys = allocpages(firstaddr, NKPML4E);
#ifdef KASAN
KASANPDPphys = allocpages(firstaddr, NKASANPML4E);
@@ -1893,6 +1939,16 @@ create_pagetables(vm_paddr_t *firstaddr)
}
/*
+ * Connect the Direct Map slots up to the PML4.
+ * pml5 entries for DMAP are handled below in global pml5 loop.
+ */
+ p4d_p = la57 ? (pml4_entry_t *)DMPML4phys : &p4_p[DMPML4I];
+ for (i = 0; i < ndmpdpphys; i++) {
+ p4d_p[i] = (DMPDPphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
+ pg_nx;
+ }
+
+ /*
* Instead of using a 1G page for the memory containing the kernel,
* use 2M pages with read-only and no-execute permissions. (If using 1G
* pages, this will partially overwrite the PDPEs above.)
@@ -1911,11 +1967,6 @@ create_pagetables(vm_paddr_t *firstaddr)
}
}
- /* And recursively map PML4 to itself in order to get PTmap */
- p4_p = (pml4_entry_t *)KPML4phys;
- p4_p[PML4PML4I] = KPML4phys;
- p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
-
#ifdef KASAN
/* Connect the KASAN shadow map slots up to the PML4. */
for (i = 0; i < NKASANPML4E; i++) {
@@ -1938,25 +1989,15 @@ create_pagetables(vm_paddr_t *firstaddr)
}
#endif
- /* Connect the Direct Map slots up to the PML4. */
- for (i = 0; i < ndmpdpphys; i++) {
- p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
- p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
- }
-
/* Connect the KVA slots up to the PML4 */
for (i = 0; i < NKPML4E; i++) {
p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V;
}
- kernel_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
-
if (la57) {
/* XXXKIB bootstrap KPML5phys page is lost */
- KPML5phys = allocpages(firstaddr, 1);
- for (i = 0, p5_p = (pml5_entry_t *)KPML5phys; i < NPML5EPG;
- i++) {
+ for (i = 0; i < NPML5EPG; i++) {
if (i == PML5PML5I) {
/*
* Recursively map PML5 to itself in
@@ -1964,6 +2005,10 @@ create_pagetables(vm_paddr_t *firstaddr)
*/
p5_p[i] = KPML5phys | X86_PG_RW | X86_PG_A |
X86_PG_M | X86_PG_V | pg_nx;
+ } else if (i >= DMPML5I && i < DMPML5I + NDMPML5E) {
+ /* Connect DMAP pml4 pages to PML5. */
+ p5_p[i] = (DMPML4phys + ptoa(i - DMPML5I)) |
+ X86_PG_RW | X86_PG_V | pg_nx;
} else if (i == pmap_pml5e_index(UPT_MAX_ADDRESS)) {
p5_p[i] = KPML4phys | X86_PG_RW | X86_PG_A |
X86_PG_M | X86_PG_V;
@@ -1971,6 +2016,10 @@ create_pagetables(vm_paddr_t *firstaddr)
p5_p[i] = 0;
}
}
+ } else {
+ /* Recursively map PML4 to itself in order to get PTmap */
+ p4_p[PML4PML4I] = KPML4phys;
+ p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
}
TSEXIT();
}
@@ -2024,7 +2073,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
*/
virtual_avail = (vm_offset_t)KERNSTART + round_2mpage(KERNend -
(vm_paddr_t)kernphys);
- virtual_end = VM_MAX_KERNEL_ADDRESS;
+ virtual_end = kva_layout.km_high;
/*
* Enable PG_G global pages, then switch to the kernel page
@@ -2046,9 +2095,13 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
* Initialize the kernel pmap (which is statically allocated).
* Count bootstrap data as being resident in case any of this data is
* later unmapped (using pmap_remove()) and freed.
+ *
+ * DMAP_TO_PHYS()/PHYS_TO_DMAP() are functional only after
+ * kva_layout is fixed.
*/
PMAP_LOCK_INIT(kernel_pmap);
if (la57) {
+ kva_layout = kva_layout_la57;
vtoptem = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
PTmap = (vm_offset_t)P5Tmap;
@@ -2059,6 +2112,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
kernel_pmap->pm_cr3 = KPML5phys;
pmap_pt_page_count_adj(kernel_pmap, 1); /* top-level page */
} else {
+ kernel_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
kernel_pmap->pm_pmltop = kernel_pml4;
kernel_pmap->pm_cr3 = KPML4phys;
}
@@ -2420,6 +2474,7 @@ pmap_init(void)
{
struct pmap_preinit_mapping *ppim;
vm_page_t m, mpte;
+ pml4_entry_t *pml4e;
int error, i, ret, skz63;
/* L1TF, reserve page @0 unconditionally */
@@ -2559,18 +2614,19 @@ pmap_init(void)
printf("pmap: large map %u PML4 slots (%lu GB)\n",
lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
if (lm_ents != 0) {
- large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
- (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
+ large_vmem = vmem_create("large", kva_layout.lm_low,
+ (vmem_size_t)kva_layout.lm_high - kva_layout.lm_low,
+ PAGE_SIZE, 0, M_WAITOK);
if (large_vmem == NULL) {
printf("pmap: cannot create large map\n");
lm_ents = 0;
}
for (i = 0; i < lm_ents; i++) {
m = pmap_large_map_getptp_unlocked();
- /* XXXKIB la57 */
- kernel_pml4[LMSPML4I + i] = X86_PG_V |
- X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
- VM_PAGE_TO_PHYS(m);
+ pml4e = pmap_pml4e(kernel_pmap, kva_layout.lm_low +
+ (u_long)i * NBPML4);
+ *pml4e = X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M |
+ pg_nx | VM_PAGE_TO_PHYS(m);
}
}
}
@@ -3899,7 +3955,7 @@ pmap_kextract(vm_offset_t va)
pd_entry_t pde;
vm_paddr_t pa;
- if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
+ if (va >= kva_layout.dmap_low && va < kva_layout.dmap_high) {
pa = DMAP_TO_PHYS(va);
} else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
pa = pmap_large_map_kextract(va);
@@ -4040,7 +4096,7 @@ pmap_qremove(vm_offset_t sva, int count)
* enough to one of those pmap_enter() calls for it to
* be caught up in a promotion.
*/
- KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
+ KASSERT(va >= kva_layout.km_low, ("usermode va %lx", va));
KASSERT((*vtopde(va) & X86_PG_PS) == 0,
("pmap_qremove on promoted va %#lx", va));
@@ -4328,21 +4384,13 @@ void
pmap_pinit_pml5(vm_page_t pml5pg)
{
pml5_entry_t *pm_pml5;
+ int i;
pm_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pg));
-
- /*
- * Add pml5 entry at top of KVA pointing to existing pml4 table,
- * entering all existing kernel mappings into level 5 table.
- */
- pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
- X86_PG_RW | X86_PG_A | X86_PG_M;
-
- /*
- * Install self-referential address mapping entry.
- */
- pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
- X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A;
+ for (i = 0; i < NPML5EPG / 2; i++)
+ pm_pml5[i] = 0;
+ for (; i < NPML5EPG; i++)
+ pm_pml5[i] = kernel_pmap->pm_pmltop[i];
}
static void
@@ -4899,8 +4947,8 @@ pmap_release(pmap_t pmap)
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
if (pmap_is_la57(pmap)) {
- pmap->pm_pmltop[pmap_pml5e_index(UPT_MAX_ADDRESS)] = 0;
- pmap->pm_pmltop[PML5PML5I] = 0;
+ for (i = NPML5EPG / 2; i < NPML5EPG; i++)
+ pmap->pm_pmltop[i] = 0;
} else {
for (i = 0; i < NKPML4E; i++) /* KVA */
pmap->pm_pmltop[KPML4BASE + i] = 0;
@@ -4942,7 +4990,7 @@ pmap_release(pmap_t pmap)
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
- unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+ unsigned long ksize = kva_layout.km_high - kva_layout.km_low;
return sysctl_handle_long(oidp, &ksize, 0, req);
}
@@ -4953,7 +5001,7 @@ SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
- unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
+ unsigned long kfree = kva_layout.km_high - kernel_vm_end;
return sysctl_handle_long(oidp, &kfree, 0, req);
}
@@ -5031,7 +5079,7 @@ pmap_page_array_startup(long pages)
vm_page_array_size = pages;
- start = VM_MIN_KERNEL_ADDRESS;
+ start = kva_layout.km_low;
end = start + pages * sizeof(struct vm_page);
for (va = start; va < end; va += NBPDR) {
pfn = first_page + (va - start) / sizeof(struct vm_page);
@@ -6067,8 +6115,8 @@ pmap_demote_pde_mpte(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
* so the direct map region is the only part of the
* kernel address space that must be handled here.
*/
- KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
- va < DMAP_MAX_ADDRESS),
+ KASSERT(!in_kernel || (va >= kva_layout.dmap_low &&
+ va < kva_layout.dmap_high),
("pmap_demote_pde: No saved mpte for va %#lx", va));
/*
@@ -6165,8 +6213,7 @@ pmap_demote_pde_mpte(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
* pmap_remove_kernel_pde: Remove a kernel superpage mapping.
*/
static void
-pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
- bool remove_pt)
+pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
pd_entry_t newpde;
vm_paddr_t mptepa;
@@ -6174,12 +6221,8 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- if (remove_pt)
- mpte = pmap_remove_pt_page(pmap, va);
- else
- mpte = vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va));
- if (mpte == NULL)
- panic("pmap_remove_kernel_pde: Missing pt page.");
+ mpte = pmap_remove_pt_page(pmap, va);
+ KASSERT(mpte != NULL, ("pmap_remove_kernel_pde: missing pt page"));
mptepa = VM_PAGE_TO_PHYS(mpte);
newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
@@ -6209,7 +6252,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
* pmap_remove_pde: do the things to unmap a superpage in a process
*/
static int
-pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, bool remove_pt,
+pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, bool demote_kpde,
struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
@@ -6249,9 +6292,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, bool remove_pt,
pmap_delayed_invl_page(m);
}
}
- if (pmap == kernel_pmap) {
- pmap_remove_kernel_pde(pmap, pdq, sva, remove_pt);
- } else {
+ if (pmap != kernel_pmap) {
mpte = pmap_remove_pt_page(pmap, sva);
if (mpte != NULL) {
KASSERT(vm_page_any_valid(mpte),
@@ -6262,6 +6303,14 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, bool remove_pt,
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, false);
}
+ } else if (demote_kpde) {
+ pmap_remove_kernel_pde(pmap, pdq, sva);
+ } else {
+ mpte = vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(sva));
+ if (vm_page_any_valid(mpte)) {
+ mpte->valid = 0;
+ pmap_zero_page(mpte);
+ }
}
return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
}
@@ -7183,7 +7232,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
PG_RW = pmap_rw_bit(pmap);
va = trunc_page(va);
- KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+ KASSERT(va <= kva_layout.km_high, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
va));
@@ -7573,8 +7622,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
* the mapping is not from kernel_pmap, then
* a reserved PT page could be freed.
*/
- (void)pmap_remove_pde(pmap, pde, va,
- pmap != kernel_pmap, &free, lockp);
+ (void)pmap_remove_pde(pmap, pde, va, false, &free,
+ lockp);
if ((oldpde & PG_G) == 0)
pmap_invalidate_pde_page(pmap, va, oldpde);
} else {
@@ -7584,10 +7633,9 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
* before any changes to mappings are
* made. Abort on failure.
*/
- mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
- if (pmap_insert_pt_page(pmap, mt, false, false)) {
- if (pdpg != NULL)
- pdpg->ref_count--;
+ mt = PHYS_TO_VM_PAGE(oldpde & PG_FRAME);
+ if (pmap_insert_pt_page(pmap, mt, false,
+ false)) {
CTR1(KTR_PMAP,
"pmap_enter_pde: cannot ins kern ptp va %#lx",
va);
@@ -9550,7 +9598,7 @@ pmap_unmapdev(void *p, vm_size_t size)
va = (vm_offset_t)p;
/* If we gave a direct map region in pmap_mapdev, do nothing */
- if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
+ if (va >= kva_layout.dmap_low && va < kva_layout.dmap_high)
return;
offset = va & PAGE_MASK;
size = round_page(offset + size);
@@ -9649,6 +9697,8 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va, vm_page_t m)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pat_mode == ma)
+ return;
m->md.pat_mode = ma;
@@ -9668,6 +9718,9 @@ pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma)
{
int error;
+ if (m->md.pat_mode == ma)
+ return;
+
m->md.pat_mode = ma;
if ((m->flags & PG_FICTITIOUS) != 0)
@@ -9724,7 +9777,7 @@ pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
int error;
/* Only supported within the kernel map. */
- if (va < VM_MIN_KERNEL_ADDRESS)
+ if (va < kva_layout.km_low)
return (EINVAL);
PMAP_LOCK(kernel_pmap);
@@ -9755,7 +9808,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
* Only supported on kernel virtual addresses, including the direct
* map but excluding the recursive map.
*/
- if (base < DMAP_MIN_ADDRESS)
+ if (base < kva_layout.dmap_low)
return (EINVAL);
/*
@@ -9778,7 +9831,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
pte_bits |= X86_PG_RW;
}
if ((prot & VM_PROT_EXECUTE) == 0 ||
- va < VM_MIN_KERNEL_ADDRESS) {
+ va < kva_layout.km_low) {
pde_bits |= pg_nx;
pte_bits |= pg_nx;
}
@@ -9874,7 +9927,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
pmap_pte_props(pdpe, pde_bits, pde_mask);
changed = true;
}
- if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+ if (tmpva >= kva_layout.km_low &&
(*pdpe & PG_PS_FRAME) < dmaplimit) {
if (pa_start == pa_end) {
/* Start physical address run. */
@@ -9904,7 +9957,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
pmap_pte_props(pde, pde_bits, pde_mask);
changed = true;
}
- if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+ if (tmpva >= kva_layout.km_low &&
(*pde & PG_PS_FRAME) < dmaplimit) {
if (pa_start == pa_end) {
/* Start physical address run. */
@@ -9932,7 +9985,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
pmap_pte_props(pte, pte_bits, pte_mask);
changed = true;
}
- if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+ if (tmpva >= kva_layout.km_low &&
(*pte & PG_FRAME) < dmaplimit) {
if (pa_start == pa_end) {
/* Start physical address run. */
@@ -10904,8 +10957,8 @@ pmap_large_unmap(void *svaa, vm_size_t len)
struct spglist spgf;
sva = (vm_offset_t)svaa;
- if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
- sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
+ if (len == 0 || sva + len < sva || (sva >= kva_layout.dmap_low &&
+ sva + len < kva_layout.dmap_high))
return;
SLIST_INIT(&spgf);
@@ -11151,11 +11204,10 @@ pmap_large_map_wb(void *svap, vm_size_t len)
sva = (vm_offset_t)svap;
eva = sva + len;
pmap_large_map_wb_fence();
- if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
+ if (sva >= kva_layout.dmap_low && eva < kva_layout.dmap_high) {
pmap_large_map_flush_range(sva, len);
} else {
- KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
- eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
+ KASSERT(sva >= kva_layout.lm_low && eva < kva_layout.lm_high,
("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
pmap_large_map_wb_large(sva, eva);
}
@@ -11196,8 +11248,8 @@ pmap_pti_init(void)
VM_OBJECT_WLOCK(pti_obj);
pml4_pg = pmap_pti_alloc_page();
pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
- for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
- va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
+ for (va = kva_layout.km_low; va <= kva_layout.km_high &&
+ va >= kva_layout.km_low && va > NBPML4; va += NBPML4) {
pdpe = pmap_pti_pdpe(va);
pmap_pti_wire_pte(pdpe);
}
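
A structural point worth pulling out of the create_pagetables() churn above:
the recursive page-table slot moves with the paging mode. Self-mapping the
top-level table into one of its own entries is what makes every page-table
page addressable through a fixed window (PTmap/PDmap), so under LA48 the PML4
self-maps, while under LA57 the PML5 must self-map instead; that is why
kva_layout.rec_pt differs between the two tables, and why the old
unconditional p4_p[PML4PML4I] store moved into the !la57 branch. The two
cases, as the patch installs them:

	/* LA48: the PML4 maps itself. */
	p4_p[PML4PML4I] = KPML4phys | X86_PG_RW | X86_PG_V | pg_nx;

	/* LA57: the PML5 maps itself; the PML4 is an interior node here. */
	p5_p[PML5PML5I] = KPML5phys | X86_PG_RW | X86_PG_A | X86_PG_M |
	    X86_PG_V | pg_nx;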
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 09ac0a67dbef..eefddad2f142 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -769,7 +769,7 @@ trap_pfault(struct trapframe *frame, bool usermode, int *signo, int *ucode)
return (-1);
}
}
- if (eva >= VM_MIN_KERNEL_ADDRESS) {
+ if (eva >= kva_layout.km_low) {
/*
* Don't allow user-mode faults in kernel address space.
*/
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index 8db314fa034d..1bbb302259d6 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -146,8 +146,9 @@
#define amd64_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
#define amd64_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
-#define INKERNEL(va) (((va) >= DMAP_MIN_ADDRESS && (va) < DMAP_MAX_ADDRESS) \
- || ((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS))
+#define INKERNEL(va) \
+ (((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_high) || \
+ ((va) >= kva_layout.km_low && (va) < kva_layout.km_high))
#ifdef SMP
#define SC_TABLESIZE 1024 /* Must be power of 2. */
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 7d3e91bcd9b9..08e96027a5ed 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -169,11 +169,12 @@
* the recursive page table map.
*/
#define NDMPML4E 8
+#define NDMPML5E 32
/*
- * These values control the layout of virtual memory. The starting address
- * of the direct map, which is controlled by DMPML4I, must be a multiple of
- * its size. (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
+ * These values control the layout of virtual memory. The starting
+ * address of the direct map is controlled by DMPML4I on LA48 and
+ * DMPML5I on LA57.
*
* Note: KPML4I is the index of the (single) level 4 page that maps
* the KVA that holds KERNBASE, while KPML4BASE is the index of the
@@ -191,6 +192,7 @@
#define KPML4BASE (NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define DMPML4I rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */
+#define DMPML5I (NPML5EPG / 2 + 1)
#define KPML4I (NPML4EPG-1)
#define KPDPI (NPDPEPG-2) /* kernbase at -2GB */
@@ -548,6 +550,18 @@ pmap_pml5e_index(vm_offset_t va)
return ((va >> PML5SHIFT) & ((1ul << NPML5EPGSHIFT) - 1));
}
+struct kva_layout_s {
+ vm_offset_t kva_min;
+ vm_offset_t dmap_low; /* DMAP_MIN_ADDRESS */
+ vm_offset_t dmap_high; /* DMAP_MAX_ADDRESS */
+ vm_offset_t lm_low; /* LARGEMAP_MIN_ADDRESS */
+ vm_offset_t lm_high; /* LARGEMAP_MAX_ADDRESS */
+ vm_offset_t km_low; /* VM_MIN_KERNEL_ADDRESS */
+ vm_offset_t km_high; /* VM_MAX_KERNEL_ADDRESS */
+ vm_offset_t rec_pt;
+};
+extern struct kva_layout_s kva_layout;
+
#endif /* !LOCORE */
#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index 0cd9bb4fa7a4..59053665dc40 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -163,6 +163,7 @@
* Virtual addresses of things. Derived from the page directory and
* page table indexes from pmap.h for precision.
*
+ * LA48:
* 0x0000000000000000 - 0x00007fffffffffff user map
* 0x0000800000000000 - 0xffff7fffffffffff does not exist (hole)
* 0xffff800000000000 - 0xffff804020100fff recursive page table (512GB slot)
@@ -175,18 +176,29 @@
* 0xfffffc0000000000 - 0xfffffdffffffffff 2TB KMSAN shadow map, optional
* 0xfffffe0000000000 - 0xffffffffffffffff 2TB kernel map
*
+ * LA57:
+ * 0x0000000000000000 - 0x00ffffffffffffff user map
+ * 0x0100000000000000 - 0xf0ffffffffffffff does not exist (hole)
+ * 0xff00000000000000 - 0xff00ffffffffffff recursive page table (2048TB slot)
+ * 0xff01000000000000 - 0xff20ffffffffffff direct map (32 x 2048TB slots)
+ * 0xff21000000000000 - 0xffff807fffffffff unused
+ * 0xffff808000000000 - 0xffff847fffffffff large map (can be tuned up)
+ * 0xffff848000000000 - 0xfffff77fffffffff unused (large map extends there)
+ * 0xfffff60000000000 - 0xfffff7ffffffffff 2TB KMSAN origin map, optional
+ * 0xfffff78000000000 - 0xfffff7bfffffffff 512GB KASAN shadow map, optional
+ * 0xfffff80000000000 - 0xfffffbffffffffff 4TB unused
+ * 0xfffffc0000000000 - 0xfffffdffffffffff 2TB KMSAN shadow map, optional
+ * 0xfffffe0000000000 - 0xffffffffffffffff 2TB kernel map
+ *
* Within the kernel map:
*
* 0xfffffe0000000000 vm_page_array
* 0xffffffff80000000 KERNBASE
*/
-#define VM_MIN_KERNEL_ADDRESS KV4ADDR(KPML4BASE, 0, 0, 0)
-#define VM_MAX_KERNEL_ADDRESS KV4ADDR(KPML4BASE + NKPML4E - 1, \
- NPDPEPG-1, NPDEPG-1, NPTEPG-1)
-
-#define DMAP_MIN_ADDRESS KV4ADDR(DMPML4I, 0, 0, 0)
-#define DMAP_MAX_ADDRESS KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0)
+#define VM_MIN_KERNEL_ADDRESS_LA48 KV4ADDR(KPML4BASE, 0, 0, 0)
+#define VM_MIN_KERNEL_ADDRESS kva_layout.km_low
+#define VM_MAX_KERNEL_ADDRESS kva_layout.km_high
#define KASAN_MIN_ADDRESS KV4ADDR(KASANPML4I, 0, 0, 0)
#define KASAN_MAX_ADDRESS KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
@@ -199,9 +211,6 @@
#define KMSAN_ORIG_MAX_ADDRESS KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
0, 0, 0)
-#define LARGEMAP_MIN_ADDRESS KV4ADDR(LMSPML4I, 0, 0, 0)
-#define LARGEMAP_MAX_ADDRESS KV4ADDR(LMEPML4I + 1, 0, 0, 0)
-
/*
* Formally kernel mapping starts at KERNBASE, but kernel linker
* script leaves first PDE reserved. For legacy BIOS boot, kernel is
@@ -239,21 +248,21 @@
* vt fb startup needs to be reworked.
*/
#define PHYS_IN_DMAP(pa) (dmaplimit == 0 || (pa) < dmaplimit)
-#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \
- (va) < (DMAP_MIN_ADDRESS + dmaplimit))
+#define VIRT_IN_DMAP(va) \
+ ((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_high)
#define PMAP_HAS_DMAP 1
-#define PHYS_TO_DMAP(x) ({ \
+#define PHYS_TO_DMAP(x) __extension__ ({ \
KASSERT(PHYS_IN_DMAP(x), \
("physical address %#jx not covered by the DMAP", \
(uintmax_t)x)); \
- (x) | DMAP_MIN_ADDRESS; })
+ (x) + kva_layout.dmap_low; })
-#define DMAP_TO_PHYS(x) ({ \
+#define DMAP_TO_PHYS(x) __extension__ ({ \
KASSERT(VIRT_IN_DMAP(x), \
("virtual address %#jx not covered by the DMAP", \
(uintmax_t)x)); \
- (x) & ~DMAP_MIN_ADDRESS; })
+ (x) - kva_layout.dmap_low; })
/*
* amd64 maps the page array into KVA so that it can be more easily
@@ -274,7 +283,7 @@
*/
#ifndef VM_KMEM_SIZE_MAX
#define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \
- VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
+ kva_layout.km_low + 1) * 3 / 5)
#endif
/* initial pagein size of beginning of executable file */
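
The operator change in PHYS_TO_DMAP()/DMAP_TO_PHYS() above is the subtle one:
the old LA48 DMAP base was aligned to a multiple of the map's size, so
`(x) | DMAP_MIN_ADDRESS' and `(x) + DMAP_MIN_ADDRESS' were equivalent, and
masking the base off recovered the physical address. With the base now a
runtime value that differs between LA48 and LA57, only genuine addition and
subtraction are correct for both layouts. A minimal sketch of the resulting
behavior (helper names are ours):

	static inline vm_offset_t
	phys_to_dmap(vm_paddr_t pa)
	{
		return ((vm_offset_t)pa + kva_layout.dmap_low);
	}

	static inline vm_paddr_t
	dmap_to_phys(vm_offset_t va)
	{
		return ((vm_paddr_t)(va - kva_layout.dmap_low));
	}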
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 92eb0589f80b..78883296c5b7 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -5767,7 +5767,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m,
VM_PAGE_TO_PHYS(m), oma, ma);
- if ((m->flags & PG_FICTITIOUS) != 0)
+ if (ma == oma || (m->flags & PG_FICTITIOUS) != 0)
return;
#if 0
/*
@@ -5784,22 +5784,20 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
* If page is not mapped by sf buffer, map the page
* transient and do invalidation.
*/
- if (ma != oma) {
- pa = VM_PAGE_TO_PHYS(m);
- sched_pin();
- pc = get_pcpu();
- cmap2_pte2p = pc->pc_cmap2_pte2p;
- mtx_lock(&pc->pc_cmap_lock);
- if (pte2_load(cmap2_pte2p) != 0)
- panic("%s: CMAP2 busy", __func__);
- pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
- vm_memattr_to_pte2(ma)));
- dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
- pte2_clear(cmap2_pte2p);
- tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
- sched_unpin();
- mtx_unlock(&pc->pc_cmap_lock);
- }
+ pa = VM_PAGE_TO_PHYS(m);
+ sched_pin();
+ pc = get_pcpu();
+ cmap2_pte2p = pc->pc_cmap2_pte2p;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (pte2_load(cmap2_pte2p) != 0)
+ panic("%s: CMAP2 busy", __func__);
+ pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
+ vm_memattr_to_pte2(ma)));
+ dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
+ pte2_clear(cmap2_pte2p);
+ tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
}
/*
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index d2e56a270f54..a09da794e77d 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -497,7 +497,8 @@ static bool pmap_pv_insert_l3c(pmap_t pmap, vm_offset_t va, vm_page_t m,
struct rwlock **lockp);
static void pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
- pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
+ pd_entry_t l1e, bool demote_kl2e, struct spglist *free,
+ struct rwlock **lockp);
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
static bool pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
@@ -3847,8 +3848,7 @@ pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
ml3 = pmap_remove_pt_page(pmap, va);
- if (ml3 == NULL)
- panic("pmap_remove_kernel_l2: Missing pt page");
+ KASSERT(ml3 != NULL, ("pmap_remove_kernel_l2: missing pt page"));
ml3pa = VM_PAGE_TO_PHYS(ml3);
newl2 = PHYS_TO_PTE(ml3pa) | L2_TABLE;
@@ -3873,8 +3873,8 @@ pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
* pmap_remove_l2: Do the things to unmap a level 2 superpage.
*/
static int
-pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
- pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
+pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pd_entry_t l1e,
+ bool demote_kl2e, struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pt_entry_t old_l2;
@@ -3910,9 +3910,7 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
}
- if (pmap == kernel_pmap) {
- pmap_remove_kernel_l2(pmap, l2, sva);
- } else {
+ if (pmap != kernel_pmap) {
ml3 = pmap_remove_pt_page(pmap, sva);
if (ml3 != NULL) {
KASSERT(vm_page_any_valid(ml3),
@@ -3923,6 +3921,14 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
ml3->ref_count = 0;
pmap_add_delayed_free_list(ml3, free, false);
}
+ } else if (demote_kl2e) {
+ pmap_remove_kernel_l2(pmap, l2, sva);
+ } else {
+ ml3 = vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(sva));
+ if (vm_page_any_valid(ml3)) {
+ ml3->valid = 0;
+ pmap_zero_page(ml3);
+ }
}
return (pmap_unuse_pt(pmap, sva, l1e, free));
}
@@ -4232,7 +4238,7 @@ pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
if (sva + L2_SIZE == va_next && eva >= va_next) {
pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
- &free, &lock);
+ true, &free, &lock);
continue;
} else if (pmap_demote_l2_locked(pmap, l2, sva,
&lock) == NULL)
@@ -5747,33 +5753,51 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
}
}
SLIST_INIT(&free);
- if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
+ if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
(void)pmap_remove_l2(pmap, l2, va,
- pmap_load(pmap_l1(pmap, va)), &free, lockp);
- else
+ pmap_load(pmap_l1(pmap, va)), false, &free, lockp);
+ } else {
+ if (ADDR_IS_KERNEL(va)) {
+ /*
+ * Try to save the ptp in the trie
+ * before any changes to mappings are
+ * made. Abort on failure.
+ */
+ mt = PTE_TO_VM_PAGE(old_l2);
+ if (pmap_insert_pt_page(pmap, mt, false,
+ false)) {
+ CTR1(KTR_PMAP,
+ "pmap_enter_l2: cannot ins kern ptp va %#lx",
+ va);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ /*
+ * Both pmap_remove_l2() and
+ * pmap_remove_l3_range() will zero fill
+ * the L3 kernel page table page.
+ */
+ }
pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
&free, lockp);
+ if (ADDR_IS_KERNEL(va)) {
+ /*
+ * The TLB could have an intermediate
+ * entry for the L3 kernel page table
+ * page, so request an invalidation at
+ * all levels after clearing the
+ * L2_TABLE entry.
+ */
+ pmap_clear(l2);
+ pmap_s1_invalidate_page(pmap, va, false);
+ }
+ }
+ KASSERT(pmap_load(l2) == 0,
+ ("pmap_enter_l2: non-zero L2 entry %p", l2));
if (!ADDR_IS_KERNEL(va)) {
vm_page_free_pages_toq(&free, true);
- KASSERT(pmap_load(l2) == 0,
- ("pmap_enter_l2: non-zero L2 entry %p", l2));
} else {
KASSERT(SLIST_EMPTY(&free),
("pmap_enter_l2: freed kernel page table page"));
-
- /*
- * Both pmap_remove_l2() and pmap_remove_l3_range()
- * will leave the kernel page table page zero filled.
- * Nonetheless, the TLB could have an intermediate
- * entry for the kernel page table page, so request
- * an invalidation at all levels after clearing
- * the L2_TABLE entry.
- */
- mt = PTE_TO_VM_PAGE(pmap_load(l2));
- if (pmap_insert_pt_page(pmap, mt, false, false))
- panic("pmap_enter_l2: trie insert failed");
- pmap_clear(l2);
- pmap_s1_invalidate_page(pmap, va, false);
}
}
@@ -8045,6 +8069,8 @@ pmap_unmapbios(void *p, vm_size_t size)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pv_memattr == ma)
+ return;
m->md.pv_memattr = ma;
@@ -8424,8 +8450,8 @@ pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
struct spglist free;
SLIST_INIT(&free);
- (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
- lockp);
+ (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), true,
+ &free, lockp);
vm_page_free_pages_toq(&free, true);
}
diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
index f3a840d9bf4b..efa5c8cb67b3 100644
--- a/sys/compat/linuxkpi/common/include/linux/slab.h
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -45,7 +45,7 @@
MALLOC_DECLARE(M_KMALLOC);
-#define kvzalloc(size, flags) kmalloc(size, (flags) | __GFP_ZERO)
+#define kvzalloc(size, flags) kvmalloc(size, (flags) | __GFP_ZERO)
#define kvcalloc(n, size, flags) kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define kzalloc(size, flags) kmalloc(size, (flags) | __GFP_ZERO)
#define kzalloc_node(size, flags, node) kmalloc_node(size, (flags) | __GFP_ZERO, node)
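
This one-word fix matters because in Linux the kv* allocators are the
"kmalloc, falling back to vmalloc" family: kvzalloc() must be able to satisfy
large requests from virtually contiguous memory, which the old expansion into
plain kmalloc() could not. A hedged usage sketch (the function is
hypothetical):

	static int
	example_alloc(size_t n)
	{
		u8 *buf;

		buf = kvzalloc(n, GFP_KERNEL);	/* zeroed; kmalloc- or vmalloc-backed */
		if (buf == NULL)
			return (-ENOMEM);
		/* ... use buf ... */
		kvfree(buf);			/* kvfree() handles either backing */
		return (0);
	}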
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
index ebb92eacbf9a..628af17df853 100644
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -106,6 +106,7 @@ linux_alloc_pages(gfp_t flags, unsigned int order)
if ((flags & M_ZERO) != 0)
req |= VM_ALLOC_ZERO;
+
if (order == 0 && (flags & GFP_DMA32) == 0) {
page = vm_page_alloc_noobj(req);
if (page == NULL)
@@ -113,6 +114,10 @@ linux_alloc_pages(gfp_t flags, unsigned int order)
} else {
vm_paddr_t pmax = (flags & GFP_DMA32) ?
BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
+
+ if ((flags & __GFP_NORETRY) != 0)
+ req |= VM_ALLOC_NORECLAIM;
+
retry:
page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
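
The __GFP_NORETRY translation applies only to this contiguous branch on
purpose: on shortage, the retry: path can otherwise end up reclaiming memory
to manufacture contiguity, which is exactly the latency that Linux callers
passing __GFP_NORETRY opt out of. Caller-side view (a sketch; the function is
hypothetical):

	static struct page *
	try_fast_contig(gfp_t gfp)
	{
		/*
		 * With the fix, __GFP_NORETRY becomes VM_ALLOC_NORECLAIM,
		 * so a failed 4-page contiguous allocation fails fast
		 * instead of triggering contiguous-memory reclamation.
		 */
		return (alloc_pages(gfp | __GFP_NORETRY, 2));
	}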
diff --git a/sys/dev/usb/controller/xhci_pci.c b/sys/dev/usb/controller/xhci_pci.c
index b50e33ea36ce..d5cfd228a429 100644
--- a/sys/dev/usb/controller/xhci_pci.c
+++ b/sys/dev/usb/controller/xhci_pci.c
@@ -99,6 +99,11 @@ xhci_pci_match(device_t self)
return ("AMD Starship USB 3.0 controller");
case 0x149c1022:
return ("AMD Matisse USB 3.0 controller");
+ case 0x15b61022:
+ case 0x15b71022:
+ return ("AMD Raphael/Granite Ridge USB 3.1 controller");
+ case 0x15b81022:
+ return ("AMD Raphael/Granite Ridge USB 2.0 controller");
case 0x15e01022:
case 0x15e11022:
return ("AMD Raven USB 3.1 controller");
@@ -109,6 +114,8 @@ xhci_pci_match(device_t self)
return ("AMD 300 Series USB 3.1 controller");
case 0x43d51022:
return ("AMD 400 Series USB 3.1 controller");
+ case 0x43f71022:
+ return ("AMD 600 Series USB 3.2 controller");
case 0x78121022:
case 0x78141022:
case 0x79141022:
diff --git a/sys/fs/fdescfs/fdesc_vnops.c b/sys/fs/fdescfs/fdesc_vnops.c
index 676ea5de12b8..58a22b8bdc50 100644
--- a/sys/fs/fdescfs/fdesc_vnops.c
+++ b/sys/fs/fdescfs/fdesc_vnops.c
@@ -547,6 +547,8 @@ fdesc_readdir(struct vop_readdir_args *ap)
fmp = VFSTOFDESC(ap->a_vp->v_mount);
if (ap->a_ncookies != NULL)
*ap->a_ncookies = 0;
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 0;
off = (int)uio->uio_offset;
if (off != uio->uio_offset || off < 0 || (u_int)off % UIO_MX != 0 ||
@@ -559,7 +561,12 @@ fdesc_readdir(struct vop_readdir_args *ap)
fcnt = i - 2; /* The first two nodes are `.' and `..' */
FILEDESC_SLOCK(fdp);
- while (i < fdp->fd_nfiles + 2 && uio->uio_resid >= UIO_MX) {
+ while (uio->uio_resid >= UIO_MX) {
+ if (i >= fdp->fd_nfiles + 2) {
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 1;
+ break;
+ }
bzero((caddr_t)dp, UIO_MX);
switch (i) {
case 0: /* `.' */
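
This hunk and the msdosfs/p9fs changes below converge on the same VOP_READDIR
contract, which NFS exports rely on: clear *a_eofflag up front, and set it
only at the point where the directory is positively known to be exhausted,
instead of reconstructing EOF after the loop. The common skeleton
(next_entry() is a stand-in for the per-filesystem iteration):

	static int
	example_readdir(struct vop_readdir_args *ap)
	{
		struct dirent d;

		if (ap->a_eofflag != NULL)
			*ap->a_eofflag = 0;		/* assume more entries */
		while (ap->a_uio->uio_resid >= sizeof(d)) {
			if (!next_entry(ap->a_vp, ap->a_uio, &d)) {
				if (ap->a_eofflag != NULL)
					*ap->a_eofflag = 1;	/* definite EOF */
				break;
			}
			/* uiomove() the entry out */
		}
		return (0);
	}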
diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c
index 5db61c8951f6..33e0d94954d7 100644
--- a/sys/fs/msdosfs/msdosfs_vnops.c
+++ b/sys/fs/msdosfs/msdosfs_vnops.c
@@ -1521,6 +1521,9 @@ msdosfs_readdir(struct vop_readdir_args *ap)
ap->a_vp, uio, ap->a_cred, ap->a_eofflag);
#endif
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 0;
+
/*
* msdosfs_readdir() won't operate properly on regular files since
* it does i/o only with the filesystem vnode, and hence can
@@ -1614,8 +1617,11 @@ msdosfs_readdir(struct vop_readdir_args *ap)
on = (offset - bias) & pmp->pm_crbomask;
n = min(pmp->pm_bpcluster - on, uio->uio_resid);
diff = dep->de_FileSize - (offset - bias);
- if (diff <= 0)
- break;
+ if (diff <= 0) {
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 1;
+ goto out;
+ }
n = min(n, diff);
error = pcbmap(dep, lbn, &bn, &cn, &blsize);
if (error)
@@ -1646,6 +1652,8 @@ msdosfs_readdir(struct vop_readdir_args *ap)
*/
if (dentp->deName[0] == SLOT_EMPTY) {
brelse(bp);
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 1;
goto out;
}
/*
@@ -1743,15 +1751,6 @@ out:
uio->uio_offset = off;
- /*
- * Set the eofflag (NFS uses it)
- */
- if (ap->a_eofflag) {
- if (dep->de_FileSize - (offset - bias) <= 0)
- *ap->a_eofflag = 1;
- else
- *ap->a_eofflag = 0;
- }
return (error);
}
diff --git a/sys/fs/p9fs/p9fs_vnops.c b/sys/fs/p9fs/p9fs_vnops.c
index 56bf766ef801..227e2b93883e 100644
--- a/sys/fs/p9fs/p9fs_vnops.c
+++ b/sys/fs/p9fs/p9fs_vnops.c
@@ -1784,6 +1784,9 @@ p9fs_readdir(struct vop_readdir_args *ap)
return (EBADF);
}
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 0;
+
io_buffer = uma_zalloc(p9fs_io_buffer_zone, M_WAITOK);
/* We haven't reached the end yet. read more. */
@@ -1801,8 +1804,11 @@ p9fs_readdir(struct vop_readdir_args *ap)
count = p9_client_readdir(vofid, (char *)io_buffer,
diroffset, count);
- if (count == 0)
+ if (count == 0) {
+ if (ap->a_eofflag != NULL)
+ *ap->a_eofflag = 1;
break;
+ }
if (count < 0) {
error = EIO;
diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC
index e7d460af21d4..f577cd07ac7c 100644
--- a/sys/i386/conf/GENERIC
+++ b/sys/i386/conf/GENERIC
@@ -17,6 +17,8 @@
# in NOTES.
#
+#NO_UNIVERSE
+
cpu I486_CPU
cpu I586_CPU
cpu I686_CPU
diff --git a/sys/i386/conf/GENERIC-NODEBUG b/sys/i386/conf/GENERIC-NODEBUG
index ea07613a796f..a93304481b5f 100644
--- a/sys/i386/conf/GENERIC-NODEBUG
+++ b/sys/i386/conf/GENERIC-NODEBUG
@@ -25,6 +25,8 @@
# in NOTES.
#
+#NO_UNIVERSE
+
include GENERIC
include "std.nodebug"
diff --git a/sys/i386/conf/LINT b/sys/i386/conf/LINT
index 41207eb63cb9..2e947202f723 100644
--- a/sys/i386/conf/LINT
+++ b/sys/i386/conf/LINT
@@ -1,3 +1,4 @@
+#NO_UNIVERSE
include "../../conf/NOTES"
include "../../x86/conf/NOTES"
diff --git a/sys/i386/conf/MINIMAL b/sys/i386/conf/MINIMAL
index 2a06eb84bff8..8019617ca4d4 100644
--- a/sys/i386/conf/MINIMAL
+++ b/sys/i386/conf/MINIMAL
@@ -31,6 +31,8 @@
# in NOTES.
#
+#NO_UNIVERSE
+
cpu I486_CPU
cpu I586_CPU
cpu I686_CPU
diff --git a/sys/i386/conf/PAE b/sys/i386/conf/PAE
index a39d32d77106..72af9e9a9eec 100644
--- a/sys/i386/conf/PAE
+++ b/sys/i386/conf/PAE
@@ -2,6 +2,8 @@
# PAE -- Generic kernel configuration file for FreeBSD/i386 PAE
#
+#NO_UNIVERSE
+
include GENERIC
ident PAE-GENERIC
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 465b4d0f365b..b44f5e08bbcf 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -876,14 +876,16 @@ __CONCAT(PMTYPE, init_pat)(void)
#ifdef PMAP_PAE_COMP
static void *
-pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
- int wait)
+pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *sflagsp,
+ int flags)
{
/* Inform UMA that this allocator uses kernel_map/object. */
- *flags = UMA_SLAB_KERNEL;
+ *sflagsp = UMA_SLAB_KERNEL;
+ /* contig allocations cannot be NEVERFREED */
+ flags &= ~M_NEVERFREED;
return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
- bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
+ bytes, flags, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
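
The parameter rename here is more than style: the trailing argument of a UMA
backend allocator carries malloc-style M_* flags, and the new line strips
M_NEVERFREED before handing them to kmem_alloc_contig_domainset(), since
contiguous kmem pages may legitimately be freed later. For orientation, the
allocator typedef (as declared in sys/vm/uma.h, quoted from memory):

	typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, int domain,
	    uint8_t *pflag, int wait);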
@@ -5617,6 +5619,8 @@ __CONCAT(PMTYPE, unmapdev)(void *p, vm_size_t size)
static void
__CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pat_mode == ma)
+ return;
m->md.pat_mode = ma;
if ((m->flags & PG_FICTITIOUS) != 0)
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 93bdd41d1515..a27ab33b34da 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -557,8 +557,10 @@ open_to_fde_flags(int open_flags, bool sticky_orb)
{ .f = O_CLOFORK, .t = UF_FOCLOSE },
{ .f = O_RESOLVE_BENEATH, .t = UF_RESOLVE_BENEATH },
};
+#if defined(__clang__) && __clang_major__ >= 19
_Static_assert(open_to_fde_flags_s[nitems(open_to_fde_flags_s) - 1].f ==
O_RESOLVE_BENEATH, "O_RESOLVE_BENEATH must be last, for sticky_orb");
+#endif
return (flags_trans(open_to_fde_flags_s, nitems(open_to_fde_flags_s) -
(sticky_orb ? 0 : 1), open_flags));
diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c
index 0edb631d1475..464efda1e91a 100644
--- a/sys/kern/subr_asan.c
+++ b/sys/kern/subr_asan.c
@@ -263,8 +263,7 @@ kasan_mark(const void *addr, size_t size, size_t redzsize, uint8_t code)
if (__predict_false(!kasan_enabled))
return;
- if ((vm_offset_t)addr >= DMAP_MIN_ADDRESS &&
- (vm_offset_t)addr < DMAP_MAX_ADDRESS)
+ if (kasan_md_unsupported((vm_offset_t)addr))
return;
KASSERT((vm_offset_t)addr >= VM_MIN_KERNEL_ADDRESS &&
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 18388ae5f232..bac7d0080c71 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -338,8 +338,9 @@ ast_handler(struct thread *td, struct trapframe *framep, bool dtor)
td->td_ast = 0;
}
- CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
- td->td_proc->p_comm);
+ CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td,
+ td->td_proc == NULL ? -1 : td->td_proc->p_pid,
+ td->td_proc == NULL ? "" : td->td_proc->p_comm);
KASSERT(framep == NULL || TRAPF_USERMODE(framep),
("ast in kernel mode"));
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
index 9867a718e148..5b52bfa80e3b 100644
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -718,6 +718,7 @@ lagg_capabilities(struct lagg_softc *sc)
sc->sc_ifp->if_capenable = ena;
sc->sc_ifp->if_capenable2 = ena2;
sc->sc_ifp->if_hwassist = hwa;
+ (void)if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax);
getmicrotime(&sc->sc_ifp->if_lastchange);
if (sc->sc_ifflags & IFF_DEBUG)
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index 36fab1a03ee6..452a8eb4024b 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1370,7 +1370,6 @@ struct pf_kruleset {
struct pf_krulequeue queues[2];
struct {
struct pf_krulequeue *ptr;
- struct pf_krule **ptr_array;
u_int32_t rcount;
u_int32_t ticket;
int open;
@@ -2500,7 +2499,7 @@ int pfr_match_addr(struct pfr_ktable *, struct pf_addr *, sa_family_t);
void pfr_update_stats(struct pfr_ktable *, struct pf_addr *, sa_family_t,
u_int64_t, int, int, int);
int pfr_pool_get(struct pfr_ktable *, int *, struct pf_addr *, sa_family_t,
- pf_addr_filter_func_t);
+ pf_addr_filter_func_t, bool);
void pfr_dynaddr_update(struct pfr_ktable *, struct pfi_dynaddr *);
struct pfr_ktable *
pfr_attach_table(struct pf_kruleset *, char *);
@@ -2534,6 +2533,8 @@ int pfr_ina_rollback(struct pfr_table *, u_int32_t, int *, int);
int pfr_ina_commit(struct pfr_table *, u_int32_t, int *, int *, int);
int pfr_ina_define(struct pfr_table *, struct pfr_addr *, int, int *,
int *, u_int32_t, int);
+struct pfr_ktable
+ *pfr_ktable_select_active(struct pfr_ktable *);
MALLOC_DECLARE(PFI_MTYPE);
VNET_DECLARE(struct pfi_kkif *, pfi_all);
@@ -2712,7 +2713,6 @@ u_short pf_map_addr(u_int8_t, struct pf_krule *,
u_short pf_map_addr_sn(u_int8_t, struct pf_krule *,
struct pf_addr *, struct pf_addr *,
struct pfi_kkif **nkif, struct pf_addr *,
- struct pf_ksrc_node **, struct pf_srchash **,
struct pf_kpool *, pf_sn_types_t);
int pf_get_transaddr_af(struct pf_krule *,
struct pf_pdesc *);
diff --git a/sys/netinet6/raw_ip6.c b/sys/netinet6/raw_ip6.c
index 0379ef7c789a..c90a1213bd66 100644
--- a/sys/netinet6/raw_ip6.c
+++ b/sys/netinet6/raw_ip6.c
@@ -765,8 +765,7 @@ rip6_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
}
if (ifa != NULL &&
((struct in6_ifaddr *)ifa)->ia6_flags &
- (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|
- IN6_IFF_DETACHED|IN6_IFF_DEPRECATED)) {
+ (IN6_IFF_NOTREADY|IN6_IFF_DETACHED|IN6_IFF_DEPRECATED)) {
NET_EPOCH_EXIT(et);
return (EADDRNOTAVAIL);
}
diff --git a/sys/netlink/netlink_message_parser.h b/sys/netlink/netlink_message_parser.h
index 8492ecb3021b..720317ed74f3 100644
--- a/sys/netlink/netlink_message_parser.h
+++ b/sys/netlink/netlink_message_parser.h
@@ -209,7 +209,8 @@ int nlattr_get_nested(struct nlattr *nla, struct nl_pstate *npt,
int nlattr_get_nested_ptr(struct nlattr *nla, struct nl_pstate *npt,
const void *arg, void *target);
-bool nlmsg_report_err_msg(struct nl_pstate *npt, const char *fmt, ...);
+bool nlmsg_report_err_msg(struct nl_pstate *npt, const char *fmt, ...)
+ __printflike(2, 3);
#define NLMSG_REPORT_ERR_MSG(_npt, _fmt, ...) { \
nlmsg_report_err_msg(_npt, _fmt, ## __VA_ARGS__); \
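
With __printflike(2, 3) in place, format/argument mismatches in netlink error
reporting surface as -Wformat diagnostics at build time rather than as garbage
in error messages. A contrived example of what now gets caught (npt assumed in
scope):

	int ifindex = -1;

	/* An int passed where %s expects a string: now a compiler warning. */
	nlmsg_report_err_msg(npt, "invalid ifindex %s", ifindex);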
diff --git a/sys/netpfil/ipfilter/netinet/ip_fil_freebsd.c b/sys/netpfil/ipfilter/netinet/ip_fil_freebsd.c
index 04850549db98..6eb6cf2a7a47 100644
--- a/sys/netpfil/ipfilter/netinet/ip_fil_freebsd.c
+++ b/sys/netpfil/ipfilter/netinet/ip_fil_freebsd.c
@@ -463,13 +463,14 @@ ipf_send_ip(fr_info_t *fin, mb_t *m)
int
ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
{
- int err, hlen, xtra, iclen, ohlen, avail, code;
+ int err, hlen, xtra, iclen, ohlen, avail;
struct in_addr dst4;
struct icmp *icmp;
struct mbuf *m;
i6addr_t dst6;
void *ifp;
#ifdef USE_INET6
+ int code;
ip6_t *ip6;
#endif
ip_t *ip, *ip2;
@@ -477,8 +478,8 @@ ipf_send_icmp_err(int type, fr_info_t *fin, int dst)
if ((type < 0) || (type >= ICMP_MAXTYPE))
return (-1);
- code = fin->fin_icode;
#ifdef USE_INET6
+ code = fin->fin_icode;
/* See NetBSD ip_fil_netbsd.c r1.4: */
if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int)))
return (-1);
diff --git a/sys/netpfil/pf/if_pflog.c b/sys/netpfil/pf/if_pflog.c
index 0a84f9d680ac..cb96d2fcc44c 100644
--- a/sys/netpfil/pf/if_pflog.c
+++ b/sys/netpfil/pf/if_pflog.c
@@ -284,9 +284,9 @@ pflog_packet(uint8_t action, u_int8_t reason,
* state lock, since this leads to unsafe LOR.
* These conditions are very very rare, however.
*/
- if (trigger->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done && lookupsafe)
+ if (trigger->log & PF_LOG_USER && !pd->lookup.done && lookupsafe)
pd->lookup.done = pf_socket_lookup(pd);
- if (pd->lookup.done > 0)
+ if (trigger->log & PF_LOG_USER && pd->lookup.done > 0)
hdr.uid = pd->lookup.uid;
else
hdr.uid = -1;
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index 2391edaf1a5a..4e03584b8f85 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -532,6 +532,7 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
struct pf_kpooladdr *rpool_first;
int error;
uint8_t rt = 0;
+ int n = 0;
PF_RULES_RASSERT();
@@ -557,10 +558,12 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
*/
if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) &&
(flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) <
- pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
- r = pf_main_ruleset.rules[
- PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)];
- else
+ pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) {
+ TAILQ_FOREACH(r, pf_main_ruleset.rules[
+ PF_RULESET_FILTER].active.ptr, entries)
+ if (ntohl(sp->pfs_1301.rule) == n++)
+ break;
+ } else
r = &V_pf_default_rule;
/*
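
Dropping the ptr_array shadow copy trades O(1) rule indexing for an O(n) walk
of the active rule list, which is acceptable on this rare state-import path,
and it removes the allocation burden pf_ioctl.c used to shoulder (see its
hunks below). The lookup the new code performs, as a standalone sketch (helper
name is ours):

	static struct pf_krule *
	nth_rule(struct pf_krulequeue *rules, uint32_t idx)
	{
		struct pf_krule *r;
		uint32_t n = 0;

		TAILQ_FOREACH(r, rules, entries)
			if (n++ == idx)
				return (r);
		return (NULL);
	}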
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index d5f01e5c4956..63d513fb1956 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -5901,18 +5901,17 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
M_SETFIB(pd->m, pd->act.rtableid);
if (r->rt) {
- struct pf_ksrc_node *sn = NULL;
- struct pf_srchash *snh = NULL;
/*
* Set act.rt here instead of in pf_rule_to_actions() because
* it is applied only from the last pass rule.
*/
pd->act.rt = r->rt;
- /* Don't use REASON_SET, pf_map_addr increases the reason counters */
- ctx.reason = pf_map_addr_sn(pd->af, r, pd->src, &pd->act.rt_addr,
- &pd->act.rt_kif, NULL, &sn, &snh, &(r->route), PF_SN_ROUTE);
- if (ctx.reason != 0)
+ if ((transerror = pf_map_addr_sn(pd->af, r, pd->src,
+ &pd->act.rt_addr, &pd->act.rt_kif, NULL, &(r->route),
+ PF_SN_ROUTE)) != PFRES_MATCH) {
+ REASON_SET(&ctx.reason, transerror);
goto cleanup;
+ }
}
if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
@@ -6056,9 +6055,16 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
/* src node for translation rule */
if (ctx->nr != NULL) {
KASSERT(ctx->nat_pool != NULL, ("%s: nat_pool is NULL", __func__));
+ /*
+ * The NAT addresses are chosen during ruleset parsing.
+ * The new afto code stores post-nat addresses in nsaddr.
+ * The old nat code (also used for new nat-to rules) creates
+ * state keys and stores addresses in them.
+ */
if ((ctx->nat_pool->opts & PF_POOL_STICKYADDR) &&
(sn_reason = pf_insert_src_node(sns, snhs, ctx->nr,
- &ctx->sk->addr[pd->sidx], pd->af, &ctx->nk->addr[1], NULL,
+ ctx->sk ? &(ctx->sk->addr[pd->sidx]) : pd->src, pd->af,
+ ctx->nk ? &(ctx->nk->addr[1]) : &(pd->nsaddr), NULL,
PF_SN_NAT)) != 0 ) {
REASON_SET(&ctx->reason, sn_reason);
goto csfailed;
@@ -6213,7 +6219,7 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
if (ctx->tag > 0)
s->tag = ctx->tag;
if (pd->proto == IPPROTO_TCP && (tcp_get_flags(th) & (TH_SYN|TH_ACK)) ==
- TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
+ TH_SYN && r->keep_state == PF_STATE_SYNPROXY && pd->dir == PF_IN) {
pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
pf_undo_nat(ctx->nr, pd, bip_sum);
s->src.seqhi = arc4random();
@@ -9062,6 +9068,9 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
goto bad;
}
+ if (r->rt == PF_DUPTO)
+ skip_test = true;
+
if (pd->dir == PF_IN && !skip_test) {
if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp,
&pd->act) != PF_PASS) {
@@ -9364,6 +9373,9 @@ pf_route6(struct pf_krule *r, struct ifnet *oifp,
goto bad;
}
+ if (r->rt == PF_DUPTO)
+ skip_test = true;
+
if (pd->dir == PF_IN && !skip_test) {
if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT,
ifp, &m0, inp, &pd->act) != PF_PASS) {
diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h
index 2009d2907985..cfff58064922 100644
--- a/sys/netpfil/pf/pf.h
+++ b/sys/netpfil/pf/pf.h
@@ -140,7 +140,7 @@ enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
#define PF_LOG 0x01
#define PF_LOG_ALL 0x02
-#define PF_LOG_SOCKET_LOOKUP 0x04
+#define PF_LOG_USER 0x04
#define PF_LOG_FORCE 0x08
#define PF_LOG_MATCHES 0x10
@@ -490,6 +490,7 @@ struct pf_osfp_ioctl {
#define PF_ANCHOR_NAME_SIZE 64
#define PF_ANCHOR_MAXPATH (MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)
+#define PF_OPTIMIZER_TABLE_PFX "__automatic_"
struct pf_rule {
struct pf_rule_addr src;
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index c96741023db9..3caa0d2e3b11 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -1274,7 +1274,9 @@ pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
PF_MD5_UPD(pfr, addr.iflags);
break;
case PF_ADDR_TABLE:
- PF_MD5_UPD(pfr, addr.v.tblname);
+ if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
+ strlen(PF_OPTIMIZER_TABLE_PFX)))
+ PF_MD5_UPD(pfr, addr.v.tblname);
break;
case PF_ADDR_ADDRMASK:
/* XXX ignore af? */
@@ -1357,7 +1359,7 @@ static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
struct pf_kruleset *rs;
- struct pf_krule *rule, **old_array, *old_rule;
+ struct pf_krule *rule, *old_rule;
struct pf_krulequeue *old_rules;
struct pf_krule_global *old_tree;
int error;
@@ -1382,13 +1384,10 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
/* Swap rules, keep the old. */
old_rules = rs->rules[rs_num].active.ptr;
old_rcount = rs->rules[rs_num].active.rcount;
- old_array = rs->rules[rs_num].active.ptr_array;
old_tree = rs->rules[rs_num].active.tree;
rs->rules[rs_num].active.ptr =
rs->rules[rs_num].inactive.ptr;
- rs->rules[rs_num].active.ptr_array =
- rs->rules[rs_num].inactive.ptr_array;
rs->rules[rs_num].active.tree =
rs->rules[rs_num].inactive.tree;
rs->rules[rs_num].active.rcount =
@@ -1418,7 +1417,6 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
}
rs->rules[rs_num].inactive.ptr = old_rules;
- rs->rules[rs_num].inactive.ptr_array = old_array;
rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
rs->rules[rs_num].inactive.rcount = old_rcount;
@@ -1431,9 +1429,6 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
while ((rule = TAILQ_FIRST(old_rules)) != NULL)
pf_unlink_rule_locked(old_rules, rule);
PF_UNLNKDRULES_UNLOCK();
- if (rs->rules[rs_num].inactive.ptr_array)
- free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
- rs->rules[rs_num].inactive.ptr_array = NULL;
rs->rules[rs_num].inactive.rcount = 0;
rs->rules[rs_num].inactive.open = 0;
pf_remove_if_empty_kruleset(rs);
@@ -1456,24 +1451,11 @@ pf_setup_pfsync_matching(struct pf_kruleset *rs)
if (rs_cnt == PF_RULESET_SCRUB)
continue;
- if (rs->rules[rs_cnt].inactive.ptr_array)
- free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
- rs->rules[rs_cnt].inactive.ptr_array = NULL;
-
if (rs->rules[rs_cnt].inactive.rcount) {
- rs->rules[rs_cnt].inactive.ptr_array =
- mallocarray(rs->rules[rs_cnt].inactive.rcount,
- sizeof(struct pf_rule **),
- M_TEMP, M_NOWAIT);
-
- if (!rs->rules[rs_cnt].inactive.ptr_array)
- return (ENOMEM);
- }
-
- TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
- entries) {
- pf_hash_rule_rolling(&ctx, rule);
- (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
+ TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
+ entries) {
+ pf_hash_rule_rolling(&ctx, rule);
+ }
}
}
@@ -2059,6 +2041,19 @@ pf_ioctl_getrules(struct pfioc_rule *pr)
return (0);
}
+static int
+pf_validate_range(uint8_t op, uint16_t port[2])
+{
+ uint16_t a = ntohs(port[0]);
+ uint16_t b = ntohs(port[1]);
+
+ if ((op == PF_OP_RRG && a > b) || /* 34:12, i.e. none */
+ (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
+ (op == PF_OP_XRG && a > b)) /* 34<>22, i.e. all */
+ return 1;
+ return 0;
+}
+
int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
uint32_t pool_ticket, const char *anchor, const char *anchor_call,
@@ -2078,6 +2073,11 @@ pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
#define ERROUT(x) ERROUT_FUNCTION(errout, x)
+ if (pf_validate_range(rule->src.port_op, rule->src.port))
+ ERROUT(EINVAL);
+ if (pf_validate_range(rule->dst.port_op, rule->dst.port))
+ ERROUT(EINVAL);
+
if (rule->ifname[0])
kif = pf_kkif_create(M_WAITOK);
if (rule->rcv_ifname[0])
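
The new pf_validate_range() check, shown standalone (a user-space sketch; the PF_OP_* values are stand-ins, and pf keeps rule ports in network byte order, hence the ntohs()):

#include <stdint.h>
#include <arpa/inet.h>

enum { PF_OP_IRG = 1, PF_OP_XRG = 8, PF_OP_RRG = 9 };	/* stand-ins */

static int
validate_range(uint8_t op, const uint16_t port[2])
{
	uint16_t a = ntohs(port[0]);
	uint16_t b = ntohs(port[1]);

	if ((op == PF_OP_RRG && a > b) ||	/* 34:12  matches nothing */
	    (op == PF_OP_IRG && a >= b) ||	/* 34><12 matches nothing */
	    (op == PF_OP_XRG && a > b))		/* 34<>22 matches everything */
		return (1);
	return (0);
}

Rejecting such rules with EINVAL at add time keeps ranges that can never (or always) match out of the active ruleset.
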
diff --git a/sys/netpfil/pf/pf_lb.c b/sys/netpfil/pf/pf_lb.c
index 308d76c46e5b..26f7ab41eef4 100644
--- a/sys/netpfil/pf/pf_lb.c
+++ b/sys/netpfil/pf/pf_lb.c
@@ -80,7 +80,6 @@ static enum pf_test_status pf_step_into_translation_anchor(int, struct pf_test_c
struct pf_krule *);
static int pf_get_sport(struct pf_pdesc *, struct pf_krule *,
struct pf_addr *, uint16_t *, uint16_t, uint16_t,
- struct pf_ksrc_node **, struct pf_srchash **,
struct pf_kpool *, struct pf_udp_mapping **,
pf_sn_types_t);
static bool pf_islinklocal(const sa_family_t, const struct pf_addr *);
@@ -291,10 +290,8 @@ pf_match_translation(int rs_num, struct pf_test_ctx *ctx)
}
static int
-pf_get_sport(struct pf_pdesc *pd, struct pf_krule *r,
- struct pf_addr *naddr, uint16_t *nport, uint16_t low,
- uint16_t high, struct pf_ksrc_node **sn,
- struct pf_srchash **sh, struct pf_kpool *rpool,
+pf_get_sport(struct pf_pdesc *pd, struct pf_krule *r, struct pf_addr *naddr,
+ uint16_t *nport, uint16_t low, uint16_t high, struct pf_kpool *rpool,
struct pf_udp_mapping **udp_mapping, pf_sn_types_t sn_type)
{
struct pf_state_key_cmp key;
@@ -322,19 +319,24 @@ pf_get_sport(struct pf_pdesc *pd, struct pf_krule *r,
pf_addrcpy(&udp_source.addr, &pd->nsaddr, pd->af);
udp_source.port = pd->nsport;
if (udp_mapping) {
+ struct pf_ksrc_node *sn = NULL;
+ struct pf_srchash *sh = NULL;
*udp_mapping = pf_udp_mapping_find(&udp_source);
if (*udp_mapping) {
pf_addrcpy(naddr,
&(*udp_mapping)->endpoints[1].addr,
pd->af);
*nport = (*udp_mapping)->endpoints[1].port;
- /* Try to find a src_node as per pf_map_addr(). */
- if (*sn == NULL && rpool->opts & PF_POOL_STICKYADDR &&
+ /*
+ * Try to find a src_node as per pf_map_addr().
+ * XXX: Why? This code seems to do nothing.
+ */
+ if (rpool->opts & PF_POOL_STICKYADDR &&
(rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE)
- *sn = pf_find_src_node(&pd->nsaddr, r,
- pd->af, sh, sn_type, false);
- if (*sn != NULL)
- PF_SRC_NODE_UNLOCK(*sn);
+ sn = pf_find_src_node(&pd->nsaddr, r,
+ pd->af, &sh, sn_type, false);
+ if (sn != NULL)
+ PF_SRC_NODE_UNLOCK(sn);
return (0);
} else {
*udp_mapping = pf_udp_mapping_create(pd->af, &pd->nsaddr,
@@ -346,7 +348,7 @@ pf_get_sport(struct pf_pdesc *pd, struct pf_krule *r,
}
if (pf_map_addr_sn(pd->naf, r, &pd->nsaddr, naddr, NULL, &init_addr,
- sn, sh, rpool, sn_type))
+ rpool, sn_type))
goto failed;
if (pd->proto == IPPROTO_ICMP) {
@@ -470,9 +472,8 @@ pf_get_sport(struct pf_pdesc *pd, struct pf_krule *r,
* pick a different source address since we're out
* of free port choices for the current one.
*/
- (*sn) = NULL;
if (pf_map_addr_sn(pd->naf, r, &pd->nsaddr, naddr, NULL,
- &init_addr, sn, sh, rpool, sn_type))
+ &init_addr, rpool, sn_type))
return (1);
break;
case PF_POOL_NONE:
@@ -503,7 +504,6 @@ pf_islinklocal(const sa_family_t af, const struct pf_addr *addr)
static int
pf_get_mape_sport(struct pf_pdesc *pd, struct pf_krule *r,
struct pf_addr *naddr, uint16_t *nport,
- struct pf_ksrc_node **sn, struct pf_srchash **sh,
struct pf_udp_mapping **udp_mapping, struct pf_kpool *rpool)
{
uint16_t psmask, low, highmask;
@@ -523,16 +523,14 @@ pf_get_mape_sport(struct pf_pdesc *pd, struct pf_krule *r,
for (i = cut; i <= ahigh; i++) {
low = (i << ashift) | psmask;
- if (!pf_get_sport(pd, r,
- naddr, nport, low, low | highmask, sn, sh, rpool,
- udp_mapping, PF_SN_NAT))
+ if (!pf_get_sport(pd, r, naddr, nport, low, low | highmask,
+ rpool, udp_mapping, PF_SN_NAT))
return (0);
}
for (i = cut - 1; i > 0; i--) {
low = (i << ashift) | psmask;
- if (!pf_get_sport(pd, r,
- naddr, nport, low, low | highmask, sn, sh, rpool,
- udp_mapping, PF_SN_NAT))
+ if (!pf_get_sport(pd, r, naddr, nport, low, low | highmask,
+ rpool, udp_mapping, PF_SN_NAT))
return (0);
}
return (1);
@@ -545,6 +543,7 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
{
u_short reason = PFRES_MATCH;
struct pf_addr *raddr = NULL, *rmask = NULL;
+ struct pfr_ktable *kt;
uint64_t hashidx;
int cnt;
@@ -600,29 +599,25 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
pf_poolmask(naddr, raddr, rmask, saddr, af);
break;
case PF_POOL_RANDOM:
- if (rpool->cur->addr.type == PF_ADDR_TABLE) {
- cnt = rpool->cur->addr.p.tbl->pfrkt_cnt;
- if (cnt == 0)
- rpool->tblidx = 0;
+ if (rpool->cur->addr.type == PF_ADDR_TABLE ||
+ rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
+ if (rpool->cur->addr.type == PF_ADDR_TABLE)
+ kt = rpool->cur->addr.p.tbl;
else
- rpool->tblidx = (int)arc4random_uniform(cnt);
- memset(&rpool->counter, 0, sizeof(rpool->counter));
- if (pfr_pool_get(rpool->cur->addr.p.tbl,
- &rpool->tblidx, &rpool->counter, af, NULL)) {
+ kt = rpool->cur->addr.p.dyn->pfid_kt;
+ kt = pfr_ktable_select_active(kt);
+ if (kt == NULL) {
reason = PFRES_MAPFAILED;
goto done_pool_mtx; /* unsupported */
}
- pf_addrcpy(naddr, &rpool->counter, af);
- } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
- cnt = rpool->cur->addr.p.dyn->pfid_kt->pfrkt_cnt;
+ cnt = kt->pfrkt_cnt;
if (cnt == 0)
rpool->tblidx = 0;
else
rpool->tblidx = (int)arc4random_uniform(cnt);
memset(&rpool->counter, 0, sizeof(rpool->counter));
- if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
- &rpool->tblidx, &rpool->counter, af,
- pf_islinklocal)) {
+ if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter,
+ af, pf_islinklocal, false)) {
reason = PFRES_MAPFAILED;
goto done_pool_mtx; /* unsupported */
}
@@ -671,29 +666,25 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
hashidx =
pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
- if (rpool->cur->addr.type == PF_ADDR_TABLE) {
- cnt = rpool->cur->addr.p.tbl->pfrkt_cnt;
- if (cnt == 0)
- rpool->tblidx = 0;
+ if (rpool->cur->addr.type == PF_ADDR_TABLE ||
+ rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
+ if (rpool->cur->addr.type == PF_ADDR_TABLE)
+ kt = rpool->cur->addr.p.tbl;
else
- rpool->tblidx = (int)(hashidx % cnt);
- memset(&rpool->counter, 0, sizeof(rpool->counter));
- if (pfr_pool_get(rpool->cur->addr.p.tbl,
- &rpool->tblidx, &rpool->counter, af, NULL)) {
+ kt = rpool->cur->addr.p.dyn->pfid_kt;
+ kt = pfr_ktable_select_active(kt);
+ if (kt == NULL) {
reason = PFRES_MAPFAILED;
goto done_pool_mtx; /* unsupported */
}
- pf_addrcpy(naddr, &rpool->counter, af);
- } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
- cnt = rpool->cur->addr.p.dyn->pfid_kt->pfrkt_cnt;
+ cnt = kt->pfrkt_cnt;
if (cnt == 0)
rpool->tblidx = 0;
else
rpool->tblidx = (int)(hashidx % cnt);
memset(&rpool->counter, 0, sizeof(rpool->counter));
- if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
- &rpool->tblidx, &rpool->counter, af,
- pf_islinklocal)) {
+ if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter,
+ af, pf_islinklocal, false)) {
reason = PFRES_MAPFAILED;
goto done_pool_mtx; /* unsupported */
}
@@ -710,11 +701,12 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
if (!pfr_pool_get(rpool->cur->addr.p.tbl,
- &rpool->tblidx, &rpool->counter, af, NULL))
+ &rpool->tblidx, &rpool->counter, af, NULL, true))
goto get_addr;
} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
- &rpool->tblidx, &rpool->counter, af, pf_islinklocal))
+ &rpool->tblidx, &rpool->counter, af, pf_islinklocal,
+ true))
goto get_addr;
} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
goto get_addr;
@@ -724,9 +716,10 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
rpool->cur = TAILQ_FIRST(&rpool->list);
else
rpool->cur = TAILQ_NEXT(rpool->cur, entries);
+ rpool->tblidx = -1;
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
if (pfr_pool_get(rpool->cur->addr.p.tbl,
- &rpool->tblidx, &rpool->counter, af, NULL)) {
+ &rpool->tblidx, &rpool->counter, af, NULL, true)) {
/* table contains no address of type 'af' */
if (rpool->cur != acur)
goto try_next;
@@ -734,9 +727,9 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
goto done_pool_mtx;
}
} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
- rpool->tblidx = -1;
if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
- &rpool->tblidx, &rpool->counter, af, pf_islinklocal)) {
+ &rpool->tblidx, &rpool->counter, af, pf_islinklocal,
+ true)) {
/* table contains no address of type 'af' */
if (rpool->cur != acur)
goto try_next;
@@ -764,48 +757,41 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
done_pool_mtx:
mtx_unlock(&rpool->mtx);
- if (reason) {
- counter_u64_add(V_pf_status.counters[reason], 1);
- }
-
return (reason);
}
u_short
pf_map_addr_sn(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
struct pf_addr *naddr, struct pfi_kkif **nkif, struct pf_addr *init_addr,
- struct pf_ksrc_node **sn, struct pf_srchash **sh, struct pf_kpool *rpool,
- pf_sn_types_t sn_type)
+ struct pf_kpool *rpool, pf_sn_types_t sn_type)
{
+ struct pf_ksrc_node *sn = NULL;
+ struct pf_srchash *sh = NULL;
u_short reason = 0;
- KASSERT(*sn == NULL, ("*sn not NULL"));
-
/*
* If this is a sticky-address rule, try to find an existing src_node.
- * Request the sh to be unlocked if sn was not found, as we never
- * insert a new sn when parsing the ruleset.
*/
if (rpool->opts & PF_POOL_STICKYADDR &&
(rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE)
- *sn = pf_find_src_node(saddr, r, af, sh, sn_type, false);
+ sn = pf_find_src_node(saddr, r, af, &sh, sn_type, false);
- if (*sn != NULL) {
- PF_SRC_NODE_LOCK_ASSERT(*sn);
+ if (sn != NULL) {
+ PF_SRC_NODE_LOCK_ASSERT(sn);
/* If the supplied address is the same as the current one we've
* been asked before, so tell the caller that there's no other
* address to be had. */
- if (PF_AEQ(naddr, &(*sn)->raddr, af)) {
+ if (PF_AEQ(naddr, &(sn->raddr), af)) {
reason = PFRES_MAPFAILED;
goto done;
}
- pf_addrcpy(naddr, &(*sn)->raddr, af);
+ pf_addrcpy(naddr, &(sn->raddr), af);
if (nkif)
- *nkif = (*sn)->rkif;
+ *nkif = sn->rkif;
if (V_pf_status.debug >= PF_DEBUG_NOISY) {
- printf("pf_map_addr: src tracking maps ");
+ printf("%s: src tracking maps ", __func__);
pf_print_host(saddr, 0, af);
printf(" to ");
pf_print_host(naddr, 0, af);
@@ -820,14 +806,16 @@ pf_map_addr_sn(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
* Source node has not been found. Find a new address and store it
* in variables given by the caller.
*/
- if (pf_map_addr(af, r, saddr, naddr, nkif, init_addr, rpool) != 0) {
- /* pf_map_addr() sets reason counters on its own */
+ if ((reason = pf_map_addr(af, r, saddr, naddr, nkif, init_addr,
+ rpool)) != 0) {
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
+ printf("%s: pf_map_addr has failed\n", __func__);
goto done;
}
if (V_pf_status.debug >= PF_DEBUG_NOISY &&
(rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
- printf("pf_map_addr: selected address ");
+ printf("%s: selected address ", __func__);
pf_print_host(naddr, 0, af);
if (nkif)
printf("@%s", (*nkif)->pfik_name);
@@ -835,12 +823,8 @@ pf_map_addr_sn(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
}
done:
- if ((*sn) != NULL)
- PF_SRC_NODE_UNLOCK(*sn);
-
- if (reason) {
- counter_u64_add(V_pf_status.counters[reason], 1);
- }
+ if (sn != NULL)
+ PF_SRC_NODE_UNLOCK(sn);
return (reason);
}
@@ -890,8 +874,6 @@ pf_get_transaddr(struct pf_test_ctx *ctx, struct pf_krule *r,
{
struct pf_pdesc *pd = ctx->pd;
struct pf_addr *naddr;
- struct pf_ksrc_node *sn = NULL;
- struct pf_srchash *sh = NULL;
uint16_t *nportp;
uint16_t low, high;
u_short reason;
@@ -919,8 +901,8 @@ pf_get_transaddr(struct pf_test_ctx *ctx, struct pf_krule *r,
high = rpool->proxy_port[1];
}
if (rpool->mape.offset > 0) {
- if (pf_get_mape_sport(pd, r, naddr, nportp, &sn,
- &sh, &ctx->udp_mapping, rpool)) {
+ if (pf_get_mape_sport(pd, r, naddr, nportp,
+ &ctx->udp_mapping, rpool)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: MAP-E port allocation (%u/%u/%u)"
" failed\n",
@@ -930,8 +912,8 @@ pf_get_transaddr(struct pf_test_ctx *ctx, struct pf_krule *r,
reason = PFRES_MAPFAILED;
goto notrans;
}
- } else if (pf_get_sport(pd, r, naddr, nportp, low, high, &sn,
- &sh, rpool, &ctx->udp_mapping, PF_SN_NAT)) {
+ } else if (pf_get_sport(pd, r, naddr, nportp, low, high,
+ rpool, &ctx->udp_mapping, PF_SN_NAT)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: NAT proxy port allocation (%u-%u) failed\n",
rpool->proxy_port[0], rpool->proxy_port[1]));
@@ -1017,7 +999,7 @@ pf_get_transaddr(struct pf_test_ctx *ctx, struct pf_krule *r,
uint16_t cut, low, high, nport;
reason = pf_map_addr_sn(pd->af, r, &pd->nsaddr, naddr, NULL,
- NULL, &sn, &sh, rpool, PF_SN_NAT);
+ NULL, rpool, PF_SN_NAT);
if (reason != 0)
goto notrans;
if ((rpool->opts & PF_POOL_TYPEMASK) == PF_POOL_BITMASK)
@@ -1134,8 +1116,6 @@ pf_get_transaddr_af(struct pf_krule *r, struct pf_pdesc *pd)
struct pf_addr ndaddr, nsaddr, naddr;
u_int16_t nport = 0;
int prefixlen = 96;
- struct pf_srchash *sh = NULL;
- struct pf_ksrc_node *sns = NULL;
bzero(&nsaddr, sizeof(nsaddr));
bzero(&ndaddr, sizeof(ndaddr));
@@ -1154,9 +1134,8 @@ pf_get_transaddr_af(struct pf_krule *r, struct pf_pdesc *pd)
panic("pf_get_transaddr_af: no nat pool for source address");
/* get source address and port */
- if (pf_get_sport(pd, r, &nsaddr, &nport,
- r->nat.proxy_port[0], r->nat.proxy_port[1], &sns, &sh, &r->nat,
- NULL, PF_SN_NAT)) {
+ if (pf_get_sport(pd, r, &nsaddr, &nport, r->nat.proxy_port[0],
+ r->nat.proxy_port[1], &r->nat, NULL, PF_SN_NAT)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: af-to NAT proxy port allocation (%u-%u) failed",
r->nat.proxy_port[0], r->nat.proxy_port[1]));
@@ -1182,7 +1161,7 @@ pf_get_transaddr_af(struct pf_krule *r, struct pf_pdesc *pd)
/* get the destination address and port */
if (! TAILQ_EMPTY(&r->rdr.list)) {
if (pf_map_addr_sn(pd->naf, r, &nsaddr, &naddr, NULL, NULL,
- &sns, NULL, &r->rdr, PF_SN_NAT))
+ &r->rdr, PF_SN_NAT))
return (-1);
if (r->rdr.proxy_port[0])
pd->ndport = htons(r->rdr.proxy_port[0]);
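
The PF_POOL_RANDOM and PF_POOL_SRCHASH hunks above now share one shape, reduced here to a sketch (kernel-context pseudo-helper using the pf types from the diff; the function name is hypothetical): resolve the ktable, let pfr_ktable_select_active() substitute the root table if needed, then derive an index.

/*
 * Sketch of the consolidated pool lookup: PF_POOL_SRCHASH differs
 * only in computing tblidx from a hash instead of at random.
 */
static int
pool_pick_random(struct pf_kpool *rpool, sa_family_t af)
{
	struct pfr_ktable *kt;
	int cnt;

	if (rpool->cur->addr.type == PF_ADDR_TABLE)
		kt = rpool->cur->addr.p.tbl;
	else
		kt = rpool->cur->addr.p.dyn->pfid_kt;
	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return (-1);	/* caller maps this to PFRES_MAPFAILED */
	cnt = kt->pfrkt_cnt;
	rpool->tblidx = (cnt == 0) ? 0 : (int)arc4random_uniform(cnt);
	memset(&rpool->counter, 0, sizeof(rpool->counter));
	return (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter, af,
	    pf_islinklocal, false));
}
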
diff --git a/sys/netpfil/pf/pf_table.c b/sys/netpfil/pf/pf_table.c
index 43e4366845a2..9c0151b7da2b 100644
--- a/sys/netpfil/pf/pf_table.c
+++ b/sys/netpfil/pf/pf_table.c
@@ -819,10 +819,10 @@ pfr_create_kentry(struct pfr_addr *ad, bool counters)
static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
- struct pfr_kentry *p, *q;
+ struct pfr_kentry *p;
- for (p = SLIST_FIRST(workq); p != NULL; p = q) {
- q = SLIST_NEXT(p, pfrke_workq);
+ while ((p = SLIST_FIRST(workq)) != NULL) {
+ SLIST_REMOVE_HEAD(workq, pfrke_workq);
pfr_destroy_kentry(p);
}
}
@@ -1680,8 +1680,7 @@ pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
}
if (!(flags & PFR_FLAG_DUMMY)) {
- for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
- q = SLIST_NEXT(p, pfrkt_workq);
+ SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
pfr_commit_ktable(p, tzero);
}
rs->topen = 0;
@@ -1710,7 +1709,7 @@ pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
/* kt might contain addresses */
struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
- struct pfr_kentry *p, *q, *next;
+ struct pfr_kentry *p, *q;
struct pfr_addr ad;
pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
@@ -1720,7 +1719,8 @@ pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
SLIST_INIT(&delq);
SLIST_INIT(&garbageq);
pfr_clean_node_mask(shadow, &addrq);
- SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) {
+ while ((p = SLIST_FIRST(&addrq)) != NULL) {
+ SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
pfr_copyout_addr(&ad, p);
q = pfr_lookup_addr(kt, &ad, 1);
if (q != NULL) {
@@ -1864,8 +1864,7 @@ pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
struct pfr_ktable *p, *q;
- for (p = SLIST_FIRST(workq); p; p = q) {
- q = SLIST_NEXT(p, pfrkt_workq);
+ SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
pfr_setflags_ktable(p, p->pfrkt_nflags);
}
}
@@ -2015,10 +2014,10 @@ pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
- struct pfr_ktable *p, *q;
+ struct pfr_ktable *p;
- for (p = SLIST_FIRST(workq); p; p = q) {
- q = SLIST_NEXT(p, pfrkt_workq);
+ while ((p = SLIST_FIRST(workq)) != NULL) {
+ SLIST_REMOVE_HEAD(workq, pfrkt_workq);
pfr_destroy_ktable(p, flushaddr);
}
}
@@ -2074,17 +2073,16 @@ pfr_lookup_table(struct pfr_table *tbl)
(struct pfr_ktable *)tbl));
}
-int
-pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
+static struct pfr_kentry *
+pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
+ int exact)
{
struct pfr_kentry *ke = NULL;
- int match;
PF_RULES_RASSERT();
- if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
- kt = kt->pfrkt_root;
- if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ kt = pfr_ktable_select_active(kt);
+ if (kt == NULL)
return (0);
switch (af) {
@@ -2121,11 +2119,26 @@ pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
default:
unhandled_af(af);
}
+ if (exact && ke && KENTRY_NETWORK(ke))
+ ke = NULL;
+
+ return (ke);
+}
+
+int
+pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
+{
+ struct pfr_kentry *ke = NULL;
+ int match;
+
+ ke = pfr_kentry_byaddr(kt, a, af, 0);
+
match = (ke && !ke->pfrke_not);
if (match)
pfr_kstate_counter_add(&kt->pfrkt_match, 1);
else
pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
+
return (match);
}
@@ -2135,9 +2148,8 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
{
struct pfr_kentry *ke = NULL;
- if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
- kt = kt->pfrkt_root;
- if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ kt = pfr_ktable_select_active(kt);
+ if (kt == NULL)
return;
switch (af) {
@@ -2281,7 +2293,7 @@ pfr_detach_table(struct pfr_ktable *kt)
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
- sa_family_t af, pf_addr_filter_func_t filter)
+ sa_family_t af, pf_addr_filter_func_t filter, bool loop_once)
{
struct pf_addr *addr, cur, mask, umask_addr;
union sockaddr_union uaddr, umask;
@@ -2306,9 +2318,8 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
unhandled_af(af);
}
- if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
- kt = kt->pfrkt_root;
- if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ kt = pfr_ktable_select_active(kt);
+ if (kt == NULL)
return (-1);
idx = *pidx;
@@ -2327,7 +2338,7 @@ _next_block:
ke = pfr_kentry_byidx(kt, idx, af);
if (ke == NULL) {
/* we don't have this idx, try looping */
- if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
+		if (loop || loop_once ||
+		    (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
return (1);
}
@@ -2455,3 +2466,14 @@ pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
unhandled_af(dyn->pfid_af);
}
}
+
+struct pfr_ktable *
+pfr_ktable_select_active(struct pfr_ktable *kt)
+{
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
+ kt = kt->pfrkt_root;
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (NULL);
+
+ return (kt);
+}
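
The pfr_destroy_kentries()/pfr_destroy_ktables() loops above switch from a hand-rolled first/next walk to a drain loop. In generic, self-contained form (user-space sketch):

#include <sys/queue.h>
#include <stdlib.h>

struct ent {
	SLIST_ENTRY(ent) link;
};
SLIST_HEAD(entq, ent);

/* Unlink each head element before freeing it, so the list never
 * holds a pointer to freed memory. */
static void
destroy_all(struct entq *q)
{
	struct ent *p;

	while ((p = SLIST_FIRST(q)) != NULL) {
		SLIST_REMOVE_HEAD(q, link);
		free(p);
	}
}
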
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 7746b668265d..ae17b3289593 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1469,6 +1469,9 @@ moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
pmap_t pmap;
u_int lo;
+ if (m->md.mdpg_cache_attrs == ma)
+ return;
+
if ((m->oflags & VPO_UNMANAGED) != 0) {
m->md.mdpg_cache_attrs = ma;
return;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 79cea408bb5f..796b1719b8ba 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -2134,6 +2134,9 @@ moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
CTR3(KTR_PMAP, "%s: pa=%#jx, ma=%#x",
__func__, (uintmax_t)VM_PAGE_TO_PHYS(m), ma);
+ if (m->md.mdpg_cache_attrs == ma)
+ return;
+
if ((m->oflags & VPO_UNMANAGED) != 0) {
m->md.mdpg_cache_attrs = ma;
return;
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index 45f7bef8bcc9..a12142fc2d7b 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -5937,6 +5937,10 @@ mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
+
+ if (m->md.mdpg_cache_attrs == ma)
+ return;
+
m->md.mdpg_cache_attrs = ma;
/*
diff --git a/sys/powerpc/include/pcb.h b/sys/powerpc/include/pcb.h
index 050ada6b0f64..0230cf78aba7 100644
--- a/sys/powerpc/include/pcb.h
+++ b/sys/powerpc/include/pcb.h
@@ -66,16 +66,8 @@ struct pcb {
#define PCB_VECREGS 0x200 /* Process had Altivec registers initialized */
struct fpu {
union {
-#if _BYTE_ORDER == _BIG_ENDIAN
- double fpr;
- uint32_t vsr[4];
-#else
uint32_t vsr[4];
- struct {
- double padding;
- double fpr;
- };
-#endif
+ double fpr;
} fpr[32];
double fpscr; /* FPSCR stored as double for easier access */
} pcb_fpu; /* Floating point processor */
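
The simplified union leans on the companion fpu.c change further down: once state is saved and restored with the doubleword VSX forms, the scalar FP value occupies the same memory doubleword on both endiannesses, so the LE-only padding struct becomes unnecessary. Reduced view (illustration, not the kernel header):

#include <stdint.h>

union fpreg {			/* one 128-bit VSR slot in the PCB */
	uint32_t vsr[4];	/* full vector-scalar register image */
	double   fpr;		/* scalar FP view of the first doubleword */
};
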
diff --git a/sys/powerpc/include/ucontext.h b/sys/powerpc/include/ucontext.h
index d35c6c773fe0..dc87edd578bc 100644
--- a/sys/powerpc/include/ucontext.h
+++ b/sys/powerpc/include/ucontext.h
@@ -41,6 +41,7 @@ typedef struct __mcontext {
int mc_flags;
#define _MC_FP_VALID 0x01
#define _MC_AV_VALID 0x02
+#define _MC_VS_VALID 0x04
int mc_onstack; /* saved onstack flag */
int mc_len; /* sizeof(__mcontext) */
__uint64_t mc_avec[32*2]; /* vector register file */
@@ -56,6 +57,7 @@ typedef struct __mcontext32 {
int mc_flags;
#define _MC_FP_VALID 0x01
#define _MC_AV_VALID 0x02
+#define _MC_VS_VALID 0x04
int mc_onstack; /* saved onstack flag */
int mc_len; /* sizeof(__mcontext) */
uint64_t mc_avec[32*2]; /* vector register file */
diff --git a/sys/powerpc/powerpc/exec_machdep.c b/sys/powerpc/powerpc/exec_machdep.c
index 1893d79f29a8..8a33d0f589a7 100644
--- a/sys/powerpc/powerpc/exec_machdep.c
+++ b/sys/powerpc/powerpc/exec_machdep.c
@@ -214,10 +214,10 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
sfpsize = sizeof(sf);
#ifdef __powerpc64__
/*
- * 64-bit PPC defines a 288 byte scratch region
- * below the stack.
+ * 64-bit PPC defines a 512 byte red zone below
+	 * the existing stack (ELF ABI v2 §2.2.2.4)
*/
- rndfsize = 288 + roundup(sizeof(sf), 48);
+ rndfsize = 512 + roundup(sizeof(sf), 48);
#else
rndfsize = roundup(sizeof(sf), 16);
#endif
@@ -349,13 +349,6 @@ sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
if (error != 0)
return (error);
- /*
- * Save FPU state if needed. User may have changed it on
- * signal handler
- */
- if (uc.uc_mcontext.mc_srr1 & PSL_FP)
- save_fpu(td);
-
kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
@@ -432,6 +425,7 @@ grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
}
if (pcb->pcb_flags & PCB_VSX) {
+ mcp->mc_flags |= _MC_VS_VALID;
for (i = 0; i < 32; i++)
memcpy(&mcp->mc_vsxfpreg[i],
&pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));
@@ -481,6 +475,7 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
struct pcb *pcb;
struct trapframe *tf;
register_t tls;
+ register_t msr;
int i;
pcb = td->td_pcb;
@@ -531,6 +526,22 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
tf->srr1 &= ~(PSL_FP | PSL_VSX | PSL_VEC);
pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX | PCB_VEC);
+ /*
+ * Ensure the FPU is also disabled in hardware.
+ *
+ * Without this, it's possible for the register reload to fail if we
+	 * don't switch to an FPU-disabled context before resuming the original
+ * thread. Specifically, if the FPU/VSX unavailable exception is never
+ * hit, then whatever data is still in the FP/VSX registers when
+	 * sigreturn is called will be used by the resumed thread, instead of the
+ * previously saved data from the mcontext.
+ */
+ critical_enter();
+ msr = mfmsr() & ~(PSL_FP | PSL_VSX | PSL_VEC);
+ isync();
+ mtmsr(msr);
+ critical_exit();
+
if (mcp->mc_flags & _MC_FP_VALID) {
/* enable_fpu() will happen lazily on a fault */
pcb->pcb_flags |= PCB_FPREGS;
@@ -539,8 +550,12 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
for (i = 0; i < 32; i++) {
memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
sizeof(double));
- memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
- &mcp->mc_vsxfpreg[i], sizeof(double));
+ }
+ if (mcp->mc_flags & _MC_VS_VALID) {
+ for (i = 0; i < 32; i++) {
+ memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
+ &mcp->mc_vsxfpreg[i], sizeof(double));
+ }
}
}
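
The hardware-disable step added to set_mcontext(), isolated as a sketch (kernel context; mfmsr()/mtmsr(), PSL_* and critical_enter()/critical_exit() as in the surrounding code; the helper name is hypothetical):

/*
 * Clear the FP/VSX/Altivec enable bits so the thread's next FP
 * instruction traps and state is reloaded lazily from the PCB,
 * rather than resuming with stale hardware register contents.
 */
static void
fpu_disable_hw(void)
{
	register_t msr;

	critical_enter();	/* keep the MSR update atomic on this CPU */
	msr = mfmsr() & ~(PSL_FP | PSL_VSX | PSL_VEC);
	isync();
	mtmsr(msr);
	critical_exit();
}
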
diff --git a/sys/powerpc/powerpc/fpu.c b/sys/powerpc/powerpc/fpu.c
index 0eaff2ea4932..cc8f22f7dda3 100644
--- a/sys/powerpc/powerpc/fpu.c
+++ b/sys/powerpc/powerpc/fpu.c
@@ -64,8 +64,19 @@ save_fpu_int(struct thread *td)
* Save the floating-point registers and FPSCR to the PCB
*/
if (pcb->pcb_flags & PCB_VSX) {
- #define SFP(n) __asm ("stxvw4x " #n ", 0,%0" \
+#if _BYTE_ORDER == _BIG_ENDIAN
+ #define SFP(n) __asm("stxvw4x " #n ", 0,%0" \
:: "b"(&pcb->pcb_fpu.fpr[n]));
+#else
+ /*
+	 * stxvw4x will swap words within the FP double word on LE systems,
+ * leading to corruption if VSX is used to store state and FP is
+ * subsequently used to restore state.
+ * Use stxvd2x instead.
+ */
+ #define SFP(n) __asm("stxvd2x " #n ", 0,%0" \
+ :: "b"(&pcb->pcb_fpu.fpr[n]));
+#endif
SFP(0); SFP(1); SFP(2); SFP(3);
SFP(4); SFP(5); SFP(6); SFP(7);
SFP(8); SFP(9); SFP(10); SFP(11);
@@ -76,7 +87,7 @@ save_fpu_int(struct thread *td)
SFP(28); SFP(29); SFP(30); SFP(31);
#undef SFP
} else {
- #define SFP(n) __asm ("stfd " #n ", 0(%0)" \
+ #define SFP(n) __asm("stfd " #n ", 0(%0)" \
:: "b"(&pcb->pcb_fpu.fpr[n].fpr));
SFP(0); SFP(1); SFP(2); SFP(3);
SFP(4); SFP(5); SFP(6); SFP(7);
@@ -149,8 +160,19 @@ enable_fpu(struct thread *td)
:: "b"(&pcb->pcb_fpu.fpscr));
if (pcb->pcb_flags & PCB_VSX) {
- #define LFP(n) __asm ("lxvw4x " #n ", 0,%0" \
+#if _BYTE_ORDER == _BIG_ENDIAN
+ #define LFP(n) __asm("lxvw4x " #n ", 0,%0" \
+ :: "b"(&pcb->pcb_fpu.fpr[n]));
+#else
+ /*
+ * lxvw4x will swap words within the FP double word on LE systems,
+ * leading to corruption if FP is used to store state and VSX is
+ * subsequently used to restore state.
+ * Use lxvd2x instead.
+ */
+ #define LFP(n) __asm("lxvd2x " #n ", 0,%0" \
:: "b"(&pcb->pcb_fpu.fpr[n]));
+#endif
LFP(0); LFP(1); LFP(2); LFP(3);
LFP(4); LFP(5); LFP(6); LFP(7);
LFP(8); LFP(9); LFP(10); LFP(11);
@@ -161,7 +183,7 @@ enable_fpu(struct thread *td)
LFP(28); LFP(29); LFP(30); LFP(31);
#undef LFP
} else {
- #define LFP(n) __asm ("lfd " #n ", 0(%0)" \
+ #define LFP(n) __asm("lfd " #n ", 0(%0)" \
:: "b"(&pcb->pcb_fpu.fpr[n].fpr));
LFP(0); LFP(1); LFP(2); LFP(3);
LFP(4); LFP(5); LFP(6); LFP(7);
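
Taken together, the two fpu.c hunks enforce a single invariant: the instruction that saves a register slot and the one that restores it must agree on the slot's memory layout, and on little-endian only the doubleword forms agree with stfd/lfd. A condensed mirror of the macro selection (illustration; SAVE_VSR/LOAD_VSR are hypothetical names):

#if _BYTE_ORDER == _BIG_ENDIAN
#define SAVE_VSR(n, p)	__asm("stxvw4x " #n ", 0,%0" :: "b"(p))
#define LOAD_VSR(n, p)	__asm("lxvw4x " #n ", 0,%0" :: "b"(p))
#else
/* Doubleword element order matches stfd/lfd layout on LE. */
#define SAVE_VSR(n, p)	__asm("stxvd2x " #n ", 0,%0" :: "b"(p))
#define LOAD_VSR(n, p)	__asm("lxvd2x " #n ", 0,%0" :: "b"(p))
#endif
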
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 5d15bd671285..26efaecc64d1 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -4838,6 +4838,8 @@ pmap_unmapbios(void *p, vm_size_t size)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pv_memattr == ma)
+ return;
m->md.pv_memattr = ma;
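
The same idempotence guard now appears in moea, moea64, mmu_radix and the riscv pmap. In generic form (self-contained sketch; names hypothetical):

struct page_md {
	int memattr;		/* currently applied attribute */
};

/* Skip the potentially expensive remapping work when the page is
 * already in the requested state. */
static void
page_set_memattr(struct page_md *md, int ma)
{
	if (md->memattr == ma)
		return;
	md->memattr = ma;
	/* ...rewrite or invalidate existing mappings here... */
}
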
diff --git a/sys/vm/vm_domainset.c b/sys/vm/vm_domainset.c
index 7b8bf4c77663..b44bdb96b0d4 100644
--- a/sys/vm/vm_domainset.c
+++ b/sys/vm/vm_domainset.c
@@ -131,8 +131,7 @@ static void
vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
{
- KASSERT(di->di_n > 0,
- ("vm_domainset_iter_first: Invalid n %d", di->di_n));
+ KASSERT(di->di_n > 0, ("%s: Invalid n %d", __func__, di->di_n));
switch (di->di_policy) {
case DOMAINSET_POLICY_FIRSTTOUCH:
/*
@@ -149,11 +148,10 @@ vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
vm_domainset_iter_prefer(di, domain);
break;
default:
- panic("vm_domainset_iter_first: Unknown policy %d",
- di->di_policy);
+ panic("%s: Unknown policy %d", __func__, di->di_policy);
}
KASSERT(*domain < vm_ndomains,
- ("vm_domainset_iter_next: Invalid domain %d", *domain));
+ ("%s: Invalid domain %d", __func__, *domain));
}
static void
@@ -189,13 +187,11 @@ vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain)
di->di_n = di->di_domain->ds_cnt;
break;
default:
- panic("vm_domainset_iter_first: Unknown policy %d",
- di->di_policy);
+ panic("%s: Unknown policy %d", __func__, di->di_policy);
}
- KASSERT(di->di_n > 0,
- ("vm_domainset_iter_first: Invalid n %d", di->di_n));
+ KASSERT(di->di_n > 0, ("%s: Invalid n %d", __func__, di->di_n));
KASSERT(*domain < vm_ndomains,
- ("vm_domainset_iter_first: Invalid domain %d", *domain));
+ ("%s: Invalid domain %d", __func__, *domain));
}
void
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 875c22d27628..e7d7b6726d2c 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -110,11 +110,18 @@ u_int exec_map_entry_size;
u_int exec_map_entries;
SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
- SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");
+#if defined(__amd64__)
+ &kva_layout.km_low, 0,
+#else
+ SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS,
+#endif
+ "Min kernel address");
SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__)
&vm_max_kernel_address, 0,
+#elif defined(__amd64__)
+ &kva_layout.km_high, 0,
#else
SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
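
For reference, the two SYSCTL_ULONG forms mixed above differ only in whether the value is read through a pointer at query time (kernel-context sketch; the example names are hypothetical):

/* Export by value: constant folded in at compile time. */
SYSCTL_ULONG(_vm, OID_AUTO, example_const, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, 42, "Compile-time constant");

/* Export by pointer: reflects a value computed at boot, as the
 * amd64 kva_layout fields are. */
static u_long example_var;
SYSCTL_ULONG(_vm, OID_AUTO, example_var, CTLFLAG_RD,
    &example_var, 0, "Runtime variable");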