about summary refs log tree commit diff
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2026-04-23 17:05:54 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2026-04-23 17:05:54 +0000
commitec3a0b2a02ee2aa459aa72d751f6670b7f813f31 (patch)
treeb3044ebff63ac1c7021cb1c300f69bb8d281c6a4
parent573e6313bc3b3d7d8da4531fbfaa2763dbe209a7 (diff)
-rw-r--r--sys/amd64/amd64/pmap.c24
-rw-r--r--sys/amd64/amd64/uio_machdep.c4
-rw-r--r--sys/amd64/include/pmap.h4
-rw-r--r--sys/arm64/arm64/pmap.c10
-rw-r--r--sys/arm64/arm64/uio_machdep.c4
-rw-r--r--sys/arm64/include/pmap.h4
-rw-r--r--sys/dev/cxgbe/cxgbei/icl_cxgbei.c4
-rw-r--r--sys/riscv/include/pmap.h4
-rw-r--r--sys/riscv/riscv/pmap.c10
-rw-r--r--sys/riscv/riscv/uio_machdep.c4
10 files changed, 40 insertions, 32 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 66b17f89b4ff..bc4fa33fa175 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8303,7 +8303,8 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
{
void *a_cp, *b_cp;
vm_page_t pages[2];
- vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
+ void *vaddr[2];
+ vm_offset_t a_pg_offset, b_pg_offset;
int cnt;
bool mapped;
@@ -10494,10 +10495,11 @@ done:
*
*/
bool
-pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+pmap_map_io_transient(vm_page_t page[], void *vaddr[], int count,
bool can_fault)
{
vm_paddr_t paddr;
+ vmem_addr_t addr;
bool needs_mapping;
int error __unused, i;
@@ -10510,11 +10512,12 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
paddr = VM_PAGE_TO_PHYS(page[i]);
if (__predict_false(paddr >= dmaplimit)) {
error = vmem_alloc(kernel_arena, PAGE_SIZE,
- M_BESTFIT | M_WAITOK, &vaddr[i]);
+ M_BESTFIT | M_WAITOK, &addr);
KASSERT(error == 0, ("vmem_alloc failed: %d", error));
+ vaddr[i] = (void *)addr;
needs_mapping = true;
} else {
- vaddr[i] = PHYS_TO_DMAP(paddr);
+ vaddr[i] = (void *)PHYS_TO_DMAP(paddr);
}
}
@@ -10542,11 +10545,11 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
* thread to the CPU and instead add a global
* mapping visible to all CPUs.
*/
- pmap_qenter(vaddr[i], &page[i], 1);
+ pmap_qenter((vm_offset_t)vaddr[i], &page[i], 1);
} else {
- pmap_kenter_attr(vaddr[i], paddr,
+ pmap_kenter_attr((vm_offset_t)vaddr[i], paddr,
page[i]->md.pat_mode);
- pmap_invlpg(kernel_pmap, vaddr[i]);
+ pmap_invlpg(kernel_pmap, (vm_offset_t)vaddr[i]);
}
}
}
@@ -10555,7 +10558,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
void
-pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+pmap_unmap_io_transient(vm_page_t page[], void *vaddr[], int count,
bool can_fault)
{
vm_paddr_t paddr;
@@ -10567,8 +10570,9 @@ pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
paddr = VM_PAGE_TO_PHYS(page[i]);
if (paddr >= dmaplimit) {
if (can_fault)
- pmap_qremove(vaddr[i], 1);
- vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
+ pmap_qremove((vm_offset_t)vaddr[i], 1);
+ vmem_free(kernel_arena, (vm_offset_t)vaddr[i],
+ PAGE_SIZE);
}
}
}
diff --git a/sys/amd64/amd64/uio_machdep.c b/sys/amd64/amd64/uio_machdep.c
index 83795653fa28..16915bccf9f5 100644
--- a/sys/amd64/amd64/uio_machdep.c
+++ b/sys/amd64/amd64/uio_machdep.c
@@ -57,8 +57,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
struct thread *td = curthread;
struct iovec *iov;
- void *cp;
- vm_offset_t page_offset, vaddr;
+ void *cp, *vaddr;
+ vm_offset_t page_offset;
size_t cnt;
int error = 0;
int save = 0;
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 1bf67248125c..1d9124f34434 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -425,8 +425,8 @@ void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
-bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
-void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+bool pmap_map_io_transient(vm_page_t *, void **, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, void **, int, bool);
void pmap_map_delete(pmap_t, vm_offset_t, vm_offset_t);
void pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
void pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 3d42ee058c28..0956f33bcd01 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -9620,10 +9620,11 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
*
*/
bool
-pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+pmap_map_io_transient(vm_page_t page[], void *vaddr[], int count,
bool can_fault)
{
vm_paddr_t paddr;
+ vmem_addr_t addr;
bool needs_mapping;
int error __diagused, i;
@@ -9636,11 +9637,12 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
paddr = VM_PAGE_TO_PHYS(page[i]);
if (__predict_false(!PHYS_IN_DMAP(paddr))) {
error = vmem_alloc(kernel_arena, PAGE_SIZE,
- M_BESTFIT | M_WAITOK, &vaddr[i]);
+ M_BESTFIT | M_WAITOK, &addr);
KASSERT(error == 0, ("vmem_alloc failed: %d", error));
+ vaddr[i] = (void *)addr;
needs_mapping = true;
} else {
- vaddr[i] = PHYS_TO_DMAP(paddr);
+ vaddr[i] = (void *)PHYS_TO_DMAP(paddr);
}
}
@@ -9662,7 +9664,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
void
-pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+pmap_unmap_io_transient(vm_page_t page[], void *vaddr[], int count,
bool can_fault)
{
vm_paddr_t paddr;
diff --git a/sys/arm64/arm64/uio_machdep.c b/sys/arm64/arm64/uio_machdep.c
index 1c12940419cc..976055a69491 100644
--- a/sys/arm64/arm64/uio_machdep.c
+++ b/sys/arm64/arm64/uio_machdep.c
@@ -55,8 +55,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
struct thread *td = curthread;
struct iovec *iov;
- void *cp;
- vm_offset_t page_offset, vaddr;
+ void *cp, *vaddr;
+ vm_offset_t page_offset;
size_t cnt;
int error = 0;
int save = 0;
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 599e5a7f46ef..2ee70fc754da 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -166,8 +166,8 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
void pmap_unmapdev(void *, vm_size_t);
void pmap_unmapbios(void *, vm_size_t);
-bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
-void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+bool pmap_map_io_transient(vm_page_t *, void **, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, void **, int, bool);
bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
pd_entry_t **, pt_entry_t **);
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index 2e7767a0fc27..09023b00248e 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -622,7 +622,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
{
struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
struct mbuf *m, *m_tail;
- vm_offset_t vaddr;
+ void *vaddr;
size_t page_offset, todo, mtodo;
bool mapped;
int i;
@@ -810,7 +810,7 @@ icl_cxgbei_conn_pdu_get_bio(struct icl_conn *ic, struct icl_pdu *ip,
size_t pdu_off, struct bio *bp, size_t bio_off, size_t len)
{
struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
- vm_offset_t vaddr;
+ void *vaddr;
size_t page_offset, todo;
bool mapped;
int i;
diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
index b7be1a0a262f..50496bb9ff20 100644
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -150,8 +150,8 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
void pmap_unmapdev(void *, vm_size_t);
void pmap_unmapbios(void *, vm_size_t);
-bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
-void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+bool pmap_map_io_transient(vm_page_t *, void **, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, void **, int, bool);
bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
pt_entry_t **);
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 90493418c499..f4c5a1eab6ff 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -5300,10 +5300,11 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
*
*/
bool
-pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+pmap_map_io_transient(vm_page_t page[], void *vaddr[], int count,
bool can_fault)
{
vm_paddr_t paddr;
+ vmem_addr_t addr;
bool needs_mapping;
int error __diagused, i;
@@ -5316,11 +5317,12 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
paddr = VM_PAGE_TO_PHYS(page[i]);
if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
error = vmem_alloc(kernel_arena, PAGE_SIZE,
- M_BESTFIT | M_WAITOK, &vaddr[i]);
+ M_BESTFIT | M_WAITOK, &addr);
KASSERT(error == 0, ("vmem_alloc failed: %d", error));
+ vaddr[i] = (void *)addr;
needs_mapping = true;
} else {
- vaddr[i] = PHYS_TO_DMAP(paddr);
+ vaddr[i] = (void *)PHYS_TO_DMAP(paddr);
}
}
@@ -5342,7 +5344,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
void
-pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+pmap_unmap_io_transient(vm_page_t page[], void *vaddr[], int count,
bool can_fault)
{
vm_paddr_t paddr;
diff --git a/sys/riscv/riscv/uio_machdep.c b/sys/riscv/riscv/uio_machdep.c
index 002685c98ccf..f171feb1a4bd 100644
--- a/sys/riscv/riscv/uio_machdep.c
+++ b/sys/riscv/riscv/uio_machdep.c
@@ -55,8 +55,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
struct thread *td = curthread;
struct iovec *iov;
- void *cp;
- vm_offset_t page_offset, vaddr;
+ void *cp, *vaddr;
+ vm_offset_t page_offset;
size_t cnt;
int error = 0;
int save = 0;