about summary refs log tree commit diff
path: root/sys/dev
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2026-04-23 17:05:54 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2026-04-23 17:05:54 +0000
commit2d0634d2e74bb697573afaf888207a8ad1ba3242 (patch)
tree1b4f9223adba92402c54747479047ba96ae1f812 /sys/dev
parent025b39b854202b316f4c5ec3e00d727115ac0eaa (diff)
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/drm2/ttm/ttm_bo_util.c5
-rw-r--r--sys/dev/gve/gve_qpl.c10
-rw-r--r--sys/dev/hwt/hwt_vm.c6
-rw-r--r--sys/dev/hwt/hwt_vm.h2
-rw-r--r--sys/dev/md/md.c4
-rw-r--r--sys/dev/pci/controller/pci_n1sdp.c4
-rw-r--r--sys/dev/spibus/spigen.c4
-rw-r--r--sys/dev/xdma/xdma.h2
-rw-r--r--sys/dev/xdma/xdma_sg.c12
9 files changed, 24 insertions, 25 deletions
diff --git a/sys/dev/drm2/ttm/ttm_bo_util.c b/sys/dev/drm2/ttm/ttm_bo_util.c
index 1734a8103cde..4d4de90b6525 100644
--- a/sys/dev/drm2/ttm/ttm_bo_util.c
+++ b/sys/dev/drm2/ttm/ttm_bo_util.c
@@ -510,7 +510,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
VM_MEMATTR_DEFAULT : ttm_io_prot(mem->placement);
map->bo_kmap_type = ttm_bo_map_vmap;
map->num_pages = num_pages;
- map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE);
+ map->virtual = kva_alloc(num_pages * PAGE_SIZE);
if (map->virtual != NULL) {
for (i = 0; i < num_pages; i++) {
/* XXXKIB hack */
@@ -572,8 +572,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
break;
case ttm_bo_map_vmap:
pmap_qremove(map->virtual, map->num_pages);
- kva_free((vm_offset_t)map->virtual,
- map->num_pages * PAGE_SIZE);
+ kva_free(map->virtual, map->num_pages * PAGE_SIZE);
break;
case ttm_bo_map_kmap:
sf_buf_free(map->sf);
diff --git a/sys/dev/gve/gve_qpl.c b/sys/dev/gve/gve_qpl.c
index f04e82497fa4..1f153d08c126 100644
--- a/sys/dev/gve/gve_qpl.c
+++ b/sys/dev/gve/gve_qpl.c
@@ -47,7 +47,7 @@ gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl)
if (qpl->kva) {
pmap_qremove(qpl->kva, qpl->num_pages);
- kva_free((vm_offset_t)qpl->kva, PAGE_SIZE * qpl->num_pages);
+ kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages);
}
for (i = 0; i < qpl->num_pages; i++) {
@@ -60,7 +60,7 @@ gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl)
if (vm_page_unwire_noq(qpl->pages[i])) {
if (!qpl->kva) {
pmap_qremove(qpl->dmas[i].cpu_addr, 1);
- kva_free((vm_offset_t)qpl->dmas[i].cpu_addr, PAGE_SIZE);
+ kva_free(qpl->dmas[i].cpu_addr, PAGE_SIZE);
}
vm_page_free(qpl->pages[i]);
}
@@ -106,7 +106,7 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
qpl->kva = NULL;
if (single_kva) {
- qpl->kva = (char *)kva_alloc(PAGE_SIZE * npages);
+ qpl->kva = kva_alloc(PAGE_SIZE * npages);
if (!qpl->kva) {
device_printf(priv->dev, "Failed to create the single kva for QPL %d\n", id);
err = ENOMEM;
@@ -120,7 +120,7 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
VM_ALLOC_ZERO);
if (!single_kva) {
- qpl->dmas[i].cpu_addr = (void *)kva_alloc(PAGE_SIZE);
+ qpl->dmas[i].cpu_addr = kva_alloc(PAGE_SIZE);
if (!qpl->dmas[i].cpu_addr) {
device_printf(priv->dev, "Failed to create kva for page %d in QPL %d", i, id);
err = ENOMEM;
@@ -253,7 +253,7 @@ gve_mextadd_free(struct mbuf *mbuf)
*/
if (__predict_false(vm_page_unwire_noq(page))) {
pmap_qremove(va, 1);
- kva_free((vm_offset_t)va, PAGE_SIZE);
+ kva_free(va, PAGE_SIZE);
vm_page_free(page);
}
}
diff --git a/sys/dev/hwt/hwt_vm.c b/sys/dev/hwt/hwt_vm.c
index a3e906d71099..18bbdbe37a99 100644
--- a/sys/dev/hwt/hwt_vm.c
+++ b/sys/dev/hwt/hwt_vm.c
@@ -127,7 +127,7 @@ hwt_vm_alloc_pages(struct hwt_vm *vm, int kva_req)
if (kva_req) {
vm->kvaddr = kva_alloc(vm->npages * PAGE_SIZE);
- if (!vm->kvaddr)
+ if (vm->kvaddr == NULL)
return (ENOMEM);
}
@@ -441,8 +441,8 @@ hwt_vm_destroy_buffers(struct hwt_vm *vm)
vm_page_t m;
int i;
- if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != 0) {
- pmap_qremove((void *)vm->kvaddr, vm->npages);
+ if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != NULL) {
+ pmap_qremove(vm->kvaddr, vm->npages);
kva_free(vm->kvaddr, vm->npages * PAGE_SIZE);
}
VM_OBJECT_WLOCK(vm->obj);
diff --git a/sys/dev/hwt/hwt_vm.h b/sys/dev/hwt/hwt_vm.h
index 5002bd43e093..6c9bdf48ae6d 100644
--- a/sys/dev/hwt/hwt_vm.h
+++ b/sys/dev/hwt/hwt_vm.h
@@ -33,7 +33,7 @@ struct hwt_vm {
vm_page_t *pages;
int npages;
vm_object_t obj;
- vm_offset_t kvaddr;
+ void *kvaddr;
struct cdev *cdev;
struct hwt_context *ctx;
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index e2c0451b5843..2dcb56160fc6 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -1512,7 +1512,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
goto bad;
}
- sc->s_vnode.kva = (char *)kva_alloc(maxphys + PAGE_SIZE);
+ sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE);
return (0);
bad:
VOP_UNLOCK(nd.ni_vp);
@@ -1567,7 +1567,7 @@ mddestroy(struct md_s *sc, struct thread *td)
sc->cred, td);
}
if (sc->s_vnode.kva != NULL)
- kva_free((vm_offset_t)sc->s_vnode.kva, maxphys + PAGE_SIZE);
+ kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE);
break;
case MD_SWAP:
if (sc->s_swap.object != NULL)
diff --git a/sys/dev/pci/controller/pci_n1sdp.c b/sys/dev/pci/controller/pci_n1sdp.c
index c1f8624e45aa..60664eec569e 100644
--- a/sys/dev/pci/controller/pci_n1sdp.c
+++ b/sys/dev/pci/controller/pci_n1sdp.c
@@ -100,7 +100,7 @@ n1sdp_init(struct generic_pcie_n1sdp_softc *sc)
MPASS(m[i] != NULL);
}
- vaddr = (void *)kva_alloc((vm_size_t)BDF_TABLE_SIZE);
+ vaddr = kva_alloc((vm_size_t)BDF_TABLE_SIZE);
if (vaddr == NULL) {
printf("%s: Can't allocate KVA memory.", __func__);
error = ENXIO;
@@ -130,7 +130,7 @@ n1sdp_init(struct generic_pcie_n1sdp_softc *sc)
out_pmap:
pmap_qremove(vaddr, nitems(m));
- kva_free((vm_offset_t)vaddr, (vm_size_t)BDF_TABLE_SIZE);
+ kva_free(vaddr, (vm_size_t)BDF_TABLE_SIZE);
out:
vm_phys_fictitious_unreg_range(paddr, paddr + BDF_TABLE_SIZE);
diff --git a/sys/dev/spibus/spigen.c b/sys/dev/spibus/spigen.c
index 400ae1e139ad..8f7dbb504537 100644
--- a/sys/dev/spibus/spigen.c
+++ b/sys/dev/spibus/spigen.c
@@ -285,7 +285,7 @@ spigen_mmap_cleanup(void *arg)
if (mmap->kvaddr != NULL) {
pmap_qremove(mmap->kvaddr, mmap->bufsize / PAGE_SIZE);
- kva_free((vm_offset_t)mmap->kvaddr, mmap->bufsize);
+ kva_free(mmap->kvaddr, mmap->bufsize);
}
if (mmap->bufobj != NULL)
vm_object_deallocate(mmap->bufobj);
@@ -312,7 +312,7 @@ spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
return (EBUSY);
mmap = malloc(sizeof(*mmap), M_DEVBUF, M_ZERO | M_WAITOK);
- if ((mmap->kvaddr = (void *)kva_alloc(size)) == 0) {
+ if ((mmap->kvaddr = kva_alloc(size)) == 0) {
spigen_mmap_cleanup(mmap);
return (ENOMEM);
}
diff --git a/sys/dev/xdma/xdma.h b/sys/dev/xdma/xdma.h
index 40f6ea8f6f98..2e1c0d64ee46 100644
--- a/sys/dev/xdma/xdma.h
+++ b/sys/dev/xdma/xdma.h
@@ -93,7 +93,7 @@ struct xchan_buf {
bus_dmamap_t map;
uint32_t nsegs;
uint32_t nsegs_left;
- vm_offset_t vaddr;
+ void *vaddr;
vm_offset_t paddr;
vm_size_t size;
};
diff --git a/sys/dev/xdma/xdma_sg.c b/sys/dev/xdma/xdma_sg.c
index ccf721e3c16c..c102e9b9f456 100644
--- a/sys/dev/xdma/xdma_sg.c
+++ b/sys/dev/xdma/xdma_sg.c
@@ -75,9 +75,9 @@ xchan_bufs_free_reserved(xdma_channel_t *xchan)
xr = &xchan->xr_mem[i];
size = xr->buf.size;
if (xr->buf.vaddr) {
- pmap_kremove_device(xr->buf.vaddr, size);
+ pmap_kremove_device((vm_offset_t)xr->buf.vaddr, size);
kva_free(xr->buf.vaddr, size);
- xr->buf.vaddr = 0;
+ xr->buf.vaddr = NULL;
}
if (xr->buf.paddr) {
vmem_free(xchan->vmem, xr->buf.paddr, size);
@@ -115,13 +115,13 @@ xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
xr->buf.size = size;
xr->buf.paddr = addr;
xr->buf.vaddr = kva_alloc(size);
- if (xr->buf.vaddr == 0) {
+ if (xr->buf.vaddr == NULL) {
device_printf(xdma->dev,
"%s: Can't allocate KVA\n", __func__);
xchan_bufs_free_reserved(xchan);
return (ENOMEM);
}
- pmap_kenter_device(xr->buf.vaddr, size, addr);
+ pmap_kenter_device((vm_offset_t)xr->buf.vaddr, size, addr);
}
return (0);
@@ -346,7 +346,7 @@ xchan_seg_done(xdma_channel_t *xchan,
if (xr->req_type == XR_TYPE_MBUF &&
xr->direction == XDMA_DEV_TO_MEM)
m_copyback(xr->m, 0, st->transferred,
- (void *)xr->buf.vaddr);
+ xr->buf.vaddr);
} else if (xchan->caps & XCHAN_CAP_IOMMU) {
if (xr->direction == XDMA_MEM_TO_DEV)
addr = xr->src_addr;
@@ -500,7 +500,7 @@ _xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
if (xchan->caps & XCHAN_CAP_BOUNCE) {
if (xr->direction == XDMA_MEM_TO_DEV)
m_copydata(m, 0, m->m_pkthdr.len,
- (void *)xr->buf.vaddr);
+ xr->buf.vaddr);
seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
} else if (xchan->caps & XCHAN_CAP_IOMMU) {
addr = mtod(m, bus_addr_t);