about summary refs log tree commit diff
path: root/sys/powerpc
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2026-04-23 17:05:54 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2026-04-23 17:05:54 +0000
commitac7d52740249de51e805a7cd577b4374d6a6ae81 (patch)
tree030283d061f0928c274ee5330d22dae8135449ea /sys/powerpc
parentfe2957f591b55d9cbd70cf1325048012fc833fbb (diff)
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/aim/mmu_oea.c6
-rw-r--r--sys/powerpc/aim/mmu_oea64.c11
-rw-r--r--sys/powerpc/aim/mmu_radix.c9
-rw-r--r--sys/powerpc/booke/pmap.c8
-rw-r--r--sys/powerpc/include/mmuvar.h2
-rw-r--r--sys/powerpc/powerpc/pmap_dispatch.c2
6 files changed, 19 insertions, 19 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index d2105d22d07d..e051bac45aed 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -291,7 +291,7 @@ bool moea_is_modified(vm_page_t);
bool moea_is_prefaultable(pmap_t, vm_offset_t);
bool moea_is_referenced(vm_page_t);
int moea_ts_referenced(vm_page_t);
-vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+void *moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
bool moea_page_exists_quick(pmap_t, vm_page_t);
void moea_page_init(vm_page_t);
@@ -1642,7 +1642,7 @@ moea_decode_kernel_ptr(vm_offset_t addr, int *is_user,
* unchanged. We cannot and therefore do not; *virt is updated with the
* first usable address after the mapped region.
*/
-vm_offset_t
+void *
moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
@@ -1653,7 +1653,7 @@ moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
moea_kenter(va, pa_start);
*virt = va;
- return (sva);
+ return ((void *)sva);
}
/*
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 27b214ccf1bf..ac0444ddade0 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -372,7 +372,7 @@ bool moea64_is_modified(vm_page_t);
bool moea64_is_prefaultable(pmap_t, vm_offset_t);
bool moea64_is_referenced(vm_page_t);
int moea64_ts_referenced(vm_page_t);
-vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+void *moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
bool moea64_page_exists_quick(pmap_t, vm_page_t);
void moea64_page_init(vm_page_t);
int moea64_page_wired_mappings(vm_page_t);
@@ -2314,7 +2314,7 @@ moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user,
* unchanged. Other architectures should map the pages starting at '*virt' and
* update '*virt' with the first usable address after the mapped region.
*/
-vm_offset_t
+void *
moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
@@ -2331,7 +2331,7 @@ moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
break;
if (va == pa_end)
- return (PHYS_TO_DMAP(pa_start));
+ return ((void *)PHYS_TO_DMAP(pa_start));
}
sva = *virt;
va = sva;
@@ -2340,7 +2340,7 @@ moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
moea64_kenter(va, pa_start);
*virt = va;
- return (sva);
+ return ((void *)sva);
}
/*
@@ -3419,10 +3419,9 @@ moea64_page_array_startup(long pages)
if (vm_ndomains == 1) {
size = round_page(pages * sizeof(struct vm_page));
pa = vm_phys_early_alloc(0, size);
- vm_page_base = moea64_map(&vm_page_base,
+ vm_page_array = moea64_map(&vm_page_base,
pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
vm_page_array_size = pages;
- vm_page_array = (vm_page_t)vm_page_base;
return;
}
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index 42b906de2ff4..b8be7f188cb6 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -483,7 +483,7 @@ static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
static int mmu_radix_growkernel(vm_offset_t);
static void mmu_radix_init(void);
static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
-static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+static void *mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
static void mmu_radix_pinit0(pmap_t);
static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
@@ -4030,14 +4030,14 @@ out:
return (cleared + not_cleared);
}
-static vm_offset_t
+static void *
mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
vm_paddr_t end, int prot __unused)
{
CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
prot);
- return (PHYS_TO_DMAP(start));
+ return ((void *)PHYS_TO_DMAP(start));
}
void
@@ -6447,7 +6447,8 @@ mmu_radix_page_array_startup(long pages)
pa = vm_phys_early_alloc(-1, end - start);
- start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
+ start = (vm_offset_t)mmu_radix_map(&start, pa, end - start,
+ VM_MEMATTR_DEFAULT);
#ifdef notyet
/* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
for (va = start; va < end; va += L3_PAGE_SIZE) {
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 2ebe4d64fbaa..315e86aa64a9 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -307,7 +307,7 @@ static bool mmu_booke_is_modified(vm_page_t);
static bool mmu_booke_is_prefaultable(pmap_t, vm_offset_t);
static bool mmu_booke_is_referenced(vm_page_t);
static int mmu_booke_ts_referenced(vm_page_t);
-static vm_offset_t mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t,
+static void *mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t,
int);
static int mmu_booke_mincore(pmap_t, vm_offset_t,
vm_paddr_t *);
@@ -1570,7 +1570,7 @@ mmu_booke_remove_all(vm_page_t m)
/*
* Map a range of physical addresses into kernel virtual address space.
*/
-static vm_offset_t
+static void *
mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
@@ -1580,7 +1580,7 @@ mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start,
#ifdef __powerpc64__
/* XXX: Handle memory not starting at 0x0. */
if (pa_end < ctob(Maxmem))
- return (PHYS_TO_DMAP(pa_start));
+ return ((void *)PHYS_TO_DMAP(pa_start));
#endif
while (pa_start < pa_end) {
@@ -1590,7 +1590,7 @@ mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start,
}
*virt = va;
- return (sva);
+ return ((void *)sva);
}
/*
diff --git a/sys/powerpc/include/mmuvar.h b/sys/powerpc/include/mmuvar.h
index 7b2063ba7325..5eba81b88a09 100644
--- a/sys/powerpc/include/mmuvar.h
+++ b/sys/powerpc/include/mmuvar.h
@@ -66,7 +66,7 @@ typedef bool (*pmap_is_modified_t)(vm_page_t);
typedef bool (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t);
typedef bool (*pmap_is_referenced_t)(vm_page_t);
typedef int (*pmap_ts_referenced_t)(vm_page_t);
-typedef vm_offset_t (*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+typedef void *(*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
typedef void (*pmap_object_init_pt_t)(pmap_t, vm_offset_t, vm_object_t,
vm_pindex_t, vm_size_t);
typedef bool (*pmap_page_exists_quick_t)(pmap_t, vm_page_t);
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index fccb7b6bf5a9..81c17c7abf20 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -169,7 +169,7 @@ DEFINE_PMAP_IFUNC(void, copy_pages,
vm_offset_t b_offset, int xfersize));
DEFINE_PMAP_IFUNC(int, growkernel_nopanic, (vm_offset_t));
DEFINE_PMAP_IFUNC(void, init, (void));
-DEFINE_PMAP_IFUNC(vm_offset_t, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int));
+DEFINE_PMAP_IFUNC(void *, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int));
DEFINE_PMAP_IFUNC(int, pinit, (pmap_t));
DEFINE_PMAP_IFUNC(void, pinit0, (pmap_t));
DEFINE_PMAP_IFUNC(int, mincore, (pmap_t, vm_offset_t, vm_paddr_t *));