about summary refs log tree commit diff
path: root/lib/libkvm
diff options
context:
space:
mode:
Diffstat (limited to 'lib/libkvm')
-rw-r--r-- lib/libkvm/Makefile | 39
-rw-r--r-- lib/libkvm/Makefile.depend | 16
-rw-r--r-- lib/libkvm/kvm.3 | 169
-rw-r--r-- lib/libkvm/kvm.c | 536
-rw-r--r-- lib/libkvm/kvm.h | 129
-rw-r--r-- lib/libkvm/kvm_aarch64.h | 63
-rw-r--r-- lib/libkvm/kvm_amd64.c | 360
-rw-r--r-- lib/libkvm/kvm_amd64.h | 87
-rw-r--r-- lib/libkvm/kvm_arm.c | 277
-rw-r--r-- lib/libkvm/kvm_arm.h | 116
-rw-r--r-- lib/libkvm/kvm_cptime.c | 139
-rw-r--r-- lib/libkvm/kvm_getcptime.3 | 75
-rw-r--r-- lib/libkvm/kvm_geterr.3 | 84
-rw-r--r-- lib/libkvm/kvm_getloadavg.3 | 59
-rw-r--r-- lib/libkvm/kvm_getloadavg.c | 98
-rw-r--r-- lib/libkvm/kvm_getpcpu.3 | 167
-rw-r--r-- lib/libkvm/kvm_getprocs.3 | 170
-rw-r--r-- lib/libkvm/kvm_getswapinfo.3 | 109
-rw-r--r-- lib/libkvm/kvm_getswapinfo.c | 267
-rw-r--r-- lib/libkvm/kvm_i386.c | 425
-rw-r--r-- lib/libkvm/kvm_i386.h | 81
-rw-r--r-- lib/libkvm/kvm_kerndisp.3 | 55
-rw-r--r-- lib/libkvm/kvm_minidump_aarch64.c | 323
-rw-r--r-- lib/libkvm/kvm_minidump_amd64.c | 440
-rw-r--r-- lib/libkvm/kvm_minidump_arm.c | 276
-rw-r--r-- lib/libkvm/kvm_minidump_i386.c | 344
-rw-r--r-- lib/libkvm/kvm_minidump_powerpc64.c | 209
-rw-r--r-- lib/libkvm/kvm_minidump_powerpc64_hpt.c | 660
-rw-r--r-- lib/libkvm/kvm_minidump_riscv.c | 293
-rw-r--r-- lib/libkvm/kvm_native.3 | 59
-rw-r--r-- lib/libkvm/kvm_nlist.3 | 120
-rw-r--r-- lib/libkvm/kvm_open.3 | 286
-rw-r--r-- lib/libkvm/kvm_pcpu.c | 382
-rw-r--r-- lib/libkvm/kvm_powerpc.c | 234
-rw-r--r-- lib/libkvm/kvm_powerpc64.c | 267
-rw-r--r-- lib/libkvm/kvm_powerpc64.h | 79
-rw-r--r-- lib/libkvm/kvm_private.c | 824
-rw-r--r-- lib/libkvm/kvm_private.h | 195
-rw-r--r-- lib/libkvm/kvm_proc.c | 783
-rw-r--r-- lib/libkvm/kvm_read.3 | 108
-rw-r--r-- lib/libkvm/kvm_riscv.h | 87
-rw-r--r-- lib/libkvm/kvm_vnet.c | 244
-rw-r--r-- lib/libkvm/tests/Makefile | 20
-rw-r--r-- lib/libkvm/tests/Makefile.depend | 19
-rw-r--r-- lib/libkvm/tests/kvm_close_test.c | 54
-rw-r--r-- lib/libkvm/tests/kvm_geterr_test.c | 138
-rw-r--r-- lib/libkvm/tests/kvm_open2_test.c | 113
-rw-r--r-- lib/libkvm/tests/kvm_open_test.c | 101
-rw-r--r-- lib/libkvm/tests/kvm_read_test.c | 95
-rw-r--r-- lib/libkvm/tests/kvm_test_common.c | 45
-rw-r--r-- lib/libkvm/tests/kvm_test_common.h | 37
51 files changed, 10356 insertions, 0 deletions
diff --git a/lib/libkvm/Makefile b/lib/libkvm/Makefile
new file mode 100644
index 000000000000..94a1173010a4
--- /dev/null
+++ b/lib/libkvm/Makefile
@@ -0,0 +1,39 @@
+# Makefile for libkvm, the kernel memory interface library (see kvm(3)).
+LIB= kvm
+
+PACKAGE= runtime
+SHLIBDIR?= /lib
+SHLIB_MAJOR= 7
+CFLAGS+=-DNO__SCCSID -I${.CURDIR}
+
+# Core sources plus every per-architecture backend, so crash dumps from
+# any supported architecture can be examined (cross-debugging support).
+SRCS= kvm.c kvm_cptime.c kvm_getloadavg.c \
+ kvm_getswapinfo.c kvm_pcpu.c kvm_private.c kvm_proc.c kvm_vnet.c \
+ kvm_minidump_aarch64.c \
+ kvm_amd64.c kvm_minidump_amd64.c \
+ kvm_arm.c kvm_minidump_arm.c \
+ kvm_i386.c kvm_minidump_i386.c \
+ kvm_powerpc.c kvm_powerpc64.c \
+ kvm_minidump_riscv.c \
+ kvm_minidump_powerpc64.c kvm_minidump_powerpc64_hpt.c
+INCS= kvm.h
+
+LIBADD= elf
+
+MAN= kvm.3 kvm_getcptime.3 kvm_geterr.3 kvm_getloadavg.3 \
+ kvm_getpcpu.3 kvm_getprocs.3 kvm_getswapinfo.3 kvm_kerndisp.3 \
+ kvm_native.3 kvm_nlist.3 kvm_open.3 kvm_read.3
+
+# Manual page aliases for functions documented in a shared page.
+MLINKS+=kvm_getpcpu.3 kvm_getmaxcpu.3 \
+ kvm_getpcpu.3 kvm_dpcpu_setcpu.3 \
+ kvm_getpcpu.3 kvm_read_zpcpu.3 \
+ kvm_getpcpu.3 kvm_counter_u64_fetch.3
+MLINKS+=kvm_getprocs.3 kvm_getargv.3 kvm_getprocs.3 kvm_getenvv.3
+MLINKS+=kvm_nlist.3 kvm_nlist2.3
+MLINKS+=kvm_open.3 kvm_close.3 kvm_open.3 kvm_open2.3 kvm_open.3 kvm_openfiles.3
+MLINKS+=kvm_read.3 kvm_read2.3 kvm_read.3 kvm_write.3
+
+.include <src.opts.mk>
+
+# Build the test suite only when MK_TESTS is enabled.
+HAS_TESTS=
+SUBDIR.${MK_TESTS}= tests
+
+.include <bsd.lib.mk>
diff --git a/lib/libkvm/Makefile.depend b/lib/libkvm/Makefile.depend
new file mode 100644
index 000000000000..b0aa274151ad
--- /dev/null
+++ b/lib/libkvm/Makefile.depend
@@ -0,0 +1,16 @@
+# Autogenerated - do NOT edit!
+
+DIRDEPS = \
+ include \
+ include/xlocale \
+ lib/${CSU_DIR} \
+ lib/libc \
+ lib/libcompiler_rt \
+ lib/libelf \
+
+
+.include <dirdeps.mk>
+
+.if ${DEP_RELDIR} == ${_DEP_RELDIR}
+# local dependencies - needed for -jN in clean tree
+.endif
diff --git a/lib/libkvm/kvm.3 b/lib/libkvm/kvm.3
new file mode 100644
index 000000000000..33a5d2319d18
--- /dev/null
+++ b/lib/libkvm/kvm.3
@@ -0,0 +1,169 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd February 5, 2020
+.Dt KVM 3
+.Os
+.Sh NAME
+.Nm kvm
+.Nd kernel memory interface
+.Sh LIBRARY
+.Lb libkvm
+.Sh DESCRIPTION
+The
+.Nm
+library provides a uniform interface for accessing kernel virtual memory
+images, including live systems and crash dumps.
+Access to live systems is via
+.Xr sysctl 3
+for some functions, and
+.Xr mem 4
+and
+.Xr kmem 4
+for other functions,
+while crash dumps can be examined via the core file generated by
+.Xr savecore 8 .
+The interface behaves similarly in both cases.
+Memory can be read and written, kernel symbol addresses can be
+looked up efficiently, and information about user processes can
+be gathered.
+.Pp
+The
+.Fn kvm_open
+function is first called to obtain a descriptor for all subsequent calls.
+.Sh COMPATIBILITY
+The kvm interface was first introduced in SunOS.
+A considerable
+number of programs have been developed that use this interface,
+making backward compatibility highly desirable.
+In most respects, the Sun kvm interface is consistent and clean.
+Accordingly, the generic portion of the interface (i.e.,
+.Fn kvm_open ,
+.Fn kvm_close ,
+.Fn kvm_read ,
+.Fn kvm_write ,
+and
+.Fn kvm_nlist )
+has been incorporated into the
+.Bx
+interface.
+Indeed, many kvm
+applications (i.e., debuggers and statistical monitors) use only
+this subset of the interface.
+.Pp
+The process interface was not kept.
+This is not a portability
+issue since any code that manipulates processes is inherently
+machine dependent.
+.Pp
+Finally, the Sun kvm error reporting semantics are poorly defined.
+The library can be configured either to print errors to
+.Dv stderr
+automatically,
+or to print no error messages at all.
+In the latter case, the nature of the error cannot be determined.
+To overcome this, the
+.Bx
+interface includes a
+routine,
+.Xr kvm_geterr 3 ,
+to return (not print out) the error message
+corresponding to the most recent error condition on the
+given descriptor.
+.Sh CROSS DEBUGGING
+The
+.Nm
+library supports inspection of crash dumps from non-native kernels.
+Only a limited subset of the kvm interface is supported for these dumps.
+To inspect a crash dump of a non-native kernel,
+the caller must provide a
+.Fa resolver
+function when opening a descriptor via
+.Fn kvm_open2 .
+In addition,
+the kvm interface defines an integer type
+.Pq Vt kvaddr_t
+that is large enough to hold all valid addresses of all supported
+architectures.
+The interface also defines a new namelist structure type
+.Pq Vt "struct kvm_nlist"
+for use with
+.Fn kvm_nlist2 .
+To avoid address truncation issues,
+the caller should use
+.Fn kvm_nlist2
+and
+.Fn kvm_read2
+in place of
+.Fn kvm_nlist
+and
+.Fn kvm_read ,
+respectively.
+Finally, only a limited subset of operations are supported for non-native
+crash dumps:
+.Fn kvm_close ,
+.Fn kvm_geterr ,
+.Fn kvm_kerndisp ,
+.Fn kvm_open2 ,
+.Fn kvm_native ,
+.Fn kvm_nlist2 ,
+and
+.Fn kvm_read2 .
+.Sh SEE ALSO
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getloadavg 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_getswapinfo 3 ,
+.Xr kvm_kerndisp 3 ,
+.Xr kvm_native 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_nlist2 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_open2 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_read2 3 ,
+.Xr kvm_write 3 ,
+.Xr sysctl 3 ,
+.Xr kmem 4 ,
+.Xr mem 4
+.Sh HISTORY
+The
+.Fn kvm_native ,
+.Fn kvm_nlist2 ,
+.Fn kvm_open2 ,
+and
+.Fn kvm_read2
+functions first appeared in
+.Fx 11.0 .
diff --git a/lib/libkvm/kvm.c b/lib/libkvm/kvm.c
new file mode 100644
index 000000000000..4ed48951a327
--- /dev/null
+++ b/lib/libkvm/kvm.c
@@ -0,0 +1,536 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <sys/param.h>
+#include <sys/fnv_hash.h>
+
+#define _WANT_VNET
+
+#include <sys/user.h>
+#include <sys/linker.h>
+#include <sys/pcpu.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/mman.h>
+
+#include <stdbool.h>
+#include <net/vnet.h>
+
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <paths.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+SET_DECLARE(kvm_arch, struct kvm_arch);
+
+static char _kd_is_null[] = "";
+
+/*
+ * Return the text of the most recent error recorded on the descriptor.
+ * A NULL descriptor yields a static empty string, so the result is
+ * always safe to pass to printf-style functions without checking.
+ */
+char *
+kvm_geterr(kvm_t *kd)
+{
+
+	if (kd == NULL)
+		return (_kd_is_null);
+	return (kd->errbuf);
+}
+
+/*
+ * Read and validate the ELF executable header of the kernel image open
+ * on kd->nlfd, storing it in kd->nlehdr.  Returns 0 on success or -1
+ * (with the error recorded via _kvm_err()) on failure.
+ */
+static int
+_kvm_read_kernel_ehdr(kvm_t *kd)
+{
+	Elf *elf;
+
+	if (elf_version(EV_CURRENT) == EV_NONE) {
+		_kvm_err(kd, kd->program, "Unsupported libelf");
+		return (-1);
+	}
+	elf = elf_begin(kd->nlfd, ELF_C_READ, NULL);
+	if (elf == NULL) {
+		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
+		return (-1);
+	}
+	if (elf_kind(elf) != ELF_K_ELF) {
+		_kvm_err(kd, kd->program, "kernel is not an ELF file");
+		/* Fix: release the ELF descriptor; this path leaked it. */
+		elf_end(elf);
+		return (-1);
+	}
+	if (gelf_getehdr(elf, &kd->nlehdr) == NULL) {
+		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
+		elf_end(elf);
+		return (-1);
+	}
+	elf_end(elf);
+
+	/* Only the two standard byte orders are supported. */
+	switch (kd->nlehdr.e_ident[EI_DATA]) {
+	case ELFDATA2LSB:
+	case ELFDATA2MSB:
+		return (0);
+	default:
+		_kvm_err(kd, kd->program,
+		    "unsupported ELF data encoding for kernel");
+		return (-1);
+	}
+}
+
+/*
+ * Common body of kvm_open(), kvm_openfiles() and kvm_open2(): fill in
+ * the descriptor allocated by the caller, open the memory and namelist
+ * files, select an architecture handler and initialize address
+ * translation.  On failure the error text is copied to 'errout' (if
+ * non-NULL), the partially built descriptor is destroyed via
+ * kvm_close(), and NULL is returned.
+ */
+static kvm_t *
+_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
+{
+	struct kvm_arch **parch;
+	struct stat st;
+
+	kd->vmfd = -1;
+	kd->pmfd = -1;
+	kd->nlfd = -1;
+	kd->vmst = NULL;
+	kd->procbase = NULL;
+	kd->argspc = NULL;
+	kd->argv = NULL;
+
+	if (uf == NULL)
+		uf = getbootfile();
+	else if (strlen(uf) >= MAXPATHLEN) {
+		_kvm_err(kd, kd->program, "exec file name too long");
+		goto failed;
+	}
+	/* Only the open(2) access-mode bits are accepted in 'flag'. */
+	if (flag & ~O_RDWR) {
+		_kvm_err(kd, kd->program, "bad flags arg");
+		goto failed;
+	}
+	if (mf == NULL)
+		mf = _PATH_MEM;
+
+	if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
+		_kvm_syserr(kd, kd->program, "%s", mf);
+		goto failed;
+	}
+	if (fstat(kd->pmfd, &st) < 0) {
+		_kvm_syserr(kd, kd->program, "%s", mf);
+		goto failed;
+	}
+	if (S_ISREG(st.st_mode) && st.st_size <= 0) {
+		errno = EINVAL;
+		_kvm_syserr(kd, kd->program, "empty file");
+		goto failed;
+	}
+	if (S_ISCHR(st.st_mode)) {
+		/*
+		 * If this is a character special device, then check that
+		 * it's /dev/mem. If so, open kmem too. (Maybe we should
+		 * make it work for either /dev/mem or /dev/kmem -- in either
+		 * case you're working with a live kernel.)
+		 */
+		if (strcmp(mf, _PATH_DEVNULL) == 0) {
+			/*
+			 * NOTE(review): this open's result is not checked;
+			 * on failure vmfd stays -1 and the descriptor is
+			 * treated as dead -- confirm that fallback is
+			 * intended.
+			 */
+			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC);
+			return (kd);
+		} else if (strcmp(mf, _PATH_MEM) == 0) {
+			if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) <
+			    0) {
+				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
+				goto failed;
+			}
+			return (kd);
+		}
+	}
+
+	/*
+	 * This is either a crash dump or a remote live system with its physical
+	 * memory fully accessible via a special device.
+	 * Open the namelist fd and determine the architecture.
+	 */
+	if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
+		_kvm_syserr(kd, kd->program, "%s", uf);
+		goto failed;
+	}
+	if (_kvm_read_kernel_ehdr(kd) < 0)
+		goto failed;
+	/* Firmware/vmm memory devices are raw, writable physical images. */
+	if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0 ||
+	    strncmp(mf, _PATH_DEVVMM, strlen(_PATH_DEVVMM)) == 0) {
+		kd->rawdump = 1;
+		kd->writable = 1;
+	}
+	/* Probe each linked-in architecture handler until one claims it. */
+	SET_FOREACH(parch, kvm_arch) {
+		if ((*parch)->ka_probe(kd)) {
+			kd->arch = *parch;
+			break;
+		}
+	}
+	if (kd->arch == NULL) {
+		_kvm_err(kd, kd->program, "unsupported architecture");
+		goto failed;
+	}
+
+	/*
+	 * Non-native kernels require a symbol resolver.
+	 */
+	if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) {
+		_kvm_err(kd, kd->program,
+		    "non-native kernel requires a symbol resolver");
+		goto failed;
+	}
+
+	/*
+	 * Initialize the virtual address translation machinery.
+	 */
+	if (kd->arch->ka_initvtop(kd) < 0)
+		goto failed;
+	return (kd);
+failed:
+	/*
+	 * Copy out the error if doing sane error semantics.
+	 */
+	if (errout != NULL)
+		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
+	(void)kvm_close(kd);
+	return (NULL);
+}
+
+/*
+ * Open a descriptor with "sane" error semantics: failures are reported
+ * by copying the message into 'errout' (at least _POSIX2_LINE_MAX
+ * bytes) instead of printing.  The historical swapfile argument 'sf'
+ * is ignored.
+ */
+kvm_t *
+kvm_openfiles(const char *uf, const char *mf, const char *sf __unused, int flag,
+    char *errout)
+{
+	kvm_t *kd;
+
+	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
+		if (errout != NULL)
+			(void)strlcpy(errout, strerror(errno),
+			    _POSIX2_LINE_MAX);
+		return (NULL);
+	}
+	return (_kvm_open(kd, uf, mf, flag, errout));
+}
+
+/*
+ * SunOS-compatible open: on allocation failure the error is printed to
+ * stderr prefixed with 'errstr'.  'errstr' is also stored as
+ * kd->program, the prefix used for subsequent error reports on this
+ * descriptor.  The historical swapfile argument 'sf' is ignored.
+ */
+kvm_t *
+kvm_open(const char *uf, const char *mf, const char *sf __unused, int flag,
+    const char *errstr)
+{
+	kvm_t *kd;
+
+	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
+		if (errstr != NULL)
+			(void)fprintf(stderr, "%s: %s\n",
+			    errstr, strerror(errno));
+		return (NULL);
+	}
+	kd->program = errstr;
+	return (_kvm_open(kd, uf, mf, flag, NULL));
+}
+
+/*
+ * Open a descriptor with "sane" error semantics and an optional symbol
+ * resolver callback.  The resolver is recorded for _kvm_open() and is
+ * required when the target crash dump is non-native (see kvm(3),
+ * CROSS DEBUGGING).  Errors are copied into 'errout' (at least
+ * _POSIX2_LINE_MAX bytes).
+ */
+kvm_t *
+kvm_open2(const char *uf, const char *mf, int flag, char *errout,
+    int (*resolver)(const char *, kvaddr_t *))
+{
+	kvm_t *kd;
+
+	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
+		if (errout != NULL)
+			(void)strlcpy(errout, strerror(errno),
+			    _POSIX2_LINE_MAX);
+		return (NULL);
+	}
+	kd->resolve_symbol = resolver;
+	return (_kvm_open(kd, uf, mf, flag, errout));
+}
+
+/*
+ * Close a kvm descriptor: tear down the architecture's translation
+ * state, close any open file descriptors and free every dynamically
+ * allocated buffer.  Returns 0 on success, -1 with errno set to EINVAL
+ * for a NULL descriptor, and a nonzero value if any underlying
+ * close(2) failed.
+ */
+int
+kvm_close(kvm_t *kd)
+{
+	int error = 0;
+
+	if (kd == NULL) {
+		errno = EINVAL;
+		return (-1);
+	}
+	if (kd->vmst != NULL)
+		kd->arch->ka_freevtop(kd);
+	if (kd->pmfd >= 0)
+		error |= close(kd->pmfd);
+	if (kd->vmfd >= 0)
+		error |= close(kd->vmfd);
+	if (kd->nlfd >= 0)
+		error |= close(kd->nlfd);
+	/* free(NULL) is a no-op, so no pointer guards or casts are needed. */
+	free(kd->procbase);
+	free(kd->argbuf);
+	free(kd->argspc);
+	free(kd->argv);
+	/*
+	 * dpcpu_off is only meaningful once the dpcpu machinery has been
+	 * initialized, so it is deliberately guarded by the flag rather
+	 * than by the pointer value.
+	 */
+	if (kd->dpcpu_initialized != 0)
+		free(kd->dpcpu_off);
+	free(kd->pt_map);
+	free(kd->page_map);
+	if (kd->sparse_map != MAP_FAILED && kd->sparse_map != NULL)
+		munmap(kd->sparse_map, kd->pt_sparse_size);
+	free(kd);
+
+	return (error);
+}
+
+/*
+ * Look up kernel symbols using the truncation-safe struct kvm_nlist
+ * (kvaddr_t values).  Thin public wrapper around _kvm_nlist() with
+ * on-demand initialization enabled (third argument == 1).
+ */
+int
+kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl)
+{
+
+	/*
+	 * If called via the public interface, permit initialization of
+	 * further virtualized modules on demand.
+	 */
+	return (_kvm_nlist(kd, nl, 1));
+}
+
+/*
+ * Legacy nlist(3)-style symbol lookup.  Converts the caller's struct
+ * nlist array (terminated by a NULL or empty n_name) to struct
+ * kvm_nlist, performs the lookup via kvm_nlist2() and copies the
+ * results back.  Returns kvm_nlist2()'s result (per kvm_nlist(3), the
+ * number of symbols not found), or -1 on error.  Refuses non-native
+ * cores, whose addresses may not fit in the narrow n_value field.
+ */
+int
+kvm_nlist(kvm_t *kd, struct nlist *nl)
+{
+	struct kvm_nlist *kl;
+	int count, i, nfail;
+
+	/*
+	 * Avoid reporting truncated addresses by failing for non-native
+	 * cores.
+	 */
+	if (!kvm_native(kd)) {
+		_kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore");
+		return (-1);
+	}
+
+	/* Count entries up to the NULL/empty-name terminator. */
+	for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0';
+	    count++)
+		;
+	if (count == 0)
+		return (0);
+	/* calloc() leaves the extra entry zeroed as the terminator. */
+	kl = calloc(count + 1, sizeof(*kl));
+	if (kl == NULL) {
+		_kvm_err(kd, kd->program, "cannot allocate memory");
+		return (-1);
+	}
+	for (i = 0; i < count; i++)
+		kl[i].n_name = nl[i].n_name;
+	nfail = kvm_nlist2(kd, kl);
+	/* Copy results back, zeroing fields struct kvm_nlist lacks. */
+	for (i = 0; i < count; i++) {
+		nl[i].n_type = kl[i].n_type;
+		nl[i].n_other = 0;
+		nl[i].n_desc = 0;
+		nl[i].n_value = kl[i].n_value;
+	}
+	free(kl);
+	return (nfail);
+}
+
+/*
+ * Backwards-compatible read taking a u_long address.  Forwards to
+ * kvm_read2(); callers that may face addresses wider than u_long
+ * should call kvm_read2() directly (see kvm(3), CROSS DEBUGGING).
+ */
+ssize_t
+kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
+{
+
+	return (kvm_read2(kd, kva, buf, len));
+}
+
+/*
+ * Read 'len' bytes at kernel virtual address 'kva' into 'buf'.
+ * On a live kernel the read goes through /dev/kmem so the running
+ * kernel performs the address translation.  On a dead kernel the
+ * address is translated chunk by chunk via the architecture handler
+ * and the data read from the core file.  Returns the number of bytes
+ * actually read (possibly short), or -1 on error.
+ */
+ssize_t
+kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len)
+{
+	int cc;
+	ssize_t cr;
+	off_t pa;
+	char *cp;
+
+	if (ISALIVE(kd)) {
+		/*
+		 * We're using /dev/kmem. Just read straight from the
+		 * device and let the active kernel do the address translation.
+		 */
+		errno = 0;
+		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
+			_kvm_err(kd, 0, "invalid address (0x%jx)",
+			    (uintmax_t)kva);
+			return (-1);
+		}
+		cr = read(kd->vmfd, buf, len);
+		if (cr < 0) {
+			_kvm_syserr(kd, 0, "kvm_read");
+			return (-1);
+		} else if (cr < (ssize_t)len)
+			_kvm_err(kd, kd->program, "short read");
+		return (cr);
+	}
+
+	cp = buf;
+	while (len > 0) {
+		/*
+		 * cc is the size of the contiguous chunk readable at kva;
+		 * cap it to what the caller still wants.
+		 */
+		cc = kd->arch->ka_kvatop(kd, kva, &pa);
+		if (cc == 0)
+			return (-1);
+		if (cc > (ssize_t)len)
+			cc = len;
+		errno = 0;
+		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
+			_kvm_syserr(kd, 0, _PATH_MEM);
+			break;
+		}
+		cr = read(kd->pmfd, cp, cc);
+		if (cr < 0) {
+			_kvm_syserr(kd, kd->program, "kvm_read");
+			break;
+		}
+		/*
+		 * If ka_kvatop returns a bogus value or our core file is
+		 * truncated, we might wind up seeking beyond the end of the
+		 * core file in which case the read will return 0 (EOF).
+		 */
+		if (cr == 0)
+			break;
+		cp += cr;
+		kva += cr;
+		len -= cr;
+	}
+
+	return (cp - (char *)buf);
+}
+
+/*
+ * Write 'len' bytes from 'buf' to kernel virtual address 'kva'.
+ * Mirrors kvm_read2(): live kernels are written through /dev/kmem,
+ * while writable raw images (kd->writable) are translated via the
+ * architecture handler and written to the memory file.  Returns the
+ * number of bytes written, or -1 on error; plain dead dumps are
+ * rejected.
+ */
+ssize_t
+kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
+{
+	int cc;
+	ssize_t cw;
+	off_t pa;
+	const char *cp;
+
+	if (!ISALIVE(kd) && !kd->writable) {
+		_kvm_err(kd, kd->program,
+		    "kvm_write not implemented for dead kernels");
+		return (-1);
+	}
+
+	if (ISALIVE(kd)) {
+		/*
+		 * Just like kvm_read, only we write.
+		 */
+		errno = 0;
+		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
+			_kvm_err(kd, 0, "invalid address (%lx)", kva);
+			return (-1);
+		}
+		cc = write(kd->vmfd, buf, len);
+		if (cc < 0) {
+			_kvm_syserr(kd, 0, "kvm_write");
+			return (-1);
+		} else if ((size_t)cc < len)
+			_kvm_err(kd, kd->program, "short write");
+		return (cc);
+	}
+
+	cp = buf;
+	while (len > 0) {
+		/* Translate, then cap the chunk to the remaining length. */
+		cc = kd->arch->ka_kvatop(kd, kva, &pa);
+		if (cc == 0)
+			return (-1);
+		if (cc > (ssize_t)len)
+			cc = len;
+		errno = 0;
+		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
+			_kvm_syserr(kd, 0, _PATH_MEM);
+			break;
+		}
+		cw = write(kd->pmfd, cp, cc);
+		if (cw < 0) {
+			_kvm_syserr(kd, kd->program, "kvm_write");
+			break;
+		}
+		/*
+		 * If ka_kvatop returns a bogus value or our core file is
+		 * truncated, we might wind up seeking beyond the end of the
+		 * core file in which case the write may return 0.
+		 */
+		if (cw == 0)
+			break;
+		cp += cw;
+		kva += cw;
+		len -= cw;
+	}
+
+	return (cp - (const char *)buf);
+}
+
+/*
+ * Report whether the target kernel matches the host architecture.
+ * A live kernel is native by definition; for dumps the architecture
+ * handler decides.
+ */
+int
+kvm_native(kvm_t *kd)
+{
+
+	if (ISALIVE(kd))
+		return (1);
+	return (kd->arch->ka_native(kd));
+}
+
+/*
+ * Walk the target's page mappings, invoking 'cb' with a struct
+ * kvm_page and 'closure' for each one.  Returns 0 if the architecture
+ * handler does not support page walking; otherwise returns the
+ * handler's result.
+ */
+int
+kvm_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *closure)
+{
+
+	if (kd->arch->ka_walk_pages == NULL)
+		return (0);
+
+	return (kd->arch->ka_walk_pages(kd, cb, closure));
+}
+
+/*
+ * Return the kernel's displacement: the difference between the base
+ * address the kernel was relocated to and the base address it was
+ * linked at.  For a live kernel this is computed from the
+ * kern.base_address and kern.relbase_address sysctls; for dumps it is
+ * delegated to the architecture handler.  Returns 0 on error or when
+ * the handler does not support it.
+ */
+kssize_t
+kvm_kerndisp(kvm_t *kd)
+{
+	unsigned long kernbase, rel_kernbase;
+	size_t kernbase_len = sizeof(kernbase);
+	size_t rel_kernbase_len = sizeof(rel_kernbase);
+
+	if (ISALIVE(kd)) {
+		if (sysctlbyname("kern.base_address", &kernbase,
+		    &kernbase_len, NULL, 0) == -1) {
+			_kvm_syserr(kd, kd->program,
+			    "failed to get kernel base address");
+			return (0);
+		}
+		if (sysctlbyname("kern.relbase_address", &rel_kernbase,
+		    &rel_kernbase_len, NULL, 0) == -1) {
+			_kvm_syserr(kd, kd->program,
+			    "failed to get relocated kernel base address");
+			return (0);
+		}
+		return (rel_kernbase - kernbase);
+	}
+
+	if (kd->arch->ka_kerndisp == NULL)
+		return (0);
+
+	return (kd->arch->ka_kerndisp(kd));
+}
diff --git a/lib/libkvm/kvm.h b/lib/libkvm/kvm.h
new file mode 100644
index 000000000000..0061eb795ec4
--- /dev/null
+++ b/lib/libkvm/kvm.h
@@ -0,0 +1,129 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _KVM_H_
+#define _KVM_H_
+
+#include <sys/types.h>
+#include <nlist.h>
+
+/*
+ * Including vm/vm.h causes namespace pollution issues. For the
+ * most part, only things using kvm_walk_pages() need to #include it.
+ */
+#ifndef VM_H
+typedef u_char vm_prot_t;
+#endif
+
+/* Default version symbol. */
+#define VRS_SYM "_version"
+#define VRS_KEY "VERSION"
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+#ifndef _SSIZE_T_DECLARED
+typedef __ssize_t ssize_t;
+#define _SSIZE_T_DECLARED
+#endif
+
+/* Namelist entry for kvm_nlist2(); n_value is wide enough for any arch. */
+struct kvm_nlist {
+	const char *n_name;	/* symbol name to look up */
+	unsigned char n_type;	/* symbol type */
+	kvaddr_t n_value;	/* resolved symbol address */
+};
+
+/* Opaque descriptor returned by the kvm_open*() functions. */
+typedef struct __kvm kvm_t;
+
+struct kinfo_proc;
+struct proc;
+
+/* Per-device swap usage, filled in by kvm_getswapinfo(3). */
+struct kvm_swap {
+	char ksw_devname[32];	/* swap device name */
+	u_int ksw_used;		/* amount of swap space in use */
+	u_int ksw_total;	/* total swap space on the device */
+	int ksw_flags;		/* SWIF_* flags */
+	u_int ksw_reserved1;	/* spare; reserved for future use */
+	u_int ksw_reserved2;	/* spare; reserved for future use */
+};
+
+/* One mapping record passed to the kvm_walk_pages() callback. */
+struct kvm_page {
+	u_int kp_version;	/* layout version (LIBKVM_WALK_PAGES_VERSION) */
+	kpaddr_t kp_paddr;	/* physical address */
+	kvaddr_t kp_kmap_vaddr;	/* kernel map virtual address */
+	kvaddr_t kp_dmap_vaddr;	/* direct map virtual address */
+	vm_prot_t kp_prot;	/* protection bits */
+	off_t kp_offset;	/* offset -- presumably into the dump; verify */
+	size_t kp_len;		/* length of the mapping */
+	/* end of version 2 */
+};
+
+#define SWIF_DEV_PREFIX 0x0002
+#define LIBKVM_WALK_PAGES_VERSION 2
+
+__BEGIN_DECLS
+int kvm_close(kvm_t *);
+int kvm_dpcpu_setcpu(kvm_t *, unsigned int);
+char **kvm_getargv(kvm_t *, const struct kinfo_proc *, int);
+int kvm_getcptime(kvm_t *, long *);
+char **kvm_getenvv(kvm_t *, const struct kinfo_proc *, int);
+char *kvm_geterr(kvm_t *);
+int kvm_getloadavg(kvm_t *, double [], int);
+int kvm_getmaxcpu(kvm_t *);
+int kvm_getncpus(kvm_t *);
+void *kvm_getpcpu(kvm_t *, int);
+uint64_t kvm_counter_u64_fetch(kvm_t *, u_long);
+struct kinfo_proc *
+ kvm_getprocs(kvm_t *, int, int, int *);
+int kvm_getswapinfo(kvm_t *, struct kvm_swap *, int, int);
+int kvm_native(kvm_t *);
+int kvm_nlist(kvm_t *, struct nlist *);
+int kvm_nlist2(kvm_t *, struct kvm_nlist *);
+kvm_t *kvm_open
+ (const char *, const char *, const char *, int, const char *);
+kvm_t *kvm_openfiles
+ (const char *, const char *, const char *, int, char *);
+kvm_t *kvm_open2
+ (const char *, const char *, int, char *,
+ int (*)(const char *, kvaddr_t *));
+ssize_t kvm_read(kvm_t *, unsigned long, void *, size_t);
+ssize_t kvm_read_zpcpu(kvm_t *, unsigned long, void *, size_t, int);
+ssize_t kvm_read2(kvm_t *, kvaddr_t, void *, size_t);
+ssize_t kvm_write(kvm_t *, unsigned long, const void *, size_t);
+kssize_t kvm_kerndisp(kvm_t *);
+
+typedef int kvm_walk_pages_cb_t(struct kvm_page *, void *);
+int kvm_walk_pages(kvm_t *, kvm_walk_pages_cb_t *, void *);
+__END_DECLS
+
+#endif /* !_KVM_H_ */
diff --git a/lib/libkvm/kvm_aarch64.h b/lib/libkvm/kvm_aarch64.h
new file mode 100644
index 000000000000..7727c85c15ac
--- /dev/null
+++ b/lib/libkvm/kvm_aarch64.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2015 John H. Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __KVM_AARCH64_H__
+#define __KVM_AARCH64_H__
+
+#ifdef __aarch64__
+#include <machine/pte.h>
+#endif
+
+/* 64-bit physical addresses and page-table entries on arm64. */
+typedef uint64_t aarch64_physaddr_t;
+typedef uint64_t aarch64_pte_t;
+
+/* Page-size constants for the 4K and 16K translation granules. */
+#define AARCH64_PAGE_SHIFT_4K 12
+#define AARCH64_PAGE_SIZE_4K (1 << AARCH64_PAGE_SHIFT_4K)
+
+#define AARCH64_PAGE_SHIFT_16K 14
+#define AARCH64_PAGE_SIZE_16K (1 << AARCH64_PAGE_SHIFT_16K)
+
+/* Source: arm64/include/pte.h */
+#define AARCH64_ATTR_MASK 0xfffc000000000fff
+#define AARCH64_ATTR_UXN (1ULL << 54)
+#define AARCH64_ATTR_PXN (1ULL << 53)
+#define AARCH64_ATTR_XN (AARCH64_ATTR_PXN | AARCH64_ATTR_UXN)
+#define AARCH64_ATTR_AP(x) ((x) << 6)
+#define AARCH64_ATTR_AP_RO (1 << 1)
+
+#define AARCH64_ATTR_DESCR_MASK 3
+
+#define AARCH64_L3_SHIFT_4K 12
+#define AARCH64_L3_SHIFT_16K 14
+#define AARCH64_L3_PAGE 0x3
+
+/* When building on arm64 itself, verify the local copies above stay
+ * in sync with the real kernel definitions from <machine/pte.h>. */
+#ifdef __aarch64__
+_Static_assert(ATTR_MASK == AARCH64_ATTR_MASK, "ATTR_MASK mismatch");
+_Static_assert(ATTR_DESCR_MASK == AARCH64_ATTR_DESCR_MASK,
+    "ATTR_DESCR_MASK mismatch");
+_Static_assert(L3_PAGE == AARCH64_L3_PAGE, "L3_PAGE mismatch");
+#endif
+
+#endif /* !__KVM_AARCH64_H__ */
diff --git a/lib/libkvm/kvm_amd64.c b/lib/libkvm/kvm_amd64.c
new file mode 100644
index 000000000000..acc7ac017a3c
--- /dev/null
+++ b/lib/libkvm/kvm_amd64.c
@@ -0,0 +1,360 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+/*
+ * AMD64 machine dependent routines for kvm. Hopefully, the forthcoming
+ * vm code will one day obsolete this module.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vm/vm.h>
+#include <kvm.h>
+
+#include <limits.h>
+
+#include "kvm_private.h"
+#include "kvm_amd64.h"
+
+/* Private per-descriptor state for decoding amd64 full (non-mini) dumps. */
+struct vmstate {
+ size_t phnum; /* number of ELF program headers in the core */
+ GElf_Phdr *phdr; /* program headers (unused for raw dumps) */
+ amd64_pml4e_t *PML4; /* cached copy of the kernel's top-level page table */
+};
+
+/*
+ * Translate a physical memory address to a file-offset in the crash-dump.
+ *
+ * On success *ofs receives the file offset and the return value is the
+ * number of bytes readable contiguously (up to the end of the containing
+ * 4K page).  Returns 0 if pa falls in no segment of the dump.
+ */
+static size_t
+_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
+{
+ struct vmstate *vm = kd->vmst;
+ GElf_Phdr *p;
+ size_t n;
+
+ /* A raw dump is a 1:1 image of physical memory. */
+ if (kd->rawdump) {
+ *ofs = pa;
+ return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
+ }
+
+ /* Linear scan for the program header whose segment contains pa. */
+ p = vm->phdr;
+ n = vm->phnum;
+ while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
+ p++, n--;
+ if (n == 0)
+ return (0);
+ *ofs = (pa - p->p_paddr) + p->p_offset;
+ return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
+}
+
+/*
+ * Release all translation state attached to the descriptor.  Safe to
+ * call with partially-initialized state: free(NULL) is a no-op, so the
+ * redundant NULL guard on PML4 has been dropped.
+ */
+static void
+_amd64_freevtop(kvm_t *kd)
+{
+ struct vmstate *vm = kd->vmst;
+
+ free(vm->PML4);
+ free(vm->phdr);
+ free(vm);
+ kd->vmst = NULL;
+}
+
+/*
+ * Match 64-bit x86 ELF kernels, excluding minidumps (those are claimed
+ * by the amd64 minidump backend instead).
+ */
+static int
+_amd64_probe(kvm_t *kd)
+{
+
+ if (!_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64))
+ return (0);
+ return (!_kvm_is_minidump(kd));
+}
+
+/*
+ * Locate the kernel's top-level page table (PML4) in the dump and cache
+ * a copy of it for later virtual-to-physical translations.
+ *
+ * The kernel text's virtual/physical delta is taken from the
+ * PT_DUMP_DELTA program header when present; otherwise it is derived
+ * from the "kernbase" symbol (non-relocatable kernels only).
+ *
+ * Fix: kernbase must be captured from nl[0].n_value immediately after
+ * the "kernbase" lookup.  The original code read it only after reusing
+ * nl[0] for the "kernphys" lookup, which clobbers n_value.
+ */
+static int
+_amd64_initvtop(kvm_t *kd)
+{
+ struct kvm_nlist nl[2];
+ amd64_physaddr_t pa;
+ kvaddr_t kernbase, kernphys;
+ amd64_pml4e_t *PML4;
+ int found = 0;
+
+ kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
+ if (kd->vmst == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate vm");
+ return (-1);
+ }
+ kd->vmst->PML4 = 0;
+
+ if (kd->rawdump == 0) {
+ if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum,
+ &kd->vmst->phdr) == -1)
+ return (-1);
+
+ for (size_t i = 0; i < kd->vmst->phnum; i++) {
+ if (kd->vmst->phdr[i].p_type == PT_DUMP_DELTA) {
+ /* Account for the 2M hole at KERNBASE. */
+ kernphys = kd->vmst->phdr[i].p_paddr -
+ kd->vmst->phdr[i].p_align;
+ kernbase = kd->vmst->phdr[i].p_vaddr;
+
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found == 0) {
+ nl[0].n_name = "kernbase";
+ nl[1].n_name = 0;
+
+ if (kvm_nlist2(kd, nl) != 0) {
+ _kvm_err(kd, kd->program, "bad namelist - no kernbase");
+ return (-1);
+ }
+ /*
+ * Save the value now: nl[0] is reused for the
+ * "kernphys" lookup below, which overwrites n_value.
+ */
+ kernbase = nl[0].n_value;
+
+ nl[0].n_name = "kernphys";
+ nl[1].n_name = 0;
+
+ /* XXX
+ * Relocatable kernels can still be loaded at 2M.
+ */
+ if (kvm_nlist2(kd, nl) != 1) {
+ _kvm_err(kd, kd->program, "cannot determine kernphys");
+ return (-1);
+ }
+
+ kernphys = 0;
+ }
+
+ nl[0].n_name = "KPML4phys";
+ nl[1].n_name = 0;
+
+ if (kvm_nlist2(kd, nl) != 0) {
+ _kvm_err(kd, kd->program, "bad namelist - no KPML4phys");
+ return (-1);
+ }
+ /* Read the PML4's physical address, then the PML4 page itself. */
+ if (kvm_read2(kd, (nl[0].n_value - kernbase + kernphys), &pa,
+ sizeof(pa)) != sizeof(pa)) {
+ _kvm_err(kd, kd->program, "cannot read KPML4phys");
+ return (-1);
+ }
+ pa = le64toh(pa);
+ PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE);
+ if (PML4 == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate PML4");
+ return (-1);
+ }
+ if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) {
+ _kvm_err(kd, kd->program, "cannot read KPML4phys");
+ free(PML4);
+ return (-1);
+ }
+ kd->vmst->PML4 = PML4;
+ return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a file offset in the dump by
+ * walking the cached kernel page tables.  Handles 4K pages as well as
+ * 2MB (PDE.PS) and 1GB (PDPE.PS) superpages.  Returns the number of
+ * contiguous bytes valid at *pa, or 0 on failure.
+ *
+ * Fix: the final-level PTE read from the dump is now byte-swapped with
+ * le64toh() before its bits are tested, matching the handling of the
+ * pml4e/pdpe/pde entries (dump data is little-endian).
+ */
+static int
+_amd64_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+ struct vmstate *vm;
+ amd64_physaddr_t offset;
+ amd64_physaddr_t pdpe_pa;
+ amd64_physaddr_t pde_pa;
+ amd64_physaddr_t pte_pa;
+ amd64_pml4e_t pml4e;
+ amd64_pdpe_t pdpe;
+ amd64_pde_t pde;
+ amd64_pte_t pte;
+ kvaddr_t pml4eindex;
+ kvaddr_t pdpeindex;
+ kvaddr_t pdeindex;
+ kvaddr_t pteindex;
+ amd64_physaddr_t a;
+ off_t ofs;
+ size_t s;
+
+ vm = kd->vmst;
+ offset = va & AMD64_PAGE_MASK;
+
+ /*
+ * If we are initializing (kernel page table descriptor pointer
+ * not yet set) then return pa == va to avoid infinite recursion.
+ */
+ if (vm->PML4 == NULL) {
+ s = _kvm_pa2off(kd, va, pa);
+ if (s == 0) {
+ _kvm_err(kd, kd->program,
+ "_amd64_vatop: bootstrap data not in dump");
+ goto invalid;
+ } else
+ return (AMD64_PAGE_SIZE - offset);
+ }
+
+ pml4eindex = (va >> AMD64_PML4SHIFT) & (AMD64_NPML4EPG - 1);
+ pml4e = le64toh(vm->PML4[pml4eindex]);
+ if ((pml4e & AMD64_PG_V) == 0) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: pml4e not valid");
+ goto invalid;
+ }
+
+ pdpeindex = (va >> AMD64_PDPSHIFT) & (AMD64_NPDPEPG - 1);
+ pdpe_pa = (pml4e & AMD64_PG_FRAME) + (pdpeindex * sizeof(amd64_pdpe_t));
+
+ s = _kvm_pa2off(kd, pdpe_pa, &ofs);
+ if (s < sizeof(pdpe)) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: pdpe_pa not found");
+ goto invalid;
+ }
+ if (pread(kd->pmfd, &pdpe, sizeof(pdpe), ofs) != sizeof(pdpe)) {
+ _kvm_syserr(kd, kd->program, "_amd64_vatop: read pdpe");
+ goto invalid;
+ }
+ pdpe = le64toh(pdpe);
+ if ((pdpe & AMD64_PG_V) == 0) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: pdpe not valid");
+ goto invalid;
+ }
+
+ if (pdpe & AMD64_PG_PS) {
+ /*
+ * No next-level page table; pdpe describes one 1GB page.
+ */
+ a = (pdpe & AMD64_PG_1GB_FRAME) + (va & AMD64_PDPMASK);
+ s = _kvm_pa2off(kd, a, pa);
+ if (s == 0) {
+ _kvm_err(kd, kd->program,
+ "_amd64_vatop: 1GB page address not in dump");
+ goto invalid;
+ } else
+ return (AMD64_NBPDP - (va & AMD64_PDPMASK));
+ }
+
+ pdeindex = (va >> AMD64_PDRSHIFT) & (AMD64_NPDEPG - 1);
+ pde_pa = (pdpe & AMD64_PG_FRAME) + (pdeindex * sizeof(amd64_pde_t));
+
+ s = _kvm_pa2off(kd, pde_pa, &ofs);
+ if (s < sizeof(pde)) {
+ _kvm_syserr(kd, kd->program, "_amd64_vatop: pde_pa not found");
+ goto invalid;
+ }
+ if (pread(kd->pmfd, &pde, sizeof(pde), ofs) != sizeof(pde)) {
+ _kvm_syserr(kd, kd->program, "_amd64_vatop: read pde");
+ goto invalid;
+ }
+ pde = le64toh(pde);
+ if ((pde & AMD64_PG_V) == 0) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: pde not valid");
+ goto invalid;
+ }
+
+ if (pde & AMD64_PG_PS) {
+ /*
+ * No final-level page table; pde describes one 2MB page.
+ */
+ a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK);
+ s = _kvm_pa2off(kd, a, pa);
+ if (s == 0) {
+ _kvm_err(kd, kd->program,
+ "_amd64_vatop: 2MB page address not in dump");
+ goto invalid;
+ } else
+ return (AMD64_NBPDR - (va & AMD64_PDRMASK));
+ }
+
+ pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1);
+ pte_pa = (pde & AMD64_PG_FRAME) + (pteindex * sizeof(amd64_pte_t));
+
+ s = _kvm_pa2off(kd, pte_pa, &ofs);
+ if (s < sizeof(pte)) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: pte_pa not found");
+ goto invalid;
+ }
+ if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) {
+ _kvm_syserr(kd, kd->program, "_amd64_vatop: read");
+ goto invalid;
+ }
+ /* Dump data is little-endian; convert before testing PTE bits. */
+ pte = le64toh(pte);
+ if ((pte & AMD64_PG_V) == 0) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: pte not valid");
+ goto invalid;
+ }
+
+ a = (pte & AMD64_PG_FRAME) + offset;
+ s = _kvm_pa2off(kd, a, pa);
+ if (s == 0) {
+ _kvm_err(kd, kd->program, "_amd64_vatop: address not in dump");
+ goto invalid;
+ } else
+ return (AMD64_PAGE_SIZE - offset);
+
+invalid:
+ _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+ return (0);
+}
+
+/*
+ * Public kvatop entry point for the amd64 backend.  Translation via the
+ * cached page tables is only meaningful for crash dumps; reject live
+ * kernels explicitly.
+ */
+static int
+_amd64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "kvm_kvatop called in live kernel!");
+ return (0);
+ }
+ return (_amd64_vatop(kd, va, pa));
+}
+
+/*
+ * Return non-zero when amd64 dumps are native to the host, i.e. when
+ * the library itself was compiled for amd64.
+ */
+int
+_amd64_native(kvm_t *kd __unused)
+{
+
+#ifdef __amd64__
+ return (1);
+#else
+ return (0);
+#endif
+}
+
+/* Backend registration: dispatch table for amd64 full-memory dumps. */
+static struct kvm_arch kvm_amd64 = {
+ .ka_probe = _amd64_probe,
+ .ka_initvtop = _amd64_initvtop,
+ .ka_freevtop = _amd64_freevtop,
+ .ka_kvatop = _amd64_kvatop,
+ .ka_native = _amd64_native,
+};
+
+KVM_ARCH(kvm_amd64);
diff --git a/lib/libkvm/kvm_amd64.h b/lib/libkvm/kvm_amd64.h
new file mode 100644
index 000000000000..bc8b08db5036
--- /dev/null
+++ b/lib/libkvm/kvm_amd64.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2015 John H. Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __KVM_AMD64_H__
+#define __KVM_AMD64_H__
+
+/*
+ * Machine-independent copies of the amd64 paging constants so libkvm
+ * can walk amd64 page tables when built for any host architecture.
+ * Native builds also include the real headers to cross-check below.
+ */
+#ifdef __amd64__
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#endif
+
+typedef uint64_t amd64_physaddr_t;
+typedef uint64_t amd64_pte_t;
+typedef uint64_t amd64_pde_t;
+typedef uint64_t amd64_pdpe_t;
+typedef uint64_t amd64_pml4e_t;
+
+/* Geometry of the 4-level amd64 page-table tree. */
+#define AMD64_NPTEPG (AMD64_PAGE_SIZE / sizeof(amd64_pte_t))
+#define AMD64_PAGE_SHIFT 12
+#define AMD64_PAGE_SIZE (1 << AMD64_PAGE_SHIFT)
+#define AMD64_PAGE_MASK (AMD64_PAGE_SIZE - 1)
+#define AMD64_NPDEPG (AMD64_PAGE_SIZE / sizeof(amd64_pde_t))
+#define AMD64_PDRSHIFT 21
+#define AMD64_NBPDR (1 << AMD64_PDRSHIFT)
+#define AMD64_PDRMASK (AMD64_NBPDR - 1)
+#define AMD64_NPDPEPG (AMD64_PAGE_SIZE / sizeof(amd64_pdpe_t))
+#define AMD64_PDPSHIFT 30
+#define AMD64_NBPDP (1 << AMD64_PDPSHIFT)
+#define AMD64_PDPMASK (AMD64_NBPDP - 1)
+#define AMD64_NPML4EPG (AMD64_PAGE_SIZE / sizeof(amd64_pml4e_t))
+#define AMD64_PML4SHIFT 39
+
+/* Page-table entry bits and frame masks. */
+#define AMD64_PG_NX (1ULL << 63)
+#define AMD64_PG_V 0x001
+#define AMD64_PG_RW 0x002
+#define AMD64_PG_PS 0x080
+#define AMD64_PG_FRAME (0x000ffffffffff000)
+#define AMD64_PG_PS_FRAME (0x000fffffffe00000)
+#define AMD64_PG_1GB_FRAME (0x000fffffc0000000)
+
+/* Verify the copies against the native headers when building on amd64. */
+#ifdef __amd64__
+_Static_assert(NPTEPG == AMD64_NPTEPG, "NPTEPG mismatch");
+_Static_assert(PAGE_SHIFT == AMD64_PAGE_SHIFT, "PAGE_SHIFT mismatch");
+_Static_assert(PAGE_SIZE == AMD64_PAGE_SIZE, "PAGE_SIZE mismatch");
+_Static_assert(PAGE_MASK == AMD64_PAGE_MASK, "PAGE_MASK mismatch");
+_Static_assert(NPDEPG == AMD64_NPDEPG, "NPDEPG mismatch");
+_Static_assert(PDRSHIFT == AMD64_PDRSHIFT, "PDRSHIFT mismatch");
+_Static_assert(NBPDR == AMD64_NBPDR, "NBPDR mismatch");
+_Static_assert(PDRMASK == AMD64_PDRMASK, "PDRMASK mismatch");
+_Static_assert(NPDPEPG == AMD64_NPDPEPG, "NPDPEPG mismatch");
+_Static_assert(PDPSHIFT == AMD64_PDPSHIFT, "PDPSHIFT mismatch");
+_Static_assert(NBPDP == AMD64_NBPDP, "NBPDP mismatch");
+_Static_assert(PDPMASK == AMD64_PDPMASK, "PDPMASK mismatch");
+_Static_assert(NPML4EPG == AMD64_NPML4EPG, "NPML4EPG mismatch");
+_Static_assert(PML4SHIFT == AMD64_PML4SHIFT, "PML4SHIFT mismatch");
+
+_Static_assert(PG_V == AMD64_PG_V, "PG_V mismatch");
+_Static_assert(PG_PS == AMD64_PG_PS, "PG_PS mismatch");
+_Static_assert(PG_FRAME == AMD64_PG_FRAME, "PG_FRAME mismatch");
+_Static_assert(PG_PS_FRAME == AMD64_PG_PS_FRAME, "PG_PS_FRAME mismatch");
+#endif
+
+int _amd64_native(kvm_t *);
+
+#endif /* !__KVM_AMD64_H__ */
diff --git a/lib/libkvm/kvm_arm.c b/lib/libkvm/kvm_arm.c
new file mode 100644
index 000000000000..b1e00bda7918
--- /dev/null
+++ b/lib/libkvm/kvm_arm.c
@@ -0,0 +1,277 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2005 Olivier Houchard
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * ARM machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#ifdef __arm__
+#include <machine/vmparam.h>
+#endif
+
+#include "kvm_private.h"
+#include "kvm_arm.h"
+
+/* Private per-descriptor state for decoding 32-bit ARM dumps. */
+struct vmstate {
+ arm_pd_entry_t *l1pt; /* copy of the kernel's L1 page table */
+ size_t phnum; /* number of ELF program headers in the core */
+ GElf_Phdr *phdr; /* core ELF program headers */
+};
+
+/*
+ * Translate a physical memory address to a file-offset in the crash-dump.
+ *
+ * Returns the number of contiguous bytes readable at *ofs: up to the end
+ * of the containing page of size pgsz, or, when pgsz is 0, up to the end
+ * of the containing segment.  Returns 0 if pa is not in the dump.
+ */
+static size_t
+_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
+{
+ struct vmstate *vm = kd->vmst;
+ GElf_Phdr *p;
+ size_t n;
+
+ /* Linear scan for the program header whose segment contains pa. */
+ p = vm->phdr;
+ n = vm->phnum;
+ while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
+ p++, n--;
+ if (n == 0)
+ return (0);
+
+ *ofs = (pa - p->p_paddr) + p->p_offset;
+ if (pgsz == 0)
+ return (p->p_memsz - (pa - p->p_paddr));
+ return (pgsz - ((size_t)pa & (pgsz - 1)));
+}
+
+/*
+ * Release the ARM translation state.  The cached L1 page table copy,
+ * allocated in _arm_initvtop(), is freed as well — the original leaked
+ * it.  free(NULL) is harmless if initvtop failed before reading it.
+ */
+static void
+_arm_freevtop(kvm_t *kd)
+{
+ struct vmstate *vm = kd->vmst;
+
+ free(vm->l1pt);
+ free(vm->phdr);
+ free(vm);
+ kd->vmst = NULL;
+}
+
+/*
+ * Match 32-bit ARM ELF kernels, excluding minidumps (those are claimed
+ * by the ARM minidump backend instead).
+ */
+static int
+_arm_probe(kvm_t *kd)
+{
+
+ if (!_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM))
+ return (0);
+ return (!_kvm_is_minidump(kd));
+}
+
+/*
+ * Locate the kernel's L1 page table in the dump and cache a copy of it
+ * for later virtual-to-physical translations.
+ *
+ * The kernel's virtual/physical load addresses come from the
+ * PT_DUMP_DELTA program header when present; otherwise they are
+ * resolved from the "kernbase" and "physaddr" symbols (with a
+ * compile-time KERNBASE fallback on native arm builds).
+ */
+static int
+_arm_initvtop(kvm_t *kd)
+{
+ struct vmstate *vm;
+ struct kvm_nlist nl[2];
+ kvaddr_t kernbase;
+ arm_physaddr_t physaddr, pa;
+ arm_pd_entry_t *l1pt;
+ size_t i;
+ int found;
+
+ if (kd->rawdump) {
+ _kvm_err(kd, kd->program, "raw dumps not supported on arm");
+ return (-1);
+ }
+
+ vm = _kvm_malloc(kd, sizeof(*vm));
+ if (vm == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate vm");
+ return (-1);
+ }
+ kd->vmst = vm;
+ vm->l1pt = NULL;
+
+ if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
+ return (-1);
+
+ /* Prefer the explicit load-delta header when the dump carries one. */
+ found = 0;
+ for (i = 0; i < vm->phnum; i++) {
+ if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
+ kernbase = vm->phdr[i].p_vaddr;
+ physaddr = vm->phdr[i].p_paddr;
+ found = 1;
+ break;
+ }
+ }
+
+ /* nl[] is reused for several single-symbol lookups below. */
+ nl[1].n_name = NULL;
+ if (!found) {
+ nl[0].n_name = "kernbase";
+ if (kvm_nlist2(kd, nl) != 0) {
+#ifdef __arm__
+ kernbase = KERNBASE;
+#else
+ _kvm_err(kd, kd->program, "cannot resolve kernbase");
+ return (-1);
+#endif
+ } else
+ kernbase = nl[0].n_value;
+
+ nl[0].n_name = "physaddr";
+ if (kvm_nlist2(kd, nl) != 0) {
+ _kvm_err(kd, kd->program, "couldn't get phys addr");
+ return (-1);
+ }
+ physaddr = nl[0].n_value;
+ }
+ /* kernel_l1pa holds the physical address of the L1 page table. */
+ nl[0].n_name = "kernel_l1pa";
+ if (kvm_nlist2(kd, nl) != 0) {
+ _kvm_err(kd, kd->program, "bad namelist");
+ return (-1);
+ }
+ if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
+ sizeof(pa)) != sizeof(pa)) {
+ _kvm_err(kd, kd->program, "cannot read kernel_l1pa");
+ return (-1);
+ }
+ l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
+ if (l1pt == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate l1pt");
+ return (-1);
+ }
+ if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
+ _kvm_err(kd, kd->program, "cannot read l1pt");
+ free(l1pt);
+ return (-1);
+ }
+ vm->l1pt = l1pt;
+ return 0;
+}
+
+/* from arm/pmap.c */
+#define ARM_L1_IDX(va) ((va) >> ARM_L1_S_SHIFT)
+
+#define l1pte_section_p(pde) (((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S)
+#define l1pte_valid(pde) ((pde) != 0)
+#define l2pte_valid(pte) ((pte) != 0)
+#define l2pte_index(v) (((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT)
+
+
+/*
+ * Translate a kernel virtual address to a file offset in the dump by
+ * walking the cached L1 (and, via the dump, L2) page tables.  Handles
+ * 1MB sections, 64K large pages and 4K small pages.  Returns the
+ * number of contiguous bytes valid at *pa, or 0 on failure.
+ */
+static int
+_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+ struct vmstate *vm = kd->vmst;
+ arm_pd_entry_t pd;
+ arm_pt_entry_t pte;
+ arm_physaddr_t pte_pa;
+ off_t pte_off;
+
+ /* During bootstrap (no L1 table yet) treat va as physical. */
+ if (vm->l1pt == NULL)
+ return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
+ pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
+ if (!l1pte_valid(pd))
+ goto invalid;
+ if (l1pte_section_p(pd)) {
+ /* 1MB section mapping. */
+ *pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
+ return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
+ }
+ pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
+ /*
+ * NOTE(review): the return value of _kvm_pa2off() is not checked
+ * here; if pte_pa is absent from the dump, pte_off is used
+ * uninitialized by the pread below — confirm against upstream.
+ */
+ _kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
+ if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
+ _kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
+ goto invalid;
+ }
+ pte = _kvm32toh(kd, pte);
+ if (!l2pte_valid(pte)) {
+ goto invalid;
+ }
+ if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
+ /* 64K large-page mapping. */
+ *pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
+ return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
+ }
+ /* 4K small-page mapping. */
+ *pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
+ return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
+invalid:
+ _kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
+ return 0;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+/* Compiled out: FBSD_NOT_YET is never defined in this tree. */
+#ifdef FBSD_NOT_YET
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
+#endif
+
+/*
+ * Return non-zero when the dump's byte order matches a native arm host;
+ * always 0 when libkvm is built for another architecture.
+ */
+int
+#ifdef __arm__
+_arm_native(kvm_t *kd)
+#else
+_arm_native(kvm_t *kd __unused)
+#endif
+{
+
+#ifdef __arm__
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+ return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
+#else
+ return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
+#endif
+#else
+ return (0);
+#endif
+}
+
+/* Backend registration: dispatch table for 32-bit ARM full dumps. */
+static struct kvm_arch kvm_arm = {
+ .ka_probe = _arm_probe,
+ .ka_initvtop = _arm_initvtop,
+ .ka_freevtop = _arm_freevtop,
+ .ka_kvatop = _arm_kvatop,
+ .ka_native = _arm_native,
+};
+
+KVM_ARCH(kvm_arm);
diff --git a/lib/libkvm/kvm_arm.h b/lib/libkvm/kvm_arm.h
new file mode 100644
index 000000000000..fac61039b86a
--- /dev/null
+++ b/lib/libkvm/kvm_arm.h
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (c) 2015 John H. Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __KVM_ARM_H__
+#define __KVM_ARM_H__
+
+/*
+ * Machine-independent copies of the 32-bit ARM page-table constants so
+ * libkvm can decode ARM dumps when built for any host architecture.
+ * Native arm builds cross-check the copies via the _Static_asserts at
+ * the bottom of this header.
+ */
+typedef uint32_t arm_physaddr_t;
+typedef uint32_t arm_pd_entry_t;
+typedef uint32_t arm_pt_entry_t;
+
+#define ARM_PAGE_SHIFT 12
+#define ARM_PAGE_SIZE (1 << ARM_PAGE_SHIFT) /* Page size */
+#define ARM_PAGE_MASK (ARM_PAGE_SIZE - 1)
+
+#define ARM_L1_TABLE_SIZE 0x4000 /* 16K */
+
+/* L1 section mappings (1MB). */
+#define ARM_L1_S_SIZE 0x00100000 /* 1M */
+#define ARM_L1_S_OFFSET (ARM_L1_S_SIZE - 1)
+#define ARM_L1_S_FRAME (~ARM_L1_S_OFFSET)
+#define ARM_L1_S_SHIFT 20
+
+/* L2 large pages (64K). */
+#define ARM_L2_L_SIZE 0x00010000 /* 64K */
+#define ARM_L2_L_OFFSET (ARM_L2_L_SIZE - 1)
+#define ARM_L2_L_FRAME (~ARM_L2_L_OFFSET)
+#define ARM_L2_L_SHIFT 16
+
+/* L2 small pages (4K). */
+#define ARM_L2_S_SIZE 0x00001000 /* 4K */
+#define ARM_L2_S_OFFSET (ARM_L2_S_SIZE - 1)
+#define ARM_L2_S_FRAME (~ARM_L2_S_OFFSET)
+#define ARM_L2_S_SHIFT 12
+#define ARM_L2_TEX1 0x00000080
+#define ARM_PTE2_RO ARM_L2_TEX1
+#define ARM_L2_NX 0x00000001
+#define ARM_PTE2_NX ARM_L2_NX
+
+/*
+ * Note: L2_S_PROT_W differs depending on whether the system is generic or
+ * xscale. This isn't easily accessible in this context, so use an
+ * approximation of 'xscale' which is a subset of 'generic'.
+ */
+#define ARM_L2_AP0(x) ((x) << 4)
+#define ARM_AP_W 0x01
+#define ARM_L2_S_PROT_W (ARM_L2_AP0(ARM_AP_W))
+
+/* L1 descriptor types. */
+#define ARM_L1_TYPE_INV 0x00 /* Invalid (fault) */
+#define ARM_L1_TYPE_C 0x01 /* Coarse L2 */
+#define ARM_L1_TYPE_S 0x02 /* Section */
+#define ARM_L1_TYPE_MASK 0x03 /* Mask of type bits */
+
+#define ARM_L1_S_ADDR_MASK 0xfff00000 /* phys address of section */
+#define ARM_L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */
+
+/* L2 descriptor types. */
+#define ARM_L2_TYPE_INV 0x00 /* Invalid (fault) */
+#define ARM_L2_TYPE_L 0x01 /* Large Page - 64k */
+#define ARM_L2_TYPE_S 0x02 /* Small Page - 4k */
+#define ARM_L2_TYPE_T 0x03 /* Tiny Page - 1k - not used */
+#define ARM_L2_TYPE_MASK 0x03
+
+/* Verify the copies against the native headers when building on arm. */
+#ifdef __arm__
+#include <machine/acle-compat.h>
+
+#include <machine/pte.h>
+
+_Static_assert(PAGE_SHIFT == ARM_PAGE_SHIFT, "PAGE_SHIFT mismatch");
+_Static_assert(PAGE_SIZE == ARM_PAGE_SIZE, "PAGE_SIZE mismatch");
+_Static_assert(PAGE_MASK == ARM_PAGE_MASK, "PAGE_MASK mismatch");
+_Static_assert(L1_TABLE_SIZE == ARM_L1_TABLE_SIZE, "L1_TABLE_SIZE mismatch");
+_Static_assert(L1_S_SIZE == ARM_L1_S_SIZE, "L1_S_SIZE mismatch");
+_Static_assert(L1_S_OFFSET == ARM_L1_S_OFFSET, "L1_S_OFFSET mismatch");
+_Static_assert(L1_S_FRAME == ARM_L1_S_FRAME, "L1_S_FRAME mismatch");
+_Static_assert(L1_S_SHIFT == ARM_L1_S_SHIFT, "L1_S_SHIFT mismatch");
+_Static_assert(L2_L_SIZE == ARM_L2_L_SIZE, "L2_L_SIZE mismatch");
+_Static_assert(L2_L_OFFSET == ARM_L2_L_OFFSET, "L2_L_OFFSET mismatch");
+_Static_assert(L2_L_FRAME == ARM_L2_L_FRAME, "L2_L_FRAME mismatch");
+_Static_assert(L2_L_SHIFT == ARM_L2_L_SHIFT, "L2_L_SHIFT mismatch");
+_Static_assert(L2_S_SIZE == ARM_L2_S_SIZE, "L2_S_SIZE mismatch");
+_Static_assert(L2_S_OFFSET == ARM_L2_S_OFFSET, "L2_S_OFFSET mismatch");
+_Static_assert(L2_S_FRAME == ARM_L2_S_FRAME, "L2_S_FRAME mismatch");
+_Static_assert(L2_S_SHIFT == ARM_L2_S_SHIFT, "L2_S_SHIFT mismatch");
+_Static_assert(L1_TYPE_INV == ARM_L1_TYPE_INV, "L1_TYPE_INV mismatch");
+_Static_assert(L1_TYPE_C == ARM_L1_TYPE_C, "L1_TYPE_C mismatch");
+_Static_assert(L1_TYPE_S == ARM_L1_TYPE_S, "L1_TYPE_S mismatch");
+_Static_assert(L1_TYPE_MASK == ARM_L1_TYPE_MASK, "L1_TYPE_MASK mismatch");
+_Static_assert(L1_S_ADDR_MASK == ARM_L1_S_ADDR_MASK, "L1_S_ADDR_MASK mismatch");
+_Static_assert(L1_C_ADDR_MASK == ARM_L1_C_ADDR_MASK, "L1_C_ADDR_MASK mismatch");
+_Static_assert(L2_TYPE_INV == ARM_L2_TYPE_INV, "L2_TYPE_INV mismatch");
+_Static_assert(L2_TYPE_L == ARM_L2_TYPE_L, "L2_TYPE_L mismatch");
+_Static_assert(L2_TYPE_S == ARM_L2_TYPE_S, "L2_TYPE_S mismatch");
+_Static_assert(L2_TYPE_MASK == ARM_L2_TYPE_MASK, "L2_TYPE_MASK mismatch");
+#endif
+
+int _arm_native(kvm_t *);
+
+#endif /* !__KVM_ARM_H__ */
diff --git a/lib/libkvm/kvm_cptime.c b/lib/libkvm/kvm_cptime.c
new file mode 100644
index 000000000000..bf6d10c0c620
--- /dev/null
+++ b/lib/libkvm/kvm_cptime.c
@@ -0,0 +1,139 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2008 Yahoo!, Inc.
+ * All rights reserved.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <errno.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "kvm_private.h"
+
+/* Namelist for the legacy global cp_time[] array (absent on SMP kernels). */
+static struct nlist kvm_cp_time_nl[] = {
+ { .n_name = "_cp_time" }, /* (deprecated) */
+ { .n_name = NULL },
+};
+
+#define NL_CP_TIME 0
+
+/* Non-zero once kvm_cp_time_nl has been resolved for the current kd. */
+static int kvm_cp_time_cached;
+
+/*
+ * Resolve the cp_time symbol and mark the cache valid.  Returns -1 on
+ * namelist failure, 0 otherwise (even if the symbol is simply absent).
+ */
+static int
+_kvm_cp_time_init(kvm_t *kd)
+{
+
+ if (kvm_nlist(kd, kvm_cp_time_nl) < 0)
+ return (-1);
+ kvm_cp_time_cached = 1;
+ return (0);
+}
+
+/*
+ * Fetch a sysctl by name into a caller-supplied buffer and insist that
+ * the kernel returned exactly the expected number of bytes.  Returns 0
+ * on success, -1 (with the error recorded in kd) otherwise.
+ */
+static int
+getsysctl(kvm_t *kd, const char *name, void *buf, size_t len)
+{
+ size_t outlen = len;
+
+ if (sysctlbyname(name, buf, &outlen, NULL, 0) < 0) {
+ _kvm_err(kd, kd->program, "cannot read sysctl %s:%s", name,
+ strerror(errno));
+ return (-1);
+ }
+ if (outlen == len)
+ return (0);
+ _kvm_err(kd, kd->program, "sysctl %s has unexpected size",
+ name);
+ return (-1);
+}
+
+/*
+ * Fill cp_time[] (CPUSTATES longs) with the system-wide CPU time
+ * statistics.  On a live kernel this reads the kern.cp_time sysctl;
+ * on a native crash dump it reads the kernel's cp_time[] array if
+ * present, else sums the per-CPU counters from each struct pcpu.
+ * Returns 0 on success, -1 on error.
+ *
+ * Calling with kd == NULL only invalidates the cached namelist (so a
+ * later call re-resolves symbols) and returns 0.
+ */
+int
+kvm_getcptime(kvm_t *kd, long *cp_time)
+{
+ struct pcpu *pc;
+ int i, j, maxcpu;
+
+ if (kd == NULL) {
+ kvm_cp_time_cached = 0;
+ return (0);
+ }
+
+ if (ISALIVE(kd))
+ return (getsysctl(kd, "kern.cp_time", cp_time, sizeof(long) *
+ CPUSTATES));
+
+ /* struct layouts below are only valid for a native-arch dump. */
+ if (!kd->arch->ka_native(kd)) {
+ _kvm_err(kd, kd->program,
+ "cannot read cp_time from non-native core");
+ return (-1);
+ }
+
+ if (kvm_cp_time_cached == 0) {
+ if (_kvm_cp_time_init(kd) < 0)
+ return (-1);
+ }
+
+ /* If this kernel has a "cp_time[]" symbol, then just read that. */
+ if (kvm_cp_time_nl[NL_CP_TIME].n_value != 0) {
+ if (kvm_read(kd, kvm_cp_time_nl[NL_CP_TIME].n_value, cp_time,
+ sizeof(long) * CPUSTATES) != sizeof(long) * CPUSTATES) {
+ _kvm_err(kd, kd->program, "cannot read cp_time array");
+ return (-1);
+ }
+ return (0);
+ }
+
+ /*
+ * If we don't have that symbol, then we have to simulate
+ * "cp_time[]" by adding up the individual times for each CPU.
+ */
+ maxcpu = kvm_getmaxcpu(kd);
+ if (maxcpu < 0)
+ return (-1);
+ for (i = 0; i < CPUSTATES; i++)
+ cp_time[i] = 0;
+ for (i = 0; i < maxcpu; i++) {
+ /* NULL means "CPU absent"; (void *)-1 is kvm_getpcpu's error. */
+ pc = kvm_getpcpu(kd, i);
+ if (pc == NULL)
+ continue;
+ if (pc == (void *)-1)
+ return (-1);
+ for (j = 0; j < CPUSTATES; j++)
+ cp_time[j] += pc->pc_cp_time[j];
+ free(pc); /* kvm_getpcpu returns a malloc'd buffer */
+ }
+ return (0);
+}
diff --git a/lib/libkvm/kvm_getcptime.3 b/lib/libkvm/kvm_getcptime.3
new file mode 100644
index 000000000000..a2fea4c7cae4
--- /dev/null
+++ b/lib/libkvm/kvm_getcptime.3
@@ -0,0 +1,75 @@
+.\" Copyright (c) 2008 Yahoo!, Inc.
+.\" All rights reserved.
+.\" Written by: John Baldwin <jhb@FreeBSD.org>
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the author nor the names of any co-contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd August 19, 2008
+.Dt KVM_GETCPTIME 3
+.Os
+.Sh NAME
+.Nm kvm_getcptime
+.Nd fetch global CPU time statistics
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In sys/param.h
+.In sys/resource.h
+.In sys/sysctl.h
+.In kvm.h
+.Ft int
+.Fn kvm_getcptime "kvm_t *kd" "long *cp_time"
+.Sh DESCRIPTION
+The
+.Fn kvm_getcptime
+function stores the global CPU time statistics from the kernel
+.Fa kd
+in the array of counters pointed to by
+.Fa cp_time .
+Note that
+.Fa cp_time
+should point to an array of
+.Dv CPUSTATES
+long integers.
+The format of the counters is identical to that output by the
+.Va kern.cp_time
+sysctl.
+.Sh CACHING
+This function caches the nlist values for various kernel variables which it
+reuses in successive calls.
+You may call the function with
+.Fa kd
+set to
+.Dv NULL
+to clear this cache.
+.Sh RETURN VALUES
+The
+.Fn kvm_getcptime
+function returns 0 on success and -1 on failure.
+If an error occurs,
+then an error message may be retrieved via
+.Xr kvm_geterr 3 .
+.Sh SEE ALSO
+.Xr kvm 3
diff --git a/lib/libkvm/kvm_geterr.3 b/lib/libkvm/kvm_geterr.3
new file mode 100644
index 000000000000..659bdcda82d2
--- /dev/null
+++ b/lib/libkvm/kvm_geterr.3
@@ -0,0 +1,84 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd March 21, 2017
+.Dt KVM_GETERR 3
+.Os
+.Sh NAME
+.Nm kvm_geterr
+.Nd get error message on kvm descriptor
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft char *
+.Fn kvm_geterr "kvm_t *kd"
+.Sh DESCRIPTION
+This function returns a string describing the most recent error condition
+on the descriptor
+.Fa kd .
+The results are undefined if the most recent
+.Xr kvm 3
+library call did not produce an error.
+The string returned is stored in memory owned by
+.Xr kvm 3
+so the message should be copied out and saved elsewhere if necessary.
+.Sh RETURN VALUES
+The function
+.Fn kvm_geterr
+will return "" if
+.Fa kd
+is
+.Dv NULL
+or an error has not been captured for
+.Fa kd .
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+This routine cannot be used to access error conditions due to a failed
+.Fn kvm_openfiles
+call, since failure is indicated by returning a
+.Dv NULL
+descriptor.
+Therefore, errors on open are output to the special error buffer
+passed to
+.Fn kvm_openfiles .
+This option is not available to
+.Fn kvm_open .
diff --git a/lib/libkvm/kvm_getloadavg.3 b/lib/libkvm/kvm_getloadavg.3
new file mode 100644
index 000000000000..66fa20a6dc9b
--- /dev/null
+++ b/lib/libkvm/kvm_getloadavg.3
@@ -0,0 +1,59 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd June 4, 1993
+.Dt KVM_GETLOADAVG 3
+.Os
+.Sh NAME
+.Nm kvm_getloadavg
+.Nd get load average of the system
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft int
+.Fn kvm_getloadavg "kvm_t *kd" "double loadavg[]" "int nelem"
+.Sh DESCRIPTION
+The
+.Fn kvm_getloadavg
+function returns the number of processes in the system run queue
+of the kernel indicated by
+.Fa kd ,
+averaged over various periods of time.
+Up to
+.Fa nelem
+samples are retrieved and assigned to successive elements of
+.Fa loadavg Ns Bq .
+The system imposes a maximum of 3 samples, representing averages
+over the last 1, 5, and 15 minutes, respectively.
+.Sh DIAGNOSTICS
+If the load average was unobtainable, \-1 is returned; otherwise,
+the number of samples actually retrieved is returned.
+.Sh SEE ALSO
+.Xr uptime 1 ,
+.Xr getloadavg 3 ,
+.Xr kvm 3
diff --git a/lib/libkvm/kvm_getloadavg.c b/lib/libkvm/kvm_getloadavg.c
new file mode 100644
index 000000000000..cbefa8f0064a
--- /dev/null
+++ b/lib/libkvm/kvm_getloadavg.c
@@ -0,0 +1,98 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <stdlib.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include "kvm_private.h"
+
+/*
+ * Kernel symbols needed to read the load average from a core:
+ * averunnable is required; fscale exists only on older kernels
+ * (newer ones carry the scale inside struct loadavg).
+ */
+static struct nlist nl[] = {
+ { .n_name = "_averunnable" },
+#define X_AVERUNNABLE 0
+ { .n_name = "_fscale" },
+#define X_FSCALE 1
+ { .n_name = "" },
+};
+
+/*
+ * kvm_getloadavg() -- Get system load averages, from live or dead kernels.
+ *
+ * Put `nelem' samples into `loadavg' array.
+ * Return number of samples retrieved, or -1 on error.
+ */
+int
+kvm_getloadavg(kvm_t *kd, double loadavg[], int nelem)
+{
+ struct loadavg loadinfo;
+ int fscale, i;
+
+ /* Live kernel: the libc routine answers directly. */
+ if (ISALIVE(kd))
+ return (getloadavg(loadavg, nelem));
+
+ if (!kd->arch->ka_native(kd)) {
+ _kvm_err(kd, kd->program,
+ "cannot read loadavg from non-native core");
+ return (-1);
+ }
+
+ /*
+ * Fail only when some symbols were unresolved AND the required
+ * averunnable entry is among them; a missing fscale alone is
+ * tolerated (handled below).
+ */
+ if (kvm_nlist(kd, nl) != 0 && nl[X_AVERUNNABLE].n_type == 0) {
+ _kvm_err(kd, kd->program,
+ "%s: no such symbol", nl[X_AVERUNNABLE].n_name);
+ return (-1);
+ }
+
+/* Read exactly sizeof(*obj) bytes at addr; nonzero result means failure. */
+#define KREAD(kd, addr, obj) \
+ (kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
+ if (KREAD(kd, nl[X_AVERUNNABLE].n_value, &loadinfo)) {
+ _kvm_err(kd, kd->program, "can't read averunnable");
+ return (-1);
+ }
+
+ /*
+ * Old kernels have fscale separately; if not found assume
+ * running new format.
+ */
+ if (nl[X_FSCALE].n_type != 0 &&
+ !KREAD(kd, nl[X_FSCALE].n_value, &fscale))
+ loadinfo.fscale = fscale;
+
+ /* Convert fixed-point samples to doubles; at most 3 are kept. */
+ nelem = MIN(nelem, (int)(sizeof(loadinfo.ldavg) / sizeof(fixpt_t)));
+ for (i = 0; i < nelem; i++)
+ loadavg[i] = (double) loadinfo.ldavg[i] / loadinfo.fscale;
+ return (nelem);
+}
diff --git a/lib/libkvm/kvm_getpcpu.3 b/lib/libkvm/kvm_getpcpu.3
new file mode 100644
index 000000000000..86822db0a364
--- /dev/null
+++ b/lib/libkvm/kvm_getpcpu.3
@@ -0,0 +1,167 @@
+.\" Copyright (c) 2008 Yahoo!, Inc.
+.\" All rights reserved.
+.\" Written by: John Baldwin <jhb@FreeBSD.org>
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the author nor the names of any co-contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd March 15, 2017
+.Dt KVM_GETPCPU 3
+.Os
+.Sh NAME
+.Nm kvm_dpcpu_setcpu ,
+.Nm kvm_getmaxcpu ,
+.Nm kvm_getpcpu
+.Nd access per-CPU data
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In sys/param.h
+.In sys/pcpu.h
+.In sys/sysctl.h
+.In kvm.h
+.Ft int
+.Fn kvm_dpcpu_setcpu "kvm_t *kd" "u_int cpu"
+.Ft int
+.Fn kvm_getmaxcpu "kvm_t *kd"
+.Ft int
+.Fn kvm_getncpus "kvm_t *kd"
+.Ft void *
+.Fn kvm_getpcpu "kvm_t *kd" "int cpu"
+.Ft ssize_t
+.Fn kvm_read_zpcpu "kvm_t *kd" "u_long base" "void *buf" "size_t size" "int cpu"
+.Ft uint64_t
+.Fn kvm_counter_u64_fetch "kvm_t *kd" "u_long base"
+.Sh DESCRIPTION
+The
+.Fn kvm_dpcpu_setcpu ,
+.Fn kvm_getmaxcpu ,
+and
+.Fn kvm_getpcpu
+functions are used to access the per-CPU data of active processors in the
+kernel indicated by
+.Fa kd .
+Per-CPU storage comes in two flavours: data stored directly in a
+.Vt "struct pcpu"
+associated with each CPU, and dynamic per-CPU storage (DPCPU), in which a
+single kernel symbol refers to different data depending on what CPU it is
+accessed from.
+.Pp
+The
+.Fn kvm_getmaxcpu
+function returns the maximum number of CPUs supported by the kernel.
+.Pp
+The
+.Fn kvm_getncpus
+function returns the current number of CPUs in the kernel.
+.Pp
+The
+.Fn kvm_getpcpu
+function returns a buffer holding the per-CPU data for a single CPU.
+This buffer is described by the
+.Vt "struct pcpu"
+type.
+The caller is responsible for releasing the buffer via a call to
+.Xr free 3
+when it is no longer needed.
+If
+.Fa cpu
+is not active, then
+.Dv NULL
+is returned instead.
+.Pp
+The
+.Fn kvm_read_zpcpu
+function is used to obtain private per-CPU copy from a
+.Dv UMA_ZONE_PCPU
+.Xr zone 9 .
+It takes
+.Fa base
+argument as base address of an allocation and copyies
+.Fa size
+bytes into
+.Fa buf
+from the part of allocation that is private to
+.Fa cpu .
+.Pp
+The
+.Fn kvm_counter_u64_fetch
+function fetches the value of a
+.Xr counter 9
+pointed to by the
+.Fa base
+address.
+.Pp
+Symbols for dynamic per-CPU data are accessed via
+.Xr kvm_nlist 3
+as with other symbols.
+.Nm libkvm
+maintains a notion of the "current CPU", set by
+.Fn kvm_dpcpu_setcpu ,
+which defaults to 0.
+Once another CPU is selected,
+.Xr kvm_nlist 3
+will return pointers to that data on the appropriate CPU.
+.Sh CACHING
+.Fn kvm_getmaxcpu
+and
+.Fn kvm_getpcpu
+cache the nlist values for various kernel variables which are
+reused in successive calls.
+You may call either function with
+.Fa kd
+set to
+.Dv NULL
+to clear this cache.
+.Sh RETURN VALUES
+On success, the
+.Fn kvm_getmaxcpu
+function returns the maximum number of CPUs supported by the kernel.
+If an error occurs,
+it returns -1 instead.
+.Pp
+On success, the
+.Fn kvm_getpcpu
+function returns a pointer to an allocated buffer or
+.Dv NULL .
+If an error occurs,
+it returns -1 instead.
+.Pp
+On success, the
+.Fn kvm_dpcpu_setcpu
+call returns 0; if an error occurs, it returns -1 instead.
+.Pp
+On success, the
+.Fn kvm_read_zpcpu
+function returns number of bytes copied.
+If an error occurs, it returns -1 instead.
+.Pp
+If any function encounters an error,
+then an error message may be retrieved via
+.Xr kvm_geterr 3 .
+.Sh SEE ALSO
+.Xr free 3 ,
+.Xr kvm 3 ,
+.Xr counter 9 ,
+.Xr zone 9
diff --git a/lib/libkvm/kvm_getprocs.3 b/lib/libkvm/kvm_getprocs.3
new file mode 100644
index 000000000000..ced683461aa1
--- /dev/null
+++ b/lib/libkvm/kvm_getprocs.3
@@ -0,0 +1,170 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd November 22, 2011
+.Dt KVM_GETPROCS 3
+.Os
+.Sh NAME
+.Nm kvm_getprocs ,
+.Nm kvm_getargv ,
+.Nm kvm_getenvv
+.Nd access user process state
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.In sys/param.h
+.In sys/sysctl.h
+.In sys/user.h
+.Ft struct kinfo_proc *
+.Fn kvm_getprocs "kvm_t *kd" "int op" "int arg" "int *cnt"
+.Ft char **
+.Fn kvm_getargv "kvm_t *kd" "const struct kinfo_proc *p" "int nchr"
+.Ft char **
+.Fn kvm_getenvv "kvm_t *kd" "const struct kinfo_proc *p" "int nchr"
+.Sh DESCRIPTION
+The
+.Fn kvm_getprocs
+function returns a (sub-)set of active processes in the kernel indicated by
+.Fa kd .
+The
+.Fa op
+and
+.Fa arg
+arguments constitute a predicate which limits the set of processes
+returned.
+The value of
+.Fa op
+describes the filtering predicate as follows:
+.Pp
+.Bl -tag -width 20n -offset indent -compact
+.It Dv KERN_PROC_ALL
+all processes and kernel visible threads
+.It Dv KERN_PROC_PROC
+all processes, without threads
+.It Dv KERN_PROC_PID
+processes with process ID
+.Fa arg
+.It Dv KERN_PROC_PGRP
+processes with process group
+.Fa arg
+.It Dv KERN_PROC_SESSION
+processes with session
+.Fa arg
+.It Dv KERN_PROC_TTY
+processes with TTY
+.Fa arg
+.It Dv KERN_PROC_UID
+processes with effective user ID
+.Fa arg
+.It Dv KERN_PROC_RUID
+processes with real user ID
+.Fa arg
+.It Dv KERN_PROC_INC_THREAD
+modifier to return all kernel visible threads when filtering
+by process ID, process group, TTY, user ID, and real user ID
+.El
+.Pp
+The number of processes found is returned in the reference parameter
+.Fa cnt .
+The processes are returned as a contiguous array of kinfo_proc structures.
+This memory is locally allocated, and subsequent calls to
+.Fn kvm_getprocs
+and
+.Fn kvm_close
+will overwrite this storage.
+.Pp
+The
+.Fn kvm_getargv
+function returns a null-terminated argument vector that corresponds to the
+command line arguments passed to process indicated by
+.Fa p .
+Most likely, these arguments correspond to the values passed to
+.Xr exec 3
+on process creation.
+This information is, however,
+deliberately under control of the process itself.
+Note that the original command name can be found, unaltered,
+in the p_comm field of the process structure returned by
+.Fn kvm_getprocs .
+.Pp
+The
+.Fa nchr
+argument indicates the maximum number of characters, including null bytes,
+to use in building the strings.
+If this amount is exceeded, the string
+causing the overflow is truncated and the partial result is returned.
+This is handy for programs like
+.Xr ps 1
+and
+.Xr w 1
+that print only a one line summary of a command and should not copy
+out large amounts of text only to ignore it.
+If
+.Fa nchr
+is zero, no limit is imposed and all argument strings are returned in
+their entirety.
+.Pp
+The memory allocated to the argv pointers and string storage
+is owned by the kvm library.
+Subsequent
+.Fn kvm_getprocs
+and
+.Xr kvm_close 3
+calls will clobber this storage.
+.Pp
+The
+.Fn kvm_getenvv
+function is similar to
+.Fn kvm_getargv
+but returns the vector of environment strings.
+This data is
+also alterable by the process.
+.Sh RETURN VALUES
+The
+.Fn kvm_getprocs ,
+.Fn kvm_getargv ,
+and
+.Fn kvm_getenvv
+functions return
+.Dv NULL
+on failure.
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+These routines do not belong in the kvm interface.
diff --git a/lib/libkvm/kvm_getswapinfo.3 b/lib/libkvm/kvm_getswapinfo.3
new file mode 100644
index 000000000000..98b695b51a5a
--- /dev/null
+++ b/lib/libkvm/kvm_getswapinfo.3
@@ -0,0 +1,109 @@
+.\" Copyright (C) 1999 Matthew Dillon. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd January 2, 2017
+.Dt KVM_SWAPINFO 3
+.Os
+.Sh NAME
+.Nm kvm_getswapinfo
+.Nd return swap summary statistics for the system
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft int
+.Fn kvm_getswapinfo "kvm_t *kd" "struct kvm_swap *" "int maxswap" "int flags"
+.Sh DESCRIPTION
+The
+.Fn kvm_getswapinfo
+function fills an array of
+.Vt kvm_swap
+structures with swap summary
+information for each swap device, for up to
+.Fa maxswap
+\- 1 devices.
+The number of devices, up to
+.Fa maxswap
+\- 1, is returned.
+A grand
+total of all swap devices (including any devices that go beyond
+.Fa maxswap
+\- 1) is returned in one additional array entry.
+This
+entry is not counted in the return value.
+Thus, if you specify a
+.Fa maxswap
+value of 1, the function will typically return the
+value 0 and the single
+.Vt kvm_swap
+structure will be filled with
+the grand total over all swap devices.
+The grand total is calculated
+from all available swap devices whether or not you made room
+for them all in the array.
+The grand total is returned.
+.Pp
+The flags argument is currently unused and must be passed as 0.
+.Pp
+If an error occurs, -1 is returned.
+.Pp
+Each swap partition and the grand total is summarized in the
+.Vt kvm_swap
+structure.
+This structure contains the following fields:
+.Pp
+.Bl -item -offset indent -compact
+.It
+.Va char ksw_devname[] ;
+.It
+.Va u_int ksw_total ;
+.It
+.Va u_int ksw_used ;
+.It
+.Va int ksw_flags ;
+.El
+.Pp
+Values are in
+.Dv PAGE_SIZE Ns 'd
+chunks (see
+.Xr getpagesize 3 ) .
+.Va ksw_flags
+contains
+a copy of the swap device flags.
+.Sh CACHING
+This function caches the nlist values for various kernel variables which
+it reuses in successive calls.
+You may call the function with
+.Fa kd
+set to
+.Dv NULL
+to clear the cache.
+.Sh DIAGNOSTICS
+If the swap summary information was unobtainable, \-1 is returned;
+otherwise, the number of swap devices actually retrieved is returned.
+.Pp
+If the name of the swap device does not fit in the static char buffer
+in the structure, it is truncated.
+The buffer is always zero terminated.
+.Sh SEE ALSO
+.Xr kvm 3
diff --git a/lib/libkvm/kvm_getswapinfo.c b/lib/libkvm/kvm_getswapinfo.c
new file mode 100644
index 000000000000..a713c6752fa7
--- /dev/null
+++ b/lib/libkvm/kvm_getswapinfo.c
@@ -0,0 +1,267 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 1999, Matthew Dillon. All Rights Reserved.
+ * Copyright (c) 2001, Thomas Moestl. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/stat.h>
+#include <sys/blist.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+
+#include <vm/swap_pager.h>
+#include <vm/vm_param.h>
+
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <nlist.h>
+#include <paths.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+
+#include "kvm_private.h"
+
+/* Kernel symbols needed to walk the swap device list in a core. */
+static struct nlist kvm_swap_nl[] = {
+ { .n_name = "_swtailq" }, /* list of swap devices and sizes */
+ { .n_name = "_dmmax" }, /* maximum size of a swap block */
+ { .n_name = NULL }
+};
+
+#define NL_SWTAILQ 0
+#define NL_DMMAX 1
+
+/* Cached state shared across calls; reset via kvm_getswapinfo(NULL, ...). */
+static int kvm_swap_nl_cached = 0;
+static int unswdev; /* number of found swap dev's */
+static int dmmax;
+
+static int kvm_getswapinfo_kvm(kvm_t *, struct kvm_swap *, int, int);
+static int kvm_getswapinfo_sysctl(kvm_t *, struct kvm_swap *, int, int);
+static int nlist_init(kvm_t *);
+static int getsysctl(kvm_t *, const char *, void *, size_t);
+
+/*
+ * KREAD evaluates nonzero on a short read; KGET/KGET2 read an object
+ * and, on failure, set a kvm error and return -1 from the caller.
+ * NOTE(review): KGET2 expands to a bare if-statement (no do/while(0)),
+ * so it is subject to the dangling-else hazard at its call sites.
+ */
+#define KREAD(kd, addr, obj) \
+ (kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
+#define KGET(idx, var) \
+ KGET2(kvm_swap_nl[(idx)].n_value, var, kvm_swap_nl[(idx)].n_name)
+#define KGET2(addr, var, msg) \
+ if (KREAD(kd, (u_long)(addr), (var))) { \
+ _kvm_err(kd, kd->program, "cannot read %s", msg); \
+ return (-1); \
+ }
+
+/* Format a swap device name, optionally prefixed with /dev/. */
+#define GETSWDEVNAME(dev, str, flags) \
+ if (dev == NODEV) { \
+ strlcpy(str, "[NFS swap]", sizeof(str)); \
+ } else { \
+ snprintf( \
+ str, sizeof(str),"%s%s", \
+ ((flags & SWIF_DEV_PREFIX) ? _PATH_DEV : ""), \
+ devname(dev, S_IFCHR) \
+ ); \
+ }
+
+/*
+ * Public entry point: fill swap_ary with per-device swap statistics
+ * plus a grand-total entry.  A NULL kd only clears the nlist cache.
+ * Live kernels go through sysctl; cores are read with kvm.
+ */
+int
+kvm_getswapinfo(kvm_t *kd, struct kvm_swap *swap_ary, int swap_max, int flags)
+{
+
+ /*
+ * clear cache
+ */
+ if (kd == NULL) {
+ kvm_swap_nl_cached = 0;
+ return(0);
+ }
+
+ if (ISALIVE(kd)) {
+ return kvm_getswapinfo_sysctl(kd, swap_ary, swap_max, flags);
+ } else {
+ return kvm_getswapinfo_kvm(kd, swap_ary, swap_max, flags);
+ }
+}
+
+/*
+ * Core-file path: walk the kernel's swtailq of swdevt structures,
+ * filling up to swap_max - 1 array entries and accumulating a grand
+ * total in the final entry.  Returns the number of devices reported
+ * (excluding the total entry), or -1 on error.
+ * NOTE(review): the prototype above declares this static; the
+ * definition omits the keyword, but linkage remains internal.
+ */
+int
+kvm_getswapinfo_kvm(kvm_t *kd, struct kvm_swap *swap_ary, int swap_max,
+ int flags)
+{
+ int i, ttl;
+ TAILQ_HEAD(, swdevt) swtailq;
+ struct swdevt *sp, swinfo;
+ struct kvm_swap tot;
+
+ if (!kd->arch->ka_native(kd)) {
+ _kvm_err(kd, kd->program,
+ "cannot read swapinfo from non-native core");
+ return (-1);
+ }
+
+ if (!nlist_init(kd))
+ return (-1);
+
+ bzero(&tot, sizeof(tot));
+ KGET(NL_SWTAILQ, &swtailq);
+ sp = TAILQ_FIRST(&swtailq);
+ for (i = 0; sp != NULL; i++) {
+ KGET2(sp, &swinfo, "swinfo");
+ /* dmmax pages per device are reserved and not usable. */
+ ttl = swinfo.sw_nblks - dmmax;
+ if (i < swap_max - 1) {
+ bzero(&swap_ary[i], sizeof(swap_ary[i]));
+ swap_ary[i].ksw_total = ttl;
+ swap_ary[i].ksw_used = swinfo.sw_used;
+ swap_ary[i].ksw_flags = swinfo.sw_flags;
+ GETSWDEVNAME(swinfo.sw_dev, swap_ary[i].ksw_devname,
+ flags);
+ }
+ tot.ksw_total += ttl;
+ tot.ksw_used += swinfo.sw_used;
+ sp = TAILQ_NEXT(&swinfo, sw_list);
+ }
+
+ /* The grand total always occupies the last usable slot. */
+ if (i >= swap_max)
+ i = swap_max - 1;
+ if (i >= 0)
+ swap_ary[i] = tot;
+
+ return(i);
+}
+
+#define GETSYSCTL(kd, name, var) \
+ getsysctl(kd, name, &(var), sizeof(var))
+
+/* The maximum MIB length for vm.swap_info and an additional device number */
+#define SWI_MAXMIB 3
+
+/*
+ * Live-kernel path: iterate the vm.swap_info sysctl node, one struct
+ * xswdev per device, until ENOENT marks the end of the list.  Fills
+ * up to swap_max - 1 array entries plus a grand-total entry; returns
+ * the device count (excluding the total), or -1 on error.
+ */
+int
+kvm_getswapinfo_sysctl(kvm_t *kd, struct kvm_swap *swap_ary, int swap_max,
+ int flags)
+{
+ int ti, ttl;
+ size_t mibi, len;
+ int soid[SWI_MAXMIB];
+ struct xswdev xsd;
+ struct kvm_swap tot;
+
+ if (!GETSYSCTL(kd, "vm.dmmax", dmmax))
+ return -1;
+
+ mibi = SWI_MAXMIB - 1;
+ if (sysctlnametomib("vm.swap_info", soid, &mibi) == -1) {
+ _kvm_err(kd, kd->program, "sysctlnametomib failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ bzero(&tot, sizeof(tot));
+ /* Append the device index as the final MIB component. */
+ for (unswdev = 0;; unswdev++) {
+ soid[mibi] = unswdev;
+ len = sizeof(xsd);
+ if (sysctl(soid, mibi + 1, &xsd, &len, NULL, 0) == -1) {
+ /* ENOENT: no more swap devices. */
+ if (errno == ENOENT)
+ break;
+ _kvm_err(kd, kd->program, "cannot read sysctl: %s.",
+ strerror(errno));
+ return -1;
+ }
+ if (len != sizeof(xsd)) {
+ _kvm_err(kd, kd->program, "struct xswdev has unexpected "
+ "size; kernel and libkvm out of sync?");
+ return -1;
+ }
+ if (xsd.xsw_version != XSWDEV_VERSION) {
+ _kvm_err(kd, kd->program, "struct xswdev version "
+ "mismatch; kernel and libkvm out of sync?");
+ return -1;
+ }
+
+ /* dmmax pages per device are reserved and not usable. */
+ ttl = xsd.xsw_nblks - dmmax;
+ if (unswdev < swap_max - 1) {
+ bzero(&swap_ary[unswdev], sizeof(swap_ary[unswdev]));
+ swap_ary[unswdev].ksw_total = ttl;
+ swap_ary[unswdev].ksw_used = xsd.xsw_used;
+ swap_ary[unswdev].ksw_flags = xsd.xsw_flags;
+ GETSWDEVNAME(xsd.xsw_dev, swap_ary[unswdev].ksw_devname,
+ flags);
+ }
+ tot.ksw_total += ttl;
+ tot.ksw_used += xsd.xsw_used;
+ }
+
+ /* The grand total always occupies the last usable slot. */
+ ti = unswdev;
+ if (ti >= swap_max)
+ ti = swap_max - 1;
+ if (ti >= 0)
+ swap_ary[ti] = tot;
+
+ return(ti);
+}
+
+/*
+ * Resolve and cache the swap nlist symbols and read the global dmmax
+ * value.  Returns 1 on success, 0 on failure (note: the KGET macro
+ * inside may instead return -1 directly from this function on a read
+ * error; callers treat any non-1 result as failure).
+ */
+static int
+nlist_init(kvm_t *kd)
+{
+
+ if (kvm_swap_nl_cached)
+ return (1);
+
+ if (kvm_nlist(kd, kvm_swap_nl) < 0)
+ return (0);
+
+ /* Required entries */
+ if (kvm_swap_nl[NL_SWTAILQ].n_value == 0) {
+ _kvm_err(kd, kd->program, "unable to find swtailq");
+ return (0);
+ }
+
+ if (kvm_swap_nl[NL_DMMAX].n_value == 0) {
+ _kvm_err(kd, kd->program, "unable to find dmmax");
+ return (0);
+ }
+
+ /* Get globals, type of swap */
+ KGET(NL_DMMAX, &dmmax);
+
+ kvm_swap_nl_cached = 1;
+ return (1);
+}
+
+/*
+ * Read sysctl "name" into "ptr", requiring exactly "len" bytes.
+ * Returns 1 on success, 0 on failure — note the opposite sense from
+ * the 0/-1 convention used by the getsysctl() in kvm_cptime.c.
+ */
+static int
+getsysctl(kvm_t *kd, const char *name, void *ptr, size_t len)
+{
+ size_t nlen = len;
+ if (sysctlbyname(name, ptr, &nlen, NULL, 0) == -1) {
+ _kvm_err(kd, kd->program, "cannot read sysctl %s:%s", name,
+ strerror(errno));
+ return (0);
+ }
+ /* A size mismatch means kernel and libkvm are out of sync. */
+ if (nlen != len) {
+ _kvm_err(kd, kd->program, "sysctl %s has unexpected size", name);
+ return (0);
+ }
+ return (1);
+}
diff --git a/lib/libkvm/kvm_i386.c b/lib/libkvm/kvm_i386.c
new file mode 100644
index 000000000000..776c486579a1
--- /dev/null
+++ b/lib/libkvm/kvm_i386.c
@@ -0,0 +1,425 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+/*
+ * i386 machine dependent routines for kvm. Hopefully, the forthcoming
+ * vm code will one day obsolete this module.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vm/vm.h>
+#include <kvm.h>
+
+#ifdef __i386__
+#include <machine/vmparam.h> /* For KERNBASE. */
+#endif
+
+#include <limits.h>
+
+#include "kvm_private.h"
+#include "kvm_i386.h"
+
+struct vmstate {
+ void *PTD;
+ int pae;
+ size_t phnum;
+ GElf_Phdr *phdr;
+};
+
+/*
+ * Translate a physical memory address to a file offset in the crash dump.
+ * On success, stores the offset in *ofs and returns the number of bytes
+ * readable from pa to the end of its page; returns 0 if pa is not
+ * covered by the dump.
+ */
+static size_t
+_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
+{
+	struct vmstate *vm = kd->vmst;
+	GElf_Phdr *p;
+	size_t i;
+
+	if (kd->rawdump) {
+		/* Raw dumps are a flat memory image: offset == pa. */
+		*ofs = pa;
+		return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK));
+	}
+
+	/* Find the ELF program header whose physical range covers pa. */
+	for (i = 0; i < vm->phnum; i++) {
+		p = &vm->phdr[i];
+		if (pa >= p->p_paddr && pa < p->p_paddr + p->p_memsz) {
+			*ofs = (pa - p->p_paddr) + p->p_offset;
+			return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK));
+		}
+	}
+	return (0);
+}
+
+/*
+ * Release the i386 vtop state attached to kd.  free(NULL) is a no-op,
+ * so the members need no NULL guards (vmst is allocated zeroed and
+ * PTD/phdr are only set on success).
+ */
+static void
+_i386_freevtop(kvm_t *kd)
+{
+	struct vmstate *vm = kd->vmst;
+
+	free(vm->PTD);
+	free(vm->phdr);
+	free(vm);
+	kd->vmst = NULL;
+}
+
+/*
+ * Accept only 32-bit x86 ELF cores that are not minidumps; minidumps
+ * are handled by the separate minidump backend.
+ */
+static int
+_i386_probe(kvm_t *kd)
+{
+
+	if (!_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386))
+		return (0);
+	return (!_kvm_is_minidump(kd));
+}
+
+/*
+ * Set up virtual-to-physical translation state for an i386 crash dump.
+ * Detects whether the dumped kernel used PAE (by whether the IdlePDPT
+ * symbol resolves) and caches a copy of the page directory pages in
+ * kd->vmst->PTD.  Returns 0 on success, -1 on error.
+ */
+static int
+_i386_initvtop(kvm_t *kd)
+{
+	struct kvm_nlist nl[2];
+	i386_physaddr_t pa;
+	kvaddr_t kernbase;
+	char *PTD;
+	int i;
+
+	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(struct vmstate));
+	if (kd->vmst == NULL) {
+		_kvm_err(kd, kd->program, "cannot allocate vm");
+		return (-1);
+	}
+	kd->vmst->PTD = NULL;
+
+	/* Full ELF cores carry program headers describing physical memory. */
+	if (kd->rawdump == 0) {
+		if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum,
+		    &kd->vmst->phdr) == -1)
+			return (-1);
+	}
+
+	nl[0].n_name = "kernbase";
+	nl[1].n_name = 0;
+
+	if (kvm_nlist2(kd, nl) != 0) {
+#ifdef __i386__
+		kernbase = KERNBASE;	/* for old kernels */
+#else
+		_kvm_err(kd, kd->program, "cannot resolve kernbase");
+		return (-1);
+#endif
+	} else
+		kernbase = nl[0].n_value;
+
+	/* A resolvable IdlePDPT symbol means the kernel was built with PAE. */
+	nl[0].n_name = "IdlePDPT";
+	nl[1].n_name = 0;
+
+	if (kvm_nlist2(kd, nl) == 0) {
+		i386_physaddr_pae_t pa64;
+
+		/* n_value is a VA; subtract kernbase to get its PA. */
+		if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa,
+		    sizeof(pa)) != sizeof(pa)) {
+			_kvm_err(kd, kd->program, "cannot read IdlePDPT");
+			return (-1);
+		}
+		pa = le32toh(pa);
+		/* PAE has four PDPT slots, one page directory page each. */
+		PTD = _kvm_malloc(kd, 4 * I386_PAGE_SIZE);
+		if (PTD == NULL) {
+			_kvm_err(kd, kd->program, "cannot allocate PTD");
+			return (-1);
+		}
+		for (i = 0; i < 4; i++) {
+			if (kvm_read2(kd, pa + (i * sizeof(pa64)), &pa64,
+			    sizeof(pa64)) != sizeof(pa64)) {
+				_kvm_err(kd, kd->program, "Cannot read PDPT");
+				free(PTD);
+				return (-1);
+			}
+			pa64 = le64toh(pa64);
+			if (kvm_read2(kd, pa64 & I386_PG_FRAME_PAE,
+			    PTD + (i * I386_PAGE_SIZE), I386_PAGE_SIZE) !=
+			    I386_PAGE_SIZE) {
+				_kvm_err(kd, kd->program, "cannot read PDPT");
+				free(PTD);
+				return (-1);
+			}
+		}
+		kd->vmst->PTD = PTD;
+		kd->vmst->pae = 1;
+	} else {
+		nl[0].n_name = "IdlePTD";
+		nl[1].n_name = 0;
+
+		if (kvm_nlist2(kd, nl) != 0) {
+			_kvm_err(kd, kd->program, "bad namelist");
+			return (-1);
+		}
+		if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa,
+		    sizeof(pa)) != sizeof(pa)) {
+			_kvm_err(kd, kd->program, "cannot read IdlePTD");
+			return (-1);
+		}
+		pa = le32toh(pa);
+		PTD = _kvm_malloc(kd, I386_PAGE_SIZE);
+		if (PTD == NULL) {
+			_kvm_err(kd, kd->program, "cannot allocate PTD");
+			return (-1);
+		}
+		if (kvm_read2(kd, pa, PTD, I386_PAGE_SIZE) != I386_PAGE_SIZE) {
+			_kvm_err(kd, kd->program, "cannot read PTD");
+			free(PTD);	/* was leaked on this error path */
+			return (-1);
+		}
+		kd->vmst->PTD = PTD;
+		kd->vmst->pae = 0;
+	}
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a dump file offset for a
+ * non-PAE (32-bit PTE) kernel.  On success, stores the offset in *pa
+ * and returns the number of bytes readable before the end of the
+ * containing page; returns 0 on failure.
+ */
+static int
+_i386_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	i386_physaddr_t offset;
+	i386_physaddr_t pte_pa;
+	i386_pde_t pde;
+	i386_pte_t pte;
+	kvaddr_t pdeindex;
+	kvaddr_t pteindex;
+	size_t s;
+	i386_physaddr_t a;
+	off_t ofs;
+	i386_pde_t *PTD;
+
+	vm = kd->vmst;
+	PTD = (i386_pde_t *)vm->PTD;
+	offset = va & I386_PAGE_MASK;
+
+	/*
+	 * If we are initializing (kernel page table descriptor pointer
+	 * not yet set) then return pa == va to avoid infinite recursion.
+	 */
+	if (PTD == NULL) {
+		s = _kvm_pa2off(kd, va, pa);
+		if (s == 0) {
+			_kvm_err(kd, kd->program,
+			    "_i386_vatop: bootstrap data not in dump");
+			goto invalid;
+		} else
+			return (I386_PAGE_SIZE - offset);
+	}
+
+	pdeindex = va >> I386_PDRSHIFT;
+	pde = le32toh(PTD[pdeindex]);
+	if ((pde & I386_PG_V) == 0) {
+		_kvm_err(kd, kd->program, "_i386_vatop: pde not valid");
+		goto invalid;
+	}
+
+	if (pde & I386_PG_PS) {
+		/*
+		 * No second-level page table; ptd describes one 4MB
+		 * page.  (We assume that the kernel wouldn't set
+		 * PG_PS without enabling it in cr0.)
+		 */
+		offset = va & I386_PAGE_PS_MASK;
+		a = (pde & I386_PG_PS_FRAME) + offset;
+		s = _kvm_pa2off(kd, a, pa);
+		if (s == 0) {
+			_kvm_err(kd, kd->program,
+			    "_i386_vatop: 4MB page address not in dump");
+			goto invalid;
+		}
+		return (I386_NBPDR - offset);
+	}
+
+	/* Locate the second-level PTE within the page table page. */
+	pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG - 1);
+	pte_pa = (pde & I386_PG_FRAME) + (pteindex * sizeof(pte));
+
+	s = _kvm_pa2off(kd, pte_pa, &ofs);
+	if (s < sizeof(pte)) {
+		_kvm_err(kd, kd->program, "_i386_vatop: pte_pa not found");
+		goto invalid;
+	}
+
+	/* XXX This has to be a physical address read, kvm_read is virtual */
+	if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) {
+		_kvm_syserr(kd, kd->program, "_i386_vatop: pread");
+		goto invalid;
+	}
+	pte = le32toh(pte);
+	if ((pte & I386_PG_V) == 0) {
+		/* Message previously said "_kvm_kvatop"; fixed for grep-ability. */
+		_kvm_err(kd, kd->program, "_i386_vatop: pte not valid");
+		goto invalid;
+	}
+
+	a = (pte & I386_PG_FRAME) + offset;
+	s = _kvm_pa2off(kd, a, pa);
+	if (s == 0) {
+		_kvm_err(kd, kd->program, "_i386_vatop: address not in dump");
+		goto invalid;
+	} else
+		return (I386_PAGE_SIZE - offset);
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a dump file offset for a PAE
+ * (64-bit PTE) kernel.  Mirrors _i386_vatop() but with 2MB large pages
+ * and wider table entries.  On success, stores the offset in *pa and
+ * returns the bytes readable before the end of the containing page;
+ * returns 0 on failure.
+ */
+static int
+_i386_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	i386_physaddr_pae_t offset;
+	i386_physaddr_pae_t pte_pa;
+	i386_pde_pae_t pde;
+	i386_pte_pae_t pte;
+	kvaddr_t pdeindex;
+	kvaddr_t pteindex;
+	size_t s;
+	i386_physaddr_pae_t a;
+	off_t ofs;
+	i386_pde_pae_t *PTD;
+
+	vm = kd->vmst;
+	PTD = (i386_pde_pae_t *)vm->PTD;
+	offset = va & I386_PAGE_MASK;
+
+	/*
+	 * If we are initializing (kernel page table descriptor pointer
+	 * not yet set) then return pa == va to avoid infinite recursion.
+	 */
+	if (PTD == NULL) {
+		s = _kvm_pa2off(kd, va, pa);
+		if (s == 0) {
+			_kvm_err(kd, kd->program,
+			    "_i386_vatop_pae: bootstrap data not in dump");
+			goto invalid;
+		} else
+			return (I386_PAGE_SIZE - offset);
+	}
+
+	pdeindex = va >> I386_PDRSHIFT_PAE;
+	pde = le64toh(PTD[pdeindex]);
+	if ((pde & I386_PG_V) == 0) {
+		/* Message previously said "_kvm_kvatop_pae"; fixed. */
+		_kvm_err(kd, kd->program, "_i386_vatop_pae: pde not valid");
+		goto invalid;
+	}
+
+	if (pde & I386_PG_PS) {
+		/*
+		 * No second-level page table; ptd describes one 2MB
+		 * page.  (We assume that the kernel wouldn't set
+		 * PG_PS without enabling it in cr0.)
+		 */
+		offset = va & I386_PAGE_PS_MASK_PAE;
+		a = (pde & I386_PG_PS_FRAME_PAE) + offset;
+		s = _kvm_pa2off(kd, a, pa);
+		if (s == 0) {
+			_kvm_err(kd, kd->program,
+			    "_i386_vatop: 2MB page address not in dump");
+			goto invalid;
+		}
+		return (I386_NBPDR_PAE - offset);
+	}
+
+	/* Locate the second-level PTE (sizeof(pte), not sizeof(pde)). */
+	pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG_PAE - 1);
+	pte_pa = (pde & I386_PG_FRAME_PAE) + (pteindex * sizeof(pte));
+
+	s = _kvm_pa2off(kd, pte_pa, &ofs);
+	if (s < sizeof(pte)) {
+		_kvm_err(kd, kd->program, "_i386_vatop_pae: pte_pa not found");
+		goto invalid;
+	}
+
+	/* XXX This has to be a physical address read, kvm_read is virtual */
+	if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) {
+		_kvm_syserr(kd, kd->program, "_i386_vatop_pae: read");
+		goto invalid;
+	}
+	pte = le64toh(pte);
+	if ((pte & I386_PG_V) == 0) {
+		_kvm_err(kd, kd->program, "_i386_vatop_pae: pte not valid");
+		goto invalid;
+	}
+
+	a = (pte & I386_PG_FRAME_PAE) + offset;
+	s = _kvm_pa2off(kd, a, pa);
+	if (s == 0) {
+		_kvm_err(kd, kd->program,
+		    "_i386_vatop_pae: address not in dump");
+		goto invalid;
+	} else
+		return (I386_PAGE_SIZE - offset);
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Arch hook: dispatch a kernel VA translation to the PAE or non-PAE
+ * walker.  Only valid for dead kernels (crash dumps).
+ */
+static int
+_i386_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+
+	if (ISALIVE(kd)) {
+		_kvm_err(kd, 0, "vatop called in live kernel!");
+		return (0);
+	}
+	return (kd->vmst->pae ? _i386_vatop_pae(kd, va, pa) :
+	    _i386_vatop(kd, va, pa));
+}
+
+/*
+ * Arch hook: report whether this libkvm was itself compiled for i386,
+ * i.e. whether the dump's layout is the host's native layout.
+ */
+int
+_i386_native(kvm_t *kd __unused)
+{
+
+#ifdef __i386__
+	return (1);
+#else
+	return (0);
+#endif
+}
+
+/*
+ * Arch descriptor for full (non-minidump) i386 vmcores; registered on
+ * the arch list via KVM_ARCH() and selected at open time by ka_probe.
+ */
+static struct kvm_arch kvm_i386 = {
+	.ka_probe = _i386_probe,
+	.ka_initvtop = _i386_initvtop,
+	.ka_freevtop = _i386_freevtop,
+	.ka_kvatop = _i386_kvatop,
+	.ka_native = _i386_native,
+};
+
+KVM_ARCH(kvm_i386);
diff --git a/lib/libkvm/kvm_i386.h b/lib/libkvm/kvm_i386.h
new file mode 100644
index 000000000000..69f7767bbab7
--- /dev/null
+++ b/lib/libkvm/kvm_i386.h
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 2015 John H. Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __KVM_I386_H__
+#define __KVM_I386_H__
+
+#ifdef __i386__
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#endif
+
+/* Address and page-table entry types: 32-bit non-PAE, 64-bit PAE. */
+typedef uint32_t i386_physaddr_t;
+typedef uint32_t i386_pte_t;
+typedef uint32_t i386_pde_t;
+typedef uint64_t i386_physaddr_pae_t;
+typedef uint64_t i386_pte_pae_t;
+typedef uint64_t i386_pde_pae_t;
+
+/* Page and page-directory geometry (4KB pages; 4MB/2MB large pages). */
+#define I386_PAGE_SHIFT 12
+#define I386_PAGE_SIZE (1 << I386_PAGE_SHIFT)
+#define I386_PAGE_MASK (I386_PAGE_SIZE - 1)
+#define I386_NPTEPG (I386_PAGE_SIZE / sizeof(i386_pte_t))
+#define I386_PDRSHIFT 22
+#define I386_NBPDR (1 << I386_PDRSHIFT)
+#define I386_PAGE_PS_MASK (I386_NBPDR - 1)
+#define I386_NPTEPG_PAE (I386_PAGE_SIZE / sizeof(i386_pte_pae_t))
+#define I386_PDRSHIFT_PAE 21
+#define I386_NBPDR_PAE (1 << I386_PDRSHIFT_PAE)
+#define I386_PAGE_PS_MASK_PAE (I386_NBPDR_PAE - 1)
+
+/* Source: i386/include/pmap.h */
+#define I386_PG_V 0x001
+#define I386_PG_RW 0x002
+#define I386_PG_PS 0x080
+#define I386_PG_NX (1ULL << 63)
+#define I386_PG_FRAME_PAE (0x000ffffffffff000ull)
+#define I386_PG_PS_FRAME_PAE (0x000fffffffe00000ull)
+#define I386_PG_FRAME (0xfffff000)
+#define I386_PG_PS_FRAME (0xffc00000)
+
+/* When built on i386 itself, verify our copies against the kernel headers. */
+#ifdef __i386__
+_Static_assert(PAGE_SHIFT == I386_PAGE_SHIFT, "PAGE_SHIFT mismatch");
+_Static_assert(PAGE_SIZE == I386_PAGE_SIZE, "PAGE_SIZE mismatch");
+_Static_assert(PAGE_MASK == I386_PAGE_MASK, "PAGE_MASK mismatch");
+#if 0
+_Static_assert(NPTEPG == I386_NPTEPG, "NPTEPG mismatch");
+_Static_assert(NBPDR == I386_NBPDR, "NBPDR mismatch");
+#endif
+_Static_assert(PDRSHIFT_NOPAE == I386_PDRSHIFT, "PDRSHIFT mismatch");
+
+_Static_assert(PG_V == I386_PG_V, "PG_V mismatch");
+_Static_assert(PG_PS == I386_PG_PS, "PG_PS mismatch");
+_Static_assert((u_int)PG_FRAME_NOPAE == I386_PG_FRAME, "PG_FRAME mismatch");
+_Static_assert(PG_PS_FRAME_NOPAE == I386_PG_PS_FRAME, "PG_PS_FRAME mismatch");
+#endif
+
+int _i386_native(kvm_t *);
+
+#endif /* !__KVM_I386_H__ */
diff --git a/lib/libkvm/kvm_kerndisp.3 b/lib/libkvm/kvm_kerndisp.3
new file mode 100644
index 000000000000..83ea2528f4d6
--- /dev/null
+++ b/lib/libkvm/kvm_kerndisp.3
@@ -0,0 +1,55 @@
+.\"
+.\" Copyright (c) 2020 Leandro Lupori <luporl@FreeBSD.org>
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd February 5, 2020
+.Dt KVM_KERNDISP 3
+.Os
+.Sh NAME
+.Nm kvm_kerndisp
+.Nd get kernel displacement
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft kssize_t
+.Fn kvm_kerndisp "kvm_t *kd"
+.Sh DESCRIPTION
+.Fn kvm_kerndisp
+returns the number of bytes by which the kernel referenced by
+.Fa kd
+is displaced.
+This is the difference between the kernel's base virtual address at run time
+and the kernel base virtual address specified in the kernel image file.
+.Pp
+Note that if the kernel is moved to a lower memory address,
+the displacement will be negative.
+.Sh RETURN VALUES
+.Fn kvm_kerndisp
+returns the number of bytes by which the kernel is displaced.
+If the kernel is not displaced or if it is not possible to find the
+displacement then 0 is returned.
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_open 3
diff --git a/lib/libkvm/kvm_minidump_aarch64.c b/lib/libkvm/kvm_minidump_aarch64.c
new file mode 100644
index 000000000000..0836f5b2b1db
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_aarch64.c
@@ -0,0 +1,323 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
+ */
+
+#include <sys/cdefs.h>
+/*
+ * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
+ */
+
+#include <sys/param.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vm/vm.h>
+#include <kvm.h>
+
+#include "../../sys/arm64/include/minidump.h"
+
+#include <limits.h>
+
+#include "kvm_private.h"
+#include "kvm_aarch64.h"
+
+#define aarch64_round_page(x, size) roundup2((kvaddr_t)(x), size)
+#define aarch64_trunc_page(x, size) rounddown2((kvaddr_t)(x), size)
+
+struct vmstate {
+ struct minidumphdr hdr;
+ size_t page_size;
+ u_int l3_shift;
+};
+
+/*
+ * Fetch the pteindex'th entry from the cached minidump page-table
+ * block, converting from the dump's little-endian byte order.
+ */
+static aarch64_pte_t
+_aarch64_pte_get(kvm_t *kd, u_long pteindex)
+{
+	aarch64_pte_t *entry;
+
+	entry = _kvm_pmap_get(kd, pteindex, sizeof(*entry));
+	return (le64toh(*entry));
+}
+
+/*
+ * Accept only 64-bit ARM ELF cores that are minidumps.
+ */
+static int
+_aarch64_minidump_probe(kvm_t *kd)
+{
+
+	if (!_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64))
+		return (0);
+	return (_kvm_is_minidump(kd));
+}
+
+/*
+ * Release the aarch64 minidump vtop state attached to kd.
+ */
+static void
+_aarch64_minidump_freevtop(kvm_t *kd)
+{
+
+	free(kd->vmst);
+	kd->vmst = NULL;
+}
+
+/*
+ * Parse and validate the aarch64 minidump header and set up the
+ * physical-address and page-table lookup state.  All header fields are
+ * stored little-endian in the dump and converted in place.  Returns 0
+ * on success, -1 on error; vmst is left attached to kd either way so
+ * ka_freevtop can reclaim it.
+ */
+static int
+_aarch64_minidump_initvtop(kvm_t *kd)
+{
+	struct vmstate *vmst;
+	off_t off, dump_avail_off, sparse_off;
+
+	vmst = _kvm_malloc(kd, sizeof(*vmst));
+	if (vmst == NULL) {
+		_kvm_err(kd, kd->program, "cannot allocate vm");
+		return (-1);
+	}
+	kd->vmst = vmst;
+	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
+	    sizeof(vmst->hdr)) {
+		_kvm_err(kd, kd->program, "cannot read dump header");
+		return (-1);
+	}
+	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
+	    sizeof(vmst->hdr.magic)) != 0) {
+		_kvm_err(kd, kd->program, "not a minidump for this platform");
+		return (-1);
+	}
+
+	vmst->hdr.version = le32toh(vmst->hdr.version);
+	if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
+		_kvm_err(kd, kd->program, "wrong minidump version. "
+		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
+		return (-1);
+	}
+	/* Byte-swap the remaining header fields to host order. */
+	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
+	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
+	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
+	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
+	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
+	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
+	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
+	/* dumpavailsize added in version 2 */
+	if (vmst->hdr.version >= 2) {
+		vmst->hdr.dumpavailsize = le32toh(vmst->hdr.dumpavailsize);
+	} else {
+		vmst->hdr.dumpavailsize = 0;
+	}
+	/* flags added in version 3 */
+	if (vmst->hdr.version >= 3) {
+		vmst->hdr.flags = le32toh(vmst->hdr.flags);
+	} else {
+		/* Older dumps predate 16K-page support; assume 4K. */
+		vmst->hdr.flags = MINIDUMP_FLAG_PS_4K;
+	}
+
+	/* Pick page size and L3 shift from the dump's page-size flag. */
+	switch (vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK) {
+	case MINIDUMP_FLAG_PS_4K:
+		vmst->page_size = AARCH64_PAGE_SIZE_4K;
+		vmst->l3_shift = AARCH64_L3_SHIFT_4K;
+		break;
+	case MINIDUMP_FLAG_PS_16K:
+		vmst->page_size = AARCH64_PAGE_SIZE_16K;
+		vmst->l3_shift = AARCH64_L3_SHIFT_16K;
+		break;
+	default:
+		_kvm_err(kd, kd->program, "unknown page size flag %x",
+		    vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK);
+		return (-1);
+	}
+
+	/* Skip header and msgbuf */
+	dump_avail_off = vmst->page_size +
+	    aarch64_round_page(vmst->hdr.msgbufsize, vmst->page_size);
+
+	/* Skip dump_avail */
+	off = dump_avail_off +
+	    aarch64_round_page(vmst->hdr.dumpavailsize, vmst->page_size);
+
+	/* build physical address lookup table for sparse pages */
+	sparse_off = off +
+	    aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size) +
+	    aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);
+	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+	    vmst->hdr.bitmapsize, off, sparse_off, vmst->page_size) == -1) {
+		return (-1);
+	}
+	off += aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size);
+
+	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
+		return (-1);
+	}
+	/* NOTE(review): final off value is unused; kept for symmetry. */
+	off += aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);
+
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a minidump file offset.
+ * Handles two regions: the direct map (dmapbase..dmapend), translated
+ * arithmetically, and the kernel map (>= kernbase), translated through
+ * the dumped L3 page-table entries.  On success, stores the offset in
+ * *pa and returns the bytes readable to the end of the page; returns 0
+ * on failure.
+ */
+static int
+_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	aarch64_physaddr_t offset;
+	aarch64_pte_t l3;
+	kvaddr_t l3_index;
+	aarch64_physaddr_t a;
+	off_t ofs;
+
+	vm = kd->vmst;
+	offset = va & (kd->vmst->page_size - 1);
+
+	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
+		/* Direct map: VA -> PA is a constant displacement. */
+		a = aarch64_trunc_page(va - vm->hdr.dmapbase + vm->hdr.dmapphys,
+		    kd->vmst->page_size);
+		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
+			    "direct map address 0x%jx not in minidump",
+			    (uintmax_t)va);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (kd->vmst->page_size - offset);
+	} else if (va >= vm->hdr.kernbase) {
+		/* Kernel map: look up the L3 entry for this page. */
+		l3_index = (va - vm->hdr.kernbase) >> kd->vmst->l3_shift;
+		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
+			goto invalid;
+		l3 = _aarch64_pte_get(kd, l3_index);
+		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
+			_kvm_err(kd, kd->program,
+			    "_aarch64_minidump_vatop: pde not valid");
+			goto invalid;
+		}
+		/* Strip attribute bits to recover the physical frame. */
+		a = l3 & ~AARCH64_ATTR_MASK;
+		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
+			    "physical address 0x%jx not in minidump",
+			    (uintmax_t)a);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (kd->vmst->page_size - offset);
+	} else {
+		_kvm_err(kd, kd->program,
+	    "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped",
+		    (uintmax_t)va);
+		goto invalid;
+	}
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Arch hook: translate a kernel VA to a dump offset.  Only valid for
+ * dead kernels; a live kernel has no minidump to consult.
+ */
+static int
+_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+
+	if (!ISALIVE(kd))
+		return (_aarch64_minidump_vatop(kd, va, pa));
+	_kvm_err(kd, 0,
+	    "_aarch64_minidump_kvatop called in live kernel!");
+	return (0);
+}
+
+/*
+ * Arch hook: report whether this libkvm was itself compiled for
+ * aarch64, i.e. whether the dump's layout is the host's native layout.
+ */
+static int
+_aarch64_native(kvm_t *kd __unused)
+{
+
+#ifdef __aarch64__
+	return (1);
+#else
+	return (0);
+#endif
+}
+
+/*
+ * Derive VM protection bits from an aarch64 L3 page-table entry:
+ * readable always; writable unless the AP read-only bit is set;
+ * executable unless the execute-never bit is set.
+ */
+static vm_prot_t
+_aarch64_entry_to_prot(aarch64_pte_t pte)
+{
+	vm_prot_t prot;
+
+	/* Source: arm64/arm64/pmap.c:pmap_protect() */
+	prot = VM_PROT_READ;
+	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
+		prot |= VM_PROT_WRITE;
+	if ((pte & AARCH64_ATTR_XN) == 0)
+		prot |= VM_PROT_EXECUTE;
+	return (prot);
+}
+
+/*
+ * Invoke cb once per dumped page: first the kernel-mapped pages found
+ * via the dumped L3 entries, then the remaining direct-map-only pages
+ * found via the dump bitmap.  Returns 1 on a complete walk, 0 if cb
+ * stopped the walk or the bitmap could not be initialized.
+ */
+static int
+_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+	struct vmstate *vm = kd->vmst;
+	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
+	u_long bmindex, dva, pa, pteindex, va;
+	struct kvm_bitmap bm;
+	vm_prot_t prot;
+	int ret = 0;
+
+	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
+		return (0);
+
+	/* Pass 1: pages reachable through the kernel page table. */
+	for (pteindex = 0; pteindex < nptes; pteindex++) {
+		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);
+
+		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
+			continue;
+
+		va = vm->hdr.kernbase + (pteindex << kd->vmst->l3_shift);
+		pa = pte & ~AARCH64_ATTR_MASK;
+		dva = vm->hdr.dmapbase + pa;
+		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+		    _aarch64_entry_to_prot(pte), kd->vmst->page_size, 0)) {
+			goto out;
+		}
+	}
+
+	/* Pass 2: dumped physical pages, reported via the direct map only. */
+	while (_kvm_bitmap_next(&bm, &bmindex)) {
+		pa = _kvm_bit_id_pa(kd, bmindex, kd->vmst->page_size);
+		if (pa == _KVM_PA_INVALID)
+			break;
+		dva = vm->hdr.dmapbase + pa;
+		if (vm->hdr.dmapend < (dva + kd->vmst->page_size))
+			break;
+		va = 0;		/* no kernel VA known for these pages */
+		prot = VM_PROT_READ | VM_PROT_WRITE;
+		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+		    prot, kd->vmst->page_size, 0)) {
+			goto out;
+		}
+	}
+	ret = 1;
+
+out:
+	_kvm_bitmap_deinit(&bm);
+	return (ret);
+}
+
+/*
+ * Arch descriptor for aarch64 minidumps; registered on the arch list
+ * via KVM_ARCH() and selected at open time by ka_probe.
+ */
+static struct kvm_arch kvm_aarch64_minidump = {
+	.ka_probe = _aarch64_minidump_probe,
+	.ka_initvtop = _aarch64_minidump_initvtop,
+	.ka_freevtop = _aarch64_minidump_freevtop,
+	.ka_kvatop = _aarch64_minidump_kvatop,
+	.ka_native = _aarch64_native,
+	.ka_walk_pages = _aarch64_minidump_walk_pages,
+};
+
+KVM_ARCH(kvm_aarch64_minidump);
diff --git a/lib/libkvm/kvm_minidump_amd64.c b/lib/libkvm/kvm_minidump_amd64.c
new file mode 100644
index 000000000000..e121208a628a
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_amd64.c
@@ -0,0 +1,440 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2006 Peter Wemm
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+/*
+ * AMD64 machine dependent routines for kvm and minidumps.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vm/vm.h>
+#include <kvm.h>
+
+#include "../../sys/amd64/include/minidump.h"
+
+#include <limits.h>
+
+#include "kvm_private.h"
+#include "kvm_amd64.h"
+
+/* Round a byte count up to a whole amd64 (4K) page. */
+#define	amd64_round_page(x)	roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)
+/*
+ * Version 1 dumps predate the AMD64_PAGE_MASK macro; both arms compute
+ * the offset within a 4K page.
+ */
+#define	VM_IS_V1(vm)	(vm->hdr.version == 1)
+#define	VA_OFF(vm, va) \
+	(VM_IS_V1(vm) ? ((va) & (AMD64_PAGE_SIZE - 1)) : ((va) & AMD64_PAGE_MASK))
+
+/* Per-dump state: the parsed minidump header. */
+struct vmstate {
+	struct minidumphdr hdr;
+};
+
+/*
+ * Translate the hardware protection bits of an amd64 PTE/PDE into
+ * VM_PROT_* flags: every mapped page is readable, PG_RW grants write,
+ * and a clear PG_NX bit grants execute.
+ */
+static vm_prot_t
+_amd64_entry_to_prot(uint64_t entry)
+{
+
+	return (VM_PROT_READ |
+	    ((entry & AMD64_PG_RW) != 0 ? VM_PROT_WRITE : 0) |
+	    ((entry & AMD64_PG_NX) == 0 ? VM_PROT_EXECUTE : 0));
+}
+
+/*
+ * Version 2 minidumps use page directory entries, while version 1 use page
+ * table entries.
+ */
+
+static amd64_pde_t
+_amd64_pde_get(kvm_t *kd, u_long pdeindex)
+{
+	amd64_pde_t *p;
+
+	/* PDEs are stored little-endian in the dump file. */
+	p = _kvm_pmap_get(kd, pdeindex, sizeof(*p));
+	return (le64toh(*p));
+}
+
+static amd64_pte_t
+_amd64_pte_get(kvm_t *kd, u_long pteindex)
+{
+	amd64_pte_t *p;
+
+	/* PTEs are stored little-endian in the dump file. */
+	p = _kvm_pmap_get(kd, pteindex, sizeof(*p));
+	return (le64toh(*p));
+}
+
+/*
+ * Map the page of PTEs referenced by the given page directory entry and
+ * return a pointer to its first entry, or NULL if the PDE (or the page
+ * it points at) is not present in the dump.
+ */
+static amd64_pte_t *
+_amd64_pde_first_pte(kvm_t *kd, u_long pdeindex)
+{
+	amd64_pde_t *pde;
+
+	pde = _kvm_pmap_get(kd, pdeindex, sizeof(*pde));
+	if (pde == NULL)
+		return NULL;
+	/*
+	 * Read the PDE through its 64-bit type and byte-swap it, matching
+	 * _amd64_pde_get(): the previous "u_long *" access truncated the
+	 * entry on ILP32 hosts and masked the raw little-endian value,
+	 * yielding a wrong frame address when cross-debugging.
+	 */
+	return _kvm_map_get(kd, le64toh(*pde) & AMD64_PG_FRAME,
+	    AMD64_PAGE_SIZE);
+}
+
+/* Accept the image only if it is a 64-bit x86 kernel minidump. */
+static int
+_amd64_minidump_probe(kvm_t *kd)
+{
+
+	if (!_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64))
+		return (0);
+	return (_kvm_is_minidump(kd));
+}
+
+/* Release the per-dump translation state. */
+static void
+_amd64_minidump_freevtop(kvm_t *kd)
+{
+
+	free(kd->vmst);
+	kd->vmst = NULL;
+}
+
+/*
+ * Read and validate the minidump header, convert its fields from
+ * little-endian, then initialize the sparse page translation table
+ * (including the dump_avail region for version 3 dumps) and the dumped
+ * kernel page-table map.  Returns 0 on success, -1 on failure; kd->vmst
+ * stays allocated for _amd64_minidump_freevtop() to release.
+ */
+static int
+_amd64_minidump_initvtop(kvm_t *kd)
+{
+	struct vmstate *vmst;
+	off_t off, dump_avail_off, sparse_off;
+
+	vmst = _kvm_malloc(kd, sizeof(*vmst));
+	if (vmst == NULL) {
+		_kvm_err(kd, kd->program, "cannot allocate vm");
+		return (-1);
+	}
+	kd->vmst = vmst;
+	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
+	    sizeof(vmst->hdr)) {
+		_kvm_err(kd, kd->program, "cannot read dump header");
+		return (-1);
+	}
+	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
+		_kvm_err(kd, kd->program, "not a minidump for this platform");
+		return (-1);
+	}
+
+	/*
+	 * NB: amd64 minidump header is binary compatible between version 1
+	 * and version 2; version 3 adds the dumpavailsize field
+	 */
+	vmst->hdr.version = le32toh(vmst->hdr.version);
+	if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
+		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
+		    MINIDUMP_VERSION, vmst->hdr.version);
+		return (-1);
+	}
+	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
+	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
+	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
+	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
+	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
+	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
+	/* Pre-version-3 headers have no dumpavailsize field. */
+	vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+	    le32toh(vmst->hdr.dumpavailsize) : 0;
+
+	/* Skip header and msgbuf */
+	dump_avail_off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);
+
+	/* Skip dump_avail */
+	off = dump_avail_off + amd64_round_page(vmst->hdr.dumpavailsize);
+
+	/* The sparse page data follows the bitmap and the page tables. */
+	sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
+	    amd64_round_page(vmst->hdr.pmapsize);
+	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+	    vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE) == -1) {
+		return (-1);
+	}
+	off += amd64_round_page(vmst->hdr.bitmapsize);
+
+	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
+		return (-1);
+	}
+	off += amd64_round_page(vmst->hdr.pmapsize);
+
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address into a file offset for a version 1
+ * minidump, whose pmap area holds 4K page-table entries.  Handles both
+ * page-table mapped KVA and direct-map addresses.  Returns the number of
+ * bytes valid at *pa (up to the end of the page), or 0 on failure.
+ */
+static int
+_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	amd64_physaddr_t offset;
+	amd64_pte_t pte;
+	kvaddr_t pteindex;
+	amd64_physaddr_t a;
+	off_t ofs;
+
+	vm = kd->vmst;
+	offset = va & AMD64_PAGE_MASK;
+
+	if (va >= vm->hdr.kernbase) {
+		/* KVA: look the page up in the dumped page table. */
+		pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
+		if (pteindex >= vm->hdr.pmapsize / sizeof(pte))
+			goto invalid;
+		pte = _amd64_pte_get(kd, pteindex);
+		if ((pte & AMD64_PG_V) == 0) {
+			_kvm_err(kd, kd->program,
+			    "_amd64_minidump_vatop_v1: pte not valid");
+			goto invalid;
+		}
+		a = pte & AMD64_PG_FRAME;
+		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program,
+	    "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
+			    (uintmax_t)a);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (AMD64_PAGE_SIZE - offset);
+	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
+		/* Direct map: the physical address is the dmap offset. */
+		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
+		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program,
+    "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump",
+			    (uintmax_t)va);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (AMD64_PAGE_SIZE - offset);
+	} else {
+		_kvm_err(kd, kd->program,
+	    "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped",
+		    (uintmax_t)va);
+		goto invalid;
+	}
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address into a file offset for a version 2+
+ * minidump, whose pmap area holds page directory entries: 4K mappings
+ * require an extra page-table lookup, 2M (PG_PS) mappings are resolved
+ * directly from the PDE.  Returns the number of bytes valid at *pa, or
+ * 0 on failure.
+ */
+static int
+_amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	amd64_pte_t pt[AMD64_NPTEPG];
+	struct vmstate *vm;
+	amd64_physaddr_t offset;
+	amd64_pde_t pde;
+	amd64_pte_t pte;
+	kvaddr_t pteindex;
+	kvaddr_t pdeindex;
+	amd64_physaddr_t a;
+	off_t ofs;
+
+	vm = kd->vmst;
+	offset = va & AMD64_PAGE_MASK;
+
+	if (va >= vm->hdr.kernbase) {
+		pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT;
+		if (pdeindex >= vm->hdr.pmapsize / sizeof(pde))
+			goto invalid;
+		pde = _amd64_pde_get(kd, pdeindex);
+		if ((pde & AMD64_PG_V) == 0) {
+			_kvm_err(kd, kd->program,
+			    "_amd64_minidump_vatop: pde not valid");
+			goto invalid;
+		}
+		if ((pde & AMD64_PG_PS) == 0) {
+			/* 4K mapping: fetch the PTE page the PDE points at. */
+			a = pde & AMD64_PG_FRAME;
+			/* TODO: Just read the single PTE */
+			ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
+			if (ofs == -1) {
+				_kvm_err(kd, kd->program,
+				    "cannot find page table entry for %ju",
+				    (uintmax_t)a);
+				goto invalid;
+			}
+			if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) !=
+			    AMD64_PAGE_SIZE) {
+				_kvm_err(kd, kd->program,
+				    "cannot read page table entry for %ju",
+				    (uintmax_t)a);
+				goto invalid;
+			}
+			pteindex = (va >> AMD64_PAGE_SHIFT) &
+			    (AMD64_NPTEPG - 1);
+			pte = le64toh(pt[pteindex]);
+			if ((pte & AMD64_PG_V) == 0) {
+				_kvm_err(kd, kd->program,
+				    "_amd64_minidump_vatop: pte not valid");
+				goto invalid;
+			}
+			a = pte & AMD64_PG_FRAME;
+		} else {
+			/*
+			 * 2M page: add the 4K-aligned offset within the 2M
+			 * mapping (the XOR clears the page-offset bits).
+			 */
+			a = pde & AMD64_PG_PS_FRAME;
+			a += (va & AMD64_PDRMASK) ^ offset;
+		}
+		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program,
+	    "_amd64_minidump_vatop: physical address 0x%jx not in minidump",
+			    (uintmax_t)a);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (AMD64_PAGE_SIZE - offset);
+	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
+		/* Direct map: the physical address is the dmap offset. */
+		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
+		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program,
+	    "_amd64_minidump_vatop: direct map address 0x%jx not in minidump",
+			    (uintmax_t)va);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (AMD64_PAGE_SIZE - offset);
+	} else {
+		_kvm_err(kd, kd->program,
+	    "_amd64_minidump_vatop: virtual address 0x%jx not minidumped",
+		    (uintmax_t)va);
+		goto invalid;
+	}
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Kernel VA -> dump file offset entry point; dispatches to the v1 or
+ * v2+ translator based on the dump header version.
+ */
+static int
+_amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+
+	if (ISALIVE(kd)) {
+		_kvm_err(kd, 0,
+		    "_amd64_minidump_kvatop called in live kernel!");
+		return (0);
+	}
+	vm = kd->vmst;
+	if (vm->hdr.version == 1)
+		return (_amd64_minidump_vatop_v1(kd, va, pa));
+	return (_amd64_minidump_vatop(kd, va, pa));
+}
+
+/*
+ * Walk every page recorded in a version 2+ minidump and hand each one
+ * to "cb".  PDE-mapped pages (both 2M and 4K) are visited first and
+ * marked in the bitmap; the remaining bitmap pages are then reported via
+ * the direct map only (va == 0).  Returns 1 on a complete walk, 0 if the
+ * callback aborted or the dump is version 1 (no PDEs to walk).
+ */
+static int
+_amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+	struct vmstate *vm = kd->vmst;
+	u_long npdes = vm->hdr.pmapsize / sizeof(amd64_pde_t);
+	u_long bmindex, dva, pa, pdeindex, va;
+	struct kvm_bitmap bm;
+	int ret = 0;
+	vm_prot_t prot;
+	unsigned int pgsz = AMD64_PAGE_SIZE;
+
+	/* Version 1 dumps carry PTEs, not PDEs; nothing to walk here. */
+	if (vm->hdr.version < 2)
+		return (0);
+
+	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
+		return (0);
+
+	for (pdeindex = 0; pdeindex < npdes; pdeindex++) {
+		amd64_pde_t pde = _amd64_pde_get(kd, pdeindex);
+		amd64_pte_t *ptes;
+		u_long i;
+
+		va = vm->hdr.kernbase + (pdeindex << AMD64_PDRSHIFT);
+		if ((pde & AMD64_PG_V) == 0)
+			continue;
+
+		if ((pde & AMD64_PG_PS) != 0) {
+			/*
+			 * Large page. Iterate on each 4K page section
+			 * within this page. This differs from 4K pages in
+			 * that every page here uses the same PDE to
+			 * generate permissions.
+			 */
+			pa = (pde & AMD64_PG_PS_FRAME) +
+			    ((va & AMD64_PDRMASK) ^ VA_OFF(vm, va));
+			dva = vm->hdr.dmapbase + pa;
+			_kvm_bitmap_set(&bm, _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
+			if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+			    _amd64_entry_to_prot(pde), AMD64_NBPDR, pgsz)) {
+				goto out;
+			}
+			continue;
+		}
+
+		/* 4K pages: pde references another page of entries. */
+		ptes = _amd64_pde_first_pte(kd, pdeindex);
+		/* Ignore page directory pages that were not dumped. */
+		if (ptes == NULL)
+			continue;
+
+		for (i = 0; i < AMD64_NPTEPG; i++) {
+			/*
+			 * NOTE(review): raw PTE used without le64toh(),
+			 * unlike _amd64_pte_get(), and the u_long cast
+			 * truncates on ILP32 hosts — harmless on native
+			 * little-endian LP64; confirm for cross-debugging.
+			 */
+			amd64_pte_t pte = (u_long)ptes[i];
+
+			pa = pte & AMD64_PG_FRAME;
+			dva = vm->hdr.dmapbase + pa;
+			if ((pte & AMD64_PG_V) != 0) {
+				_kvm_bitmap_set(&bm,
+				    _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
+				if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+				    _amd64_entry_to_prot(pte), pgsz, 0)) {
+					goto out;
+				}
+			}
+			va += AMD64_PAGE_SIZE;
+		}
+	}
+
+	/* Remaining dumped pages, visible only through the direct map. */
+	while (_kvm_bitmap_next(&bm, &bmindex)) {
+		pa = _kvm_bit_id_pa(kd, bmindex, AMD64_PAGE_SIZE);
+		if (pa == _KVM_PA_INVALID)
+			break;
+		dva = vm->hdr.dmapbase + pa;
+		if (vm->hdr.dmapend < (dva + pgsz))
+			break;
+		va = 0;
+		/* amd64/pmap.c: create_pagetables(): dmap always R|W. */
+		prot = VM_PROT_READ | VM_PROT_WRITE;
+		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, pgsz, 0)) {
+			goto out;
+		}
+	}
+
+	ret = 1;
+
+out:
+	_kvm_bitmap_deinit(&bm);
+	return (ret);
+}
+
+/* Dispatch table hooking amd64 minidump support into the kvm core. */
+static struct kvm_arch kvm_amd64_minidump = {
+	.ka_probe = _amd64_minidump_probe,
+	.ka_initvtop = _amd64_minidump_initvtop,
+	.ka_freevtop = _amd64_minidump_freevtop,
+	.ka_kvatop = _amd64_minidump_kvatop,
+	.ka_native = _amd64_native,
+	.ka_walk_pages = _amd64_minidump_walk_pages,
+};
+
+KVM_ARCH(kvm_amd64_minidump);
diff --git a/lib/libkvm/kvm_minidump_arm.c b/lib/libkvm/kvm_minidump_arm.c
new file mode 100644
index 000000000000..9054e4d1f45f
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_arm.c
@@ -0,0 +1,276 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2008 Semihalf, Grzegorz Bernacki
+ * Copyright (c) 2006 Peter Wemm
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: FreeBSD: src/lib/libkvm/kvm_minidump_i386.c,v 1.2 2006/06/05 08:51:14
+ */
+
+#include <sys/cdefs.h>
+/*
+ * ARM machine dependent routines for kvm and minidumps.
+ */
+
+#include <sys/endian.h>
+#include <sys/param.h>
+#include <vm/vm.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "../../sys/arm/include/minidump.h"
+
+#include "kvm_private.h"
+#include "kvm_arm.h"
+
+/* Round a byte count up to a whole ARM (4K) page. */
+#define	arm_round_page(x)	roundup2((kvaddr_t)(x), ARM_PAGE_SIZE)
+
+/* Per-dump state: parsed header plus the dump's ELF byte-order tag. */
+struct vmstate {
+	struct minidumphdr hdr;
+	unsigned char ei_data;
+};
+
+static arm_pt_entry_t
+_arm_pte_get(kvm_t *kd, u_long pteindex)
+{
+	arm_pt_entry_t *p;
+
+	/* Convert from the dump's byte order to the host's. */
+	p = _kvm_pmap_get(kd, pteindex, sizeof(*p));
+	return (_kvm32toh(kd, *p));
+}
+
+/* Accept the image only if it is a 32-bit ARM kernel minidump. */
+static int
+_arm_minidump_probe(kvm_t *kd)
+{
+
+	if (!_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM))
+		return (0);
+	return (_kvm_is_minidump(kd));
+}
+
+/* Release the per-dump translation state. */
+static void
+_arm_minidump_freevtop(kvm_t *kd)
+{
+
+	free(kd->vmst);
+	kd->vmst = NULL;
+}
+
+/*
+ * Read and validate the minidump header, convert its fields from the
+ * dump's byte order, then initialize the sparse page translation table
+ * and the dumped kernel page-table map.  Returns 0 on success, -1 on
+ * failure; kd->vmst stays allocated for _arm_minidump_freevtop().
+ */
+static int
+_arm_minidump_initvtop(kvm_t *kd)
+{
+	struct vmstate *vmst;
+	off_t off, dump_avail_off, sparse_off;
+
+	vmst = _kvm_malloc(kd, sizeof(*vmst));
+	if (vmst == NULL) {
+		_kvm_err(kd, kd->program, "cannot allocate vm");
+		return (-1);
+	}
+
+	kd->vmst = vmst;
+
+	if (pread(kd->pmfd, &vmst->hdr,
+	    sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) {
+		_kvm_err(kd, kd->program, "cannot read dump header");
+		return (-1);
+	}
+
+	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
+	    sizeof(vmst->hdr.magic)) != 0) {
+		_kvm_err(kd, kd->program, "not a minidump for this platform");
+		return (-1);
+	}
+	vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
+	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
+		_kvm_err(kd, kd->program, "wrong minidump version. "
+		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
+		return (-1);
+	}
+	vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
+	vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
+	vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
+	vmst->hdr.kernbase = _kvm32toh(kd, vmst->hdr.kernbase);
+	vmst->hdr.arch = _kvm32toh(kd, vmst->hdr.arch);
+	vmst->hdr.mmuformat = _kvm32toh(kd, vmst->hdr.mmuformat);
+	if (vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_UNKNOWN) {
+		/* This is a safe default as 1K pages are not used. */
+		vmst->hdr.mmuformat = MINIDUMP_MMU_FORMAT_V6;
+	}
+	/* Pre-version-2 headers have no dumpavailsize field. */
+	vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+	    _kvm32toh(kd, vmst->hdr.dumpavailsize) : 0;
+
+	/* Skip header and msgbuf */
+	dump_avail_off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize);
+
+	/* Skip dump_avail */
+	off = dump_avail_off + arm_round_page(vmst->hdr.dumpavailsize);
+
+	/* The sparse page data follows the bitmap and the page tables. */
+	sparse_off = off + arm_round_page(vmst->hdr.bitmapsize) +
+	    arm_round_page(vmst->hdr.ptesize);
+	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+	    vmst->hdr.bitmapsize, off, sparse_off, ARM_PAGE_SIZE) == -1) {
+		return (-1);
+	}
+	off += arm_round_page(vmst->hdr.bitmapsize);
+
+	if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
+		return (-1);
+	}
+	off += arm_round_page(vmst->hdr.ptesize);
+
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address into a file offset in the minidump.
+ * Supports 4K small pages and 64K large pages; 1K sub-pages (L2_TYPE_T)
+ * are rejected on the V4 MMU format.  Returns the number of bytes valid
+ * at *pa (up to the end of the 4K page), or 0 on failure.
+ */
+static int
+_arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	arm_pt_entry_t pte;
+	arm_physaddr_t offset, a;
+	kvaddr_t pteindex;
+	off_t ofs;
+
+	if (ISALIVE(kd)) {
+		_kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!");
+		return (0);
+	}
+
+	vm = kd->vmst;
+
+	if (va >= vm->hdr.kernbase) {
+		pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT;
+		if (pteindex >= vm->hdr.ptesize / sizeof(pte))
+			goto invalid;
+		pte = _arm_pte_get(kd, pteindex);
+		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) {
+			_kvm_err(kd, kd->program,
+			    "_arm_minidump_kvatop: pte not valid");
+			goto invalid;
+		}
+		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
+			/* 64K page -> convert to be like 4K page */
+			offset = va & ARM_L2_S_OFFSET;
+			a = (pte & ARM_L2_L_FRAME) +
+			    (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME);
+		} else {
+			/* 1K sub-pages are not supported on the V4 MMU. */
+			if (kd->vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 &&
+			    (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) {
+				_kvm_err(kd, kd->program,
+				    "_arm_minidump_kvatop: pte not supported");
+				goto invalid;
+			}
+			/* 4K page */
+			offset = va & ARM_L2_S_OFFSET;
+			a = pte & ARM_L2_S_FRAME;
+		}
+
+		ofs = _kvm_pt_find(kd, a, ARM_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program, "_arm_minidump_kvatop: "
+			    "physical address 0x%jx not in minidump",
+			    (uintmax_t)a);
+			goto invalid;
+		}
+
+		*pa = ofs + offset;
+		return (ARM_PAGE_SIZE - offset);
+	} else
+		_kvm_err(kd, kd->program, "_arm_minidump_kvatop: virtual "
+		    "address 0x%jx not minidumped", (uintmax_t)va);
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Translate an ARM PTE's protection bits into VM_PROT_* flags; the bit
+ * layout depends on the dump's MMU format.
+ */
+static vm_prot_t
+_arm_entry_to_prot(kvm_t *kd, arm_pt_entry_t pte)
+{
+	vm_prot_t prot;
+
+	prot = VM_PROT_READ;
+	if (kd->vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4) {
+		/* Source: arm/arm/pmap-v4.c:pmap_fault_fixup() */
+		if ((pte & ARM_L2_S_PROT_W) != 0)
+			prot |= VM_PROT_WRITE;
+	} else {
+		/* Source: arm/arm/pmap-v6.c:pmap_protect() */
+		if ((pte & ARM_PTE2_RO) == 0)
+			prot |= VM_PROT_WRITE;
+		if ((pte & ARM_PTE2_NX) == 0)
+			prot |= VM_PROT_EXECUTE;
+	}
+	return (prot);
+}
+
+/*
+ * Walk every valid PTE in the dumped kernel page table and hand the
+ * corresponding page to "cb".  64K large pages are reported one 4K
+ * chunk at a time; 1K sub-pages (V4 MMU) are skipped.  Returns 1 on a
+ * complete walk, 0 if the callback aborted.
+ */
+static int
+_arm_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+	struct vmstate *vm = kd->vmst;
+	u_long nptes = vm->hdr.ptesize / sizeof(arm_pt_entry_t);
+	u_long dva, pa, pteindex, va;
+
+	for (pteindex = 0; pteindex < nptes; pteindex++) {
+		arm_pt_entry_t pte = _arm_pte_get(kd, pteindex);
+
+		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV)
+			continue;
+
+		va = vm->hdr.kernbase + (pteindex << ARM_PAGE_SHIFT);
+		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
+			/* 64K page */
+			pa = (pte & ARM_L2_L_FRAME) +
+			    (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME);
+		} else {
+			/* 1K sub-pages are unsupported on the V4 MMU. */
+			if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 &&
+			    (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) {
+				continue;
+			}
+			/* 4K page */
+			pa = pte & ARM_L2_S_FRAME;
+		}
+
+		dva = 0; /* no direct map on this platform */
+		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+		    _arm_entry_to_prot(kd, pte), ARM_PAGE_SIZE, 0))
+			return (0);
+	}
+	return (1);
+}
+
+/* Dispatch table hooking ARM minidump support into the kvm core. */
+static struct kvm_arch kvm_arm_minidump = {
+	.ka_probe = _arm_minidump_probe,
+	.ka_initvtop = _arm_minidump_initvtop,
+	.ka_freevtop = _arm_minidump_freevtop,
+	.ka_kvatop = _arm_minidump_kvatop,
+	.ka_native = _arm_native,
+	.ka_walk_pages = _arm_minidump_walk_pages,
+};
+
+KVM_ARCH(kvm_arm_minidump);
diff --git a/lib/libkvm/kvm_minidump_i386.c b/lib/libkvm/kvm_minidump_i386.c
new file mode 100644
index 000000000000..65fd1f43e21e
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_i386.c
@@ -0,0 +1,344 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2006 Peter Wemm
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+/*
+ * i386 machine dependent routines for kvm and minidumps.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vm/vm.h>
+#include <kvm.h>
+
+#include "../../sys/i386/include/minidump.h"
+
+#include <limits.h>
+
+#include "kvm_private.h"
+#include "kvm_i386.h"
+
+/* Round a byte count up to a whole i386 (4K) page. */
+#define	i386_round_page(x)	roundup2((kvaddr_t)(x), I386_PAGE_SIZE)
+
+/* Per-dump state: the parsed minidump header. */
+struct vmstate {
+	struct minidumphdr hdr;
+};
+
+static i386_pte_pae_t
+_i386_pte_pae_get(kvm_t *kd, u_long pteindex)
+{
+	i386_pte_pae_t *p;
+
+	/* 64-bit PAE PTEs are stored little-endian in the dump. */
+	p = _kvm_pmap_get(kd, pteindex, sizeof(*p));
+	return (le64toh(*p));
+}
+
+static i386_pte_t
+_i386_pte_get(kvm_t *kd, u_long pteindex)
+{
+	i386_pte_t *p;
+
+	/* 32-bit PTEs are stored little-endian in the dump. */
+	p = _kvm_pmap_get(kd, pteindex, sizeof(*p));
+	return (le32toh(*p));
+}
+
+/* Accept the image only if it is a 32-bit x86 kernel minidump. */
+static int
+_i386_minidump_probe(kvm_t *kd)
+{
+
+	if (!_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386))
+		return (0);
+	return (_kvm_is_minidump(kd));
+}
+
+/* Release the per-dump translation state. */
+static void
+_i386_minidump_freevtop(kvm_t *kd)
+{
+
+	free(kd->vmst);
+	kd->vmst = NULL;
+}
+
+/*
+ * Read and validate the minidump header, convert its fields from
+ * little-endian, then initialize the sparse page translation table and
+ * the dumped kernel page-table map.  Returns 0 on success, -1 on
+ * failure; kd->vmst stays allocated for _i386_minidump_freevtop().
+ */
+static int
+_i386_minidump_initvtop(kvm_t *kd)
+{
+	struct vmstate *vmst;
+	off_t off, dump_avail_off, sparse_off;
+
+	vmst = _kvm_malloc(kd, sizeof(*vmst));
+	if (vmst == NULL) {
+		_kvm_err(kd, kd->program, "cannot allocate vm");
+		return (-1);
+	}
+	kd->vmst = vmst;
+	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
+	    sizeof(vmst->hdr)) {
+		_kvm_err(kd, kd->program, "cannot read dump header");
+		return (-1);
+	}
+	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
+		_kvm_err(kd, kd->program, "not a minidump for this platform");
+		return (-1);
+	}
+	vmst->hdr.version = le32toh(vmst->hdr.version);
+	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
+		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
+		    MINIDUMP_VERSION, vmst->hdr.version);
+		return (-1);
+	}
+	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
+	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
+	vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize);
+	vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase);
+	vmst->hdr.paemode = le32toh(vmst->hdr.paemode);
+	/* Pre-version-2 headers have no dumpavailsize field. */
+	vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+	    le32toh(vmst->hdr.dumpavailsize) : 0;
+
+	/* Skip header and msgbuf */
+	dump_avail_off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize);
+
+	/* Skip dump_avail */
+	off = dump_avail_off + i386_round_page(vmst->hdr.dumpavailsize);
+
+	/* The sparse page data follows the bitmap and the page tables. */
+	sparse_off = off + i386_round_page(vmst->hdr.bitmapsize) +
+	    i386_round_page(vmst->hdr.ptesize);
+	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+	    vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE) == -1) {
+		return (-1);
+	}
+	off += i386_round_page(vmst->hdr.bitmapsize);
+
+	if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
+		return (-1);
+	}
+	off += i386_round_page(vmst->hdr.ptesize);
+
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address into a file offset for a PAE-mode
+ * dump (64-bit PTEs).  i386 has no direct map, so only page-table mapped
+ * KVA is accepted.  Returns the number of bytes valid at *pa (up to the
+ * end of the page), or 0 on failure.
+ */
+static int
+_i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	i386_physaddr_pae_t offset;
+	i386_pte_pae_t pte;
+	kvaddr_t pteindex;
+	i386_physaddr_pae_t a;
+	off_t ofs;
+
+	vm = kd->vmst;
+	offset = va & I386_PAGE_MASK;
+
+	if (va >= vm->hdr.kernbase) {
+		pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT;
+		if (pteindex >= vm->hdr.ptesize / sizeof(pte))
+			goto invalid;
+		pte = _i386_pte_pae_get(kd, pteindex);
+		if ((pte & I386_PG_V) == 0) {
+			_kvm_err(kd, kd->program,
+			    "_i386_minidump_vatop_pae: pte not valid");
+			goto invalid;
+		}
+		a = pte & I386_PG_FRAME_PAE;
+		ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program,
+	    "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump",
+			    (uintmax_t)a);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (I386_PAGE_SIZE - offset);
+	} else {
+		_kvm_err(kd, kd->program,
+	    "_i386_minidump_vatop_pae: virtual address 0x%jx not minidumped",
+		    (uintmax_t)va);
+		goto invalid;
+	}
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Translate a kernel virtual address into a file offset for a non-PAE
+ * dump (32-bit PTEs).  Only page-table mapped KVA is accepted.  Returns
+ * the number of bytes valid at *pa, or 0 on failure.
+ */
+static int
+_i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+	struct vmstate *vm;
+	i386_physaddr_t offset;
+	i386_pte_t pte;
+	kvaddr_t pteindex;
+	i386_physaddr_t a;
+	off_t ofs;
+
+	vm = kd->vmst;
+	offset = va & I386_PAGE_MASK;
+
+	if (va >= vm->hdr.kernbase) {
+		pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT;
+		if (pteindex >= vm->hdr.ptesize / sizeof(pte))
+			goto invalid;
+		pte = _i386_pte_get(kd, pteindex);
+		if ((pte & I386_PG_V) == 0) {
+			_kvm_err(kd, kd->program,
+			    "_i386_minidump_vatop: pte not valid");
+			goto invalid;
+		}
+		a = pte & I386_PG_FRAME;
+		ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE);
+		if (ofs == -1) {
+			_kvm_err(kd, kd->program,
+	    "_i386_minidump_vatop: physical address 0x%jx not in minidump",
+			    (uintmax_t)a);
+			goto invalid;
+		}
+		*pa = ofs + offset;
+		return (I386_PAGE_SIZE - offset);
+	} else {
+		_kvm_err(kd, kd->program,
+	    "_i386_minidump_vatop: virtual address 0x%jx not minidumped",
+		    (uintmax_t)va);
+		goto invalid;
+	}
+
+invalid:
+	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
+	return (0);
+}
+
+/*
+ * Kernel VA -> dump file offset entry point; dispatches on the dump's
+ * PAE mode flag.
+ */
+static int
+_i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+
+	if (ISALIVE(kd)) {
+		_kvm_err(kd, 0, "_i386_minidump_kvatop called in live kernel!");
+		return (0);
+	}
+	return (kd->vmst->hdr.paemode ?
+	    _i386_minidump_vatop_pae(kd, va, pa) :
+	    _i386_minidump_vatop(kd, va, pa));
+}
+
+/*
+ * Translate the protection bits of an i386 PTE (PAE or legacy) into
+ * VM_PROT_* flags.
+ */
+static vm_prot_t
+_i386_entry_to_prot(uint64_t pte)
+{
+
+	/* Source: i386/pmap.c:pmap_protect() */
+	return (VM_PROT_READ |
+	    ((pte & I386_PG_RW) != 0 ? VM_PROT_WRITE : 0) |
+	    ((pte & I386_PG_NX) == 0 ? VM_PROT_EXECUTE : 0));
+}
+
+/* Cursor state for iterating over the dumped kernel page-table entries. */
+struct i386_iter {
+	kvm_t *kd;
+	u_long nptes;		/* total PTE slots in the dumped pmap */
+	u_long pteindex;	/* next slot to examine */
+};
+
+/* Position the iterator at the first dumped PTE slot. */
+static void
+_i386_iterator_init(struct i386_iter *it, kvm_t *kd)
+{
+	struct vmstate *vm = kd->vmst;
+
+	it->kd = kd;
+	it->pteindex = 0;
+	/* Entry width depends on whether the dump was taken in PAE mode. */
+	it->nptes = vm->hdr.ptesize / (vm->hdr.paemode ?
+	    sizeof(i386_pte_pae_t) : sizeof(i386_pte_t));
+}
+
+/*
+ * Advance the iterator to the next valid kernel PTE.  On success returns
+ * 1 with *pa, *va and *prot describing that page; *dva is always 0
+ * (i386 has no direct map).  Returns 0 when the table is exhausted.
+ * (The original assigned *dva = 0 twice; the duplicate is removed.)
+ */
+static int
+_i386_iterator_next(struct i386_iter *it, u_long *pa, u_long *va, u_long *dva,
+    vm_prot_t *prot)
+{
+	struct vmstate *vm = it->kd->vmst;
+	i386_pte_t pte32;
+	i386_pte_pae_t pte64;
+
+	*pa = 0;
+	*va = 0;
+	*dva = 0;
+	*prot = 0;
+	for (; it->pteindex < it->nptes; it->pteindex++) {
+		if (vm->hdr.paemode) {
+			pte64 = _i386_pte_pae_get(it->kd, it->pteindex);
+			if ((pte64 & I386_PG_V) == 0)
+				continue;
+			*prot = _i386_entry_to_prot(pte64);
+			*pa = pte64 & I386_PG_FRAME_PAE;
+		} else {
+			pte32 = _i386_pte_get(it->kd, it->pteindex);
+			if ((pte32 & I386_PG_V) == 0)
+				continue;
+			*prot = _i386_entry_to_prot(pte32);
+			*pa = pte32 & I386_PG_FRAME;
+		}
+		*va = vm->hdr.kernbase + (it->pteindex << I386_PAGE_SHIFT);
+		/* Leave the cursor past the entry just returned. */
+		it->pteindex++;
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * Walk every valid PTE in the dumped kernel page table and hand the
+ * corresponding 4K page to "cb".  Returns 1 on a complete walk, 0 if
+ * the callback aborted.
+ */
+static int
+_i386_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+	struct i386_iter it;
+	u_long dva, pa, va;
+	vm_prot_t prot;
+
+	_i386_iterator_init(&it, kd);
+	while (_i386_iterator_next(&it, &pa, &va, &dva, &prot) != 0) {
+		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot,
+		    I386_PAGE_SIZE, 0))
+			return (0);
+	}
+	return (1);
+}
+
+/* Dispatch table hooking i386 minidump support into the kvm core. */
+static struct kvm_arch kvm_i386_minidump = {
+	.ka_probe = _i386_minidump_probe,
+	.ka_initvtop = _i386_minidump_initvtop,
+	.ka_freevtop = _i386_minidump_freevtop,
+	.ka_kvatop = _i386_minidump_kvatop,
+	.ka_native = _i386_native,
+	.ka_walk_pages = _i386_minidump_walk_pages,
+};
+
+KVM_ARCH(kvm_i386_minidump);
diff --git a/lib/libkvm/kvm_minidump_powerpc64.c b/lib/libkvm/kvm_minidump_powerpc64.c
new file mode 100644
index 000000000000..caf504cff077
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_powerpc64.c
@@ -0,0 +1,209 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
+ */
+
+#include <sys/param.h>
+
+#include <kvm.h>
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "../../sys/powerpc/include/minidump.h"
+#include "kvm_private.h"
+#include "kvm_powerpc64.h"
+
+
+static int
+_powerpc64_minidump_probe(kvm_t *kd)
+{
+ return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
+ _kvm_is_minidump(kd));
+}
+
+static void
+_powerpc64_minidump_freevtop(kvm_t *kd)
+{
+ struct vmstate *vm = kd->vmst;
+
+ if (vm == NULL)
+ return;
+ if (PPC64_MMU_OPS(kd))
+ PPC64_MMU_OP(kd, cleanup);
+ free(vm);
+ kd->vmst = NULL;
+}
+
/*
 * Parse the minidump header, select the MMU backend and build the
 * sparse-page and page-map lookup tables.  All multi-byte header
 * fields are stored big-endian on disk and are converted in place.
 * Returns 0 on success, -1 on failure (state is freed on failure).
 */
static int
_powerpc64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	struct minidumphdr *hdr;
	off_t dump_avail_off, bitmap_off, pmap_off, sparse_off;
	const char *mmu_name;

	/* Alloc VM */
	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	hdr = &vmst->hdr;
	kd->vmst = vmst;
	PPC64_MMU_OPS(kd) = NULL;
	/* Read minidump header */
	if (pread(kd->pmfd, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) {
		_kvm_err(kd, kd->program, "cannot read minidump header");
		goto failed;
	}
	/* Check magic */
	if (strncmp(MINIDUMP_MAGIC, hdr->magic, sizeof(hdr->magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		goto failed;
	}
	/* Check version (version 1 dumps lack the dump_avail section) */
	hdr->version = be32toh(hdr->version);
	if (hdr->version != MINIDUMP_VERSION && hdr->version != 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, hdr->version);
		goto failed;
	}
	/* Convert header fields to host endian */
	hdr->msgbufsize = be32toh(hdr->msgbufsize);
	hdr->bitmapsize = be32toh(hdr->bitmapsize);
	hdr->pmapsize = be32toh(hdr->pmapsize);
	hdr->kernbase = be64toh(hdr->kernbase);
	hdr->kernend = be64toh(hdr->kernend);
	hdr->dmapbase = be64toh(hdr->dmapbase);
	hdr->dmapend = be64toh(hdr->dmapend);
	hdr->hw_direct_map = be32toh(hdr->hw_direct_map);
	hdr->startkernel = be64toh(hdr->startkernel);
	hdr->endkernel = be64toh(hdr->endkernel);
	hdr->dumpavailsize = hdr->version == MINIDUMP_VERSION ?
	    be32toh(hdr->dumpavailsize) : 0;

	/* Link-time kernel image range; used to undo the boot relocation. */
	vmst->kimg_start = PPC64_KERNBASE;
	vmst->kimg_end = PPC64_KERNBASE + hdr->endkernel - hdr->startkernel;

	/* dump header */
	dprintf("%s: mmu_name=%s,\n\t"
	    "msgbufsize=0x%jx, bitmapsize=0x%jx, pmapsize=0x%jx, "
	    "kernbase=0x%jx, kernend=0x%jx,\n\t"
	    "dmapbase=0x%jx, dmapend=0x%jx, hw_direct_map=%d, "
	    "startkernel=0x%jx, endkernel=0x%jx\n\t"
	    "kimg_start=0x%jx, kimg_end=0x%jx\n",
	    __func__, hdr->mmu_name,
	    (uintmax_t)hdr->msgbufsize,
	    (uintmax_t)hdr->bitmapsize, (uintmax_t)hdr->pmapsize,
	    (uintmax_t)hdr->kernbase, (uintmax_t)hdr->kernend,
	    (uintmax_t)hdr->dmapbase, (uintmax_t)hdr->dmapend,
	    hdr->hw_direct_map, hdr->startkernel, hdr->endkernel,
	    (uintmax_t)vmst->kimg_start, (uintmax_t)vmst->kimg_end);

	/* Detect and initialize MMU (only HPT backends are supported here) */
	mmu_name = hdr->mmu_name;
	if (strcmp(mmu_name, PPC64_MMU_G5) == 0 ||
	    strcmp(mmu_name, PPC64_MMU_PHYP) == 0)
		PPC64_MMU_OPS(kd) = ppc64_mmu_ops_hpt;
	else {
		_kvm_err(kd, kd->program, "unsupported MMU: %s", mmu_name);
		goto failed;
	}
	if (PPC64_MMU_OP(kd, init) == -1)
		goto failed;

	/* Get dump parts' offsets: header page, msgbuf, dump_avail,
	 * bitmap, pmap, then the sparse page data, each page-aligned. */
	dump_avail_off = PPC64_PAGE_SIZE + ppc64_round_page(hdr->msgbufsize);
	bitmap_off = dump_avail_off + ppc64_round_page(hdr->dumpavailsize);
	pmap_off = bitmap_off + ppc64_round_page(hdr->bitmapsize);
	sparse_off = pmap_off + ppc64_round_page(hdr->pmapsize);

	/* dump offsets */
	dprintf("%s: msgbuf_off=0x%jx, bitmap_off=0x%jx, pmap_off=0x%jx, "
	    "sparse_off=0x%jx\n",
	    __func__, (uintmax_t)PPC64_PAGE_SIZE, (uintmax_t)bitmap_off,
	    (uintmax_t)pmap_off, (uintmax_t)sparse_off);

	/* build physical address lookup table for sparse pages */
	if (_kvm_pt_init(kd, hdr->dumpavailsize, dump_avail_off,
	    hdr->bitmapsize, bitmap_off, sparse_off, PPC64_PAGE_SIZE) == -1)
		goto failed;

	if (_kvm_pmap_init(kd, hdr->pmapsize, pmap_off) == -1)
		goto failed;
	return (0);

failed:
	_powerpc64_minidump_freevtop(kd);
	return (-1);
}
+
+static int
+_powerpc64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "%s called in live kernel!", __func__);
+ return (0);
+ }
+ return (PPC64_MMU_OP(kd, kvatop, va, pa));
+}
+
/*
 * Report whether this libkvm build itself targets powerpc64, i.e.
 * whether the dump's ABI is native to the host.
 */
static int
_powerpc64_native(kvm_t *kd __unused)
{
#ifdef __powerpc64__
	return (1);
#else
	return (0);
#endif
}
+
+static kssize_t
+_powerpc64_kerndisp(kvm_t *kd)
+{
+ return (kd->vmst->hdr.startkernel - PPC64_KERNBASE);
+}
+
+static int
+_powerpc64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+ return (PPC64_MMU_OP(kd, walk_pages, cb, arg));
+}
+
/* Arch-operations vector for powerpc64 minidumps, registered via KVM_ARCH(). */
static struct kvm_arch kvm_powerpc64_minidump = {
	.ka_probe = _powerpc64_minidump_probe,
	.ka_initvtop = _powerpc64_minidump_initvtop,
	.ka_freevtop = _powerpc64_minidump_freevtop,
	.ka_kvatop = _powerpc64_minidump_kvatop,
	.ka_walk_pages = _powerpc64_minidump_walk_pages,
	.ka_native = _powerpc64_native,
	.ka_kerndisp = _powerpc64_kerndisp,
};
+
+KVM_ARCH(kvm_powerpc64_minidump);
diff --git a/lib/libkvm/kvm_minidump_powerpc64_hpt.c b/lib/libkvm/kvm_minidump_powerpc64_hpt.c
new file mode 100644
index 000000000000..1db3d373e169
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_powerpc64_hpt.c
@@ -0,0 +1,660 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
+ */
+
+#include <sys/param.h>
+#include <vm/vm.h>
+
+#include <kvm.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "../../sys/powerpc/include/minidump.h"
+#include "kvm_private.h"
+#include "kvm_powerpc64.h"
+
+/*
+ * PowerPC64 HPT machine dependent routines for kvm and minidumps.
+ *
+ * Address Translation parameters:
+ *
+ * b = 12 (SLB base page size: 4 KB)
+ * b = 24 (SLB base page size: 16 MB)
+ * p = 12 (page size: 4 KB)
+ * p = 24 (page size: 16 MB)
+ * s = 28 (segment size: 256 MB)
+ */
+
+/* Large (huge) page params */
+#define LP_PAGE_SHIFT 24
+#define LP_PAGE_SIZE (1ULL << LP_PAGE_SHIFT)
+#define LP_PAGE_MASK 0x00ffffffULL
+
+/* SLB */
+
+#define SEGMENT_LENGTH 0x10000000ULL
+
+#define round_seg(x) roundup2((uint64_t)(x), SEGMENT_LENGTH)
+
+/* Virtual real-mode VSID in LPARs */
+#define VSID_VRMA 0x1ffffffULL
+
+#define SLBV_L 0x0000000000000100ULL /* Large page selector */
+#define SLBV_CLASS 0x0000000000000080ULL /* Class selector */
+#define SLBV_LP_MASK 0x0000000000000030ULL
+#define SLBV_VSID_MASK 0x3ffffffffffff000ULL /* Virtual SegID mask */
+#define SLBV_VSID_SHIFT 12
+
+#define SLBE_B_MASK 0x0000000006000000ULL
+#define SLBE_B_256MB 0x0000000000000000ULL
+#define SLBE_VALID 0x0000000008000000ULL /* SLB entry valid */
+#define SLBE_INDEX_MASK 0x0000000000000fffULL /* SLB index mask */
+#define SLBE_ESID_MASK 0xfffffffff0000000ULL /* Effective SegID mask */
+#define SLBE_ESID_SHIFT 28
+
+/* PTE */
+
+#define LPTEH_VSID_SHIFT 12
+#define LPTEH_AVPN_MASK 0xffffffffffffff80ULL
+#define LPTEH_B_MASK 0xc000000000000000ULL
+#define LPTEH_B_256MB 0x0000000000000000ULL
+#define LPTEH_BIG 0x0000000000000004ULL /* 4KB/16MB page */
+#define LPTEH_HID 0x0000000000000002ULL
+#define LPTEH_VALID 0x0000000000000001ULL
+
+#define LPTEL_RPGN 0xfffffffffffff000ULL
+#define LPTEL_LP_MASK 0x00000000000ff000ULL
+#define LPTEL_NOEXEC 0x0000000000000004ULL
+
+/* Supervisor (U: RW, S: RW) */
+#define LPTEL_BW 0x0000000000000002ULL
+
+/* Both Read Only (U: RO, S: RO) */
+#define LPTEL_BR 0x0000000000000003ULL
+
+#define LPTEL_RW LPTEL_BW
+#define LPTEL_RO LPTEL_BR
+
+/*
+ * PTE AVA field manipulation macros.
+ *
+ * AVA[0:54] = PTEH[2:56]
+ * AVA[VSID] = AVA[0:49] = PTEH[2:51]
+ * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
+ */
+#define PTEH_AVA_VSID_MASK 0x3ffffffffffff000UL
+#define PTEH_AVA_VSID_SHIFT 12
+#define PTEH_AVA_VSID(p) \
+ (((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)
+
+#define PTEH_AVA_PAGE_MASK 0x0000000000000f80UL
+#define PTEH_AVA_PAGE_SHIFT 7
+#define PTEH_AVA_PAGE(p) \
+ (((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)
+
+/* Masks to obtain the Physical Address from PTE low 64-bit word. */
+#define PTEL_PA_MASK 0x0ffffffffffff000UL
+#define PTEL_LP_PA_MASK 0x0fffffffff000000UL
+
+#define PTE_HASH_MASK 0x0000007fffffffffUL
+
+/*
+ * Number of AVA/VA page bits to shift right, in order to leave only the
+ * ones that should be considered.
+ *
+ * q = MIN(54, 77-b) (PowerISA v2.07B, 5.7.7.3)
+ * n = q + 1 - 50 (VSID size in bits)
+ * s(ava) = 5 - n
+ * s(va) = (28 - b) - n
+ *
+ * q: bit number of lower limit of VA/AVA bits to compare
+ * n: number of AVA/VA page bits to compare
+ * s: shift amount
+ * 28 - b: VA page size in bits
+ */
+#define AVA_PAGE_SHIFT(b) (5 - (MIN(54, 77-(b)) + 1 - 50))
+#define VA_PAGE_SHIFT(b) (28 - (b) - (MIN(54, 77-(b)) + 1 - 50))
+
+/* Kernel ESID -> VSID mapping */
+#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
+#define KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
+ * 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
+ KERNEL_VSID_BIT)
+
/* Types */

typedef uint64_t ppc64_physaddr_t;

/* Software copy of an SLB entry (the SLBV/SLBE register pair). */
typedef struct {
	uint64_t slbv;
	uint64_t slbe;
} ppc64_slb_entry_t;

/* Hashed page table entry: two 64-bit words, stored big-endian on disk. */
typedef struct {
	uint64_t pte_hi;
	uint64_t pte_lo;
} ppc64_pt_entry_t;

/* Per-dump MMU private data: the reconstructed kernel SLB cache. */
struct hpt_data {
	ppc64_slb_entry_t *slbs;
	uint32_t slbsize;	/* size of the slbs array, in bytes */
};
+
+
+static void
+slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
+{
+ uint64_t esid;
+
+ esid = ea >> SLBE_ESID_SHIFT;
+ slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
+ slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
+}
+
/*
 * Build a software SLB covering the kernel VM area and the DMAP.
 * The table is sized from the bitmap (an upper bound on dumped
 * memory) so that every segment walk_pages may need is present.
 * Returns 0 on success, -1 on allocation failure.
 */
static int
slb_init(kvm_t *kd)
{
	struct minidumphdr *hdr;
	struct hpt_data *data;
	ppc64_slb_entry_t *slb;
	uint32_t slbsize;
	uint64_t ea, i, maxmem;

	hdr = &kd->vmst->hdr;
	data = PPC64_MMU_DATA(kd);

	/* Alloc SLBs */
	/* Each bitmap bit stands for one page of dumpable memory. */
	maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
	slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
	    SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
	data->slbs = _kvm_malloc(kd, slbsize);
	if (data->slbs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate slbs");
		return (-1);
	}
	data->slbsize = slbsize;

	dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
	    __func__, (uintmax_t)maxmem,
	    (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

	/*
	 * Generate needed SLB entries.
	 *
	 * When translating addresses from EA to VA to PA, the needed SLB
	 * entry could be generated on the fly, but this is not the case
	 * for the walk_pages method, that needs to search the SLB entry
	 * by VSID, in order to find out the EA from a PTE.
	 */

	/* VM area */
	for (ea = hdr->kernbase, i = 0, slb = data->slbs;
	    ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
		slb_fill(slb, ea, i);

	/* DMAP area */
	for (ea = hdr->dmapbase;
	    ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
	    ea += SEGMENT_LENGTH, i++, slb++) {
		slb_fill(slb, ea, i);
		/* Hardware direct maps use large (16 MB) pages. */
		if (hdr->hw_direct_map)
			slb->slbv |= SLBV_L;
	}

	return (0);
}
+
+static void
+ppc64mmu_hpt_cleanup(kvm_t *kd)
+{
+ struct hpt_data *data;
+
+ if (kd->vmst == NULL)
+ return;
+
+ data = PPC64_MMU_DATA(kd);
+ free(data->slbs);
+ free(data);
+ PPC64_MMU_DATA(kd) = NULL;
+}
+
+static int
+ppc64mmu_hpt_init(kvm_t *kd)
+{
+ struct hpt_data *data;
+
+ /* Alloc MMU data */
+ data = _kvm_malloc(kd, sizeof(*data));
+ if (data == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate MMU data");
+ return (-1);
+ }
+ data->slbs = NULL;
+ PPC64_MMU_DATA(kd) = data;
+
+ if (slb_init(kd) == -1)
+ goto failed;
+
+ return (0);
+
+failed:
+ ppc64mmu_hpt_cleanup(kd);
+ return (-1);
+}
+
+static ppc64_slb_entry_t *
+slb_search(kvm_t *kd, kvaddr_t ea)
+{
+ struct hpt_data *data;
+ ppc64_slb_entry_t *slb;
+ int i, n;
+
+ data = PPC64_MMU_DATA(kd);
+ slb = data->slbs;
+ n = data->slbsize / sizeof(ppc64_slb_entry_t);
+
+ /* SLB search */
+ for (i = 0; i < n; i++, slb++) {
+ if ((slb->slbe & SLBE_VALID) == 0)
+ continue;
+
+ /* Compare 36-bit ESID of EA with segment one (64-s) */
+ if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
+ continue;
+
+ /* Match found */
+ dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
+ i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
+ break;
+ }
+
+ /* SLB not found */
+ if (i == n) {
+ _kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
+ __func__, (uintmax_t)ea);
+ return (NULL);
+ }
+ return (slb);
+}
+
+static ppc64_pt_entry_t
+pte_get(kvm_t *kd, u_long ptex)
+{
+ ppc64_pt_entry_t pte, *p;
+
+ p = _kvm_pmap_get(kd, ptex, sizeof(pte));
+ pte.pte_hi = be64toh(p->pte_hi);
+ pte.pte_lo = be64toh(p->pte_lo);
+ return (pte);
+}
+
/*
 * Search the hashed page table for the PTE mapping effective address
 * 'ea' within segment 'slb', using hash function 'hid' (0: primary,
 * LPTEH_HID: secondary).  On a primary-hash miss the function recurses
 * once to try the secondary hash.
 *
 * Returns 0 and stores the host-endian PTE in *p on success, or -1
 * if no matching PTE exists.
 */
static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
	uint64_t hash, hmask;
	uint64_t pteg, ptex;
	uint64_t va_vsid, va_page;
	int b;
	int ava_pg_shift, va_pg_shift;
	ppc64_pt_entry_t pte;

	/*
	 * Get VA:
	 *
	 * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
	 *
	 * va_vsid: 50-bit VSID (78-s)
	 * va_page: (s-b)-bit VA page
	 */
	b = slb->slbv & SLBV_L? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
	va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	va_page = (ea & ~SLBE_ESID_MASK) >> b;

	dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
	    "va_page=0x%04jx\n",
	    __func__, (uintmax_t)hid, (uintmax_t)ea, b,
	    (uintmax_t)va_vsid, (uintmax_t)va_page);

	/*
	 * Get hash:
	 *
	 * Primary hash: va_vsid(11:49) ^ va_page(s-b)
	 * Secondary hash: ~primary_hash
	 */
	hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
	if (hid)
		hash = ~hash & PTE_HASH_MASK;

	/*
	 * Get PTEG:
	 *
	 * pteg = (hash(0:38) & hmask) << 3
	 *
	 * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
	 * hmask = number_of_ptegs - 1
	 */
	hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
	pteg = (hash & hmask) << 3;

	ava_pg_shift = AVA_PAGE_SHIFT(b);
	va_pg_shift = VA_PAGE_SHIFT(b);

	dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
	    "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
	    __func__, (uintmax_t)hash, (uintmax_t)hmask,
	    (uintmax_t)(hash & hmask), (uintmax_t)pteg,
	    ava_pg_shift, va_pg_shift);

	/* Search PTEG (8 PTEs per group) */
	for (ptex = pteg; ptex < pteg + 8; ptex++) {
		pte = pte_get(kd, ptex);

		/* Check H, V and B */
		if ((pte.pte_hi & LPTEH_HID) != hid ||
		    (pte.pte_hi & LPTEH_VALID) == 0 ||
		    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
			continue;

		/* Compare AVA with VA */
		if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
		    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
		    (va_page >> va_pg_shift))
			continue;

		/*
		 * Check if PTE[L] matches SLBV[L].
		 *
		 * Note: this check ignores PTE[LP], as does the kernel.
		 */
		if (b == PPC64_PAGE_SHIFT) {
			if (pte.pte_hi & LPTEH_BIG)
				continue;
		} else if ((pte.pte_hi & LPTEH_BIG) == 0)
			continue;

		/* Match found */
		dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
		    "ptel=0x%016jx\n",
		    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
		    (uintmax_t)pte.pte_lo);
		break;
	}

	/* Not found? */
	if (ptex == pteg + 8) {
		/* Try secondary hash */
		if (hid == 0)
			return (pte_search(kd, slb, LPTEH_HID, ea, p));
		else {
			_kvm_err(kd, kd->program,
			    "%s: pte not found", __func__);
			return (-1);
		}
	}

	/* PTE found */
	*p = pte;
	return (0);
}
+
+static int
+pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
+{
+ ppc64_slb_entry_t *slb;
+
+ /* First, find SLB */
+ if ((slb = slb_search(kd, ea)) == NULL)
+ return (-1);
+
+ /* Next, find PTE */
+ return (pte_search(kd, slb, 0, ea, pte));
+}
+
/*
 * Translate kernel VA 'va' to an offset into the minidump file.
 * Returns the number of bytes valid starting at *pa (at most one
 * page), or 0 on failure.
 */
static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct minidumphdr *hdr;
	struct vmstate *vm;
	ppc64_pt_entry_t pte;
	ppc64_physaddr_t pgoff, pgpa;
	off_t ptoff;
	int err;

	vm = kd->vmst;
	hdr = &vm->hdr;
	pgoff = va & PPC64_PAGE_MASK;

	dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

	/*
	 * A common use case of libkvm is to first find a symbol address
	 * from the kernel image and then use kvatop to translate it and
	 * to be able to fetch its corresponding data.
	 *
	 * The problem is that, in PowerPC64 case, the addresses of relocated
	 * data won't match those in the kernel image. This is handled here by
	 * adding the relocation offset to those addresses.
	 */
	if (va < hdr->dmapbase)
		va += hdr->startkernel - PPC64_KERNBASE;

	/* Handle DMAP */
	if (va >= hdr->dmapbase && va <= hdr->dmapend) {
		/*
		 * NOTE(review): '& ~hdr->dmapbase' assumes dmapbase has
		 * only high-order bits set, so the complement strips the
		 * DMAP prefix -- confirm against the kernel's DMAP layout.
		 */
		pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "direct map address 0x%jx not in minidump",
			    __func__, (uintmax_t)va);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	/* Translate VA to PA */
	} else if (va >= hdr->kernbase) {
		if ((err = pte_lookup(kd, va, &pte)) == -1) {
			_kvm_err(kd, kd->program,
			    "%s: pte not valid", __func__);
			goto invalid;
		}

		/* Large pages keep the low 24 VA bits as the page offset. */
		if (pte.pte_hi & LPTEH_BIG)
			pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
			    (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
		else
			pgpa = pte.pte_lo & PTEL_PA_MASK;
		dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

		ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
		if (ptoff == -1) {
			_kvm_err(kd, kd->program, "%s: "
			    "physical address 0x%jx not in minidump",
			    __func__, (uintmax_t)pgpa);
			goto invalid;
		}
		*pa = ptoff + pgoff;
		return (PPC64_PAGE_SIZE - pgoff);
	} else {
		_kvm_err(kd, kd->program,
		    "%s: virtual address 0x%jx not minidumped",
		    __func__, (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}
+
+static vm_prot_t
+entry_to_prot(ppc64_pt_entry_t *pte)
+{
+ vm_prot_t prot = VM_PROT_READ;
+
+ if (pte->pte_lo & LPTEL_RW)
+ prot |= VM_PROT_WRITE;
+ if ((pte->pte_lo & LPTEL_NOEXEC) != 0)
+ prot |= VM_PROT_EXECUTE;
+ return (prot);
+}
+
+static ppc64_slb_entry_t *
+slb_vsid_search(kvm_t *kd, uint64_t vsid)
+{
+ struct hpt_data *data;
+ ppc64_slb_entry_t *slb;
+ int i, n;
+
+ data = PPC64_MMU_DATA(kd);
+ slb = data->slbs;
+ n = data->slbsize / sizeof(ppc64_slb_entry_t);
+ vsid <<= SLBV_VSID_SHIFT;
+
+ /* SLB search */
+ for (i = 0; i < n; i++, slb++) {
+ /* Check if valid and compare VSID */
+ if ((slb->slbe & SLBE_VALID) &&
+ (slb->slbv & SLBV_VSID_MASK) == vsid)
+ break;
+ }
+
+ /* SLB not found */
+ if (i == n) {
+ _kvm_err(kd, kd->program,
+ "%s: segment not found for VSID 0x%jx",
+ __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
+ return (NULL);
+ }
+ return (slb);
+}
+
/*
 * Reconstruct the effective (virtual) address mapped by PTE *pte,
 * which sits at index 'ptex' in the dumped page table.  Returns
 * ~0UL if the owning segment cannot be found in the SLB cache.
 */
static u_long
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
	ppc64_slb_entry_t *slb;
	uint64_t ea, hash, vsid;
	int b, shift;

	/* Find SLB */
	vsid = PTEH_AVA_VSID(pte->pte_hi);
	if ((slb = slb_vsid_search(kd, vsid)) == NULL)
		return (~0UL);

	/* Get ESID part of EA */
	ea = slb->slbe & SLBE_ESID_MASK;

	/* Page-size bits: 24 for large pages, 12 otherwise. */
	b = slb->slbv & SLBV_L? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

	/*
	 * If there are less than 64K PTEGs (16-bit), the upper bits of
	 * EA page must be obtained from PTEH's AVA.
	 */
	if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
	    0x10000U) {
		/*
		 * Add 0 to 5 EA bits, right after VSID.
		 * b == 12: 5 bits
		 * b == 24: 4 bits
		 */
		shift = AVA_PAGE_SHIFT(b);
		ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
		    (SLBE_ESID_SHIFT - 5 + shift);
	}

	/* Get VA page from hash and add to EA. */
	/* Invert the hash function: ptex/8 is the PTEG, and
	 * va_page = hash ^ vsid (undoing the primary hash). */
	hash = (ptex & ~7) >> 3;
	if (pte->pte_hi & LPTEH_HID)
		hash = ~hash & PTE_HASH_MASK;
	ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;
	return (ea);
}
+
/*
 * Iterate over every valid kernel PTE in the dumped hashed page table
 * and invoke the callback with its physical address, reconstructed EA,
 * DMAP address, protection and page size.  Returns 1 on a complete
 * walk, 0 if the callback or an EA reconstruction failed.
 */
static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm;
	int ret;
	unsigned int pagesz;
	u_long dva, pa, va;
	u_long ptex, nptes;
	uint64_t vsid;

	ret = 0;
	vm = kd->vmst;
	nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

	/* Walk through PTEs */
	for (ptex = 0; ptex < nptes; ptex++) {
		ppc64_pt_entry_t pte = pte_get(kd, ptex);
		if ((pte.pte_hi & LPTEH_VALID) == 0)
			continue;

		/* Skip non-kernel related pages, as well as VRMA ones */
		/*
		 * NOTE(review): the VRMA test shifts the VSID right by
		 * PPC64_PAGE_SHIFT -- confirm this matches how VSID_VRMA
		 * is encoded in the AVA field.
		 */
		vsid = PTEH_AVA_VSID(pte.pte_hi);
		if ((vsid & KERNEL_VSID_BIT) == 0 ||
		    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
			continue;

		/* Retrieve page's VA (EA on PPC64 terminology) */
		if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
			goto out;

		/* Get PA and page size */
		if (pte.pte_hi & LPTEH_BIG) {
			pa = pte.pte_lo & PTEL_LP_PA_MASK;
			pagesz = LP_PAGE_SIZE;
		} else {
			pa = pte.pte_lo & PTEL_PA_MASK;
			pagesz = PPC64_PAGE_SIZE;
		}

		/* Get DMAP address */
		dva = vm->hdr.dmapbase + pa;

		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    entry_to_prot(&pte), pagesz, 0))
			goto out;
	}
	ret = 1;

out:
	return (ret);
}
+
+
/* HPT backend operations, exported to kvm_minidump_powerpc64.c. */
static struct ppc64_mmu_ops ops = {
	.init = ppc64mmu_hpt_init,
	.cleanup = ppc64mmu_hpt_cleanup,
	.kvatop = ppc64mmu_hpt_kvatop,
	.walk_pages = ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;
diff --git a/lib/libkvm/kvm_minidump_riscv.c b/lib/libkvm/kvm_minidump_riscv.c
new file mode 100644
index 000000000000..b90c255d4822
--- /dev/null
+++ b/lib/libkvm/kvm_minidump_riscv.c
@@ -0,0 +1,293 @@
+/*-
+ * Copyright (c) 2006 Peter Wemm
+ * Copyright (c) 2019 Mitchell Horne
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
+ */
+
+#include <sys/cdefs.h>
+/*
+ * RISC-V machine dependent routines for kvm and minidumps.
+ */
+
+#include <sys/param.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vm/vm.h>
+#include <kvm.h>
+
+#include "../../sys/riscv/include/minidump.h"
+
+#include <limits.h>
+
+#include "kvm_private.h"
+#include "kvm_riscv.h"
+
+#define riscv_round_page(x) roundup2((kvaddr_t)(x), RISCV_PAGE_SIZE)
+
/* Per-dump private state: just the parsed minidump header. */
struct vmstate {
	struct minidumphdr hdr;
};
+
+static riscv_pt_entry_t
+_riscv_pte_get(kvm_t *kd, u_long pteindex)
+{
+ riscv_pt_entry_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
+
+ return le64toh(*pte);
+}
+
+static int
+_riscv_minidump_probe(kvm_t *kd)
+{
+
+ return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_RISCV) &&
+ _kvm_is_minidump(kd));
+}
+
+static void
+_riscv_minidump_freevtop(kvm_t *kd)
+{
+ struct vmstate *vm = kd->vmst;
+
+ free(vm);
+ kd->vmst = NULL;
+}
+
/*
 * Parse the minidump header (little-endian fields converted in place)
 * and build the sparse-page and page-map lookup tables.  Returns 0 on
 * success, -1 on failure; kd->vmst is freed later by freevtop.
 */
static int
_riscv_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, dump_avail_off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	/* Version 1 dumps predate the dump_avail section. */
	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
	vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
	    le32toh(vmst->hdr.dumpavailsize) : 0;

	/* Skip header and msgbuf */
	dump_avail_off = RISCV_PAGE_SIZE + riscv_round_page(vmst->hdr.msgbufsize);

	/* Skip dump_avail */
	off = dump_avail_off + riscv_round_page(vmst->hdr.dumpavailsize);

	/* build physical address lookup table for sparse pages */
	sparse_off = off + riscv_round_page(vmst->hdr.bitmapsize) +
	    riscv_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
	    vmst->hdr.bitmapsize, off, sparse_off, RISCV_PAGE_SIZE) == -1) {
		return (-1);
	}
	off += riscv_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	/* Keeps 'off' tracking the file layout; value unused afterwards. */
	off += riscv_round_page(vmst->hdr.pmapsize);

	return (0);
}
+
/*
 * Translate kernel VA 'va' to an offset into the minidump file.
 * Handles both direct-map addresses and L3-mapped kernel addresses.
 * Returns the number of bytes valid from *pa (at most one page),
 * or 0 on failure.
 */
static int
_riscv_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	riscv_physaddr_t offset;
	riscv_pt_entry_t l3;
	kvaddr_t l3_index;
	riscv_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & RISCV_PAGE_MASK;

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		/* DMAP: linear translation relative to dmapphys. */
		a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) &
		    ~RISCV_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, RISCV_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_riscv_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (RISCV_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.kernbase) {
		/* Kernel map: look up the dumped L3 (leaf) PTE. */
		l3_index = (va - vm->hdr.kernbase) >> RISCV_L3_SHIFT;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _riscv_pte_get(kd, l3_index);
		/* An entry with no R/W/X bits is a pointer, not a leaf. */
		if ((l3 & RISCV_PTE_V) == 0 || (l3 & RISCV_PTE_RWX) == 0) {
			_kvm_err(kd, kd->program,
			    "_riscv_minidump_vatop: pte not valid");
			goto invalid;
		}
		a = (l3 >> RISCV_PTE_PPN0_S) << RISCV_L3_SHIFT;
		ofs = _kvm_pt_find(kd, a, RISCV_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_riscv_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (RISCV_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
		    "_riscv_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}
+
+static int
+_riscv_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
+{
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0,
+ "_riscv_minidump_kvatop called in live kernel!");
+ return (0);
+ }
+ return (_riscv_minidump_vatop(kd, va, pa));
+}
+
/*
 * Report whether this libkvm build itself targets RISC-V, i.e. whether
 * the dump's ABI is native to the host.
 */
static int
_riscv_native(kvm_t *kd __unused)
{

#ifdef __riscv
	return (1);
#else
	return (0);
#endif
}
+
+static vm_prot_t
+_riscv_entry_to_prot(riscv_pt_entry_t pte)
+{
+ vm_prot_t prot = VM_PROT_READ;
+
+ if ((pte & RISCV_PTE_W) != 0)
+ prot |= VM_PROT_WRITE;
+ if ((pte & RISCV_PTE_X) != 0)
+ prot |= VM_PROT_EXECUTE;
+ return prot;
+}
+
/*
 * Invoke the callback for every page in the minidump: first each valid
 * kernel-map L3 PTE, then every remaining dumped physical page via the
 * bitmap (reported through the direct map only, with va == 0).
 * Returns 1 on a complete walk, 0 on failure or callback abort.
 */
static int
_riscv_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(riscv_pt_entry_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		riscv_pt_entry_t pte = _riscv_pte_get(kd, pteindex);

		/* Skip invalid entries and non-leaf (no R/W/X) entries. */
		if (((pte & RISCV_PTE_V) == 0) ||
		    ((pte & RISCV_PTE_RWX) == 0))
			continue;

		va = vm->hdr.kernbase + (pteindex << RISCV_L3_SHIFT);
		pa = (pte >> RISCV_PTE_PPN0_S) << RISCV_L3_SHIFT;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _riscv_entry_to_prot(pte), RISCV_PAGE_SIZE, 0)) {
			goto out;
		}
	}

	/* Remaining dumped pages, visible only through the direct map. */
	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = _kvm_bit_id_pa(kd, bmindex, RISCV_PAGE_SIZE);
		if (pa == _KVM_PA_INVALID)
			break;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + RISCV_PAGE_SIZE))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, RISCV_PAGE_SIZE, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}
+
/* Arch-operations vector for RISC-V minidumps, registered via KVM_ARCH(). */
static struct kvm_arch kvm_riscv_minidump = {
	.ka_probe = _riscv_minidump_probe,
	.ka_initvtop = _riscv_minidump_initvtop,
	.ka_freevtop = _riscv_minidump_freevtop,
	.ka_kvatop = _riscv_minidump_kvatop,
	.ka_native = _riscv_native,
	.ka_walk_pages = _riscv_minidump_walk_pages,
};
+KVM_ARCH(kvm_riscv_minidump);
diff --git a/lib/libkvm/kvm_native.3 b/lib/libkvm/kvm_native.3
new file mode 100644
index 000000000000..7d99a5c5d087
--- /dev/null
+++ b/lib/libkvm/kvm_native.3
@@ -0,0 +1,59 @@
+.\"
+.\" Copyright (c) 2015 John Baldwin <jhb@FreeBSD.org>
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd March 15, 2017
+.Dt KVM_NATIVE 3
+.Os
+.Sh NAME
+.Nm kvm_native
+.Nd is a kvm descriptor opened on a native kernel image
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft int
+.Fn kvm_native "kvm_t *kd"
+.Sh DESCRIPTION
+The
+.Nm kvm
+library provides an interface for accessing kernel virtual memory images
+for both native kernel images
+.Pq where the ABI of the kernel executable matches the host system
+and non-native kernel images.
+The
+.Fn kvm_native
+function returns a non-zero value if the kvm descriptor
+.Fa kd
+is attached to a native kernel image;
+otherwise it returns zero.
+.Sh RETURN VALUES
+The
+.Fn kvm_native
+function returns a non-zero value if the kvm descriptor
+.Fa kd
+is attached to a native kernel image;
+otherwise it returns zero.
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_open2 3
diff --git a/lib/libkvm/kvm_nlist.3 b/lib/libkvm/kvm_nlist.3
new file mode 100644
index 000000000000..b7701828fadb
--- /dev/null
+++ b/lib/libkvm/kvm_nlist.3
@@ -0,0 +1,120 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd November 27, 2015
+.Dt KVM_NLIST 3
+.Os
+.Sh NAME
+.Nm kvm_nlist ,
+.Nm kvm_nlist2
+.Nd retrieve symbol table names from a kernel image
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.In nlist.h
+.Ft int
+.Fn kvm_nlist "kvm_t *kd" "struct nlist *nl"
+.Ft int
+.Fn kvm_nlist2 "kvm_t *kd" "struct kvm_nlist *nl"
+.Sh DESCRIPTION
+The
+.Fn kvm_nlist
+function retrieves the symbol table entries indicated by the name list argument
+.Fa \&nl .
+This argument points to an array of nlist structures, terminated by
+an entry whose
+.Fa n_name
+field is
+.Dv NULL
+(see
+.Xr nlist 3 ) .
+Each symbol is looked up using the
+.Fa n_name
+field, and if found, the
+corresponding
+.Fa n_type
+and
+.Fa n_value
+fields are filled in.
+These fields are set
+to 0 if the symbol is not found.
+.Pp
+The
+.Xr kldsym 2
+system call is used to locate symbols in live kernels.
+This is a less than perfect
+emulation of the nlist values but has the advantage of being aware of kernel
+modules and is reasonably fast.
+.Pp
+The
+.Fn kvm_nlist2
+function retrieves the symbol table entries indicated by the name list argument
+.Fa nl .
+This argument points to an array of
+.Vt "struct kvm_nlist"
+structures,
+terminated by an entry whose
+.Fa n_name
+field is
+.Dv NULL .
+These structures are similar to the nlist structures used by
+.Fn kvm_nlist
+except that the
+.Fa n_value
+field uses a different type
+.Pq Vt kvaddr_t
+to avoid truncation when examining non-native kernel images.
+.Sh RETURN VALUES
+The
+.Fn kvm_nlist
+and
+.Fn kvm_nlist2
+functions return the number of invalid entries found.
+If the kernel symbol table was unreadable, -1 is returned.
+.Sh SEE ALSO
+.Xr kldsym 2 ,
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_native 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh HISTORY
+The
+.Fn kvm_nlist2
+function first appeared in
+.Fx 11.0 .
diff --git a/lib/libkvm/kvm_open.3 b/lib/libkvm/kvm_open.3
new file mode 100644
index 000000000000..9a32719d537b
--- /dev/null
+++ b/lib/libkvm/kvm_open.3
@@ -0,0 +1,286 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd June 22, 2025
+.Dt KVM_OPEN 3
+.Os
+.Sh NAME
+.Nm kvm_open ,
+.Nm kvm_open2 ,
+.Nm kvm_openfiles ,
+.Nm kvm_close
+.Nd initialize kernel virtual memory access
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In fcntl.h
+.In kvm.h
+.Ft kvm_t *
+.Fn kvm_open "const char *execfile" "const char *corefile" "const char *swapfile" "int flags" "const char *errstr"
+.Ft kvm_t *
+.Fo kvm_open2
+.Fa "const char *execfile"
+.Fa "const char *corefile"
+.Fa "int flags"
+.Fa "char *errbuf"
+.Fa "int (*resolver)(const char *name, kvaddr_t *addr)"
+.Fc
+.Ft kvm_t *
+.Fn kvm_openfiles "const char *execfile" "const char *corefile" "const char *swapfile" "int flags" "char *errbuf"
+.Ft int
+.Fn kvm_close "kvm_t *kd"
+.Sh DESCRIPTION
+The functions
+.Fn kvm_open ,
+.Fn kvm_open2 ,
+and
+.Fn kvm_openfiles
+return a descriptor used to access kernel virtual memory
+via the
+.Xr kvm 3
+library routines.
+Both active kernels and crash dumps are accessible
+through this interface.
+.Pp
+The
+.Fa execfile
+argument is the executable image of the kernel being examined.
+This file must contain a symbol table.
+If this argument is
+.Dv NULL ,
+the currently running system is assumed,
+as determined from
+.Xr getbootfile 3 .
+.Pp
+The
+.Fa corefile
+argument is the kernel memory device file.
+It can be either
+.Pa /dev/mem
+or a crash dump core generated by
+.Xr savecore 8 .
+If
+.Fa corefile
+is
+.Dv NULL ,
+the default indicated by
+.Dv _PATH_MEM
+from
+.In paths.h
+is used.
+It can also be set to a special value
+.Pa /dev/null
+by utilities like
+.Xr ps 1
+that do not directly access kernel memory.
+.Pp
+The
+.Fa swapfile
+argument is currently unused.
+.Pp
+The
+.Fa flags
+argument indicates read/write access as in
+.Xr open 2
+and applies only to the core file.
+Only
+.Dv O_RDONLY ,
+.Dv O_WRONLY ,
+and
+.Dv O_RDWR
+are permitted.
+.Pp
+The
+.Nm kvm
+library provides two different error reporting mechanisms.
+One provides backward compatibility with the SunOS kvm library, while the
+other provides an improved error reporting framework.
+The mechanism used by a descriptor is determined by the function used to
+open the descriptor.
+.Pp
+The
+.Fn kvm_open
+function is the Sun kvm compatible open call.
+Here, the
+.Fa errstr
+argument indicates how errors should be handled.
+If it is
+.Dv NULL ,
+no errors are reported and the application cannot know the
+specific nature of the failed kvm call.
+If it is not
+.Dv NULL ,
+errors are printed to
+.Dv stderr
+with
+.Fa errstr
+prepended to the message, as in
+.Xr perror 3 .
+Normally, the name of the program is used here.
+The string is assumed to persist at least until the corresponding
+.Fn kvm_close
+call.
+.Pp
+The
+.Fn kvm_open2
+and
+.Fn kvm_openfiles
+functions provide
+.Bx
+style error reporting.
+Here, error messages are not printed out by the library.
+Instead, the application obtains the error message
+corresponding to the most recent kvm library call using
+.Fn kvm_geterr
+(see
+.Xr kvm_geterr 3 ) .
+The results are undefined if the most recent kvm call did not produce
+an error.
+Since
+.Fn kvm_geterr
+requires a kvm descriptor, but the open routines return
+.Dv NULL
+on failure,
+.Fn kvm_geterr
+cannot be used to get the error message if open fails.
+Thus,
+.Fn kvm_open2
+and
+.Fn kvm_openfiles
+will place any error message in the
+.Fa errbuf
+argument.
+This buffer should be _POSIX2_LINE_MAX characters large (from
+<limits.h>).
+.Pp
+The
+.Fa resolver
+argument points to a function used by the
+.Nm kvm
+library to map symbol names to kernel virtual addresses.
+When the
+.Fa resolver
+function is called,
+.Fa name
+specifies the requested symbol name.
+If the function is able to resolve the name to an address,
+the address should be set in
+.Fa addr
+and the function should return zero.
+If the function is not able to resolve the name to an address,
+it should return a non-zero value.
+When opening a native kernel image,
+.Fa resolver
+may be set to
+.Dv NULL
+to use an internal function to resolve symbol names.
+Non-native kernel images
+.Pq such as when cross-debugging a crash dump
+require a valid
+.Fa resolver .
+.Sh RETURN VALUES
+The
+.Fn kvm_open ,
+.Fn kvm_open2 ,
+and
+.Fn kvm_openfiles
+functions return a descriptor to be used
+in all subsequent kvm library calls.
+The library is fully re-entrant.
+On failure,
+.Dv NULL
+is returned, in which case
+.Fn kvm_open2
+and
+.Fn kvm_openfiles
+write the error message into
+.Fa errbuf .
+.Pp
+.Rv -std kvm_close
+.Sh ERRORS
+The
+.Fn kvm_close
+function may fail and set the global variable
+.Va errno
+for any of the errors specified for
+.Xr close 2 .
+.Pp
+The
+.Fn kvm_close
+function may also fail and set
+.Va errno
+if:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The value passed via
+.Fa kd
+was
+.Dv NULL .
+.El
+.Sh NOTES
+Full memory dumps taken on 13.x (excluding 13.0) and 14.x amd64 kernels
+will cause both
+.Fn kvm_open
+and
+.Fn kvm_open2
+to fail since they do not provide sufficient information to figure out
+where in physical memory the kernel was loaded.
+Full memory dumps have to be explicitly enabled by setting the
+.Va debug.minidump
+.Xr sysctl 8
+to 0.
+.Sh SEE ALSO
+.Xr close 2 ,
+.Xr open 2 ,
+.Xr kvm 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_native 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3 ,
+.Xr kmem 4 ,
+.Xr mem 4 ,
+.Xr dumpon 8
+.Sh BUGS
+There should not be three open calls.
+The ill-defined error semantics
+of the Sun library and the desire to have a backward-compatible library
+for
+.Bx
+left little choice.
+.Sh HISTORY
+The
+.Fn kvm_open2
+function first appeared in
+.Fx 11.0 .
diff --git a/lib/libkvm/kvm_pcpu.c b/lib/libkvm/kvm_pcpu.c
new file mode 100644
index 000000000000..a83d927bcfd9
--- /dev/null
+++ b/lib/libkvm/kvm_pcpu.c
@@ -0,0 +1,382 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2013 Gleb Smirnoff <glebius@FreeBSD.org>
+ * Copyright (c) 2010 Juniper Networks, Inc.
+ * Copyright (c) 2009 Robert N. M. Watson
+ * Copyright (c) 2009 Bjoern A. Zeeb <bz@FreeBSD.org>
+ * Copyright (c) 2008 Yahoo!, Inc.
+ * All rights reserved.
+ *
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * This software was developed by Robert N. M. Watson under contract
+ * to Juniper Networks, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+#include <sys/sysctl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdlib.h>
+
+#include "kvm_private.h"
+
+#ifdef __amd64__
+#define __OFFSET_BY_PCPU
+#endif
+
+static struct nlist kvm_pcpu_nl[] = {
+ { .n_name = "_cpuid_to_pcpu" },
+ { .n_name = "_mp_maxcpus" },
+ { .n_name = "_mp_ncpus" },
+#ifdef __OFFSET_BY_PCPU
+ { .n_name = "___pcpu" },
+#endif
+ { .n_name = NULL },
+};
+#define NL_CPUID_TO_PCPU 0
+#define NL_MP_MAXCPUS 1
+#define NL_MP_NCPUS 2
+#define NL___PCPU 3
+
+/*
+ * Kernel per-CPU data state. We cache this stuff on the first
+ * access.
+ *
+ * XXXRW: Possibly, this (and kvmpcpu_nl) should be per-kvm_t, in case the
+ * consumer has multiple handles in flight to differently configured
+ * kernels/crashdumps.
+ */
+static void **pcpu_data;
+static int maxcpu;
+static int mp_ncpus;
+#ifdef __OFFSET_BY_PCPU
+static unsigned long __pcpu;
+#endif
+
+static int
+_kvm_pcpu_init(kvm_t *kd)
+{
+ size_t len;
+ int max;
+ void *data;
+
+ if (kvm_nlist(kd, kvm_pcpu_nl) < 0)
+ return (-1);
+ if (kvm_pcpu_nl[NL_CPUID_TO_PCPU].n_value == 0) {
+ _kvm_err(kd, kd->program, "unable to find cpuid_to_pcpu");
+ return (-1);
+ }
+ if (kvm_pcpu_nl[NL_MP_MAXCPUS].n_value == 0) {
+ _kvm_err(kd, kd->program, "unable to find mp_maxcpus");
+ return (-1);
+ }
+ if (kvm_read(kd, kvm_pcpu_nl[NL_MP_MAXCPUS].n_value, &max,
+ sizeof(max)) != sizeof(max)) {
+ _kvm_err(kd, kd->program, "cannot read mp_maxcpus");
+ return (-1);
+ }
+ if (kvm_pcpu_nl[NL_MP_NCPUS].n_value == 0) {
+ _kvm_err(kd, kd->program, "unable to find mp_ncpus");
+ return (-1);
+ }
+ if (kvm_read(kd, kvm_pcpu_nl[NL_MP_NCPUS].n_value, &mp_ncpus,
+ sizeof(mp_ncpus)) != sizeof(mp_ncpus)) {
+ _kvm_err(kd, kd->program, "cannot read mp_ncpus");
+ return (-1);
+ }
+#ifdef __OFFSET_BY_PCPU
+ if (kvm_pcpu_nl[NL___PCPU].n_value == 0) {
+ _kvm_err(kd, kd->program, "unable to find __pcpu");
+ return (-1);
+ }
+ if (kvm_read(kd, kvm_pcpu_nl[NL___PCPU].n_value, &__pcpu,
+ sizeof(__pcpu)) != sizeof(__pcpu)) {
+ _kvm_err(kd, kd->program, "cannot read __pcpu");
+ return (-1);
+ }
+#endif
+ len = max * sizeof(void *);
+ data = malloc(len);
+ if (data == NULL) {
+ _kvm_err(kd, kd->program, "out of memory");
+ return (-1);
+ }
+ if (kvm_read(kd, kvm_pcpu_nl[NL_CPUID_TO_PCPU].n_value, data, len) !=
+ (ssize_t)len) {
+ _kvm_err(kd, kd->program, "cannot read cpuid_to_pcpu array");
+ free(data);
+ return (-1);
+ }
+ pcpu_data = data;
+ maxcpu = max;
+ return (0);
+}
+
+static void
+_kvm_pcpu_clear(void)
+{
+
+ maxcpu = 0;
+ free(pcpu_data);
+ pcpu_data = NULL;
+}
+
+void *
+kvm_getpcpu(kvm_t *kd, int cpu)
+{
+	char *buf;
+
+	if (kd == NULL) {
+		_kvm_pcpu_clear();
+		return (NULL);
+	}
+
+	if (maxcpu == 0)
+		if (_kvm_pcpu_init(kd) < 0)
+			return ((void *)-1);
+
+	/* Reject negative cpu ids: they would index pcpu_data[] OOB. */
+	if (cpu < 0 || cpu >= maxcpu || pcpu_data[cpu] == NULL)
+		return (NULL);
+
+	buf = malloc(sizeof(struct pcpu));
+	if (buf == NULL) {
+		_kvm_err(kd, kd->program, "out of memory");
+		return ((void *)-1);
+	}
+	if (kvm_read(kd, (uintptr_t)pcpu_data[cpu], buf,
+	    sizeof(struct pcpu)) != sizeof(struct pcpu)) {
+		_kvm_err(kd, kd->program, "unable to read per-CPU data");
+		free(buf);
+		return ((void *)-1);
+	}
+	return (buf);
+}
+
+int
+kvm_getmaxcpu(kvm_t *kd)
+{
+
+ if (kd == NULL) {
+ _kvm_pcpu_clear();
+ return (0);
+ }
+
+ if (maxcpu == 0)
+ if (_kvm_pcpu_init(kd) < 0)
+ return (-1);
+ return (maxcpu);
+}
+
+int
+kvm_getncpus(kvm_t *kd)
+{
+
+ if (mp_ncpus == 0)
+ if (_kvm_pcpu_init(kd) < 0)
+ return (-1);
+ return (mp_ncpus);
+}
+
+static int
+_kvm_dpcpu_setcpu(kvm_t *kd, u_int cpu, int report_error)
+{
+
+ if (!kd->dpcpu_initialized) {
+ if (report_error)
+ _kvm_err(kd, kd->program, "%s: not initialized",
+ __func__);
+ return (-1);
+ }
+ if (cpu >= kd->dpcpu_maxcpus) {
+ if (report_error)
+ _kvm_err(kd, kd->program, "%s: CPU %u too big",
+ __func__, cpu);
+ return (-1);
+ }
+ if (kd->dpcpu_off[cpu] == 0) {
+ if (report_error)
+ _kvm_err(kd, kd->program, "%s: CPU %u not found",
+ __func__, cpu);
+ return (-1);
+ }
+ kd->dpcpu_curcpu = cpu;
+ kd->dpcpu_curoff = kd->dpcpu_off[cpu];
+ return (0);
+}
+
+/*
+ * Set up libkvm to handle dynamic per-CPU memory.
+ */
+static int
+_kvm_dpcpu_init(kvm_t *kd)
+{
+ struct kvm_nlist nl[] = {
+#define NLIST_START_SET_PCPU 0
+ { .n_name = "___start_" DPCPU_SETNAME },
+#define NLIST_STOP_SET_PCPU 1
+ { .n_name = "___stop_" DPCPU_SETNAME },
+#define NLIST_DPCPU_OFF 2
+ { .n_name = "_dpcpu_off" },
+#define NLIST_MP_MAXCPUS 3
+ { .n_name = "_mp_maxcpus" },
+ { .n_name = NULL },
+ };
+ uintptr_t *dpcpu_off_buf;
+ size_t len;
+ u_int dpcpu_maxcpus;
+
+ /*
+ * XXX: This only works for native kernels for now.
+ */
+ if (!kvm_native(kd))
+ return (-1);
+
+ /*
+ * Locate and cache locations of important symbols using the internal
+ * version of _kvm_nlist, turning off initialization to avoid
+ * recursion in case of unresolveable symbols.
+ */
+ if (_kvm_nlist(kd, nl, 0) != 0)
+ return (-1);
+ if (kvm_read(kd, nl[NLIST_MP_MAXCPUS].n_value, &dpcpu_maxcpus,
+ sizeof(dpcpu_maxcpus)) != sizeof(dpcpu_maxcpus))
+ return (-1);
+ len = dpcpu_maxcpus * sizeof(*dpcpu_off_buf);
+ dpcpu_off_buf = malloc(len);
+ if (dpcpu_off_buf == NULL)
+ return (-1);
+ if (kvm_read(kd, nl[NLIST_DPCPU_OFF].n_value, dpcpu_off_buf, len) !=
+ (ssize_t)len) {
+ free(dpcpu_off_buf);
+ return (-1);
+ }
+ kd->dpcpu_start = nl[NLIST_START_SET_PCPU].n_value;
+ kd->dpcpu_stop = nl[NLIST_STOP_SET_PCPU].n_value;
+ kd->dpcpu_maxcpus = dpcpu_maxcpus;
+ kd->dpcpu_off = dpcpu_off_buf;
+ kd->dpcpu_initialized = 1;
+ (void)_kvm_dpcpu_setcpu(kd, 0, 0);
+ return (0);
+}
+
+/*
+ * Check whether the dpcpu module has been initialized successfully or not,
+ * initialize it if permitted.
+ */
+int
+_kvm_dpcpu_initialized(kvm_t *kd, int initialize)
+{
+
+	if (kd->dpcpu_initialized || !initialize)
+		return (kd->dpcpu_initialized);
+
+	(void)_kvm_dpcpu_init(kd);
+
+	return (kd->dpcpu_initialized);
+}
+
+/*
+ * Check whether the value is within the dpcpu symbol range and only if so
+ * adjust the offset relative to the current offset.
+ */
+kvaddr_t
+_kvm_dpcpu_validaddr(kvm_t *kd, kvaddr_t value)
+{
+
+ if (value == 0)
+ return (value);
+
+ if (!kd->dpcpu_initialized)
+ return (value);
+
+ if (value < kd->dpcpu_start || value >= kd->dpcpu_stop)
+ return (value);
+
+ return (kd->dpcpu_curoff + value);
+}
+
+int
+kvm_dpcpu_setcpu(kvm_t *kd, u_int cpu)
+{
+ int ret;
+
+ if (!kd->dpcpu_initialized) {
+ ret = _kvm_dpcpu_init(kd);
+ if (ret != 0) {
+ _kvm_err(kd, kd->program, "%s: init failed",
+ __func__);
+ return (ret);
+ }
+ }
+
+ return (_kvm_dpcpu_setcpu(kd, cpu, 1));
+}
+
+/*
+ * Obtain a per-CPU copy for given cpu from UMA_ZONE_PCPU allocation.
+ */
+ssize_t
+kvm_read_zpcpu(kvm_t *kd, u_long base, void *buf, size_t size, int cpu)
+{
+
+	if (!kvm_native(kd))
+		return (-1);
+	if (mp_ncpus == 0)
+		if (_kvm_pcpu_init(kd) < 0)
+			return (-1);
+
+#ifdef __OFFSET_BY_PCPU
+	base += __pcpu;
+#endif
+	return (kvm_read(kd, (uintptr_t)(base + sizeof(struct pcpu) * cpu),
+	    buf, size));
+}
+
+/*
+ * Fetch value of a counter(9).
+ */
+uint64_t
+kvm_counter_u64_fetch(kvm_t *kd, u_long base)
+{
+ uint64_t r, c;
+
+ if (mp_ncpus == 0)
+ if (_kvm_pcpu_init(kd) < 0)
+ return (0);
+
+ r = 0;
+ for (int i = 0; i < mp_ncpus; i++) {
+ if (kvm_read_zpcpu(kd, base, &c, sizeof(c), i) != sizeof(c))
+ return (0);
+ r += c;
+ }
+
+ return (r);
+}
diff --git a/lib/libkvm/kvm_powerpc.c b/lib/libkvm/kvm_powerpc.c
new file mode 100644
index 000000000000..61db08f3a0ae
--- /dev/null
+++ b/lib/libkvm/kvm_powerpc.c
@@ -0,0 +1,234 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2008, Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/kerneldump.h>
+#include <sys/mman.h>
+
+#include <elf.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "kvm_private.h"
+
+struct vmstate {
+ void *map;
+ size_t mapsz;
+ size_t dmphdrsz;
+ Elf32_Ehdr *eh;
+ Elf32_Phdr *ph;
+};
+
+static int
+valid_elf_header(Elf32_Ehdr *eh)
+{
+
+ if (!IS_ELF(*eh))
+ return (0);
+ if (eh->e_ident[EI_CLASS] != ELFCLASS32)
+ return (0);
+ if (eh->e_ident[EI_DATA] != ELFDATA2MSB)
+ return (0);
+ if (eh->e_ident[EI_VERSION] != EV_CURRENT)
+ return (0);
+ if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
+ return (0);
+ if (be16toh(eh->e_type) != ET_CORE)
+ return (0);
+ if (be16toh(eh->e_machine) != EM_PPC)
+ return (0);
+ /* Can't think of anything else to check... */
+ return (1);
+}
+
+static size_t
+dump_header_size(struct kerneldumpheader *dh)
+{
+
+ if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0)
+ return (0);
+ if (strcmp(dh->architecture, "powerpc") != 0)
+ return (0);
+ /* That should do it... */
+ return (sizeof(*dh));
+}
+
+/*
+ * Map the ELF headers into the process' address space. We do this in two
+ * steps: first the ELF header itself and using that information the whole
+ * set of headers.
+ */
+static int
+powerpc_maphdrs(kvm_t *kd)
+{
+ struct vmstate *vm;
+ size_t mapsz;
+
+ vm = kd->vmst;
+
+ vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader);
+ vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
+ if (vm->map == MAP_FAILED) {
+ _kvm_err(kd, kd->program, "cannot map corefile");
+ return (-1);
+ }
+ vm->dmphdrsz = 0;
+ vm->eh = vm->map;
+ if (!valid_elf_header(vm->eh)) {
+ /*
+ * Hmmm, no ELF header. Maybe we still have a dump header.
+ * This is normal when the core file wasn't created by
+ * savecore(8), but instead was dumped over TFTP. We can
+ * easily skip the dump header...
+ */
+ vm->dmphdrsz = dump_header_size(vm->map);
+ if (vm->dmphdrsz == 0)
+ goto inval;
+ vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
+ if (!valid_elf_header(vm->eh))
+ goto inval;
+ }
+ mapsz = be16toh(vm->eh->e_phentsize) * be16toh(vm->eh->e_phnum) +
+ be32toh(vm->eh->e_phoff);
+ munmap(vm->map, vm->mapsz);
+
+ /* Map all headers. */
+ vm->mapsz = vm->dmphdrsz + mapsz;
+ vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
+ if (vm->map == MAP_FAILED) {
+ _kvm_err(kd, kd->program, "cannot map corefile headers");
+ return (-1);
+ }
+ vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
+ vm->ph = (void *)((uintptr_t)vm->eh + be32toh(vm->eh->e_phoff));
+ return (0);
+
+ inval:
+ _kvm_err(kd, kd->program, "invalid corefile");
+ return (-1);
+}
+
+/*
+ * Determine the offset within the corefile corresponding the virtual
+ * address. Return the number of contiguous bytes in the corefile or
+ * 0 when the virtual address is invalid.
+ */
+static size_t
+powerpc_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs)
+{
+ struct vmstate *vm = kd->vmst;
+ Elf32_Phdr *ph;
+ int nph;
+
+ ph = vm->ph;
+ nph = be16toh(vm->eh->e_phnum);
+ while (nph && (va < be32toh(ph->p_vaddr) ||
+ va >= be32toh(ph->p_vaddr) + be32toh(ph->p_memsz))) {
+ nph--;
+ ph = (void *)((uintptr_t)ph + be16toh(vm->eh->e_phentsize));
+ }
+ if (nph == 0)
+ return (0);
+
+ /* Segment found. Return file offset and range. */
+ *ofs = vm->dmphdrsz + be32toh(ph->p_offset) +
+ (va - be32toh(ph->p_vaddr));
+ return (be32toh(ph->p_memsz) - (va - be32toh(ph->p_vaddr)));
+}
+
+static void
+_powerpc_freevtop(kvm_t *kd)
+{
+	struct vmstate *vm = kd->vmst;
+	/* Unmap the page-aligned mmap() base; eh may be offset by dmphdrsz. */
+	if (vm->map != NULL && vm->map != MAP_FAILED)
+		munmap(vm->map, vm->mapsz);
+	free(vm);
+	kd->vmst = NULL;
+}
+
+static int
+_powerpc_probe(kvm_t *kd)
+{
+
+ return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_PPC) &&
+ kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
+}
+
+static int
+_powerpc_initvtop(kvm_t *kd)
+{
+
+ kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
+ if (kd->vmst == NULL)
+ return (-1);
+
+ if (powerpc_maphdrs(kd) == -1)
+ return (-1);
+
+ return (0);
+}
+
+static int
+_powerpc_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
+{
+ struct vmstate *vm;
+
+ vm = kd->vmst;
+ if (be32toh(vm->ph->p_paddr) == 0xffffffff)
+ return ((int)powerpc_va2off(kd, va, ofs));
+
+ _kvm_err(kd, kd->program, "Raw corefile not supported");
+ return (0);
+}
+
+static int
+_powerpc_native(kvm_t *kd __unused)
+{
+
+#if defined(__powerpc__) && !defined(__powerpc64__)
+ return (1);
+#else
+ return (0);
+#endif
+}
+
+static struct kvm_arch kvm_powerpc = {
+ .ka_probe = _powerpc_probe,
+ .ka_initvtop = _powerpc_initvtop,
+ .ka_freevtop = _powerpc_freevtop,
+ .ka_kvatop = _powerpc_kvatop,
+ .ka_native = _powerpc_native,
+};
+
+KVM_ARCH(kvm_powerpc);
diff --git a/lib/libkvm/kvm_powerpc64.c b/lib/libkvm/kvm_powerpc64.c
new file mode 100644
index 000000000000..f0292ec8d757
--- /dev/null
+++ b/lib/libkvm/kvm_powerpc64.c
@@ -0,0 +1,267 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2008, Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/kerneldump.h>
+#include <sys/mman.h>
+
+#include <elf.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "kvm_private.h"
+
/* Per-open state for a powerpc64 corefile. */
struct vmstate {
	void *map;		/* mmap(2)ed corefile headers */
	size_t mapsz;		/* size of the mapping */
	size_t dmphdrsz;	/* leading kerneldump header size, or 0 */
	Elf64_Ehdr *eh;		/* ELF header, within the mapping */
	Elf64_Phdr *ph;		/* program headers, within the mapping */
};
+
+static int
+valid_elf_header(kvm_t *kd, Elf64_Ehdr *eh)
+{
+
+ if (!IS_ELF(*eh))
+ return (0);
+ if (eh->e_ident[EI_CLASS] != ELFCLASS64)
+ return (0);
+ if (eh->e_ident[EI_DATA] != ELFDATA2MSB &&
+ eh->e_ident[EI_DATA] != ELFDATA2LSB)
+ return (0);
+ if (eh->e_ident[EI_VERSION] != EV_CURRENT)
+ return (0);
+ if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
+ return (0);
+ if (_kvm16toh(kd, eh->e_type) != ET_CORE)
+ return (0);
+ if (_kvm16toh(kd, eh->e_machine) != EM_PPC64)
+ return (0);
+ /* Can't think of anything else to check... */
+ return (1);
+}
+
+static size_t
+dump_header_size(struct kerneldumpheader *dh)
+{
+
+ if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0)
+ return (0);
+ if (strcmp(dh->architecture, "powerpc64") != 0 &&
+ strcmp(dh->architecture, "powerpc64le") != 0)
+ return (0);
+ /* That should do it... */
+ return (sizeof(*dh));
+}
+
+/*
+ * Map the ELF headers into the process' address space. We do this in two
+ * steps: first the ELF header itself and using that information the whole
+ * set of headers.
+ */
+static int
+powerpc_maphdrs(kvm_t *kd)
+{
+ struct vmstate *vm;
+ size_t mapsz;
+
+ vm = kd->vmst;
+
+ vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader);
+ vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
+ if (vm->map == MAP_FAILED) {
+ _kvm_err(kd, kd->program, "cannot map corefile");
+ return (-1);
+ }
+ vm->dmphdrsz = 0;
+ vm->eh = vm->map;
+ if (!valid_elf_header(kd, vm->eh)) {
+ /*
+ * Hmmm, no ELF header. Maybe we still have a dump header.
+ * This is normal when the core file wasn't created by
+ * savecore(8), but instead was dumped over TFTP. We can
+ * easily skip the dump header...
+ */
+ vm->dmphdrsz = dump_header_size(vm->map);
+ if (vm->dmphdrsz == 0)
+ goto inval;
+ vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
+ if (!valid_elf_header(kd, vm->eh))
+ goto inval;
+ }
+ mapsz = _kvm16toh(kd, vm->eh->e_phentsize) *
+ _kvm16toh(kd, vm->eh->e_phnum) + _kvm64toh(kd, vm->eh->e_phoff);
+ munmap(vm->map, vm->mapsz);
+
+ /* Map all headers. */
+ vm->mapsz = vm->dmphdrsz + mapsz;
+ vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
+ if (vm->map == MAP_FAILED) {
+ _kvm_err(kd, kd->program, "cannot map corefile headers");
+ return (-1);
+ }
+ vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
+ vm->ph = (void *)((uintptr_t)vm->eh +
+ (uintptr_t)_kvm64toh(kd, vm->eh->e_phoff));
+ return (0);
+
+ inval:
+ _kvm_err(kd, kd->program, "invalid corefile");
+ return (-1);
+}
+
+/*
+ * Determine the offset within the corefile corresponding the virtual
+ * address. Return the number of contiguous bytes in the corefile or
+ * 0 when the virtual address is invalid.
+ */
+static size_t
+powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs)
+{
+ struct vmstate *vm = kd->vmst;
+ Elf64_Phdr *ph;
+ int nph;
+
+ ph = vm->ph;
+ nph = _kvm16toh(kd, vm->eh->e_phnum);
+ while (nph && (va < _kvm64toh(kd, ph->p_vaddr) ||
+ va >= _kvm64toh(kd, ph->p_vaddr) + _kvm64toh(kd, ph->p_memsz))) {
+ nph--;
+ ph = (void *)((uintptr_t)ph +
+ _kvm16toh(kd, vm->eh->e_phentsize));
+ }
+ if (nph == 0)
+ return (0);
+
+ /* Segment found. Return file offset and range. */
+ *ofs = vm->dmphdrsz + _kvm64toh(kd, ph->p_offset) +
+ (va - _kvm64toh(kd, ph->p_vaddr));
+ return (_kvm64toh(kd, ph->p_memsz) -
+ (va - _kvm64toh(kd, ph->p_vaddr)));
+}
+
+static void
+_powerpc64_freevtop(kvm_t *kd)
+{
+ struct vmstate *vm = kd->vmst;
+
+ if (vm->eh != MAP_FAILED)
+ munmap(vm->eh, vm->mapsz);
+ free(vm);
+ kd->vmst = NULL;
+}
+
+static int
+_powerpc64_probe(kvm_t *kd)
+{
+
+ return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
+ kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
+}
+
+static int
+_powerpc64le_probe(kvm_t *kd)
+{
+
+ return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
+ kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
+}
+
+static int
+_powerpc64_initvtop(kvm_t *kd)
+{
+
+ kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
+ if (kd->vmst == NULL)
+ return (-1);
+
+ if (powerpc_maphdrs(kd) == -1)
+ return (-1);
+
+ return (0);
+}
+
+static int
+_powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
+{
+ struct vmstate *vm;
+
+ vm = kd->vmst;
+ if (_kvm64toh(kd, vm->ph->p_paddr) == 0xffffffffffffffff)
+ return ((int)powerpc64_va2off(kd, va, ofs));
+
+ _kvm_err(kd, kd->program, "Raw corefile not supported");
+ return (0);
+}
+
/* True only when libkvm itself was built for big-endian powerpc64. */
static int
_powerpc64_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == BIG_ENDIAN
	return (1);
#else
	return (0);
#endif
}
+
/* True only when libkvm itself was built for little-endian powerpc64. */
static int
_powerpc64le_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == LITTLE_ENDIAN
	return (1);
#else
	return (0);
#endif
}
+
/* Arch hooks for 64-bit big-endian PowerPC vmcores. */
static struct kvm_arch kvm_powerpc64 = {
	.ka_probe = _powerpc64_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64_native,
};
+
/*
 * Arch hooks for 64-bit little-endian PowerPC vmcores.  Only the probe
 * and native checks differ from the big-endian table; the endian-aware
 * _kvm*toh() accessors handle both byte orders in the shared hooks.
 */
static struct kvm_arch kvm_powerpc64le = {
	.ka_probe = _powerpc64le_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64le_native,
};
+
+KVM_ARCH(kvm_powerpc64);
+KVM_ARCH(kvm_powerpc64le);
diff --git a/lib/libkvm/kvm_powerpc64.h b/lib/libkvm/kvm_powerpc64.h
new file mode 100644
index 000000000000..efd7ce0d3e7f
--- /dev/null
+++ b/lib/libkvm/kvm_powerpc64.h
@@ -0,0 +1,79 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Leandro Lupori
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
#ifndef __KVM_POWERPC64_H__
#define __KVM_POWERPC64_H__

/* Debug stuff: dprintf() compiles away unless KVM_PPC64_DBG is set. */
#define KVM_PPC64_DBG 0
#if KVM_PPC64_DBG
#include <stdio.h>
#define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif


#define PPC64_KERNBASE 0x100100ULL

/* Page params */
#define PPC64_PAGE_SHIFT 12
#define PPC64_PAGE_SIZE (1ULL << PPC64_PAGE_SHIFT)
#define PPC64_PAGE_MASK (PPC64_PAGE_SIZE - 1)

#define ppc64_round_page(x) roundup2((kvaddr_t)(x), PPC64_PAGE_SIZE)

/* Names of the MMU implementations the kernel may report. */
#define PPC64_MMU_G5 "mmu_g5"
#define PPC64_MMU_PHYP "mmu_phyp"

/* MMU interface: dispatch through the ops table stored in the vmstate. */
#define PPC64_MMU_OPS(kd) (kd)->vmst->mmu.ops
#define PPC64_MMU_OP(kd, op, ...) PPC64_MMU_OPS(kd)->op((kd), ## __VA_ARGS__)
#define PPC64_MMU_DATA(kd) (kd)->vmst->mmu.data

/* Virtual-method table for a specific PPC64 MMU flavour. */
struct ppc64_mmu_ops {
	int (*init)(kvm_t *);
	void (*cleanup)(kvm_t *);
	int (*kvatop)(kvm_t *, kvaddr_t, off_t *);
	int (*walk_pages)(kvm_t *, kvm_walk_pages_cb_t *, void *);
};

/* An MMU instance: its ops plus implementation-private data. */
struct ppc64_mmu {
	struct ppc64_mmu_ops *ops;
	void *data;
};

/* Per-open state for a powerpc64 minidump. */
struct vmstate {
	struct minidumphdr hdr;		/* copy of the minidump header */
	uint64_t kimg_start;		/* kernel image start VA */
	uint64_t kimg_end;		/* kernel image end VA */
	struct ppc64_mmu mmu;		/* selected MMU backend */
};

/* HPT (hashed page table) MMU backend, defined elsewhere. */
extern struct ppc64_mmu_ops *ppc64_mmu_ops_hpt;

#endif /* !__KVM_POWERPC64_H__ */
diff --git a/lib/libkvm/kvm_private.c b/lib/libkvm/kvm_private.c
new file mode 100644
index 000000000000..a3b650787f53
--- /dev/null
+++ b/lib/libkvm/kvm_private.c
@@ -0,0 +1,824 @@
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/fnv_hash.h>
+
+#define _WANT_VNET
+
+#include <sys/user.h>
+#include <sys/linker.h>
+#include <sys/pcpu.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <stdbool.h>
+#include <net/vnet.h>
+
+#include <assert.h>
+#include <fcntl.h>
+#include <vm/vm.h>
+#include <kvm.h>
+#include <limits.h>
+#include <paths.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include "kvm_private.h"
+
+/*
+ * Routines private to libkvm.
+ */
+
+/* from src/lib/libc/gen/nlist.c */
+int __fdnlist(int, struct nlist *);
+
+/*
+ * Report an error using printf style arguments. "program" is kd->program
+ * on hard errors, and 0 on soft errors, so that under sun error emulation,
+ * only hard errors are printed out (otherwise, programs like gdb will
+ * generate tons of error messages when trying to access bogus pointers).
+ */
+void
+_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (program != NULL) {
+ (void)fprintf(stderr, "%s: ", program);
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fputc('\n', stderr);
+ } else
+ (void)vsnprintf(kd->errbuf,
+ sizeof(kd->errbuf), fmt, ap);
+
+ va_end(ap);
+}
+
+void
+_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
+{
+ va_list ap;
+ int n;
+
+ va_start(ap, fmt);
+ if (program != NULL) {
+ (void)fprintf(stderr, "%s: ", program);
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, ": %s\n", strerror(errno));
+ } else {
+ char *cp = kd->errbuf;
+
+ (void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
+ n = strlen(cp);
+ (void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
+ strerror(errno));
+ }
+ va_end(ap);
+}
+
+void *
+_kvm_malloc(kvm_t *kd, size_t n)
+{
+ void *p;
+
+ if ((p = calloc(n, sizeof(char))) == NULL)
+ _kvm_err(kd, kd->program, "can't allocate %zu bytes: %s",
+ n, strerror(errno));
+ return (p);
+}
+
+int
+_kvm_probe_elf_kernel(kvm_t *kd, int class, int machine)
+{
+
+ return (kd->nlehdr.e_ident[EI_CLASS] == class &&
+ ((machine == EM_PPC || machine == EM_PPC64) ?
+ kd->nlehdr.e_type == ET_DYN : kd->nlehdr.e_type == ET_EXEC) &&
+ kd->nlehdr.e_machine == machine);
+}
+
+int
+_kvm_is_minidump(kvm_t *kd)
+{
+ char minihdr[8];
+
+ if (kd->rawdump)
+ return (0);
+ if (pread(kd->pmfd, &minihdr, 8, 0) == 8 &&
+ memcmp(&minihdr, "minidump", 8) == 0)
+ return (1);
+ return (0);
+}
+
+/*
+ * The powerpc backend has a hack to strip a leading kerneldump
+ * header from the core before treating it as an ELF header.
+ *
+ * We can add that here if we can get a change to libelf to support
+ * an initial offset into the file. Alternatively we could patch
+ * savecore to extract cores from a regular file instead.
+ */
/*
 * Read all program headers from the core file via libelf, validating
 * that the core's class/type/machine match the kernel image already
 * examined (kd->nlehdr).  On success stores a malloc'ed GElf_Phdr
 * array in *phdrp (caller frees) and its length in *phnump, and
 * returns 0; returns -1 on error with the kvm error set.
 */
int
_kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp)
{
	GElf_Ehdr ehdr;
	GElf_Phdr *phdr;
	Elf *elf;
	size_t i, phnum;

	elf = elf_begin(kd->pmfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	/* The core must have the same ELF class as the kernel image. */
	if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getehdr(elf, &ehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}
	if (ehdr.e_type != ET_CORE) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	/* ... and the same machine type. */
	if (ehdr.e_machine != kd->nlehdr.e_machine) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}

	if (elf_getphdrnum(elf, &phnum) == -1) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}

	phdr = calloc(phnum, sizeof(*phdr));
	if (phdr == NULL) {
		_kvm_err(kd, kd->program, "failed to allocate phdrs");
		goto bad;
	}

	/* Copy the headers out; gelf converts to the native GElf form. */
	for (i = 0; i < phnum; i++) {
		if (gelf_getphdr(elf, i, &phdr[i]) == NULL) {
			free(phdr);
			_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
			goto bad;
		}
	}
	elf_end(elf);
	*phnump = phnum;
	*phdrp = phdr;
	return (0);

bad:
	elf_end(elf);
	return (-1);
}
+
+/*
+ * Transform v such that only bits [bit0, bitN) may be set. Generates a
+ * bitmask covering the number of bits, then shifts so +bit0+ is the first.
+ */
+static uint64_t
+bitmask_range(uint64_t v, uint64_t bit0, uint64_t bitN)
+{
+ if (bit0 == 0 && bitN == BITS_IN(v))
+ return (v);
+
+ return (v & (((1ULL << (bitN - bit0)) - 1ULL) << bit0));
+}
+
+/*
+ * Returns the number of bits in a given byte array range starting at a
+ * given base, from bit0 to bitN. bit0 may be non-zero in the case of
+ * counting backwards from bitN.
+ */
/*
 * Returns the number of bits in a given byte array range starting at a
 * given base, from bit0 to bitN. bit0 may be non-zero in the case of
 * counting backwards from bitN.
 */
static uint64_t
popcount_bytes(uint64_t *addr, uint32_t bit0, uint32_t bitN)
{
	uint32_t res = bitN - bit0;	/* bits remaining to count */
	uint64_t count = 0;
	uint32_t bound;

	/* Align to 64-bit boundary on the left side if needed. */
	if ((bit0 % BITS_IN(*addr)) != 0) {
		bound = MIN(bitN, roundup2(bit0, BITS_IN(*addr)));
		count += __bitcount64(bitmask_range(*addr, bit0, bound));
		res -= (bound - bit0);
		addr++;
	}

	/* Count whole (or final partial) 64-bit words. */
	while (res > 0) {
		bound = MIN(res, BITS_IN(*addr));
		count += __bitcount64(bitmask_range(*addr, 0, bound));
		res -= bound;
		addr++;
	}

	return (count);
}
+
+void *
+_kvm_pmap_get(kvm_t *kd, u_long idx, size_t len)
+{
+ uintptr_t off = idx * len;
+
+ if ((off_t)off >= kd->pt_sparse_off)
+ return (NULL);
+ return (void *)((uintptr_t)kd->page_map + off);
+}
+
+void *
+_kvm_map_get(kvm_t *kd, u_long pa, unsigned int page_size)
+{
+ off_t off;
+ uintptr_t addr;
+
+ off = _kvm_pt_find(kd, pa, page_size);
+ if (off == -1)
+ return NULL;
+
+ addr = (uintptr_t)kd->page_map + off;
+ if (off >= kd->pt_sparse_off)
+ addr = (uintptr_t)kd->sparse_map + (off - kd->pt_sparse_off);
+ return (void *)addr;
+}
+
+int
+_kvm_pt_init(kvm_t *kd, size_t dump_avail_size, off_t dump_avail_off,
+ size_t map_len, off_t map_off, off_t sparse_off, int page_size)
+{
+ uint64_t *addr;
+ uint32_t *popcount_bin;
+ int bin_popcounts = 0;
+ uint64_t pc_bins, res;
+ ssize_t rd;
+
+ kd->dump_avail_size = dump_avail_size;
+ if (dump_avail_size > 0) {
+ kd->dump_avail = mmap(NULL, kd->dump_avail_size, PROT_READ,
+ MAP_PRIVATE, kd->pmfd, dump_avail_off);
+ } else {
+ /*
+ * Older version minidumps don't provide dump_avail[],
+ * so the bitmap is fully populated from 0 to
+ * last_pa. Create an implied dump_avail that
+ * expresses this.
+ */
+ kd->dump_avail = calloc(4, sizeof(uint64_t));
+ kd->dump_avail[1] = _kvm64toh(kd, map_len * 8 * page_size);
+ }
+
+ /*
+ * Map the bitmap specified by the arguments.
+ */
+ kd->pt_map = _kvm_malloc(kd, map_len);
+ if (kd->pt_map == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate %zu bytes for bitmap",
+ map_len);
+ return (-1);
+ }
+ rd = pread(kd->pmfd, kd->pt_map, map_len, map_off);
+ if (rd < 0 || rd != (ssize_t)map_len) {
+ _kvm_err(kd, kd->program, "cannot read %zu bytes for bitmap",
+ map_len);
+ return (-1);
+ }
+ kd->pt_map_size = map_len;
+
+ /*
+ * Generate a popcount cache for every POPCOUNT_BITS in the bitmap,
+ * so lookups only have to calculate the number of bits set between
+ * a cache point and their bit. This reduces lookups to O(1),
+ * without significantly increasing memory requirements.
+ *
+ * Round up the number of bins so that 'upper half' lookups work for
+ * the final bin, if needed. The first popcount is 0, since no bits
+ * precede bit 0, so add 1 for that also. Without this, extra work
+ * would be needed to handle the first PTEs in _kvm_pt_find().
+ */
+ addr = kd->pt_map;
+ res = map_len;
+ pc_bins = 1 + (res * NBBY + POPCOUNT_BITS / 2) / POPCOUNT_BITS;
+ kd->pt_popcounts = calloc(pc_bins, sizeof(uint32_t));
+ if (kd->pt_popcounts == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate popcount bins");
+ return (-1);
+ }
+
+ for (popcount_bin = &kd->pt_popcounts[1]; res > 0;
+ addr++, res -= sizeof(*addr)) {
+ *popcount_bin += popcount_bytes(addr, 0,
+ MIN(res * NBBY, BITS_IN(*addr)));
+ if (++bin_popcounts == POPCOUNTS_IN(*addr)) {
+ popcount_bin++;
+ *popcount_bin = *(popcount_bin - 1);
+ bin_popcounts = 0;
+ }
+ }
+
+ assert(pc_bins * sizeof(*popcount_bin) ==
+ ((uintptr_t)popcount_bin - (uintptr_t)kd->pt_popcounts));
+
+ kd->pt_sparse_off = sparse_off;
+ kd->pt_sparse_size = (uint64_t)*popcount_bin * page_size;
+ kd->pt_page_size = page_size;
+
+ /*
+ * Map the sparse page array. This is useful for performing point
+ * lookups of specific pages, e.g. for kvm_walk_pages. Generally,
+ * this is much larger than is reasonable to read in up front, so
+ * mmap it in instead.
+ */
+ kd->sparse_map = mmap(NULL, kd->pt_sparse_size, PROT_READ,
+ MAP_PRIVATE, kd->pmfd, kd->pt_sparse_off);
+ if (kd->sparse_map == MAP_FAILED) {
+ _kvm_err(kd, kd->program, "cannot map %" PRIu64
+ " bytes from fd %d offset %jd for sparse map: %s",
+ kd->pt_sparse_size, kd->pmfd,
+ (intmax_t)kd->pt_sparse_off, strerror(errno));
+ return (-1);
+ }
+ return (0);
+}
+
+int
+_kvm_pmap_init(kvm_t *kd, uint32_t pmap_size, off_t pmap_off)
+{
+ ssize_t exp_len = pmap_size;
+
+ kd->page_map_size = pmap_size;
+ kd->page_map_off = pmap_off;
+ kd->page_map = _kvm_malloc(kd, pmap_size);
+ if (kd->page_map == NULL) {
+ _kvm_err(kd, kd->program, "cannot allocate %u bytes "
+ "for page map", pmap_size);
+ return (-1);
+ }
+ if (pread(kd->pmfd, kd->page_map, pmap_size, pmap_off) != exp_len) {
+ _kvm_err(kd, kd->program, "cannot read %d bytes from "
+ "offset %jd for page map", pmap_size, (intmax_t)pmap_off);
+ return (-1);
+ }
+ return (0);
+}
+
/* Fetch dump_avail[i], converting from the dump's byte order. */
static inline uint64_t
dump_avail_n(kvm_t *kd, long i)
{
	return (_kvm64toh(kd, kd->dump_avail[i]));
}
+
/*
 * Map a physical address to its bit index in the page bitmap, walking
 * the dump_avail (start, end) pairs; pages outside every range are
 * skipped and do not consume bitmap bits.  Returns
 * _KVM_BIT_ID_INVALID when pa is beyond all ranges.
 */
uint64_t
_kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size)
{
	uint64_t adj;
	long i;

	adj = 0;
	/* dump_avail is terminated by a zero end address. */
	for (i = 0; dump_avail_n(kd, i + 1) != 0; i += 2) {
		if (pa >= dump_avail_n(kd, i + 1)) {
			/* pa is past this range: accumulate its page count. */
			adj += howmany(dump_avail_n(kd, i + 1), page_size) -
			    dump_avail_n(kd, i) / page_size;
		} else {
			return (pa / page_size -
			    dump_avail_n(kd, i) / page_size + adj);
		}
	}
	return (_KVM_BIT_ID_INVALID);
}
+
/*
 * Inverse of _kvm_pa_bit_id(): map a bitmap bit index back to the
 * physical address of its page.  Returns _KVM_PA_INVALID when the bit
 * index exceeds the total page count described by dump_avail.
 */
uint64_t
_kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size)
{
	uint64_t sz;
	long i;

	for (i = 0; dump_avail_n(kd, i + 1) != 0; i += 2) {
		/* Number of pages covered by this dump_avail range. */
		sz = howmany(dump_avail_n(kd, i + 1), page_size) -
		    dump_avail_n(kd, i) / page_size;
		if (bit_id < sz) {
			return (rounddown2(dump_avail_n(kd, i), page_size) +
			    bit_id * page_size);
		}
		bit_id -= sz;
	}
	return (_KVM_PA_INVALID);
}
+
+/*
+ * Find the offset for the given physical page address; returns -1 otherwise.
+ *
+ * A page's offset is represented by the sparse page base offset plus the
+ * number of bits set before its bit multiplied by page size. This means
+ * that if a page exists in the dump, it's necessary to know how many pages
+ * in the dump precede it. Reduce this O(n) counting to O(1) by caching the
+ * number of bits set at POPCOUNT_BITS intervals.
+ *
+ * Then to find the number of pages before the requested address, simply
+ * index into the cache and count the number of bits set between that cache
+ * bin and the page's bit. Halve the number of bytes that have to be
+ * checked by also counting down from the next higher bin if it's closer.
+ */
off_t
_kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size)
{
	uint64_t *bitmap = kd->pt_map;
	uint64_t pte_bit_id = _kvm_pa_bit_id(kd, pa, page_size);
	uint64_t pte_u64 = pte_bit_id / BITS_IN(*bitmap);
	uint64_t popcount_id = pte_bit_id / POPCOUNT_BITS;
	uint64_t pte_mask = 1ULL << (pte_bit_id % BITS_IN(*bitmap));
	uint64_t bitN;
	uint32_t count;

	/* Check whether the page address requested is in the dump. */
	if (pte_bit_id == _KVM_BIT_ID_INVALID ||
	    pte_bit_id >= (kd->pt_map_size * NBBY) ||
	    (bitmap[pte_u64] & pte_mask) == 0)
		return (-1);

	/*
	 * Add/sub popcounts from the bitmap until the PTE's bit is reached.
	 * For bits that are in the upper half between the calculated
	 * popcount id and the next one, use the next one and subtract to
	 * minimize the number of popcounts required.
	 */
	if ((pte_bit_id % POPCOUNT_BITS) < (POPCOUNT_BITS / 2)) {
		/* Lower half: count forward from the cached bin. */
		count = kd->pt_popcounts[popcount_id] + popcount_bytes(
		    bitmap + popcount_id * POPCOUNTS_IN(*bitmap),
		    0, pte_bit_id - popcount_id * POPCOUNT_BITS);
	} else {
		/*
		 * Counting in reverse is trickier, since we must avoid
		 * reading from bytes that are not in range, and invert.
		 */
		uint64_t pte_u64_bit_off = pte_u64 * BITS_IN(*bitmap);

		popcount_id++;
		/* Clamp to the end of the bitmap for the final bin. */
		bitN = MIN(popcount_id * POPCOUNT_BITS,
		    kd->pt_map_size * BITS_IN(uint8_t));
		count = kd->pt_popcounts[popcount_id] - popcount_bytes(
		    bitmap + pte_u64,
		    pte_bit_id - pte_u64_bit_off, bitN - pte_u64_bit_off);
	}

	/*
	 * This can only happen if the core is truncated. Treat these
	 * entries as if they don't exist, since their backing doesn't.
	 */
	if (count >= (kd->pt_sparse_size / page_size))
		return (-1);

	return (kd->pt_sparse_off + (uint64_t)count * page_size);
}
+
+static int
+kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list)
+{
+ kvaddr_t addr;
+ int error, nfail;
+
+ if (kd->resolve_symbol == NULL) {
+ struct nlist *nl;
+ int count, i;
+
+ for (count = 0; list[count].n_name != NULL &&
+ list[count].n_name[0] != '\0'; count++)
+ ;
+ nl = calloc(count + 1, sizeof(*nl));
+ for (i = 0; i < count; i++)
+ nl[i].n_name = list[i].n_name;
+ nfail = __fdnlist(kd->nlfd, nl);
+ for (i = 0; i < count; i++) {
+ list[i].n_type = nl[i].n_type;
+ list[i].n_value = nl[i].n_value;
+ }
+ free(nl);
+ return (nfail);
+ }
+
+ nfail = 0;
+ while (list->n_name != NULL && list->n_name[0] != '\0') {
+ error = kd->resolve_symbol(list->n_name, &addr);
+ if (error != 0) {
+ nfail++;
+ list->n_value = 0;
+ list->n_type = 0;
+ } else {
+ list->n_value = addr;
+ list->n_type = N_DATA | N_EXT;
+ }
+ list++;
+ }
+ return (nfail);
+}
+
+/*
+ * Walk the list of unresolved symbols, generate a new list and prefix the
+ * symbol names, try again, and merge back what we could resolve.
+ */
+static int
+kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing,
+ const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t))
+{
+ struct kvm_nlist *n, *np, *p;
+ char *cp, *ce;
+ const char *ccp;
+ size_t len;
+ int slen, unresolved;
+
+ /*
+ * Calculate the space we need to malloc for nlist and names.
+ * We are going to store the name twice for later lookups: once
+ * with the prefix and once the unmodified name delmited by \0.
+ */
+ len = 0;
+ unresolved = 0;
+ for (p = nl; p->n_name && p->n_name[0]; ++p) {
+ if (p->n_type != N_UNDF)
+ continue;
+ len += sizeof(struct kvm_nlist) + strlen(prefix) +
+ 2 * (strlen(p->n_name) + 1);
+ unresolved++;
+ }
+ if (unresolved == 0)
+ return (unresolved);
+ /* Add space for the terminating nlist entry. */
+ len += sizeof(struct kvm_nlist);
+ unresolved++;
+
+ /* Alloc one chunk for (nlist, [names]) and setup pointers. */
+ n = np = malloc(len);
+ bzero(n, len);
+ if (n == NULL)
+ return (missing);
+ cp = ce = (char *)np;
+ cp += unresolved * sizeof(struct kvm_nlist);
+ ce += len;
+
+ /* Generate shortened nlist with special prefix. */
+ unresolved = 0;
+ for (p = nl; p->n_name && p->n_name[0]; ++p) {
+ if (p->n_type != N_UNDF)
+ continue;
+ *np = *p;
+ /* Save the new\0orig. name so we can later match it again. */
+ slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix,
+ (prefix[0] != '\0' && p->n_name[0] == '_') ?
+ (p->n_name + 1) : p->n_name, '\0', p->n_name);
+ if (slen < 0 || slen >= ce - cp)
+ continue;
+ np->n_name = cp;
+ cp += slen + 1;
+ np++;
+ unresolved++;
+ }
+
+ /* Do lookup on the reduced list. */
+ np = n;
+ unresolved = kvm_fdnlist(kd, np);
+
+ /* Check if we could resolve further symbols and update the list. */
+ if (unresolved >= 0 && unresolved < missing) {
+ /* Find the first freshly resolved entry. */
+ for (; np->n_name && np->n_name[0]; np++)
+ if (np->n_type != N_UNDF)
+ break;
+ /*
+ * The lists are both in the same order,
+ * so we can walk them in parallel.
+ */
+ for (p = nl; np->n_name && np->n_name[0] &&
+ p->n_name && p->n_name[0]; ++p) {
+ if (p->n_type != N_UNDF)
+ continue;
+ /* Skip expanded name and compare to orig. one. */
+ ccp = np->n_name + strlen(np->n_name) + 1;
+ if (strcmp(ccp, p->n_name) != 0)
+ continue;
+ /* Update nlist with new, translated results. */
+ p->n_type = np->n_type;
+ if (validate_fn)
+ p->n_value = (*validate_fn)(kd, np->n_value);
+ else
+ p->n_value = np->n_value;
+ missing--;
+ /* Find next freshly resolved entry. */
+ for (np++; np->n_name && np->n_name[0]; np++)
+ if (np->n_type != N_UNDF)
+ break;
+ }
+ }
+ /* We could assert missing = unresolved here. */
+
+ free(n);
+ return (unresolved);
+}
+
/*
 * Resolve the symbol list +nl+ either against the kernel image (dead
 * kernel) or via kldsym(2) (live kernel), retrying unresolved entries
 * with the VNET and DPCPU symbol prefixes when those subsystems are
 * initialized.  Returns the number of entries left unresolved.
 */
int
_kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize)
{
	struct kvm_nlist *p;
	int nvalid;
	struct kld_sym_lookup lookup;
	int error;
	const char *prefix = "";
	char symname[1024]; /* XXX-BZ symbol name length limit? */
	int tried_vnet, tried_dpcpu;

	/*
	 * If we can't use the kld symbol lookup, revert to the
	 * slow library call.
	 */
	if (!ISALIVE(kd)) {
		error = kvm_fdnlist(kd, nl);
		if (error <= 0)			/* Hard error or success. */
			return (error);

		/* Retry leftovers with the VNET prefix, then DPCPU. */
		if (_kvm_vnet_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    VNET_SYMPREFIX, _kvm_vnet_validaddr);

		if (error > 0 && _kvm_dpcpu_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    DPCPU_SYMPREFIX, _kvm_dpcpu_validaddr);

		return (error);
	}

	/*
	 * We can use the kld lookup syscall. Go through each nlist entry
	 * and look it up with a kldsym(2) syscall.
	 */
	nvalid = 0;
	tried_vnet = 0;
	tried_dpcpu = 0;
again:
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		/* Only retry entries still unresolved. */
		if (p->n_type != N_UNDF)
			continue;

		lookup.version = sizeof(lookup);
		lookup.symvalue = 0;
		lookup.symsize = 0;

		error = snprintf(symname, sizeof(symname), "%s%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
			(p->n_name + 1) : p->n_name);
		if (error < 0 || error >= (int)sizeof(symname))
			continue;
		lookup.symname = symname;
		/* kldsym(2) takes names without the leading underscore. */
		if (lookup.symname[0] == '_')
			lookup.symname++;

		if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) {
			p->n_type = N_TEXT;
			/* Translate per-prefix: VNET/DPCPU need fixup. */
			if (_kvm_vnet_initialized(kd, initialize) &&
			    strcmp(prefix, VNET_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_vnet_validaddr(kd, lookup.symvalue);
			else if (_kvm_dpcpu_initialized(kd, initialize) &&
			    strcmp(prefix, DPCPU_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_dpcpu_validaddr(kd, lookup.symvalue);
			else
				p->n_value = lookup.symvalue;
			++nvalid;
			/* lookup.symsize */
		}
	}

	/*
	 * Check the number of entries that weren't found. If they exist,
	 * try again with a prefix for virtualized or DPCPU symbol names.
	 */
	error = ((p - nl) - nvalid);
	if (error && _kvm_vnet_initialized(kd, initialize) && !tried_vnet) {
		tried_vnet = 1;
		prefix = VNET_SYMPREFIX;
		goto again;
	}
	if (error && _kvm_dpcpu_initialized(kd, initialize) && !tried_dpcpu) {
		tried_dpcpu = 1;
		prefix = DPCPU_SYMPREFIX;
		goto again;
	}

	/*
	 * Return the number of entries that weren't found. If they exist,
	 * also fill internal error buffer.
	 */
	error = ((p - nl) - nvalid);
	if (error)
		_kvm_syserr(kd, kd->program, "kvm_nlist");
	return (error);
}
+
+int
+_kvm_bitmap_init(struct kvm_bitmap *bm, u_long bitmapsize, u_long *idx)
+{
+
+ *idx = ULONG_MAX;
+ bm->map = calloc(bitmapsize, sizeof *bm->map);
+ if (bm->map == NULL)
+ return (0);
+ bm->size = bitmapsize;
+ return (1);
+}
+
+void
+_kvm_bitmap_set(struct kvm_bitmap *bm, u_long bm_index)
+{
+ uint8_t *byte = &bm->map[bm_index / 8];
+
+ if (bm_index / 8 < bm->size)
+ *byte |= (1UL << (bm_index % 8));
+}
+
+int
+_kvm_bitmap_next(struct kvm_bitmap *bm, u_long *idx)
+{
+ u_long first_invalid = bm->size * CHAR_BIT;
+
+ if (*idx == ULONG_MAX)
+ *idx = 0;
+ else
+ (*idx)++;
+
+ /* Find the next valid idx. */
+ for (; *idx < first_invalid; (*idx)++) {
+ unsigned int mask = 1U << (*idx % CHAR_BIT);
+ if ((bm->map[*idx / CHAR_BIT] & mask) != 0)
+ break;
+ }
+
+ return (*idx < first_invalid);
+}
+
/* Release the bitmap storage; free(NULL) is a no-op, so always safe. */
void
_kvm_bitmap_deinit(struct kvm_bitmap *bm)
{

	free(bm->map);
}
+
+int
+_kvm_visit_cb(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg, u_long pa,
+ u_long kmap_vaddr, u_long dmap_vaddr, vm_prot_t prot, size_t len,
+ unsigned int page_size)
+{
+ unsigned int pgsz = page_size ? page_size : len;
+ struct kvm_page p = {
+ .kp_version = LIBKVM_WALK_PAGES_VERSION,
+ .kp_paddr = pa,
+ .kp_kmap_vaddr = kmap_vaddr,
+ .kp_dmap_vaddr = dmap_vaddr,
+ .kp_prot = prot,
+ .kp_offset = _kvm_pt_find(kd, pa, pgsz),
+ .kp_len = len,
+ };
+
+ return cb(&p, arg);
+}
diff --git a/lib/libkvm/kvm_private.h b/lib/libkvm/kvm_private.h
new file mode 100644
index 000000000000..0ab16dc58755
--- /dev/null
+++ b/lib/libkvm/kvm_private.h
@@ -0,0 +1,195 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/endian.h>
+#include <sys/linker_set.h>
+#include <gelf.h>
+
+/*
+ * Per-architecture backend operations; each backend registers one of
+ * these in the "kvm_arch" linker set via KVM_ARCH() below.
+ */
+struct kvm_arch {
+	int (*ka_probe)(kvm_t *);	/* does this backend handle the image? */
+	int (*ka_initvtop)(kvm_t *);	/* set up VA translation state */
+	void (*ka_freevtop)(kvm_t *);	/* tear down VA translation state */
+	int (*ka_kvatop)(kvm_t *, kvaddr_t, off_t *);	/* kernel VA -> offset */
+	int (*ka_native)(kvm_t *);	/* image matches host architecture? */
+	int (*ka_walk_pages)(kvm_t *, kvm_walk_pages_cb_t *, void *);
+	kssize_t (*ka_kerndisp)(kvm_t *);	/* kernel displacement */
+};
+
+#define KVM_ARCH(ka) DATA_SET(kvm_arch, ka)
+
+/* Internal state behind the opaque kvm_t handle. */
+struct __kvm {
+	struct kvm_arch *arch;	/* backend ops from the kvm_arch set */
+	/*
+	 * a string to be prepended to error messages
+	 * provided for compatibility with sun's interface
+	 * if this value is null, errors are saved in errbuf[]
+	 */
+	const char *program;
+	char	*errp;		/* XXX this can probably go away */
+	char	errbuf[_POSIX2_LINE_MAX];
+#define ISALIVE(kd) ((kd)->vmfd >= 0)
+	int	pmfd;		/* physical memory file (or crashdump) */
+	int	vmfd;		/* virtual memory file (-1 if crashdump) */
+	int	nlfd;		/* namelist file (e.g., /kernel) */
+	GElf_Ehdr	nlehdr;		/* ELF file header for namelist file */
+	int	(*resolve_symbol)(const char *, kvaddr_t *);	/* name->addr hook */
+	struct kinfo_proc *procbase;	/* array filled by kvm_getprocs() */
+	char	*argspc;	/* (dynamic) storage for argv strings */
+	int	arglen;		/* length of the above */
+	char	**argv;		/* (dynamic) storage for argv pointers */
+	int	argc;		/* length of above (not actual # present) */
+	char	*argbuf;	/* (dynamic) temporary storage */
+	/*
+	 * Kernel virtual address translation state.  This only gets filled
+	 * in for dead kernels; otherwise, the running kernel (i.e. kmem)
+	 * will do the translations for us.  It could be big, so we
+	 * only allocate it if necessary.
+	 */
+	struct vmstate *vmst;
+	int	rawdump;	/* raw dump format */
+	int	writable;	/* physical memory is writable */
+
+	int	vnet_initialized;	/* vnet fields set up */
+	kvaddr_t	vnet_start;	/* start of kernel's vnet region */
+	kvaddr_t	vnet_stop;	/* stop of kernel's vnet region */
+	kvaddr_t	vnet_current;	/* vnet we're working with */
+	kvaddr_t	vnet_base;	/* vnet base of current vnet */
+
+	/*
+	 * Dynamic per-CPU kernel memory.  We translate symbols, on-demand,
+	 * to the data associated with dpcpu_curcpu, set with
+	 * kvm_dpcpu_setcpu().
+	 */
+	int	dpcpu_initialized;	/* dpcpu fields set up */
+	kvaddr_t	dpcpu_start;	/* start of kernel's dpcpu region */
+	kvaddr_t	dpcpu_stop;	/* stop of kernel's dpcpu region */
+	u_int	dpcpu_maxcpus;	/* size of base array */
+	uintptr_t	*dpcpu_off;	/* base array, indexed by CPU ID */
+	u_int	dpcpu_curcpu;	/* CPU we're currently working with */
+	kvaddr_t	dpcpu_curoff;	/* dpcpu base of current CPU */
+
+	/* Page table lookup structures. */
+	uint64_t	*pt_map;
+	size_t	pt_map_size;
+	uint64_t	*dump_avail;	/* actually word sized */
+	size_t	dump_avail_size;
+	off_t	pt_sparse_off;
+	uint64_t	pt_sparse_size;
+	uint32_t	*pt_popcounts;
+	unsigned int	pt_page_size;
+
+	/* Page & sparse map structures. */
+	void		*page_map;
+	uint32_t	page_map_size;
+	off_t		page_map_off;
+	void		*sparse_map;
+};
+
+/* Byte-array bitmap manipulated by the _kvm_bitmap_*() helpers below. */
+struct kvm_bitmap {
+	uint8_t *map;	/* backing storage, one bit per index */
+	u_long size;	/* size of map in bytes */
+};
+
+/* Page table lookup constants. */
+#define POPCOUNT_BITS 1024
+#define BITS_IN(v) (sizeof(v) * NBBY)
+#define POPCOUNTS_IN(v) (POPCOUNT_BITS / BITS_IN(v))
+
+/*
+ * Functions used internally by kvm, but across kvm modules.
+ */
+/* Convert a 16-bit value from the namelist file's byte order to host order. */
+static inline uint16_t
+_kvm16toh(kvm_t *kd, uint16_t val)
+{
+	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB ?
+	    le16toh(val) : be16toh(val));
+}
+
+/* Convert a 32-bit value from the namelist file's byte order to host order. */
+static inline uint32_t
+_kvm32toh(kvm_t *kd, uint32_t val)
+{
+	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB ?
+	    le32toh(val) : be32toh(val));
+}
+
+/* Convert a 64-bit value from the namelist file's byte order to host order. */
+static inline uint64_t
+_kvm64toh(kvm_t *kd, uint64_t val)
+{
+	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB ?
+	    le64toh(val) : be64toh(val));
+}
+
+uint64_t _kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size);
+uint64_t _kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size);
+#define _KVM_PA_INVALID ULONG_MAX
+#define _KVM_BIT_ID_INVALID ULONG_MAX
+
+int _kvm_bitmap_init(struct kvm_bitmap *, u_long, u_long *);
+void _kvm_bitmap_set(struct kvm_bitmap *, u_long);
+int _kvm_bitmap_next(struct kvm_bitmap *, u_long *);
+void _kvm_bitmap_deinit(struct kvm_bitmap *);
+
+void _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
+ __printflike(3, 4);
+void _kvm_freeprocs(kvm_t *kd);
+void *_kvm_malloc(kvm_t *kd, size_t);
+int _kvm_nlist(kvm_t *, struct kvm_nlist *, int);
+void *_kvm_realloc(kvm_t *kd, void *, size_t);
+void _kvm_syserr (kvm_t *kd, const char *program, const char *fmt, ...)
+ __printflike(3, 4);
+int _kvm_vnet_selectpid(kvm_t *, pid_t);
+int _kvm_vnet_initialized(kvm_t *, int);
+kvaddr_t _kvm_vnet_validaddr(kvm_t *, kvaddr_t);
+int _kvm_dpcpu_initialized(kvm_t *, int);
+kvaddr_t _kvm_dpcpu_validaddr(kvm_t *, kvaddr_t);
+int _kvm_probe_elf_kernel(kvm_t *, int, int);
+int _kvm_is_minidump(kvm_t *);
+int _kvm_read_core_phdrs(kvm_t *, size_t *, GElf_Phdr **);
+int _kvm_pt_init(kvm_t *, size_t, off_t, size_t, off_t, off_t, int);
+off_t _kvm_pt_find(kvm_t *, uint64_t, unsigned int);
+int _kvm_visit_cb(kvm_t *, kvm_walk_pages_cb_t *, void *, u_long,
+ u_long, u_long, vm_prot_t, size_t, unsigned int);
+int _kvm_pmap_init(kvm_t *, uint32_t, off_t);
+void * _kvm_pmap_get(kvm_t *, u_long, size_t);
+void * _kvm_map_get(kvm_t *, u_long, unsigned int);
diff --git a/lib/libkvm/kvm_proc.c b/lib/libkvm/kvm_proc.c
new file mode 100644
index 000000000000..fed483978e62
--- /dev/null
+++ b/lib/libkvm/kvm_proc.c
@@ -0,0 +1,783 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+/*
+ * Proc traversal interface for kvm. ps and w are (probably) the exclusive
+ * users of this code, so we've factored it out into a separate module.
+ * Thus, we keep this grunge out of the other kvm applications (i.e.,
+ * most other applications are interested only in open/close/read/nlist).
+ */
+
+#include <sys/param.h>
+#define _WANT_UCRED /* make ucred.h give us 'struct ucred' */
+#include <sys/ucred.h>
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <sys/_task.h>
+#include <sys/cpuset.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#define _WANT_PRISON /* make jail.h give us 'struct prison' */
+#include <sys/jail.h>
+#include <sys/exec.h>
+#include <sys/stat.h>
+#include <sys/sysent.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/file.h>
+#include <sys/conf.h>
+#define _WANT_KW_EXITCODE
+#include <sys/wait.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <sys/sysctl.h>
+
+#include <limits.h>
+#include <memory.h>
+#include <paths.h>
+
+#include "kvm_private.h"
+
+/*
+ * Read a fixed-size object from the kernel image.  Evaluates to nonzero
+ * (failure) when the full object could not be read.
+ */
+#define KREAD(kd, addr, obj) \
+	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
+
+/* Kernel globals snapshotted from the core by kvm_getprocs() (dead path). */
+static int ticks;
+static int hz;
+static uint64_t cpu_tick_frequency;
+
+/*
+ * From sys/kern/kern_tc.c. Depends on cpu_tick_frequency, which is
+ * read/initialized before this function is ever called.
+ */
+/*
+ * Convert raw cputick counts to microseconds, splitting the computation
+ * into whole and fractional parts to avoid 64-bit overflow; mirrors
+ * cputick2usec() in sys/kern/kern_tc.c.  Requires cpu_tick_frequency to
+ * have been read from the core beforehand; returns 0 when it is unset.
+ */
+static uint64_t
+cputick2usec(uint64_t tick)
+{
+	uint64_t whole, frac;
+
+	if (cpu_tick_frequency == 0)
+		return (0);
+	whole = (tick / cpu_tick_frequency) * 1000000ULL;
+	frac = ((tick % cpu_tick_frequency) * 1000000ULL) /
+	    cpu_tick_frequency;
+	return (whole + frac);
+}
+
+/*
+ * Read proc's from memory file into buffer bp, which has space to hold
+ * at most maxcnt procs.
+ */
+/*
+ * Core of the dead-kernel path: starting at p, walk the kernel's proc
+ * list, filter by (what, arg), and fill bp with at most maxcnt
+ * kinfo_proc entries (one per thread when KERN_PROC_INC_THREAD is set).
+ * Returns the number of entries written, or -1 on a read error.
+ */
+static int
+kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
+    struct kinfo_proc *bp, int maxcnt)
+{
+	int cnt = 0;
+	struct kinfo_proc kinfo_proc, *kp;
+	struct pgrp pgrp;
+	struct session sess;
+	struct cdev t_cdev;
+	struct tty tty;
+	struct vmspace vmspace;
+	struct sigacts sigacts;
+#if 0
+	struct pstats pstats;
+#endif
+	struct ucred ucred;
+	struct prison pr;
+	struct thread mtd;
+	struct proc proc;
+	struct proc pproc;
+	struct sysentvec sysent;
+	char svname[KI_EMULNAMELEN];
+	struct thread *td = NULL;
+	bool first_thread;
+
+	kp = &kinfo_proc;
+	kp->ki_structsize = sizeof(kinfo_proc);
+	/*
+	 * Loop on the processes, then threads within the process if requested.
+	 */
+	if (what == KERN_PROC_ALL)
+		what |= KERN_PROC_INC_THREAD;
+	for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
+		memset(kp, 0, sizeof *kp);
+		if (KREAD(kd, (u_long)p, &proc)) {
+			_kvm_err(kd, kd->program, "can't read proc at %p", p);
+			return (-1);
+		}
+		/* Skip processes still being constructed. */
+		if (proc.p_state == PRS_NEW)
+			continue;
+		/* Credential-derived fields are filled best effort. */
+		if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
+			kp->ki_uid = ucred.cr_uid;
+			kp->ki_ruid = ucred.cr_ruid;
+			kp->ki_svuid = ucred.cr_svuid;
+			kp->ki_rgid = ucred.cr_rgid;
+			kp->ki_svgid = ucred.cr_svgid;
+			kp->ki_cr_flags = 0;
+			if (ucred.cr_flags & CRED_FLAG_CAPMODE)
+				kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE;
+			if (1 + ucred.cr_ngroups > KI_NGROUPS) {
+				kp->ki_ngroups = KI_NGROUPS;
+				kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
+			} else
+				kp->ki_ngroups = 1 + ucred.cr_ngroups;
+			kp->ki_groups[0] = ucred.cr_gid;
+			kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups + 1,
+			    (kp->ki_ngroups - 1) * sizeof(gid_t));
+			if (ucred.cr_prison != NULL) {
+				if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) {
+					_kvm_err(kd, kd->program,
+					    "can't read prison at %p",
+					    ucred.cr_prison);
+					return (-1);
+				}
+				kp->ki_jid = pr.pr_id;
+			}
+		}
+
+		/* Filters that need only proc and credential data. */
+		switch(what & ~KERN_PROC_INC_THREAD) {
+
+		case KERN_PROC_GID:
+			if (kp->ki_groups[0] != (gid_t)arg)
+				continue;
+			break;
+
+		case KERN_PROC_PID:
+			if (proc.p_pid != (pid_t)arg)
+				continue;
+			break;
+
+		case KERN_PROC_RGID:
+			if (kp->ki_rgid != (gid_t)arg)
+				continue;
+			break;
+
+		case KERN_PROC_UID:
+			if (kp->ki_uid != (uid_t)arg)
+				continue;
+			break;
+
+		case KERN_PROC_RUID:
+			if (kp->ki_ruid != (uid_t)arg)
+				continue;
+			break;
+		}
+		/*
+		 * We're going to add another proc to the set.  If this
+		 * will overflow the buffer, assume the reason is because
+		 * nprocs (or the proc list) is corrupt and declare an error.
+		 */
+		if (cnt >= maxcnt) {
+			_kvm_err(kd, kd->program, "nprocs corrupt");
+			return (-1);
+		}
+		/*
+		 * gather kinfo_proc
+		 */
+		kp->ki_paddr = p;
+		kp->ki_addr = 0;	/* XXX uarea */
+		/* kp->ki_kstack = proc.p_thread.td_kstack; XXXKSE */
+		kp->ki_args = proc.p_args;
+		kp->ki_numthreads = proc.p_numthreads;
+		kp->ki_tracep = NULL;	/* XXXKIB do not expose ktr_io_params */
+		kp->ki_textvp = proc.p_textvp;
+		kp->ki_fd = proc.p_fd;
+		kp->ki_pd = proc.p_pd;
+		kp->ki_vmspace = proc.p_vmspace;
+		if (proc.p_sigacts != NULL) {
+			if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) {
+				_kvm_err(kd, kd->program,
+				    "can't read sigacts at %p", proc.p_sigacts);
+				return (-1);
+			}
+			kp->ki_sigignore = sigacts.ps_sigignore;
+			kp->ki_sigcatch = sigacts.ps_sigcatch;
+		}
+#if 0
+		if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
+			if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
+				_kvm_err(kd, kd->program,
+				    "can't read stats at %x", proc.p_stats);
+				return (-1);
+			}
+			kp->ki_start = pstats.p_start;
+
+			/*
+			 * XXX: The times here are probably zero and need
+			 * to be calculated from the raw data in p_rux and
+			 * p_crux.
+			 */
+			kp->ki_rusage = pstats.p_ru;
+			kp->ki_childstime = pstats.p_cru.ru_stime;
+			kp->ki_childutime = pstats.p_cru.ru_utime;
+			/* Some callers want child-times in a single value */
+			timeradd(&kp->ki_childstime, &kp->ki_childutime,
+			    &kp->ki_childtime);
+		}
+#endif
+		if (proc.p_oppid)
+			kp->ki_ppid = proc.p_oppid;
+		else if (proc.p_pptr) {
+			if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) {
+				_kvm_err(kd, kd->program,
+				    "can't read pproc at %p", proc.p_pptr);
+				return (-1);
+			}
+			kp->ki_ppid = pproc.p_pid;
+		} else
+			kp->ki_ppid = 0;
+		if (proc.p_pgrp == NULL)
+			goto nopgrp;
+		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
+			_kvm_err(kd, kd->program, "can't read pgrp at %p",
+			    proc.p_pgrp);
+			return (-1);
+		}
+		kp->ki_pgid = pgrp.pg_id;
+		kp->ki_jobc = -1;	/* Or calculate?  Arguably not. */
+		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
+			_kvm_err(kd, kd->program, "can't read session at %p",
+			    pgrp.pg_session);
+			return (-1);
+		}
+		kp->ki_sid = sess.s_sid;
+		(void)memcpy(kp->ki_login, sess.s_login,
+		    sizeof(kp->ki_login));
+		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
+			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
+				_kvm_err(kd, kd->program,
+				    "can't read tty at %p", sess.s_ttyp);
+				return (-1);
+			}
+			if (tty.t_dev != NULL) {
+				if (KREAD(kd, (u_long)tty.t_dev, &t_cdev)) {
+					_kvm_err(kd, kd->program,
+					    "can't read cdev at %p",
+					    tty.t_dev);
+					return (-1);
+				}
+#if 0
+				kp->ki_tdev = t_cdev.si_udev;
+#else
+				kp->ki_tdev = NODEV;
+#endif
+			}
+			if (tty.t_pgrp != NULL) {
+				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
+					_kvm_err(kd, kd->program,
+					    "can't read tpgrp at %p",
+					    tty.t_pgrp);
+					return (-1);
+				}
+				kp->ki_tpgid = pgrp.pg_id;
+			} else
+				kp->ki_tpgid = -1;
+			if (tty.t_session != NULL) {
+				if (KREAD(kd, (u_long)tty.t_session, &sess)) {
+					_kvm_err(kd, kd->program,
+					    "can't read session at %p",
+					    tty.t_session);
+					return (-1);
+				}
+				kp->ki_tsid = sess.s_sid;
+			}
+		} else {
+nopgrp:
+			kp->ki_tdev = NODEV;
+		}
+
+		(void)kvm_read(kd, (u_long)proc.p_vmspace,
+		    (char *)&vmspace, sizeof(vmspace));
+		kp->ki_size = vmspace.vm_map.size;
+		/*
+		 * Approximate the kernel's method of calculating
+		 * this field.
+		 */
+#define pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
+		kp->ki_rssize = pmap_resident_count(&vmspace.vm_pmap);
+		kp->ki_swrss = vmspace.vm_swrss;
+		kp->ki_tsize = vmspace.vm_tsize;
+		kp->ki_dsize = vmspace.vm_dsize;
+		kp->ki_ssize = vmspace.vm_ssize;
+
+		/* Filters that need the pgrp/session/tty data read above. */
+		switch (what & ~KERN_PROC_INC_THREAD) {
+
+		case KERN_PROC_PGRP:
+			if (kp->ki_pgid != (pid_t)arg)
+				continue;
+			break;
+
+		case KERN_PROC_SESSION:
+			if (kp->ki_sid != (pid_t)arg)
+				continue;
+			break;
+
+		case KERN_PROC_TTY:
+			if ((proc.p_flag & P_CONTROLT) == 0 ||
+			    kp->ki_tdev != (dev_t)arg)
+				continue;
+			break;
+		}
+		if (proc.p_comm[0] != 0)
+			strlcpy(kp->ki_comm, proc.p_comm, MAXCOMLEN);
+		(void)kvm_read(kd, (u_long)proc.p_sysent, (char *)&sysent,
+		    sizeof(sysent));
+		(void)kvm_read(kd, (u_long)sysent.sv_name, (char *)&svname,
+		    sizeof(svname));
+		if (svname[0] != 0)
+			strlcpy(kp->ki_emul, svname, KI_EMULNAMELEN);
+		kp->ki_runtime = cputick2usec(proc.p_rux.rux_runtime);
+		kp->ki_pid = proc.p_pid;
+		kp->ki_xstat = KW_EXITCODE(proc.p_xexit, proc.p_xsig);
+		kp->ki_acflag = proc.p_acflag;
+		kp->ki_lock = proc.p_lock;
+		kp->ki_tdev_freebsd11 = kp->ki_tdev;	/* truncate */
+
+		/* Per-thread items; iterate as appropriate. */
+		td = TAILQ_FIRST(&proc.p_threads);
+		for (first_thread = true; cnt < maxcnt && td != NULL &&
+		    (first_thread || (what & KERN_PROC_INC_THREAD));
+		    first_thread = false) {
+			if (proc.p_state != PRS_ZOMBIE) {
+				if (KREAD(kd, (u_long)td, &mtd)) {
+					_kvm_err(kd, kd->program,
+					    "can't read thread at %p", td);
+					return (-1);
+				}
+				if (what & KERN_PROC_INC_THREAD)
+					td = TAILQ_NEXT(&mtd, td_plist);
+			} else
+				td = NULL;
+			if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg)
+				(void)kvm_read(kd, (u_long)mtd.td_wmesg,
+				    kp->ki_wmesg, WMESGLEN);
+			else
+				memset(kp->ki_wmesg, 0, WMESGLEN);
+			if (proc.p_pgrp == NULL) {
+				kp->ki_kiflag = 0;
+			} else {
+				kp->ki_kiflag = sess.s_ttyvp ? KI_CTTY : 0;
+				if (sess.s_leader == p)
+					kp->ki_kiflag |= KI_SLEADER;
+			}
+			if ((proc.p_state != PRS_ZOMBIE) &&
+			    (mtd.td_blocked != 0)) {
+				kp->ki_kiflag |= KI_LOCKBLOCK;
+				if (mtd.td_lockname)
+					(void)kvm_read(kd,
+					    (u_long)mtd.td_lockname,
+					    kp->ki_lockname, LOCKNAMELEN);
+				else
+					memset(kp->ki_lockname, 0,
+					    LOCKNAMELEN);
+				kp->ki_lockname[LOCKNAMELEN] = 0;
+			} else
+				kp->ki_kiflag &= ~KI_LOCKBLOCK;
+			kp->ki_siglist = proc.p_siglist;
+			if (proc.p_state != PRS_ZOMBIE) {
+				SIGSETOR(kp->ki_siglist, mtd.td_siglist);
+				kp->ki_sigmask = mtd.td_sigmask;
+				kp->ki_swtime = (ticks - proc.p_swtick) / hz;
+				kp->ki_flag = proc.p_flag;
+				kp->ki_sflag = 0;
+				kp->ki_nice = proc.p_nice;
+				kp->ki_traceflag = proc.p_traceflag;
+				if (proc.p_state == PRS_NORMAL) {
+					if (TD_ON_RUNQ(&mtd) ||
+					    TD_CAN_RUN(&mtd) ||
+					    TD_IS_RUNNING(&mtd)) {
+						kp->ki_stat = SRUN;
+					} else if (TD_GET_STATE(&mtd) ==
+					    TDS_INHIBITED) {
+						if (P_SHOULDSTOP(&proc)) {
+							kp->ki_stat = SSTOP;
+						} else if (
+						    TD_IS_SLEEPING(&mtd)) {
+							kp->ki_stat = SSLEEP;
+						} else if (TD_ON_LOCK(&mtd)) {
+							kp->ki_stat = SLOCK;
+						} else {
+							kp->ki_stat = SWAIT;
+						}
+					}
+				} else {
+					kp->ki_stat = SIDL;
+				}
+				/* Stuff from the thread */
+				kp->ki_pri.pri_level = mtd.td_priority;
+				kp->ki_pri.pri_native = mtd.td_base_pri;
+				kp->ki_lastcpu = mtd.td_lastcpu;
+				kp->ki_wchan = mtd.td_wchan;
+				kp->ki_oncpu = mtd.td_oncpu;
+				if (mtd.td_name[0] != '\0')
+					strlcpy(kp->ki_tdname, mtd.td_name,
+					    sizeof(kp->ki_tdname));
+				else
+					memset(kp->ki_tdname, 0,
+					    sizeof(kp->ki_tdname));
+				kp->ki_pctcpu = 0;
+				kp->ki_rqindex = 0;
+
+				/*
+				 * Note: legacy fields; wraps at NO_CPU_OLD
+				 * or the old max CPU value as appropriate
+				 */
+				if (mtd.td_lastcpu == NOCPU)
+					kp->ki_lastcpu_old = NOCPU_OLD;
+				else if (mtd.td_lastcpu > MAXCPU_OLD)
+					kp->ki_lastcpu_old = MAXCPU_OLD;
+				else
+					kp->ki_lastcpu_old = mtd.td_lastcpu;
+
+				if (mtd.td_oncpu == NOCPU)
+					kp->ki_oncpu_old = NOCPU_OLD;
+				else if (mtd.td_oncpu > MAXCPU_OLD)
+					kp->ki_oncpu_old = MAXCPU_OLD;
+				else
+					kp->ki_oncpu_old = mtd.td_oncpu;
+				kp->ki_tid = mtd.td_tid;
+			} else {
+				memset(&kp->ki_sigmask, 0,
+				    sizeof(kp->ki_sigmask));
+				kp->ki_stat = SZOMB;
+				kp->ki_tid = 0;
+			}
+
+			bcopy(&kinfo_proc, bp, sizeof(kinfo_proc));
+			++bp;
+			++cnt;
+		}
+	}
+	return (cnt);
+}
+
+/*
+ * Build proc info array by reading in proc list from a crash dump.
+ * Return number of procs read. maxcnt is the max we will read.
+ */
+static int
+kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
+    u_long a_zombproc, int maxcnt)
+{
+	struct kinfo_proc *bp = kd->procbase;
+	int acnt, zcnt = 0;
+	struct proc *p;
+
+	/* Read the head of the allproc list and walk the live processes. */
+	if (KREAD(kd, a_allproc, &p)) {
+		_kvm_err(kd, kd->program, "cannot read allproc");
+		return (-1);
+	}
+	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
+	if (acnt < 0)
+		return (acnt);
+
+	/* The zombie list is optional: caller passes 0 if its symbol is absent. */
+	if (a_zombproc != 0) {
+		if (KREAD(kd, a_zombproc, &p)) {
+			_kvm_err(kd, kd->program, "cannot read zombproc");
+			return (-1);
+		}
+		/* Best effort: errors while walking zombies are not fatal. */
+		zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
+		if (zcnt < 0)
+			zcnt = 0;
+	}
+
+	return (acnt + zcnt);
+}
+
+/*
+ * Return an array of kinfo_proc entries for the processes matching
+ * (op, arg).  Live kernels are queried through sysctl(KERN_PROC); dead
+ * kernels are walked via the namelist (kvm_deadprocs()).  *cnt receives
+ * the number of entries.  The array is owned by kd: it is freed and
+ * rebuilt on the next call, and released by kvm_close().
+ */
+struct kinfo_proc *
+kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
+{
+	int mib[4], st, nprocs;
+	size_t size, osize;
+	int temp_op;
+
+	if (kd->procbase != 0) {
+		free((void *)kd->procbase);
+		/*
+		 * Clear this pointer in case this call fails.  Otherwise,
+		 * kvm_close() will free it again.
+		 */
+		kd->procbase = 0;
+	}
+	if (ISALIVE(kd)) {
+		size = 0;
+		mib[0] = CTL_KERN;
+		mib[1] = KERN_PROC;
+		mib[2] = op;
+		mib[3] = arg;
+		temp_op = op & ~KERN_PROC_INC_THREAD;
+		/* KERN_PROC_ALL and KERN_PROC_PROC take no arg level. */
+		st = sysctl(mib,
+		    temp_op == KERN_PROC_ALL || temp_op == KERN_PROC_PROC ?
+		    3 : 4, NULL, &size, NULL, 0);
+		if (st == -1) {
+			_kvm_syserr(kd, kd->program, "kvm_getprocs");
+			return (0);
+		}
+		/*
+		 * We can't continue with a size of 0 because we pass
+		 * it to realloc() (via _kvm_realloc()), and passing 0
+		 * to realloc() results in undefined behavior.
+		 */
+		if (size == 0) {
+			/*
+			 * XXX: We should probably return an invalid,
+			 * but non-NULL, pointer here so any client
+			 * program trying to dereference it will
+			 * crash.  However, _kvm_freeprocs() calls
+			 * free() on kd->procbase if it isn't NULL,
+			 * and free()'ing a junk pointer isn't good.
+			 * Then again, _kvm_freeprocs() isn't used
+			 * anywhere . . .
+			 */
+			kd->procbase = _kvm_malloc(kd, 1);
+			goto liveout;
+		}
+		/* Grow by 10% each retry to absorb processes created meanwhile. */
+		do {
+			size += size / 10;
+			kd->procbase = (struct kinfo_proc *)
+			    _kvm_realloc(kd, kd->procbase, size);
+			if (kd->procbase == NULL)
+				return (0);
+			osize = size;
+			st = sysctl(mib, temp_op == KERN_PROC_ALL ||
+			    temp_op == KERN_PROC_PROC ? 3 : 4,
+			    kd->procbase, &size, NULL, 0);
+		} while (st == -1 && errno == ENOMEM && size == osize);
+		if (st == -1) {
+			_kvm_syserr(kd, kd->program, "kvm_getprocs");
+			return (0);
+		}
+		/*
+		 * We have to check the size again because sysctl()
+		 * may "round up" oldlenp if oldp is NULL; hence it
+		 * might've told us that there was data to get when
+		 * there really isn't any.
+		 */
+		if (size > 0 &&
+		    kd->procbase->ki_structsize != sizeof(struct kinfo_proc)) {
+			_kvm_err(kd, kd->program,
+			    "kinfo_proc size mismatch (expected %zu, got %d)",
+			    sizeof(struct kinfo_proc),
+			    kd->procbase->ki_structsize);
+			return (0);
+		}
+liveout:
+		nprocs = size == 0 ? 0 : size / kd->procbase->ki_structsize;
+	} else {
+		struct nlist nl[6], *p;
+		struct nlist nlz[2];
+
+		nl[0].n_name = "_nprocs";
+		nl[1].n_name = "_allproc";
+		nl[2].n_name = "_ticks";
+		nl[3].n_name = "_hz";
+		nl[4].n_name = "_cpu_tick_frequency";
+		nl[5].n_name = 0;
+
+		nlz[0].n_name = "_zombproc";
+		nlz[1].n_name = 0;
+
+		if (!kd->arch->ka_native(kd)) {
+			_kvm_err(kd, kd->program,
+			    "cannot read procs from non-native core");
+			return (0);
+		}
+
+		if (kvm_nlist(kd, nl) != 0) {
+			/* Report the first symbol that failed to resolve. */
+			for (p = nl; p->n_type != 0; ++p)
+				;
+			_kvm_err(kd, kd->program,
+			    "%s: no such symbol", p->n_name);
+			return (0);
+		}
+		(void) kvm_nlist(kd, nlz);	/* attempt to get zombproc */
+		if (KREAD(kd, nl[0].n_value, &nprocs)) {
+			_kvm_err(kd, kd->program, "can't read nprocs");
+			return (0);
+		}
+		/*
+		 * If returning all threads, we don't know how many that
+		 * might be.  Presume that there are, on average, no more
+		 * than 10 threads per process.
+		 */
+		if (op == KERN_PROC_ALL || (op & KERN_PROC_INC_THREAD))
+			nprocs *= 10;		/* XXX */
+		if (KREAD(kd, nl[2].n_value, &ticks)) {
+			_kvm_err(kd, kd->program, "can't read ticks");
+			return (0);
+		}
+		if (KREAD(kd, nl[3].n_value, &hz)) {
+			_kvm_err(kd, kd->program, "can't read hz");
+			return (0);
+		}
+		if (KREAD(kd, nl[4].n_value, &cpu_tick_frequency)) {
+			_kvm_err(kd, kd->program,
+			    "can't read cpu_tick_frequency");
+			return (0);
+		}
+		size = nprocs * sizeof(struct kinfo_proc);
+		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
+		if (kd->procbase == NULL)
+			return (0);
+
+		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
+		    nlz[0].n_value, nprocs);
+		if (nprocs <= 0) {
+			_kvm_freeprocs(kd);
+			nprocs = 0;
+		}
+#ifdef notdef
+		else {
+			size = nprocs * sizeof(struct kinfo_proc);
+			kd->procbase = realloc(kd->procbase, size);
+		}
+#endif
+	}
+	*cnt = nprocs;
+	return (kd->procbase);
+}
+
+/* Discard the cached kinfo_proc array so it cannot be freed twice. */
+void
+_kvm_freeprocs(kvm_t *kd)
+{
+	struct kinfo_proc *base = kd->procbase;
+
+	kd->procbase = NULL;
+	free(base);
+}
+
+/*
+ * realloc() wrapper that records an "out of memory" error on the
+ * descriptor.  Built on reallocf(), so the old block is released when
+ * the resize fails.
+ */
+void *
+_kvm_realloc(kvm_t *kd, void *p, size_t n)
+{
+	void *q = reallocf(p, n);
+
+	if (q == NULL)
+		_kvm_err(kd, kd->program, "out of memory");
+	return (q);
+}
+
+/*
+ * Get the command args or environment.
+ */
+/*
+ * Common implementation for kvm_getargv() and kvm_getenvv(): fetch the
+ * argument vector (env == 0) or environment (env != 0) of the process
+ * described by kp via sysctl, limited to nchr characters.  Returns a
+ * NULL-terminated pointer vector backed by static storage reused across
+ * calls, or NULL on failure.  Only works on live kernels.
+ */
+static char **
+kvm_argv(kvm_t *kd, const struct kinfo_proc *kp, int env, int nchr)
+{
+	int oid[4];
+	int i;
+	size_t bufsz;
+	static int buflen;
+	static char *buf, *p;
+	static char **bufp;
+	static int argc;
+	char **nbufp;
+
+	if (!ISALIVE(kd)) {
+		_kvm_err(kd, kd->program,
+		    "cannot read user space from dead kernel");
+		return (NULL);
+	}
+
+	/* Clamp the request; 0 means "no limit requested". */
+	if (nchr == 0 || nchr > ARG_MAX)
+		nchr = ARG_MAX;
+	if (buflen == 0) {
+		/* First call: set up the static string and vector buffers. */
+		buf = malloc(nchr);
+		if (buf == NULL) {
+			_kvm_err(kd, kd->program, "cannot allocate memory");
+			return (NULL);
+		}
+		argc = 32;
+		bufp = malloc(sizeof(char *) * argc);
+		if (bufp == NULL) {
+			free(buf);
+			buf = NULL;
+			_kvm_err(kd, kd->program, "cannot allocate memory");
+			return (NULL);
+		}
+		buflen = nchr;
+	} else if (nchr > buflen) {
+		/* Grow; on failure keep the old (smaller) buffer and truncate. */
+		p = realloc(buf, nchr);
+		if (p != NULL) {
+			buf = p;
+			buflen = nchr;
+		}
+	}
+	oid[0] = CTL_KERN;
+	oid[1] = KERN_PROC;
+	oid[2] = env ? KERN_PROC_ENV : KERN_PROC_ARGS;
+	oid[3] = kp->ki_pid;
+	bufsz = buflen;
+	if (sysctl(oid, 4, buf, &bufsz, 0, 0) == -1) {
+		/*
+		 * If the supplied buf is too short to hold the requested
+		 * value the sysctl returns with ENOMEM.  The buf is filled
+		 * with the truncated value and the returned bufsz is equal
+		 * to the requested len.
+		 */
+		if (errno != ENOMEM || bufsz != (size_t)buflen)
+			return (NULL);
+		buf[bufsz - 1] = '\0';
+		errno = 0;
+	} else if (bufsz == 0)
+		return (NULL);
+	/* Split the NUL-separated strings into the pointer vector. */
+	i = 0;
+	p = buf;
+	do {
+		bufp[i++] = p;
+		p += strlen(p) + 1;
+		if (i >= argc) {
+			argc += argc;
+			nbufp = realloc(bufp, sizeof(char *) * argc);
+			if (nbufp == NULL)
+				return (NULL);
+			bufp = nbufp;
+		}
+	} while (p < buf + bufsz);
+	bufp[i++] = 0;
+	return (bufp);
+}
+
+/* Public wrapper: fetch the argument vector of the process in kp. */
+char **
+kvm_getargv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
+{
+
+	return (kvm_argv(kd, kp, /*env*/0, nchr));
+}
+
+/* Public wrapper: fetch the environment vector of the process in kp. */
+char **
+kvm_getenvv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
+{
+
+	return (kvm_argv(kd, kp, /*env*/1, nchr));
+}
diff --git a/lib/libkvm/kvm_read.3 b/lib/libkvm/kvm_read.3
new file mode 100644
index 000000000000..c3e9c3b31e92
--- /dev/null
+++ b/lib/libkvm/kvm_read.3
@@ -0,0 +1,108 @@
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd November 27, 2015
+.Dt KVM_READ 3
+.Os
+.Sh NAME
+.Nm kvm_read ,
+.Nm kvm_read2 ,
+.Nm kvm_write
+.Nd read or write kernel virtual memory
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft ssize_t
+.Fn kvm_read "kvm_t *kd" "unsigned long addr" "void *buf" "size_t nbytes"
+.Ft ssize_t
+.Fn kvm_read2 "kvm_t *kd" "kvaddr_t addr" "void *buf" "size_t nbytes"
+.Ft ssize_t
+.Fn kvm_write "kvm_t *kd" "unsigned long addr" "const void *buf" "size_t nbytes"
+.Sh DESCRIPTION
+The
+.Fn kvm_read ,
+.Fn kvm_read2 ,
+and
+.Fn kvm_write
+functions are used to read and write kernel virtual memory (or a crash
+dump file).
+See
+.Xr kvm_open 3
+for information regarding opening kernel virtual memory and crash dumps.
+.Pp
+The
+.Fn kvm_read
+and
+.Fn kvm_read2
+functions transfer
+.Fa nbytes
+bytes of data from
+the kernel space address
+.Fa addr
+to
+.Fa buf .
+Conversely,
+.Fn kvm_write
+transfers data from
+.Fa buf
+to
+.Fa addr .
+Unlike their SunOS counterparts, these functions cannot be used to
+read or write process address spaces.
+.Pp
+The
+.Fn kvm_read2
+function uses a different type
+.Pq Vt kvaddr_t
+for the
+.Fa addr
+argument to allow use of addresses larger than
+.Dv ULONG_MAX
+when examining non-native kernel images.
+.Sh RETURN VALUES
+Upon success, the number of bytes actually transferred is returned.
+Otherwise, -1 is returned.
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3
+.Sh HISTORY
+The
+.Fn kvm_read2
+function first appeared in
+.Fx 11.0 .
diff --git a/lib/libkvm/kvm_riscv.h b/lib/libkvm/kvm_riscv.h
new file mode 100644
index 000000000000..fc0e4895a748
--- /dev/null
+++ b/lib/libkvm/kvm_riscv.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2015 John H. Baldwin <jhb@FreeBSD.org>
+ * Copyright (c) 2019 Mitchell Horne
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
#ifndef __KVM_RISCV_H__
#define	__KVM_RISCV_H__

#ifdef __riscv
#include <machine/pte.h>
#endif

/* Physical addresses and page-table entries are 64-bit on riscv. */
typedef uint64_t	riscv_physaddr_t;
typedef uint64_t	riscv_pt_entry_t;

#define	RISCV_PAGE_SHIFT	12
#define	RISCV_PAGE_SIZE		(1 << RISCV_PAGE_SHIFT)
#define	RISCV_PAGE_MASK		(RISCV_PAGE_SIZE - 1)

/* Source: sys/riscv/include/pte.h */
#define	RISCV_L3_SHIFT		12
/*
 * These must be expressed in terms of the RISCV_-prefixed constants:
 * the unprefixed L3_SHIFT/L3_SIZE names are only defined by
 * <machine/pte.h> on native riscv builds, so using them here would
 * break cross-architecture builds of libkvm.
 */
#define	RISCV_L3_SIZE		(1 << RISCV_L3_SHIFT)
#define	RISCV_L3_OFFSET		(RISCV_L3_SIZE - 1)

#define	RISCV_PTE_SW_MANAGED	(1 << 9)
#define	RISCV_PTE_SW_WIRED	(1 << 8)
#define	RISCV_PTE_D		(1 << 7) /* Dirty */
#define	RISCV_PTE_A		(1 << 6) /* Accessed */
#define	RISCV_PTE_G		(1 << 5) /* Global */
#define	RISCV_PTE_U		(1 << 4) /* User */
#define	RISCV_PTE_X		(1 << 3) /* Execute */
#define	RISCV_PTE_W		(1 << 2) /* Write */
#define	RISCV_PTE_R		(1 << 1) /* Read */
#define	RISCV_PTE_V		(1 << 0) /* Valid */
#define	RISCV_PTE_RWX		(RISCV_PTE_R | RISCV_PTE_W | RISCV_PTE_X)

/* Physical page number field starts at bit 10 of a PTE. */
#define	RISCV_PTE_PPN0_S	10

#ifdef __riscv
/*
 * On native riscv builds, cross-check the local definitions against the
 * kernel's <machine/pte.h> so the two cannot drift apart silently.
 */
_Static_assert(sizeof(pt_entry_t) == sizeof(riscv_pt_entry_t),
    "pt_entry_t size mismatch");

_Static_assert(PAGE_SHIFT == RISCV_PAGE_SHIFT, "PAGE_SHIFT mismatch");
_Static_assert(PAGE_SIZE == RISCV_PAGE_SIZE, "PAGE_SIZE mismatch");
_Static_assert(PAGE_MASK == RISCV_PAGE_MASK, "PAGE_MASK mismatch");

_Static_assert(L3_SHIFT == RISCV_L3_SHIFT, "L3_SHIFT mismatch");
_Static_assert(L3_SIZE == RISCV_L3_SIZE, "L3_SIZE mismatch");
_Static_assert(L3_OFFSET == RISCV_L3_OFFSET, "L3_OFFSET mismatch");
_Static_assert(PTE_PPN0_S == RISCV_PTE_PPN0_S, "PTE_PPN0_S mismatch");

_Static_assert(PTE_SW_MANAGED == RISCV_PTE_SW_MANAGED,
    "PTE_SW_MANAGED mismatch");
_Static_assert(PTE_SW_WIRED == RISCV_PTE_SW_WIRED, "PTE_SW_WIRED mismatch");
_Static_assert(PTE_D == RISCV_PTE_D, "PTE_D mismatch");
_Static_assert(PTE_A == RISCV_PTE_A, "PTE_A mismatch");
_Static_assert(PTE_G == RISCV_PTE_G, "PTE_G mismatch");
_Static_assert(PTE_U == RISCV_PTE_U, "PTE_U mismatch");
_Static_assert(PTE_X == RISCV_PTE_X, "PTE_X mismatch");
_Static_assert(PTE_W == RISCV_PTE_W, "PTE_W mismatch");
_Static_assert(PTE_R == RISCV_PTE_R, "PTE_R mismatch");
_Static_assert(PTE_V == RISCV_PTE_V, "PTE_V mismatch");
_Static_assert(PTE_RWX == RISCV_PTE_RWX, "PTE_RWX mismatch");
#endif

#endif /* !__KVM_RISCV_H__ */
diff --git a/lib/libkvm/kvm_vnet.c b/lib/libkvm/kvm_vnet.c
new file mode 100644
index 000000000000..a60663d483e1
--- /dev/null
+++ b/lib/libkvm/kvm_vnet.c
@@ -0,0 +1,244 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2009 Robert N. M. Watson
+ * Copyright (c) 2009 Bjoern A. Zeeb <bz@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+
+#define _WANT_PRISON
+#define _WANT_UCRED
+#define _WANT_VNET
+
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <sys/_task.h>
+#include <sys/jail.h>
+#include <sys/proc.h>
+#include <sys/types.h>
+
+#include <stdbool.h>
+#include <net/vnet.h>
+
+#include <kvm.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+/*
+ * Set up libkvm to handle virtual network stack symbols by selecting a
+ * starting pid.
+ */
+int
+_kvm_vnet_selectpid(kvm_t *kd, pid_t pid)
+{
+	struct proc proc;
+	struct ucred cred;
+	struct prison prison;
+	struct vnet vnet;
+	struct kvm_nlist nl[] = {
+		/*
+		 * Note: kvm_nlist strips the first '_' so add an extra one
+		 * here to __{start,stop}_set_vnet.
+		 */
+#define NLIST_START_VNET 0
+		{ .n_name = "___start_" VNET_SETNAME },
+#define NLIST_STOP_VNET 1
+		{ .n_name = "___stop_" VNET_SETNAME },
+#define NLIST_VNET_HEAD 2
+		{ .n_name = "vnet_head" },
+#define NLIST_ALLPROC 3
+		{ .n_name = "allproc" },
+#define NLIST_DUMPTID 4
+		{ .n_name = "dumptid" },
+#define NLIST_PROC0 5
+		{ .n_name = "proc0" },
+		{ .n_name = NULL },
+	};
+	uintptr_t procp, credp;
+/*
+ * When defined, crash dumps (dumptid > 0) use proc0's vnet rather than
+ * walking each process's thread list for the thread that panicked.
+ */
+#define VMCORE_VNET_OF_PROC0
+#ifndef VMCORE_VNET_OF_PROC0
+	struct thread td;
+	uintptr_t tdp;
+#endif
+	lwpid_t dumptid;
+
+	/*
+	 * XXX: This only works for native kernels for now.
+	 */
+	if (!kvm_native(kd))
+		return (-1);
+
+	/*
+	 * Locate and cache locations of important symbols
+	 * using the internal version of _kvm_nlist, turning
+	 * off initialization to avoid recursion in case of
+	 * unresolveable symbols.
+	 */
+	if (_kvm_nlist(kd, nl, 0) != 0) {
+		/*
+		 * XXX-BZ: ___start_/___stop_VNET_SETNAME may fail.
+		 * For now do not report an error here as we are called
+		 * internally and in `void context' until we merge the
+		 * functionality to optionally activate this into programs.
+		 * By that time we can properly fail and let the callers
+		 * handle the error.
+		 */
+		/* _kvm_err(kd, kd->program, "%s: no namelist", __func__); */
+		return (-1);
+	}
+
+	/*
+	 * Auto-detect if this is a crashdump by reading dumptid.
+	 */
+	dumptid = 0;
+	if (nl[NLIST_DUMPTID].n_value) {
+		if (kvm_read(kd, nl[NLIST_DUMPTID].n_value, &dumptid,
+		    sizeof(dumptid)) != sizeof(dumptid)) {
+			_kvm_err(kd, kd->program, "%s: dumptid", __func__);
+			return (-1);
+		}
+	}
+
+	/*
+	 * First, find the process for this pid. If we are working on a
+	 * dump, either locate the thread dumptid is referring to or proc0.
+	 * Based on either, take the address of the ucred.
+	 */
+	credp = 0;
+
+	procp = nl[NLIST_ALLPROC].n_value;
+#ifdef VMCORE_VNET_OF_PROC0
+	if (dumptid > 0) {
+		procp = nl[NLIST_PROC0].n_value;
+		pid = 0;
+	}
+#endif
+	/* Walk the allproc list, copying each struct proc out of the image. */
+	while (procp != 0) {
+		if (kvm_read(kd, procp, &proc, sizeof(proc)) != sizeof(proc)) {
+			_kvm_err(kd, kd->program, "%s: proc", __func__);
+			return (-1);
+		}
+#ifndef VMCORE_VNET_OF_PROC0
+		if (dumptid > 0) {
+			tdp = (uintptr_t)TAILQ_FIRST(&proc.p_threads);
+			while (tdp != 0) {
+				if (kvm_read(kd, tdp, &td, sizeof(td)) !=
+				    sizeof(td)) {
+					_kvm_err(kd, kd->program, "%s: thread",
+					    __func__);
+					return (-1);
+				}
+				if (td.td_tid == dumptid) {
+					credp = (uintptr_t)td.td_ucred;
+					break;
+				}
+				tdp = (uintptr_t)TAILQ_NEXT(&td, td_plist);
+			}
+		} else
+#endif
+		if (proc.p_pid == pid)
+			credp = (uintptr_t)proc.p_ucred;
+		if (credp != 0)
+			break;
+		/* List pointers are kernel addresses; only follow via kvm_read. */
+		procp = (uintptr_t)LIST_NEXT(&proc, p_list);
+	}
+	if (credp == 0) {
+		_kvm_err(kd, kd->program, "%s: pid/tid not found", __func__);
+		return (-1);
+	}
+	/* Chase ucred -> prison -> vnet, copying each structure out. */
+	if (kvm_read(kd, (uintptr_t)credp, &cred, sizeof(cred)) !=
+	    sizeof(cred)) {
+		_kvm_err(kd, kd->program, "%s: cred", __func__);
+		return (-1);
+	}
+	if (cred.cr_prison == NULL) {
+		_kvm_err(kd, kd->program, "%s: no jail", __func__);
+		return (-1);
+	}
+	if (kvm_read(kd, (uintptr_t)cred.cr_prison, &prison, sizeof(prison)) !=
+	    sizeof(prison)) {
+		_kvm_err(kd, kd->program, "%s: prison", __func__);
+		return (-1);
+	}
+	if (prison.pr_vnet == NULL) {
+		_kvm_err(kd, kd->program, "%s: no vnet", __func__);
+		return (-1);
+	}
+	if (kvm_read(kd, (uintptr_t)prison.pr_vnet, &vnet, sizeof(vnet)) !=
+	    sizeof(vnet)) {
+		_kvm_err(kd, kd->program, "%s: vnet", __func__);
+		return (-1);
+	}
+	/* Sanity-check that the structure really is a struct vnet. */
+	if (vnet.vnet_magic_n != VNET_MAGIC_N) {
+		_kvm_err(kd, kd->program, "%s: invalid vnet magic#", __func__);
+		return (-1);
+	}
+	/*
+	 * Cache the vnet symbol-set bounds and this vnet's data base so
+	 * _kvm_vnet_validaddr() can relocate vnet symbol addresses.
+	 */
+	kd->vnet_initialized = 1;
+	kd->vnet_start = nl[NLIST_START_VNET].n_value;
+	kd->vnet_stop = nl[NLIST_STOP_VNET].n_value;
+	kd->vnet_current = (uintptr_t)prison.pr_vnet;
+	kd->vnet_base = vnet.vnet_data_base;
+	return (0);
+}
+
+/*
+ * Check whether the vnet module has been initialized successfully
+ * or not, initialize it if permitted.
+ */
+/*
+ * Return whether vnet handling has been set up on this handle; if not,
+ * and the caller permits it, lazily initialize using our own pid.
+ */
+int
+_kvm_vnet_initialized(kvm_t *kd, int initialize)
+{
+
+	/* Already set up, or the caller forbids lazy initialization. */
+	if (kd->vnet_initialized || !initialize)
+		return (kd->vnet_initialized);
+
+	/* Best effort: a failure simply leaves the flag clear. */
+	(void) _kvm_vnet_selectpid(kd, getpid());
+
+	return (kd->vnet_initialized);
+}
+
+/*
+ * Check whether the value is within the vnet symbol range and
+ * only if so adjust the offset relative to the current base.
+ */
+kvaddr_t
+_kvm_vnet_validaddr(kvm_t *kd, kvaddr_t value)
+{
+
+	/*
+	 * Pass the value through unchanged unless vnet handling is
+	 * active and the address lies inside the vnet symbol set;
+	 * otherwise bias it by the current vnet's data base.
+	 */
+	if (value == 0 || !kd->vnet_initialized ||
+	    value < kd->vnet_start || value >= kd->vnet_stop)
+		return (value);
+
+	return (kd->vnet_base + value);
+}
diff --git a/lib/libkvm/tests/Makefile b/lib/libkvm/tests/Makefile
new file mode 100644
index 000000000000..5b4220327670
--- /dev/null
+++ b/lib/libkvm/tests/Makefile
@@ -0,0 +1,20 @@
+# ATF-based regression tests for the libkvm(3) interfaces.
+.include <bsd.own.mk>
+
+ATF_TESTS_C+= kvm_close_test
+ATF_TESTS_C+= kvm_geterr_test
+ATF_TESTS_C+= kvm_open_test
+ATF_TESTS_C+= kvm_open2_test
+ATF_TESTS_C+= kvm_read_test
+
+# These two tests use libkvm-private APIs and need the private headers.
+CFLAGS.kvm_geterr_test+= -I${.CURDIR:H}
+CFLAGS.kvm_read_test+= -I${.CURDIR:H}
+
+LIBADD+= kvm
+
+BINDIR= ${TESTSDIR}
+
+# All tests except kvm_close_test share the errbuf helper routines.
+.for t in kvm_geterr_test kvm_open_test kvm_open2_test kvm_read_test
+SRCS.$t= $t.c kvm_test_common.c
+.endfor
+
+.include <bsd.test.mk>
diff --git a/lib/libkvm/tests/Makefile.depend b/lib/libkvm/tests/Makefile.depend
new file mode 100644
index 000000000000..c77538d2822e
--- /dev/null
+++ b/lib/libkvm/tests/Makefile.depend
@@ -0,0 +1,19 @@
+# Autogenerated - do NOT edit!
+
+DIRDEPS = \
+ gnu/lib/csu \
+ include \
+ include/xlocale \
+ lib/${CSU_DIR} \
+ lib/atf/libatf-c \
+ lib/libc \
+ lib/libcompiler_rt \
+ lib/libelf \
+ lib/libkvm \
+
+
+.include <dirdeps.mk>
+
+.if ${DEP_RELDIR} == ${_DEP_RELDIR}
+# local dependencies - needed for -jN in clean tree
+.endif
diff --git a/lib/libkvm/tests/kvm_close_test.c b/lib/libkvm/tests/kvm_close_test.c
new file mode 100644
index 000000000000..ed8e4f34c868
--- /dev/null
+++ b/lib/libkvm/tests/kvm_close_test.c
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <signal.h>
+
+#include <atf-c.h>
+
+ATF_TC(kvm_close_negative_test_NULL);
+ATF_TC_HEAD(kvm_close_negative_test_NULL, tc)
+{
+
+	/* The body requires -1/EINVAL, so the description must say so. */
+	atf_tc_set_md_var(tc, "descr",
+	    "test that kvm_close(NULL) fails with EINVAL");
+}
+
+ATF_TC_BODY(kvm_close_negative_test_NULL, tc)
+{
+
+	/* A NULL handle must be rejected, not dereferenced. */
+	ATF_REQUIRE_ERRNO(EINVAL, kvm_close(NULL) == -1);
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+
+	ATF_TP_ADD_TC(tp, kvm_close_negative_test_NULL);
+
+	return (atf_no_error());
+}
diff --git a/lib/libkvm/tests/kvm_geterr_test.c b/lib/libkvm/tests/kvm_geterr_test.c
new file mode 100644
index 000000000000..01f21dc6a3d5
--- /dev/null
+++ b/lib/libkvm/tests/kvm_geterr_test.c
@@ -0,0 +1,138 @@
+/*-
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <atf-c.h>
+
+#include "kvm_private.h"
+
+#include "kvm_test_common.h"
+
+ATF_TC(kvm_geterr_negative_test_NULL);
+ATF_TC_HEAD(kvm_geterr_negative_test_NULL, tc)
+{
+
+	atf_tc_set_md_var(tc, "descr",
+	    "test that kvm_geterr(NULL) returns a zero-length string");
+}
+
+ATF_TC_BODY(kvm_geterr_negative_test_NULL, tc)
+{
+
+	/*
+	 * kvm_geterr(3) must tolerate a NULL kd and return "", not NULL:
+	 * errbuf_has_error() would dereference a NULL result.
+	 */
+	ATF_REQUIRE(!errbuf_has_error(kvm_geterr(NULL)));
+}
+
+/* 1100090 was where kvm_open2(3) was introduced. */
+#if __FreeBSD_version >= 1100091
+ATF_TC(kvm_geterr_positive_test_error);
+ATF_TC_HEAD(kvm_geterr_positive_test_error, tc)
+{
+
+	/* This case provokes an error, so describe exactly that. */
+	atf_tc_set_md_var(tc, "descr",
+	    "test that kvm_geterr(kd) returns an error message after a "
+	    "failed kvm_write(3)");
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_geterr_positive_test_error, tc)
+{
+	kvm_t *kd;
+	char *error_msg;
+
+	errbuf_clear();
+	kd = kvm_open2(NULL, NULL, O_RDONLY, errbuf, NULL);
+	ATF_CHECK(!errbuf_has_error(errbuf));
+	ATF_REQUIRE_MSG(kd != NULL, "kvm_open2 failed: %s", errbuf);
+	/* Writing via an O_RDONLY handle must fail and set kd's error. */
+	ATF_REQUIRE_MSG(kvm_write(kd, 0, NULL, 0) == -1,
+	    "kvm_write succeeded unexpectedly on an O_RDONLY file descriptor");
+	error_msg = kvm_geterr(kd);
+	ATF_CHECK(errbuf_has_error(error_msg));
+	ATF_REQUIRE_MSG(kvm_close(kd) == 0, "kvm_close failed: %s",
+	    strerror(errno));
+}
+
+ATF_TC(kvm_geterr_positive_test_no_error);
+ATF_TC_HEAD(kvm_geterr_positive_test_no_error, tc)
+{
+
+	/* Describe what the body actually checks: message preservation. */
+	atf_tc_set_md_var(tc, "descr",
+	    "test that a successful kvm_read(3) does not clobber the error "
+	    "message previously recorded on kd");
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_geterr_positive_test_no_error, tc)
+{
+#define ALL_IS_WELL "that ends well"
+	kvm_t *kd;
+	char *error_msg;
+	struct nlist nl[] = {
+#define SYMNAME "_mp_maxcpus"
+#define X_MAXCPUS 0
+		{ SYMNAME, 0, 0, 0, 0 },
+		{ NULL, 0, 0, 0, 0 },
+	};
+	ssize_t rc;
+	int mp_maxcpus, retcode;
+
+	errbuf_clear();
+	kd = kvm_open2(NULL, NULL, O_RDONLY, errbuf, NULL);
+	ATF_CHECK(!errbuf_has_error(errbuf));
+	ATF_REQUIRE_MSG(kd != NULL, "kvm_open2 failed: %s", errbuf);
+	retcode = kvm_nlist(kd, nl);
+	ATF_REQUIRE_MSG(retcode != -1,
+	    "kvm_nlist failed (returned %d): %s", retcode, kvm_geterr(kd));
+	/* n_type == 0 means the symbol wasn't found in the kernel image. */
+	if (nl[X_MAXCPUS].n_type == 0)
+		atf_tc_skip("symbol (\"%s\") couldn't be found", SYMNAME);
+	/* Plant a known message, then verify a successful read keeps it. */
+	_kvm_err(kd, NULL, "%s", ALL_IS_WELL); /* XXX: internal API */
+	rc = kvm_read(kd, nl[X_MAXCPUS].n_value, &mp_maxcpus,
+	    sizeof(mp_maxcpus));
+
+	ATF_REQUIRE_MSG(rc != -1, "kvm_read failed: %s", kvm_geterr(kd));
+	error_msg = kvm_geterr(kd);
+	ATF_REQUIRE_MSG(strcmp(error_msg, ALL_IS_WELL) == 0,
+	    "error message changed: %s", error_msg);
+	ATF_REQUIRE_MSG(kvm_close(kd) == 0, "kvm_close failed: %s",
+	    strerror(errno));
+}
+#endif
+
+ATF_TP_ADD_TCS(tp)
+{
+
+	ATF_TP_ADD_TC(tp, kvm_geterr_negative_test_NULL);
+	/* The remaining cases require kvm_open2(3), added in FreeBSD 11. */
+#if __FreeBSD_version >= 1100091
+	ATF_TP_ADD_TC(tp, kvm_geterr_positive_test_error);
+	ATF_TP_ADD_TC(tp, kvm_geterr_positive_test_no_error);
+#endif
+
+	return (atf_no_error());
+}
diff --git a/lib/libkvm/tests/kvm_open2_test.c b/lib/libkvm/tests/kvm_open2_test.c
new file mode 100644
index 000000000000..0ad0b47de948
--- /dev/null
+++ b/lib/libkvm/tests/kvm_open2_test.c
@@ -0,0 +1,113 @@
+/*-
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <paths.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <atf-c.h>
+
+#include "kvm_test_common.h"
+
+ATF_TC_WITHOUT_HEAD(kvm_open2_negative_test_nonexistent_corefile);
+ATF_TC_BODY(kvm_open2_negative_test_nonexistent_corefile, tc)
+{
+
+	errbuf_clear();
+	/* With a NULL errbuf argument, our buffer must stay untouched. */
+	ATF_CHECK(kvm_open2(NULL, "/nonexistent", O_RDONLY, NULL, NULL) == NULL);
+	ATF_CHECK(!errbuf_has_error(errbuf));
+	errbuf_clear();
+	/* With a buffer supplied, the failure must be described in it. */
+	ATF_CHECK(kvm_open2(NULL, "/nonexistent", O_RDONLY,
+	    errbuf, NULL) == NULL);
+	ATF_CHECK(errbuf_has_error(errbuf));
+}
+
+ATF_TC_WITHOUT_HEAD(kvm_open2_negative_test_nonexistent_execfile);
+ATF_TC_BODY(kvm_open2_negative_test_nonexistent_execfile, tc)
+{
+
+	errbuf_clear();
+	/* With a NULL errbuf argument, our buffer must stay untouched. */
+	ATF_CHECK(kvm_open2("/nonexistent", _PATH_DEVZERO, O_RDONLY,
+	    NULL, NULL) == NULL);
+	/* Use errbuf_has_error() like the sibling tests (same semantics). */
+	ATF_CHECK(!errbuf_has_error(errbuf));
+	errbuf_clear();
+	/* With a buffer supplied, the failure must be described in it. */
+	ATF_CHECK(kvm_open2("/nonexistent", _PATH_DEVZERO, O_RDONLY,
+	    errbuf, NULL) == NULL);
+	ATF_CHECK(errbuf_has_error(errbuf));
+}
+
+ATF_TC(kvm_open2_negative_test_invalid_corefile);
+ATF_TC_HEAD(kvm_open2_negative_test_invalid_corefile, tc)
+{
+
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_open2_negative_test_invalid_corefile, tc)
+{
+	kvm_t *kd;
+
+	errbuf_clear();
+	/* A plain text file is not a valid core dump. */
+	atf_utils_create_file("some-file", "this is a text file");
+	kd = kvm_open2(NULL, "some-file", O_RDONLY, errbuf, NULL);
+	ATF_CHECK(errbuf_has_error(errbuf));
+	ATF_REQUIRE_MSG(kd == NULL, "kvm_open2 succeeded");
+}
+
+ATF_TC(kvm_open2_negative_test_invalid_execfile);
+ATF_TC_HEAD(kvm_open2_negative_test_invalid_execfile, tc)
+{
+
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_open2_negative_test_invalid_execfile, tc)
+{
+	kvm_t *kd;
+
+	errbuf_clear();
+	/* A plain text file cannot serve as the exec/namelist file. */
+	atf_utils_create_file("some-file", "this is a text file");
+	kd = kvm_open2("some-file", "/bin/sh", O_RDONLY, errbuf, NULL);
+	ATF_CHECK(errbuf_has_error(errbuf));
+	ATF_REQUIRE_MSG(kd == NULL, "kvm_open2 succeeded unexpectedly");
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+
+	ATF_TP_ADD_TC(tp, kvm_open2_negative_test_invalid_corefile);
+	ATF_TP_ADD_TC(tp, kvm_open2_negative_test_invalid_execfile);
+	ATF_TP_ADD_TC(tp, kvm_open2_negative_test_nonexistent_corefile);
+	ATF_TP_ADD_TC(tp, kvm_open2_negative_test_nonexistent_execfile);
+
+	return (atf_no_error());
+}
diff --git a/lib/libkvm/tests/kvm_open_test.c b/lib/libkvm/tests/kvm_open_test.c
new file mode 100644
index 000000000000..977f9ed5c3e2
--- /dev/null
+++ b/lib/libkvm/tests/kvm_open_test.c
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <paths.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <atf-c.h>
+
+#include "kvm_test_common.h"
+
+ATF_TC_WITHOUT_HEAD(kvm_open_negative_test_nonexistent_corefile);
+ATF_TC_BODY(kvm_open_negative_test_nonexistent_corefile, tc)
+{
+
+	/* A nonexistent core file must fail with and without errstr set. */
+	ATF_CHECK(kvm_open(NULL, "/nonexistent", NULL, O_RDONLY, NULL) == NULL);
+	ATF_CHECK(kvm_open(NULL, "/nonexistent", NULL, O_RDONLY,
+	    getprogname()) == NULL);
+}
+
+ATF_TC_WITHOUT_HEAD(kvm_open_negative_test_nonexistent_execfile);
+ATF_TC_BODY(kvm_open_negative_test_nonexistent_execfile, tc)
+{
+
+	/* A nonexistent exec file must fail; /dev/zero stands in as core. */
+	ATF_CHECK(kvm_open("/nonexistent", _PATH_DEVZERO, NULL, O_RDONLY,
+	    NULL) == NULL);
+	ATF_CHECK(kvm_open("/nonexistent", _PATH_DEVZERO, NULL, O_RDONLY,
+	    getprogname()) == NULL);
+}
+
+ATF_TC(kvm_open_negative_test_invalid_corefile);
+ATF_TC_HEAD(kvm_open_negative_test_invalid_corefile, tc)
+{
+
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_open_negative_test_invalid_corefile, tc)
+{
+	kvm_t *kd;
+
+	/* A plain text file is not a valid core dump. */
+	atf_utils_create_file("some-file", "this is a text file");
+	kd = kvm_open(NULL, "some-file", NULL, O_RDONLY, getprogname());
+	ATF_REQUIRE_MSG(kd == NULL, "kvm_open didn't return NULL on failure");
+}
+
+ATF_TC(kvm_open_negative_test_invalid_execfile);
+ATF_TC_HEAD(kvm_open_negative_test_invalid_execfile, tc)
+{
+
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_open_negative_test_invalid_execfile, tc)
+{
+	kvm_t *kd;
+
+	/* A plain text file is not a valid exec (namelist) file either. */
+	atf_utils_create_file("some-file", "this is a text file");
+	kd = kvm_open("some-file", "/bin/sh", NULL, O_RDONLY, getprogname());
+	ATF_REQUIRE_MSG(kd == NULL, "kvm_open succeeded unexpectedly");
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+
+	ATF_TP_ADD_TC(tp, kvm_open_negative_test_invalid_corefile);
+	ATF_TP_ADD_TC(tp, kvm_open_negative_test_invalid_execfile);
+	ATF_TP_ADD_TC(tp, kvm_open_negative_test_nonexistent_corefile);
+	ATF_TP_ADD_TC(tp, kvm_open_negative_test_nonexistent_execfile);
+
+	return (atf_no_error());
+}
diff --git a/lib/libkvm/tests/kvm_read_test.c b/lib/libkvm/tests/kvm_read_test.c
new file mode 100644
index 000000000000..38cf337cb7c5
--- /dev/null
+++ b/lib/libkvm/tests/kvm_read_test.c
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2020 Alfredo Dal'Ava Junior <alfredo@freebsd.org>
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: FreeBSD: src/lib/libkvm/tests/kvm_geterr_test.c
+ */
+
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <atf-c.h>
+
+#include "kvm_test_common.h"
+
+ATF_TC(kvm_read_positive_test_no_error);
+ATF_TC_HEAD(kvm_read_positive_test_no_error, tc)
+{
+
+	atf_tc_set_md_var(tc, "descr",
+	    "test that kvm_read returns a sane value");
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+
+ATF_TC_BODY(kvm_read_positive_test_no_error, tc)
+{
+	kvm_t *kd;
+	struct nlist nl[] = {
+#define SYMNAME "_mp_maxcpus"
+#define X_MAXCPUS 0
+		{ SYMNAME, 0, 0, 0, 0 },
+		{ NULL, 0, 0, 0, 0 },
+	};
+	ssize_t rc;
+	int sysctl_maxcpus, mp_maxcpus, retcode;
+	size_t len = sizeof(sysctl_maxcpus);
+
+	errbuf_clear();
+	kd = kvm_open(NULL, NULL, NULL, O_RDONLY, errbuf);
+	ATF_CHECK(!errbuf_has_error(errbuf));
+	ATF_REQUIRE_MSG(kd != NULL, "kvm_open failed: %s", errbuf);
+	/* kvm_nlist(3) strips SYMNAME's leading '_' during lookup. */
+	retcode = kvm_nlist(kd, nl);
+	ATF_REQUIRE_MSG(retcode != -1,
+	    "kvm_nlist failed (returned %d): %s", retcode, kvm_geterr(kd));
+	/* n_type == 0 means the symbol wasn't found in the kernel image. */
+	if (nl[X_MAXCPUS].n_type == 0)
+		atf_tc_skip("symbol (\"%s\") couldn't be found", SYMNAME);
+
+	rc = kvm_read(kd, nl[X_MAXCPUS].n_value, &mp_maxcpus,
+	    sizeof(mp_maxcpus));
+
+	ATF_REQUIRE_MSG(rc != -1, "kvm_read failed: %s", kvm_geterr(kd));
+	ATF_REQUIRE_MSG(kvm_close(kd) == 0, "kvm_close failed: %s",
+	    strerror(errno));
+
+	/* Check if value read from kvm_read is sane */
+	/* kern.smp.maxcpus is the sysctl view of the same kernel variable. */
+	retcode = sysctlbyname("kern.smp.maxcpus", &sysctl_maxcpus, &len, NULL, 0);
+	ATF_REQUIRE_MSG(retcode == 0, "sysctl read failed : %d", retcode);
+	ATF_REQUIRE_EQ_MSG(mp_maxcpus, sysctl_maxcpus,
+	    "failed: kvm_read of mp_maxcpus returned %d but sysctl maxcpus returned %d",
+	    mp_maxcpus, sysctl_maxcpus);
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+
+	ATF_TP_ADD_TC(tp, kvm_read_positive_test_no_error);
+	return (atf_no_error());
+}
diff --git a/lib/libkvm/tests/kvm_test_common.c b/lib/libkvm/tests/kvm_test_common.c
new file mode 100644
index 000000000000..0eff73cdc20a
--- /dev/null
+++ b/lib/libkvm/tests/kvm_test_common.c
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include "kvm_test_common.h"
+
+char errbuf[_POSIX2_LINE_MAX];
+
+void
+errbuf_clear(void)
+{
+
+ strcpy(errbuf, "");
+}
+
+bool
+errbuf_has_error(const char *_errbuf)
+{
+
+ return (strcmp(_errbuf, ""));
+}
diff --git a/lib/libkvm/tests/kvm_test_common.h b/lib/libkvm/tests/kvm_test_common.h
new file mode 100644
index 000000000000..e62f1edf92b2
--- /dev/null
+++ b/lib/libkvm/tests/kvm_test_common.h
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 2017 Enji Cooper <ngie@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __KVM_TEST_COMMON_H__
+
+#include <sys/types.h>
+#include <limits.h>
+#include <stdbool.h>
+
+extern char errbuf[_POSIX2_LINE_MAX];
+
+void errbuf_clear(void);
+bool errbuf_has_error(const char *);
+
+#endif