Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/include/vmm.h      |  31
-rw-r--r--  sys/amd64/include/vmm_dev.h  |   2
-rw-r--r--  sys/amd64/vmm/intel/vmx.c    |   2
-rw-r--r--  sys/amd64/vmm/io/ppt.c       |   7
-rw-r--r--  sys/amd64/vmm/io/ppt.h       |   6
-rw-r--r--  sys/amd64/vmm/io/vlapic.c    |   2
-rw-r--r--  sys/amd64/vmm/vmm.c          | 161
-rw-r--r--  sys/amd64/vmm/vmm_lapic.c    |   2
8 files changed, 43 insertions(+), 170 deletions(-)
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index ad67510fecf3..5cf1ae2d769c 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -122,33 +122,7 @@ enum x2apic_state {
#define VM_INTINFO_HWEXCEPTION (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
-/*
- * The VM name has to fit into the pathname length constraints of devfs,
- * governed primarily by SPECNAMELEN. The length is the total number of
- * characters in the full path, relative to the mount point and not
- * including any leading '/' characters.
- * A prefix and a suffix are added to the name specified by the user.
- * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
- * longer for future use.
- * The suffix is a string that identifies a bootrom image or some similar
- * image that is attached to the VM. A separator character gets added to
- * the suffix automatically when generating the full path, so it must be
- * accounted for, reducing the effective length by 1.
- * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
- * bytes for FreeBSD 12. A minimum length is set for safety and supports
- * a SPECNAMELEN as small as 32 on old systems.
- */
-#define VM_MAX_PREFIXLEN 10
-#define VM_MAX_SUFFIXLEN 15
-#define VM_MIN_NAMELEN 6
-#define VM_MAX_NAMELEN \
- (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
-
#ifdef _KERNEL
-#include <sys/kassert.h>
-
-CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);
-
struct vm;
struct vm_exception;
struct vm_mem;
@@ -232,8 +206,6 @@ struct vmm_ops {
extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
-extern u_int vm_maxcpu; /* maximum virtual cpus */
-
int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
@@ -383,7 +355,8 @@ vcpu_should_yield(struct vcpu *vcpu)
#endif
void *vcpu_stats(struct vcpu *vcpu);
-void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
+void vcpu_notify_event(struct vcpu *vcpu);
+void vcpu_notify_lapic(struct vcpu *vcpu);
struct vm_mem *vm_mem(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
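With the prototype change above, notifying a vCPU no longer takes a lapic_intr flag: ordinary kicks go through vcpu_notify_event(), and local-APIC interrupt delivery gets its own vcpu_notify_lapic() entry point. A minimal caller-side sketch follows; the device-model function names are hypothetical, and only vm_lapic(), vlapic_set_intr_ready(), vcpu_notify_lapic() and vcpu_notify_event() come from this diff.

/*
 * Illustrative only: my_dev_raise_intr() and my_dev_request_exit() are
 * made-up names showing how a caller picks between the two entry points.
 */
static void
my_dev_raise_intr(struct vcpu *vcpu, int vector)
{
	/* Interrupts destined for the local APIC use the dedicated helper. */
	if (vlapic_set_intr_ready(vm_lapic(vcpu), vector, false))
		vcpu_notify_lapic(vcpu);
}

static void
my_dev_request_exit(struct vcpu *vcpu)
{
	/* Everything else (NMI, ExtINT, suspend, rendezvous) just kicks */
	/* the vCPU out of guest mode with a plain notification.         */
	vcpu_notify_event(vcpu);
}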
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index 441330fd57b8..f1c07a983a4b 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -34,6 +34,8 @@
#include <machine/vmm.h>
#include <machine/vmm_snapshot.h>
+#include <dev/vmm/vmm_param.h>
+
struct vm_memmap {
vm_paddr_t gpa;
int segid; /* memory segment */
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 842281ab862e..4189c1214b40 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -27,7 +27,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"
#include <sys/param.h>
@@ -58,6 +57,7 @@
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
+#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
diff --git a/sys/amd64/vmm/io/ppt.c b/sys/amd64/vmm/io/ppt.c
index 2cb459fb848f..6feac5dcbbed 100644
--- a/sys/amd64/vmm/io/ppt.c
+++ b/sys/amd64/vmm/io/ppt.c
@@ -336,13 +336,6 @@ ppt_teardown_msix(struct pptdev *ppt)
}
int
-ppt_avail_devices(void)
-{
-
- return (num_pptdevs);
-}
-
-int
ppt_assigned_devices(struct vm *vm)
{
struct pptdev *ppt;
diff --git a/sys/amd64/vmm/io/ppt.h b/sys/amd64/vmm/io/ppt.h
index f97c399564d7..9377f34d50e6 100644
--- a/sys/amd64/vmm/io/ppt.h
+++ b/sys/amd64/vmm/io/ppt.h
@@ -43,12 +43,6 @@ int ppt_assigned_devices(struct vm *vm);
bool ppt_is_mmio(struct vm *vm, vm_paddr_t gpa);
/*
- * Returns the number of devices sequestered by the ppt driver for assignment
- * to virtual machines.
- */
-int ppt_avail_devices(void);
-
-/*
* The following functions should never be called directly.
* Use 'vm_assign_pptdev()' and 'vm_unassign_pptdev()' instead.
*/
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 9879dfa164a4..afd5045de574 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -456,7 +456,7 @@ vlapic_fire_lvt(struct vlapic *vlapic, u_int lvt)
return (0);
}
if (vlapic_set_intr_ready(vlapic, vec, false))
- vcpu_notify_event(vlapic->vcpu, true);
+ vcpu_notify_lapic(vlapic->vcpu);
break;
case APIC_LVT_DM_NMI:
vm_inject_nmi(vlapic->vcpu);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index f2bea0d82b5c..2890e990633d 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -31,7 +31,6 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
-#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
@@ -189,8 +188,6 @@ struct vm {
#define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \
VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
-static int vmm_initialized;
-
static void vmmops_panic(void);
static void
@@ -270,11 +267,7 @@ static int trap_wbinvd;
SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
"WBINVD triggers a VM-exit");
-u_int vm_maxcpu;
-SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
- &vm_maxcpu, 0, "Maximum number of vCPUs");
-
-static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
+static void vcpu_notify_event_locked(struct vcpu *vcpu);
/* global statistics */
VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
@@ -299,14 +292,6 @@ VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit");
VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
-/*
- * Upper limit on vm_maxcpu. Limited by use of uint16_t types for CPU
- * counts as well as range of vpid values for VT-x and by the capacity
- * of cpuset_t masks. The call to new_unrhdr() in vpid_init() in
- * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
- */
-#define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
-
#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
@@ -402,22 +387,12 @@ vm_exitinfo_cpuset(struct vcpu *vcpu)
return (&vcpu->exitinfo_cpuset);
}
-static int
-vmm_init(void)
+int
+vmm_modinit(void)
{
if (!vmm_is_hw_supported())
return (ENXIO);
- vm_maxcpu = mp_ncpus;
- TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
-
- if (vm_maxcpu > VM_MAXCPU) {
- printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
- vm_maxcpu = VM_MAXCPU;
- }
- if (vm_maxcpu == 0)
- vm_maxcpu = 1;
-
vmm_host_state_init();
vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
@@ -431,70 +406,17 @@ vmm_init(void)
return (vmmops_modinit(vmm_ipinum));
}
-static int
-vmm_handler(module_t mod, int what, void *arg)
+int
+vmm_modcleanup(void)
{
- int error;
-
- switch (what) {
- case MOD_LOAD:
- if (vmm_is_hw_supported()) {
- error = vmmdev_init();
- if (error != 0)
- break;
- error = vmm_init();
- if (error == 0)
- vmm_initialized = 1;
- else
- (void)vmmdev_cleanup();
- } else {
- error = ENXIO;
- }
- break;
- case MOD_UNLOAD:
- if (vmm_is_hw_supported()) {
- error = vmmdev_cleanup();
- if (error == 0) {
- vmm_suspend_p = NULL;
- vmm_resume_p = NULL;
- iommu_cleanup();
- if (vmm_ipinum != IPI_AST)
- lapic_ipi_free(vmm_ipinum);
- error = vmmops_modcleanup();
- /*
- * Something bad happened - prevent new
- * VMs from being created
- */
- if (error)
- vmm_initialized = 0;
- }
- } else {
- error = 0;
- }
- break;
- default:
- error = 0;
- break;
- }
- return (error);
+ vmm_suspend_p = NULL;
+ vmm_resume_p = NULL;
+ iommu_cleanup();
+ if (vmm_ipinum != IPI_AST)
+ lapic_ipi_free(vmm_ipinum);
+ return (vmmops_modcleanup());
}
-static moduledata_t vmm_kmod = {
- "vmm",
- vmm_handler,
- NULL
-};
-
-/*
- * vmm initialization has the following dependencies:
- *
- * - VT-x initialization requires smp_rendezvous() and therefore must happen
- * after SMP is fully functional (after SI_SUB_SMP).
- * - vmm device initialization requires an initialized devfs.
- */
-DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY);
-MODULE_VERSION(vmm, 1);
-
static void
vm_init(struct vm *vm, bool create)
{
@@ -573,29 +495,12 @@ vm_unlock_vcpus(struct vm *vm)
sx_unlock(&vm->vcpus_init_lock);
}
-/*
- * The default CPU topology is a single thread per package.
- */
-u_int cores_per_package = 1;
-u_int threads_per_core = 1;
-
int
vm_create(const char *name, struct vm **retvm)
{
struct vm *vm;
int error;
- /*
- * If vmm.ko could not be successfully initialized then don't attempt
- * to create the virtual machine.
- */
- if (!vmm_initialized)
- return (ENXIO);
-
- if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) ==
- VM_MAX_NAMELEN + 1)
- return (EINVAL);
-
vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
if (error != 0) {
@@ -609,8 +514,8 @@ vm_create(const char *name, struct vm **retvm)
M_ZERO);
vm->sockets = 1;
- vm->cores = cores_per_package; /* XXX backwards compatibility */
- vm->threads = threads_per_core; /* XXX backwards compatibility */
+ vm->cores = 1; /* XXX backwards compatibility */
+ vm->threads = 1; /* XXX backwards compatibility */
vm->maxcpus = vm_maxcpu;
vm_init(vm, true);
@@ -1028,7 +933,7 @@ vcpu_wait_idle(struct vcpu *vcpu)
KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
vcpu->reqidle = 1;
- vcpu_notify_event_locked(vcpu, false);
+ vcpu_notify_event_locked(vcpu);
VMM_CTR1(vcpu, "vcpu state change from %s to "
"idle requested", vcpu_state2str(vcpu->state));
msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
@@ -1509,7 +1414,7 @@ vm_handle_suspend(struct vcpu *vcpu, bool *retu)
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->suspended_cpus)) {
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
}
@@ -1583,7 +1488,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
return (0);
@@ -2063,7 +1968,7 @@ vm_inject_nmi(struct vcpu *vcpu)
{
vcpu->nmi_pending = 1;
- vcpu_notify_event(vcpu, false);
+ vcpu_notify_event(vcpu);
return (0);
}
@@ -2090,7 +1995,7 @@ vm_inject_extint(struct vcpu *vcpu)
{
vcpu->extint_pending = 1;
- vcpu_notify_event(vcpu, false);
+ vcpu_notify_event(vcpu);
return (0);
}
@@ -2261,14 +2166,14 @@ vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
vm->debug_cpus = vm->active_cpus;
for (int i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
} else {
if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
return (EINVAL);
CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- vcpu_notify_event(vcpu, false);
+ vcpu_notify_event(vcpu);
}
return (0);
}
@@ -2376,7 +2281,7 @@ vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
* to the host_cpu to cause the vcpu to trap into the hypervisor.
*/
static void
-vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event_locked(struct vcpu *vcpu)
{
int hostcpu;
@@ -2384,12 +2289,7 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
if (vcpu->state == VCPU_RUNNING) {
KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
if (hostcpu != curcpu) {
- if (lapic_intr) {
- vlapic_post_intr(vcpu->vlapic, hostcpu,
- vmm_ipinum);
- } else {
- ipi_cpu(hostcpu, vmm_ipinum);
- }
+ ipi_cpu(hostcpu, vmm_ipinum);
} else {
/*
* If the 'vcpu' is running on 'curcpu' then it must
@@ -2407,10 +2307,21 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
}
void
-vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event(struct vcpu *vcpu)
{
vcpu_lock(vcpu);
- vcpu_notify_event_locked(vcpu, lapic_intr);
+ vcpu_notify_event_locked(vcpu);
+ vcpu_unlock(vcpu);
+}
+
+void
+vcpu_notify_lapic(struct vcpu *vcpu)
+{
+ vcpu_lock(vcpu);
+ if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
+ vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
+ else
+ vcpu_notify_event_locked(vcpu);
vcpu_unlock(vcpu);
}
@@ -2472,7 +2383,7 @@ restart:
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &dest))
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
return (vm_handle_rendezvous(vcpu));
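The vmm.c hunks above remove the amd64-private moduledata, vmm_handler() and the vmm_initialized flag, exporting vmm_modinit() and vmm_modcleanup() instead. Load/unload sequencing is therefore assumed to move into the machine-independent vmm code; the sketch below reconstructs such a handler from the deleted lines and is not a copy of the actual dev/vmm implementation.

/*
 * Assumed MI module handler driving the renamed MD hooks, rebuilt from
 * the code removed above; names and placement are guesses.
 */
static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		error = vmmdev_init();
		if (error != 0)
			break;
		error = vmm_modinit();		/* MD hook, was vmm_init() */
		if (error != 0)
			(void)vmmdev_cleanup();
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0)
			error = vmm_modcleanup();	/* MD hook */
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}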
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index 0cae01f172ec..63bdee69bb59 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -61,7 +61,7 @@ lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
vlapic = vm_lapic(vcpu);
if (vlapic_set_intr_ready(vlapic, vector, level))
- vcpu_notify_event(vcpu, true);
+ vcpu_notify_lapic(vcpu);
return (0);
}
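Similarly, vm_maxcpu, its hw.vmm.maxcpu tunable, the VM_MAXCPU clamp and the VM_MAX_NAMELEN limits disappear from the amd64 sources, while vm_create() still consumes vm_maxcpu and vmm_dev.h now includes <dev/vmm/vmm_param.h>; the natural reading is that this logic moved to machine-independent code. Below is a rough sketch of the assumed MI replacement, rebuilt from the removed lines; the helper name and actual location are guesses.

/*
 * Assumed MI replacement for the deleted amd64 tunable handling.
 * VM_MAXCPU reproduces the limit removed from vmm.c above.
 */
#define	VM_MAXCPU	MIN(0xffff - 1, CPU_SETSIZE)

u_int vm_maxcpu;
SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_maxcpu, 0, "Maximum number of vCPUs");

static void
vmm_maxcpu_init(void)		/* hypothetical helper name */
{
	vm_maxcpu = mp_ncpus;
	TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
	if (vm_maxcpu > VM_MAXCPU) {
		printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
		vm_maxcpu = VM_MAXCPU;
	}
	if (vm_maxcpu == 0)
		vm_maxcpu = 1;
}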