Diffstat (limited to 'sys/amd64/vmm/vmm.c')
-rw-r--r--    sys/amd64/vmm/vmm.c    161
1 file changed, 36 insertions, 125 deletions
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index f2bea0d82b5c..2890e990633d 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -31,7 +31,6 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
-#include <sys/module.h>
 #include <sys/sysctl.h>
 #include <sys/malloc.h>
 #include <sys/pcpu.h>
@@ -189,8 +188,6 @@ struct vm {
 #define VMM_CTR4(vcpu, format, p1, p2, p3, p4)        \
         VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
 
-static int vmm_initialized;
-
 static void vmmops_panic(void);
 
 static void
@@ -270,11 +267,7 @@ static int trap_wbinvd;
 SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
     "WBINVD triggers a VM-exit");
 
-u_int vm_maxcpu;
-SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
-    &vm_maxcpu, 0, "Maximum number of vCPUs");
-
-static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
+static void vcpu_notify_event_locked(struct vcpu *vcpu);
 
 /* global statistics */
 VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
@@ -299,14 +292,6 @@ VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
 VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit");
 VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
 
-/*
- * Upper limit on vm_maxcpu.  Limited by use of uint16_t types for CPU
- * counts as well as range of vpid values for VT-x and by the capacity
- * of cpuset_t masks.  The call to new_unrhdr() in vpid_init() in
- * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
- */
-#define VM_MAXCPU        MIN(0xffff - 1, CPU_SETSIZE)
-
 #ifdef KTR
 static const char *
 vcpu_state2str(enum vcpu_state state)
@@ -402,22 +387,12 @@ vm_exitinfo_cpuset(struct vcpu *vcpu)
         return (&vcpu->exitinfo_cpuset);
 }
 
-static int
-vmm_init(void)
+int
+vmm_modinit(void)
 {
         if (!vmm_is_hw_supported())
                 return (ENXIO);
 
-        vm_maxcpu = mp_ncpus;
-        TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
-
-        if (vm_maxcpu > VM_MAXCPU) {
-                printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
-                vm_maxcpu = VM_MAXCPU;
-        }
-        if (vm_maxcpu == 0)
-                vm_maxcpu = 1;
-
         vmm_host_state_init();
 
         vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
@@ -431,70 +406,17 @@ vmm_init(void)
         return (vmmops_modinit(vmm_ipinum));
 }
 
-static int
-vmm_handler(module_t mod, int what, void *arg)
+int
+vmm_modcleanup(void)
 {
-        int error;
-
-        switch (what) {
-        case MOD_LOAD:
-                if (vmm_is_hw_supported()) {
-                        error = vmmdev_init();
-                        if (error != 0)
-                                break;
-                        error = vmm_init();
-                        if (error == 0)
-                                vmm_initialized = 1;
-                        else
-                                (void)vmmdev_cleanup();
-                } else {
-                        error = ENXIO;
-                }
-                break;
-        case MOD_UNLOAD:
-                if (vmm_is_hw_supported()) {
-                        error = vmmdev_cleanup();
-                        if (error == 0) {
-                                vmm_suspend_p = NULL;
-                                vmm_resume_p = NULL;
-                                iommu_cleanup();
-                                if (vmm_ipinum != IPI_AST)
-                                        lapic_ipi_free(vmm_ipinum);
-                                error = vmmops_modcleanup();
-                                /*
-                                 * Something bad happened - prevent new
-                                 * VMs from being created
-                                 */
-                                if (error)
-                                        vmm_initialized = 0;
-                        }
-                } else {
-                        error = 0;
-                }
-                break;
-        default:
-                error = 0;
-                break;
-        }
-        return (error);
+        vmm_suspend_p = NULL;
+        vmm_resume_p = NULL;
+        iommu_cleanup();
+        if (vmm_ipinum != IPI_AST)
+                lapic_ipi_free(vmm_ipinum);
+        return (vmmops_modcleanup());
 }
 
-static moduledata_t vmm_kmod = {
-        "vmm",
-        vmm_handler,
-        NULL
-};
-
-/*
- * vmm initialization has the following dependencies:
- *
- * - VT-x initialization requires smp_rendezvous() and therefore must happen
- *   after SMP is fully functional (after SI_SUB_SMP).
- * - vmm device initialization requires an initialized devfs.
- */
-DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY);
-MODULE_VERSION(vmm, 1);
-
 static void
 vm_init(struct vm *vm, bool create)
 {
@@ -573,29 +495,12 @@ vm_unlock_vcpus(struct vm *vm)
         sx_unlock(&vm->vcpus_init_lock);
 }
 
-/*
- * The default CPU topology is a single thread per package.
- */
-u_int cores_per_package = 1;
-u_int threads_per_core = 1;
-
 int
 vm_create(const char *name, struct vm **retvm)
 {
         struct vm *vm;
         int error;
 
-        /*
-         * If vmm.ko could not be successfully initialized then don't attempt
-         * to create the virtual machine.
-         */
-        if (!vmm_initialized)
-                return (ENXIO);
-
-        if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) ==
-            VM_MAX_NAMELEN + 1)
-                return (EINVAL);
-
         vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
         error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
         if (error != 0) {
@@ -609,8 +514,8 @@ vm_create(const char *name, struct vm **retvm)
             M_ZERO);
 
         vm->sockets = 1;
-        vm->cores = cores_per_package;        /* XXX backwards compatibility */
-        vm->threads = threads_per_core;       /* XXX backwards compatibility */
+        vm->cores = 1;                        /* XXX backwards compatibility */
+        vm->threads = 1;                      /* XXX backwards compatibility */
         vm->maxcpus = vm_maxcpu;
 
         vm_init(vm, true);
@@ -1028,7 +933,7 @@ vcpu_wait_idle(struct vcpu *vcpu)
         KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
 
         vcpu->reqidle = 1;
-        vcpu_notify_event_locked(vcpu, false);
+        vcpu_notify_event_locked(vcpu);
         VMM_CTR1(vcpu, "vcpu state change from %s to "
             "idle requested", vcpu_state2str(vcpu->state));
         msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
@@ -1509,7 +1414,7 @@ vm_handle_suspend(struct vcpu *vcpu, bool *retu)
          */
         for (i = 0; i < vm->maxcpus; i++) {
                 if (CPU_ISSET(i, &vm->suspended_cpus)) {
-                        vcpu_notify_event(vm_vcpu(vm, i), false);
+                        vcpu_notify_event(vm_vcpu(vm, i));
                 }
         }
 
@@ -1583,7 +1488,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
          */
         for (i = 0; i < vm->maxcpus; i++) {
                 if (CPU_ISSET(i, &vm->active_cpus))
-                        vcpu_notify_event(vm_vcpu(vm, i), false);
+                        vcpu_notify_event(vm_vcpu(vm, i));
         }
 
         return (0);
@@ -2063,7 +1968,7 @@ vm_inject_nmi(struct vcpu *vcpu)
 {
 
         vcpu->nmi_pending = 1;
-        vcpu_notify_event(vcpu, false);
+        vcpu_notify_event(vcpu);
         return (0);
 }
 
@@ -2090,7 +1995,7 @@ vm_inject_extint(struct vcpu *vcpu)
 {
 
         vcpu->extint_pending = 1;
-        vcpu_notify_event(vcpu, false);
+        vcpu_notify_event(vcpu);
         return (0);
 }
 
@@ -2261,14 +2166,14 @@ vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
                 vm->debug_cpus = vm->active_cpus;
                 for (int i = 0; i < vm->maxcpus; i++) {
                         if (CPU_ISSET(i, &vm->active_cpus))
-                                vcpu_notify_event(vm_vcpu(vm, i), false);
+                                vcpu_notify_event(vm_vcpu(vm, i));
                 }
         } else {
                 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
                         return (EINVAL);
 
                 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
-                vcpu_notify_event(vcpu, false);
+                vcpu_notify_event(vcpu);
         }
         return (0);
 }
@@ -2376,7 +2281,7 @@ vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
  *   to the host_cpu to cause the vcpu to trap into the hypervisor.
  */
 static void
-vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event_locked(struct vcpu *vcpu)
 {
         int hostcpu;
 
@@ -2384,12 +2289,7 @@ if (vcpu->state == VCPU_RUNNING) {
         if (vcpu->state == VCPU_RUNNING) {
                 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
                 if (hostcpu != curcpu) {
-                        if (lapic_intr) {
-                                vlapic_post_intr(vcpu->vlapic, hostcpu,
-                                    vmm_ipinum);
-                        } else {
-                                ipi_cpu(hostcpu, vmm_ipinum);
-                        }
+                        ipi_cpu(hostcpu, vmm_ipinum);
                 } else {
                         /*
                          * If the 'vcpu' is running on 'curcpu' then it must
@@ -2407,10 +2307,21 @@
 }
 
 void
-vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event(struct vcpu *vcpu)
 {
         vcpu_lock(vcpu);
-        vcpu_notify_event_locked(vcpu, lapic_intr);
+        vcpu_notify_event_locked(vcpu);
+        vcpu_unlock(vcpu);
+}
+
+void
+vcpu_notify_lapic(struct vcpu *vcpu)
+{
+        vcpu_lock(vcpu);
+        if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
+                vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
+        else
+                vcpu_notify_event_locked(vcpu);
         vcpu_unlock(vcpu);
 }
 
@@ -2472,7 +2383,7 @@ restart:
          */
         for (i = 0; i < vm->maxcpus; i++) {
                 if (CPU_ISSET(i, &dest))
-                        vcpu_notify_event(vm_vcpu(vm, i), false);
+                        vcpu_notify_event(vm_vcpu(vm, i));
         }
 
         return (vm_handle_rendezvous(vcpu));
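Note on the module glue: the deleted vmm_handler()/DECLARE_MODULE boilerplate is replaced by the newly exported vmm_modinit() and vmm_modcleanup(), which implies the module handler itself now lives in machine-independent code. Below is a minimal sketch of what such a handler could look like, reassembled from the logic deleted above; the placement and any details beyond the functions named in this diff (vmm_modinit, vmm_modcleanup, vmmdev_init, vmmdev_cleanup) are assumptions, not part of this commit.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>

/*
 * Sketch only: a machine-independent module handler built from the
 * deleted vmm_handler() logic.  The vmm_is_hw_supported() check and the
 * vmm_initialized flag are gone; vmm_modinit() itself returns ENXIO on
 * unsupported hardware, and failed cleanup is reported via the error
 * return instead of a global flag.
 */
static int
vmm_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                error = vmmdev_init();
                if (error != 0)
                        break;
                error = vmm_modinit();          /* was the MD vmm_init() */
                if (error != 0)
                        (void)vmmdev_cleanup();
                break;
        case MOD_UNLOAD:
                error = vmmdev_cleanup();
                if (error == 0)
                        error = vmm_modcleanup();
                break;
        default:
                error = 0;
                break;
        }
        return (error);
}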

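Note on the notification API: the lapic_intr boolean is gone from the notification path. vcpu_notify_event() now always takes the generic IPI/wakeup route, while the new vcpu_notify_lapic() posts the interrupt directly through the vlapic when the vcpu is running on a remote host CPU and falls back to the generic path otherwise. A hypothetical call-site migration, for illustration only (vlapic_fire_intr() is an invented name, not a function from this diff):

/*
 * Hypothetical caller inside vmm: before this change it selected the
 * delivery style with a boolean, e.g. vcpu_notify_event(vcpu, true)
 * for posted-interrupt delivery and ..., false) for a plain kick.
 */
static void
vlapic_fire_intr(struct vcpu *vcpu, bool lapic_intr)
{
        if (lapic_intr)
                vcpu_notify_lapic(vcpu);        /* may post via the vlapic */
        else
                vcpu_notify_event(vcpu);        /* plain IPI or wakeup */
}

Splitting the entry points lets vcpu_notify_lapic() read vcpu->hostcpu under the vcpu lock itself, rather than threading the flag down into vcpu_notify_event_locked(), which is why the locked helper loses its second parameter everywhere else in the diff.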