| field | value | date |
|---|---|---|
| author | Andriy Gapon <avg@FreeBSD.org> | 2018-01-31 11:14:26 +0000 |
| committer | Andriy Gapon <avg@FreeBSD.org> | 2018-01-31 11:14:26 +0000 |
| commit | 6a8b7aa424356fdd25df88e9b1a192b2d143f4fa (patch) | |
| tree | bd38d938de60bc7f08eb58c28b1ec5a729c8152d /sys/amd64 | |
| parent | cadf7a004acbaf71194083015a2338095a39f8d7 (diff) | |
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/vmm/amd/svm.c | 100 |
1 file changed, 24 insertions(+), 76 deletions(-)
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 5a3de1ecc372..a47dce10842d 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -933,7 +933,6 @@ svm_update_virqinfo(struct svm_softc *sc, int vcpu)
         struct vm *vm;
         struct vlapic *vlapic;
         struct vmcb_ctrl *ctrl;
-        int pending;
 
         vm = sc->vm;
         vlapic = vm_lapic(vm, vcpu);
@@ -942,20 +941,9 @@ svm_update_virqinfo(struct svm_softc *sc, int vcpu)
         /* Update %cr8 in the emulated vlapic */
         vlapic_set_cr8(vlapic, ctrl->v_tpr);
 
-        /*
-         * If V_IRQ indicates that the interrupt injection attempted on then
-         * last VMRUN was successful then update the vlapic accordingly.
-         */
-        if (ctrl->v_intr_vector != 0) {
-                pending = ctrl->v_irq;
-                KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
-                    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
-                KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
-                VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
-                    pending ? "pending" : "accepted");
-                if (!pending)
-                        vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
-        }
+        /* Virtual interrupt injection is not used. */
+        KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
+            "v_intr_vector %d", __func__, ctrl->v_intr_vector));
 }
 
 static void
@@ -1024,12 +1012,7 @@ disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
                 return;
         }
 
-#ifdef KTR
-        if (ctrl->v_intr_vector == 0)
-                VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
-        else
-                VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
-#endif
+        VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
         ctrl->v_irq = 0;
         ctrl->v_intr_vector = 0;
         svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
@@ -1574,14 +1557,14 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
         struct vmcb_state *state;
         struct svm_vcpu *vcpustate;
         uint8_t v_tpr;
-        int vector, need_intr_window, pending_apic_vector;
+        int vector, need_intr_window;
+        int extint_pending;
 
         state = svm_get_vmcb_state(sc, vcpu);
         ctrl = svm_get_vmcb_ctrl(sc, vcpu);
         vcpustate = svm_get_vcpu(sc, vcpu);
 
         need_intr_window = 0;
-        pending_apic_vector = 0;
 
         if (vcpustate->nextrip != state->rip) {
                 ctrl->intr_shadow = 0;
@@ -1651,40 +1634,19 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
                 }
         }
 
-        if (!vm_extint_pending(sc->vm, vcpu)) {
-                /*
-                 * APIC interrupts are delivered using the V_IRQ offload.
-                 *
-                 * The primary benefit is that the hypervisor doesn't need to
-                 * deal with the various conditions that inhibit interrupts.
-                 * It also means that TPR changes via CR8 will be handled
-                 * without any hypervisor involvement.
-                 *
-                 * Note that the APIC vector must remain pending in the vIRR
-                 * until it is confirmed that it was delivered to the guest.
-                 * This can be confirmed based on the value of V_IRQ at the
-                 * next #VMEXIT (1 = pending, 0 = delivered).
-                 *
-                 * Also note that it is possible that another higher priority
-                 * vector can become pending before this vector is delivered
-                 * to the guest. This is alright because vcpu_notify_event()
-                 * will send an IPI and force the vcpu to trap back into the
-                 * hypervisor. The higher priority vector will be injected on
-                 * the next VMRUN.
-                 */
-                if (vlapic_pending_intr(vlapic, &vector)) {
-                        KASSERT(vector >= 16 && vector <= 255,
-                            ("invalid vector %d from local APIC", vector));
-                        pending_apic_vector = vector;
-                }
-                goto done;
+        extint_pending = vm_extint_pending(sc->vm, vcpu);
+        if (!extint_pending) {
+                if (!vlapic_pending_intr(vlapic, &vector))
+                        goto done;
+                KASSERT(vector >= 16 && vector <= 255,
+                    ("invalid vector %d from local APIC", vector));
+        } else {
+                /* Ask the legacy pic for a vector to inject */
+                vatpic_pending_intr(sc->vm, &vector);
+                KASSERT(vector >= 0 && vector <= 255,
+                    ("invalid vector %d from INTR", vector));
         }
 
-        /* Ask the legacy pic for a vector to inject */
-        vatpic_pending_intr(sc->vm, &vector);
-        KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
-            vector));
-
         /*
          * If the guest has disabled interrupts or is in an interrupt shadow
          * then we cannot inject the pending interrupt.
@@ -1710,14 +1672,14 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
                 goto done;
         }
 
-        /*
-         * Legacy PIC interrupts are delivered via the event injection
-         * mechanism.
-         */
         svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
 
-        vm_extint_clear(sc->vm, vcpu);
-        vatpic_intr_accepted(sc->vm, vector);
+        if (!extint_pending) {
+                vlapic_intr_accepted(vlapic, vector);
+        } else {
+                vm_extint_clear(sc->vm, vcpu);
+                vatpic_intr_accepted(sc->vm, vector);
+        }
 
         /*
          * Force a VM-exit as soon as the vcpu is ready to accept another
@@ -1747,21 +1709,7 @@ done:
                 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
         }
 
-        if (pending_apic_vector) {
-                /*
-                 * If an APIC vector is being injected then interrupt window
-                 * exiting is not possible on this VMRUN.
-                 */
-                KASSERT(!need_intr_window, ("intr_window exiting impossible"));
-                VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
-                    pending_apic_vector);
-
-                ctrl->v_irq = 1;
-                ctrl->v_ign_tpr = 0;
-                ctrl->v_intr_vector = pending_apic_vector;
-                ctrl->v_intr_prio = pending_apic_vector >> 4;
-                svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
-        } else if (need_intr_window) {
+        if (need_intr_window) {
                 /*
                  * We use V_IRQ in conjunction with the VINTR intercept to
                  * trap into the hypervisor as soon as a virtual interrupt
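The net effect of the change: local APIC interrupts are no longer posted to the guest through the V_IRQ offload. Both APIC and legacy PIC (ExtINT) vectors are now delivered with svm_eventinject() and then acknowledged against whichever source supplied the vector, while V_IRQ is retained only for the VINTR interrupt-window intercept. Below is a minimal, self-contained sketch of that control flow; the helper names mirror the vmm functions in the diff, but their bodies are hypothetical stubs added so the example compiles on its own, not the real FreeBSD implementations.

```c
/*
 * Stand-alone model of the reworked svm_inj_interrupts() flow.
 * The stubs below stand in for the vmm helpers named in the diff.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool extint_source;                      /* toggled by main() below */

static bool vm_extint_pending(void)             { return extint_source; }
static bool vlapic_pending_intr(int *vec)       { *vec = 48; return true; }
static void vatpic_pending_intr(int *vec)       { *vec = 8; }
static void svm_eventinject(int vec)            { printf("event-inject vector %d\n", vec); }
static void vlapic_intr_accepted(int vec)       { printf("vlapic accepted %d\n", vec); }
static void vatpic_intr_accepted(int vec)       { printf("vatpic accepted %d\n", vec); }
static void vm_extint_clear(void)               { printf("ExtINT cleared\n"); }

static void
inject_pending_interrupt(void)
{
        bool extint_pending;
        int vector;

        /* Pick the vector source exactly once, as the new code does. */
        extint_pending = vm_extint_pending();
        if (!extint_pending) {
                /* Local APIC vector; vectors below 16 are reserved. */
                if (!vlapic_pending_intr(&vector))
                        return;
                assert(vector >= 16 && vector <= 255);
        } else {
                /* Legacy PIC (ExtINT) vector. */
                vatpic_pending_intr(&vector);
                assert(vector >= 0 && vector <= 255);
        }

        /* Both sources now share the same event-injection path. */
        svm_eventinject(vector);

        /* Acknowledge the interrupt against the source that supplied it. */
        if (!extint_pending) {
                vlapic_intr_accepted(vector);
        } else {
                vm_extint_clear();
                vatpic_intr_accepted(vector);
        }
}

int
main(void)
{
        extint_source = false;          /* local APIC path */
        inject_pending_interrupt();
        extint_source = true;           /* legacy PIC path */
        inject_pending_interrupt();
        return (0);
}
```

Consolidating on event injection means there is no longer any V_IRQ delivery state to reconcile at #VMEXIT, which is why svm_update_virqinfo() now merely asserts that v_intr_vector is zero.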