Diffstat (limited to 'emulators/xen-kernel/files/xsa220-4.7.patch')
-rw-r--r--  emulators/xen-kernel/files/xsa220-4.7.patch  133
1 file changed, 133 insertions, 0 deletions
diff --git a/emulators/xen-kernel/files/xsa220-4.7.patch b/emulators/xen-kernel/files/xsa220-4.7.patch
new file mode 100644
index 000000000000..894c72a3da53
--- /dev/null
+++ b/emulators/xen-kernel/files/xsa220-4.7.patch
@@ -0,0 +1,133 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86: avoid leaking PKRU and BND* between vCPU-s
+
+PKRU is explicitly "XSAVE-managed but not XSAVE-enabled", so guests
+might access the register (via {RD,WR}PKRU) without setting XCR0.PKRU.
+Force context switching as well as migrating the register as soon as
+CR4.PKE is being set the first time.
+
+For MPX (BND<n>, BNDCFGU, and BNDSTATUS) the situation is less clear,
+and the SDM's information for that case is not entirely consistent.
+While experimentally the instructions don't change register state as
+long as the two XCR0 bits aren't both 1, be on the safe side and enable
+both if BNDCFGS.EN is being set the first time.
+
+This is XSA-220.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -2452,6 +2452,27 @@ int hvm_set_cr4(unsigned long value, boo
+ paging_update_paging_modes(v);
+ }
+
++ /*
++ * {RD,WR}PKRU are not gated on XCR0.PKRU and hence an oddly behaving
++ * guest may enable the feature in CR4 without enabling it in XCR0. We
++ * need to context switch / migrate PKRU nevertheless.
++ */
++ if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & XSTATE_PKRU) )
++ {
++ int rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
++ get_xcr0() | XSTATE_PKRU);
++
++ if ( rc )
++ {
++ HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.PKRU: %d", rc);
++ goto gpf;
++ }
++
++ if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
++ get_xcr0() & ~XSTATE_PKRU) )
++ /* nothing, best effort only */;
++ }
++
+ return X86EMUL_OKAY;
+
+ gpf:
+--- a/xen/arch/x86/hvm/vmx/vmx.c
++++ b/xen/arch/x86/hvm/vmx/vmx.c
+@@ -31,6 +31,7 @@
+ #include <asm/regs.h>
+ #include <asm/cpufeature.h>
+ #include <asm/processor.h>
++#include <asm/xstate.h>
+ #include <asm/guest_access.h>
+ #include <asm/debugreg.h>
+ #include <asm/msr.h>
+@@ -783,6 +784,45 @@ static int vmx_load_vmcs_ctxt(struct vcp
+ return 0;
+ }
+
++static bool_t vmx_set_guest_bndcfgs(struct vcpu *v, u64 val)
++{
++ if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
++ !is_canonical_address(val) ||
++ (val & IA32_BNDCFGS_RESERVED) )
++ return 0;
++
++ /*
++ * While MPX instructions are supposed to be gated on XCR0.BND*, let's
++ * nevertheless force the relevant XCR0 bits on when the feature is being
++ * enabled in BNDCFGS.
++ */
++ if ( (val & IA32_BNDCFGS_ENABLE) &&
++ !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
++ {
++ uint64_t xcr0 = get_xcr0();
++ int rc;
++
++ if ( v != current )
++ return 0;
++
++ rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
++ xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
++
++ if ( rc )
++ {
++ HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.BND*: %d", rc);
++ return 0;
++ }
++
++ if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0) )
++ /* nothing, best effort only */;
++ }
++
++ __vmwrite(GUEST_BNDCFGS, val);
++
++ return 1;
++}
++
+ static unsigned int __init vmx_init_msr(void)
+ {
+ return (cpu_has_mpx && cpu_has_vmx_mpx) +
+@@ -822,11 +862,8 @@ static int vmx_load_msr(struct vcpu *v,
+ switch ( ctxt->msr[i].index )
+ {
+ case MSR_IA32_BNDCFGS:
+- if ( cpu_has_mpx && cpu_has_vmx_mpx &&
+- is_canonical_address(ctxt->msr[i].val) &&
+- !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
+- __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
+- else if ( ctxt->msr[i].val )
++ if ( !vmx_set_guest_bndcfgs(v, ctxt->msr[i].val) &&
++ ctxt->msr[i].val )
+ err = -ENXIO;
+ break;
+ case MSR_IA32_XSS:
+@@ -2878,11 +2915,8 @@ static int vmx_msr_write_intercept(unsig
+ break;
+ }
+ case MSR_IA32_BNDCFGS:
+- if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
+- !is_canonical_address(msr_content) ||
+- (msr_content & IA32_BNDCFGS_RESERVED) )
++ if ( !vmx_set_guest_bndcfgs(v, msr_content) )
+ goto gp_fault;
+- __vmwrite(GUEST_BNDCFGS, msr_content);
+ break;
+ case IA32_FEATURE_CONTROL_MSR:
+ case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
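
A note on the PKRU hunk in hvm_set_cr4() above: {RD,WR}PKRU are gated on CPUID enumeration and CR4.PKE, but not on XCR0.PKRU, so a guest can hold live PKRU state that XSAVE/XRSTOR would skip unless the XCR0 bit is forced on. A minimal sketch of the two instructions, assuming a PKU-capable x86-64 CPU and GCC-style inline assembly (illustrative only, not part of the patch):

#include <stdint.h>

/* RDPKRU: ECX must be 0; reads PKRU into EAX and zeroes EDX. */
static inline uint32_t rdpkru(void)
{
    uint32_t pkru, edx;

    asm volatile ( "rdpkru" : "=a" (pkru), "=d" (edx) : "c" (0) );
    return pkru;
}

/* WRPKRU: ECX and EDX must be 0; writes EAX into PKRU. */
static inline void wrpkru(uint32_t pkru)
{
    asm volatile ( "wrpkru" :: "a" (pkru), "c" (0), "d" (0) );
}

Neither instruction consults XCR0: only CR4.PKE (plus CPUID enumeration) decides whether they execute, which is exactly why the hunk must force XCR0.PKRU into xcr0_accum to get the register context switched.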
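
A companion note on vmx_set_guest_bndcfgs(): it sets XSTATE_BNDREGS and XSTATE_BNDCSR together because the SDM requires XCR0[4:3] to be 00b or 11b; XSETBV raises #GP(0) for the mixed encodings. A small sketch of that consistency rule, using the bit positions from Xen's xstate definitions (illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define XSTATE_BNDREGS (1ULL << 3)  /* BND0-BND3 register state */
#define XSTATE_BNDCSR  (1ULL << 4)  /* BNDCFGU and BNDSTATUS state */

/* Mirrors the XSETBV check: both MPX bits set, or neither. */
static bool xcr0_mpx_bits_valid(uint64_t xcr0)
{
    uint64_t mpx = xcr0 & (XSTATE_BNDREGS | XSTATE_BNDCSR);

    return mpx == 0 || mpx == (XSTATE_BNDREGS | XSTATE_BNDCSR);
}

This is also why the patch ORs both bits into XCR0 in a single handle_xsetbv() call rather than one at a time: setting them individually would leave XCR0[4:3] in a mixed state and fault on the first call.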