Diffstat (limited to 'emulators/xen-kernel/files/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch')
-rw-r--r-- | emulators/xen-kernel/files/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch | 399
1 file changed, 399 insertions, 0 deletions
diff --git a/emulators/xen-kernel/files/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch b/emulators/xen-kernel/files/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
new file mode 100644
index 000000000000..576fe192be03
--- /dev/null
+++ b/emulators/xen-kernel/files/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
@@ -0,0 +1,399 @@
+From 0e6c6fc449000d97f9fa87ed1fbe23f0cf21406b Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Wed, 17 Jan 2018 17:22:34 +0100
+Subject: [PATCH] x86/entry: Remove support for partial cpu_user_regs frames
+
+Save all GPRs on entry to Xen.
+
+The entry_int82() path is via a DPL1 gate, only usable by 32bit PV guests, so
+can get away with only saving the 32bit registers. All other entrypoints can
+be reached from 32 or 64bit contexts.
+
+This is part of XSA-254.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Wei Liu <wei.liu2@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+master commit: f9eb74789af77e985ae653193f3622263499f674
+master date: 2018-01-05 19:57:07 +0000
+---
+ tools/tests/x86_emulator/x86_emulate.c | 1 -
+ xen/arch/x86/domain.c | 1 -
+ xen/arch/x86/traps.c | 2 -
+ xen/arch/x86/x86_64/compat/entry.S | 7 ++-
+ xen/arch/x86/x86_64/entry.S | 12 ++--
+ xen/arch/x86/x86_64/traps.c | 13 ++--
+ xen/arch/x86/x86_emulate.c | 1 -
+ xen/arch/x86/x86_emulate/x86_emulate.c | 8 +--
+ xen/common/wait.c | 1 -
+ xen/include/asm-x86/asm_defns.h | 107 +++------------------------------
+ 10 files changed, 26 insertions(+), 127 deletions(-)
+
+diff --git a/tools/tests/x86_emulator/x86_emulate.c b/tools/tests/x86_emulator/x86_emulate.c
+index 10e3f61baa..c12527a50b 100644
+--- a/tools/tests/x86_emulator/x86_emulate.c
++++ b/tools/tests/x86_emulator/x86_emulate.c
+@@ -24,7 +24,6 @@ typedef bool bool_t;
+ #endif
+
+ #define cpu_has_amd_erratum(nr) 0
+-#define mark_regs_dirty(r) ((void)(r))
+
+ #define __packed __attribute__((packed))
+
+diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
+index ceeadabbfd..6539b75fa7 100644
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -148,7 +148,6 @@ static void noreturn continue_idle_domain(struct vcpu *v)
+ static void noreturn continue_nonidle_domain(struct vcpu *v)
+ {
+     check_wakeup_from_wait();
+-    mark_regs_dirty(guest_cpu_user_regs());
+     reset_stack_and_jump(ret_from_intr);
+ }
+
+diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
+index 05b4b0811d..0928c9b235 100644
+--- a/xen/arch/x86/traps.c
++++ b/xen/arch/x86/traps.c
+@@ -2456,7 +2456,6 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
+             goto fail;
+         if ( admin_io_okay(port, op_bytes, currd) )
+         {
+-            mark_regs_dirty(regs);
+             io_emul(regs);
+         }
+         else
+@@ -2486,7 +2485,6 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
+             goto fail;
+         if ( admin_io_okay(port, op_bytes, currd) )
+         {
+-            mark_regs_dirty(regs);
+             io_emul(regs);
+             if ( (op_bytes == 1) && pv_post_outb_hook )
+                 pv_post_outb_hook(port, regs->eax);
+diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
+index 794bb44266..7ee01597a3 100644
+--- a/xen/arch/x86/x86_64/compat/entry.S
++++ b/xen/arch/x86/x86_64/compat/entry.S
+@@ -15,7 +15,8 @@
+ ENTRY(compat_hypercall)
+         ASM_CLAC
+         pushq $0
+-        SAVE_VOLATILE type=TRAP_syscall compat=1
++        movl $TRAP_syscall, 4(%rsp)
++        SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
+         CR4_PV32_RESTORE
+
+         cmpb $0,untrusted_msi(%rip)
+@@ -127,7 +128,6 @@ compat_test_guest_events:
+ /* %rbx: struct vcpu */
+ compat_process_softirqs:
+         sti
+-        andl $~TRAP_regs_partial,UREGS_entry_vector(%rsp)
+         call do_softirq
+         jmp compat_test_all_events
+
+@@ -268,7 +268,8 @@ ENTRY(cstar_enter)
+         pushq $FLAT_USER_CS32
+         pushq %rcx
+         pushq $0
+-        SAVE_VOLATILE TRAP_syscall
++        movl $TRAP_syscall, 4(%rsp)
++        SAVE_ALL
+         GET_CURRENT(bx)
+         movq VCPU_domain(%rbx),%rcx
+         cmpb $0,DOMAIN_is_32bit_pv(%rcx)
+diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
+index 708d9b9402..cebb1e4f4f 100644
+--- a/xen/arch/x86/x86_64/entry.S
++++ b/xen/arch/x86/x86_64/entry.S
+@@ -97,7 +97,8 @@ ENTRY(lstar_enter)
+         pushq $FLAT_KERNEL_CS64
+         pushq %rcx
+         pushq $0
+-        SAVE_VOLATILE TRAP_syscall
++        movl $TRAP_syscall, 4(%rsp)
++        SAVE_ALL
+         GET_CURRENT(bx)
+         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
+         jz switch_to_kernel
+@@ -192,7 +193,6 @@ test_guest_events:
+ /* %rbx: struct vcpu */
+ process_softirqs:
+         sti
+-        SAVE_PRESERVED
+         call do_softirq
+         jmp test_all_events
+
+@@ -246,7 +246,8 @@ GLOBAL(sysenter_eflags_saved)
+         pushq $3 /* ring 3 null cs */
+         pushq $0 /* null rip */
+         pushq $0
+-        SAVE_VOLATILE TRAP_syscall
++        movl $TRAP_syscall, 4(%rsp)
++        SAVE_ALL
+         GET_CURRENT(bx)
+         cmpb $0,VCPU_sysenter_disables_events(%rbx)
+         movq VCPU_sysenter_addr(%rbx),%rax
+@@ -263,7 +264,6 @@ UNLIKELY_END(sysenter_nt_set)
+         leal (,%rcx,TBF_INTERRUPT),%ecx
+ UNLIKELY_START(z, sysenter_gpf)
+         movq VCPU_trap_ctxt(%rbx),%rsi
+-        SAVE_PRESERVED
+         movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+         movl %eax,TRAPBOUNCE_error_code(%rdx)
+         movq TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
+@@ -281,7 +281,8 @@ UNLIKELY_END(sysenter_gpf)
+ ENTRY(int80_direct_trap)
+         ASM_CLAC
+         pushq $0
+-        SAVE_VOLATILE 0x80
++        movl $0x80, 4(%rsp)
++        SAVE_ALL
+
+         cmpb $0,untrusted_msi(%rip)
+ UNLIKELY_START(ne, msi_check)
+@@ -309,7 +310,6 @@ int80_slow_path:
+          * IDT entry with DPL==0.
+          */
+         movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rsp)
+-        SAVE_PRESERVED
+         movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+         /* A GPF wouldn't have incremented the instruction pointer. */
+         subq $2,UREGS_rip(%rsp)
+diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
+index 22816100fd..bf8dfcbdee 100644
+--- a/xen/arch/x86/x86_64/traps.c
++++ b/xen/arch/x86/x86_64/traps.c
+@@ -81,15 +81,10 @@ static void _show_registers(
+            regs->rbp, regs->rsp, regs->r8);
+     printk("r9: %016lx r10: %016lx r11: %016lx\n",
+            regs->r9, regs->r10, regs->r11);
+-    if ( !(regs->entry_vector & TRAP_regs_partial) )
+-    {
+-        printk("r12: %016lx r13: %016lx r14: %016lx\n",
+-               regs->r12, regs->r13, regs->r14);
+-        printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
+-               regs->r15, crs[0], crs[4]);
+-    }
+-    else
+-        printk("cr0: %016lx cr4: %016lx\n", crs[0], crs[4]);
++    printk("r12: %016lx r13: %016lx r14: %016lx\n",
++           regs->r12, regs->r13, regs->r14);
++    printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
++           regs->r15, crs[0], crs[4]);
+     printk("cr3: %016lx cr2: %016lx\n", crs[3], crs[2]);
+     printk("fsb: %016lx gsb: %016lx gss: %016lx\n",
+            crs[5], crs[6], crs[7]);
+diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
+index 28132b5dbc..43730026c2 100644
+--- a/xen/arch/x86/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate.c
+@@ -11,7 +11,6 @@
+
+ #include <xen/domain_page.h>
+ #include <asm/x86_emulate.h>
+-#include <asm/asm_defns.h> /* mark_regs_dirty() */
+ #include <asm/processor.h> /* current_cpu_info */
+ #include <asm/amd.h> /* cpu_has_amd_erratum() */
+
+diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
+index 4ee3df9247..fcfe9f7de7 100644
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1424,10 +1424,10 @@ decode_register(
+     case 9: p = &regs->r9; break;
+     case 10: p = &regs->r10; break;
+     case 11: p = &regs->r11; break;
+-    case 12: mark_regs_dirty(regs); p = &regs->r12; break;
+-    case 13: mark_regs_dirty(regs); p = &regs->r13; break;
+-    case 14: mark_regs_dirty(regs); p = &regs->r14; break;
+-    case 15: mark_regs_dirty(regs); p = &regs->r15; break;
++    case 12: p = &regs->r12; break;
++    case 13: p = &regs->r13; break;
++    case 14: p = &regs->r14; break;
++    case 15: p = &regs->r15; break;
+ #endif
+     default: BUG(); p = NULL; break;
+     }
+diff --git a/xen/common/wait.c b/xen/common/wait.c
+index 4ac98c07fe..398f653174 100644
+--- a/xen/common/wait.c
++++ b/xen/common/wait.c
+@@ -128,7 +128,6 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
+     unsigned long dummy;
+     u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
+
+-    cpu_info->guest_cpu_user_regs.entry_vector &= ~TRAP_regs_partial;
+     ASSERT(wqv->esp == 0);
+
+     /* Save current VCPU affinity; force wakeup on *this* CPU only. */
+diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
+index 279d70298f..6e5c079ad8 100644
+--- a/xen/include/asm-x86/asm_defns.h
++++ b/xen/include/asm-x86/asm_defns.h
+@@ -17,15 +17,6 @@
+ void ret_from_intr(void);
+ #endif
+
+-#ifdef CONFIG_FRAME_POINTER
+-/* Indicate special exception stack frame by inverting the frame pointer. */
+-#define SETUP_EXCEPTION_FRAME_POINTER(offs) \
+-        leaq offs(%rsp),%rbp; \
+-        notq %rbp
+-#else
+-#define SETUP_EXCEPTION_FRAME_POINTER(offs)
+-#endif
+-
+ #ifndef NDEBUG
+ #define ASSERT_INTERRUPT_STATUS(x, msg) \
+     pushf; \
+@@ -42,31 +33,6 @@ void ret_from_intr(void);
+ #define ASSERT_INTERRUPTS_DISABLED \
+     ASSERT_INTERRUPT_STATUS(z, "INTERRUPTS DISABLED")
+
+-/*
+- * This flag is set in an exception frame when registers R12-R15 did not get
+- * saved.
+- */
+-#define _TRAP_regs_partial 16
+-#define TRAP_regs_partial (1 << _TRAP_regs_partial)
+-/*
+- * This flag gets set in an exception frame when registers R12-R15 possibly
+- * get modified from their originally saved values and hence need to be
+- * restored even if the normal call flow would restore register values.
+- *
+- * The flag being set implies _TRAP_regs_partial to be unset. Restoring
+- * R12-R15 thus is
+- * - required when this flag is set,
+- * - safe when _TRAP_regs_partial is unset.
+- */
+-#define _TRAP_regs_dirty 17
+-#define TRAP_regs_dirty (1 << _TRAP_regs_dirty)
+-
+-#define mark_regs_dirty(r) ({ \
+-        struct cpu_user_regs *r__ = (r); \
+-        ASSERT(!((r__)->entry_vector & TRAP_regs_partial)); \
+-        r__->entry_vector |= TRAP_regs_dirty; \
+-})
+-
+ #ifdef __ASSEMBLY__
+ # define _ASM_EX(p) p-.
+ #else
+@@ -236,7 +202,7 @@ static always_inline void stac(void)
+ #endif
+
+ #ifdef __ASSEMBLY__
+-.macro SAVE_ALL op
++.macro SAVE_ALL op, compat=0
+ .ifeqs "\op", "CLAC"
+         ASM_CLAC
+ .else
+@@ -255,40 +221,6 @@ static always_inline void stac(void)
+         movq %rdx,UREGS_rdx(%rsp)
+         movq %rcx,UREGS_rcx(%rsp)
+         movq %rax,UREGS_rax(%rsp)
+-        movq %r8,UREGS_r8(%rsp)
+-        movq %r9,UREGS_r9(%rsp)
+-        movq %r10,UREGS_r10(%rsp)
+-        movq %r11,UREGS_r11(%rsp)
+-        movq %rbx,UREGS_rbx(%rsp)
+-        movq %rbp,UREGS_rbp(%rsp)
+-        SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
+-        movq %r12,UREGS_r12(%rsp)
+-        movq %r13,UREGS_r13(%rsp)
+-        movq %r14,UREGS_r14(%rsp)
+-        movq %r15,UREGS_r15(%rsp)
+-.endm
+-
+-/*
+- * Save all registers not preserved by C code or used in entry/exit code. Mark
+- * the frame as partial.
+- *
+- * @type: exception type
+- * @compat: R8-R15 don't need saving, and the frame nevertheless is complete
+- */
+-.macro SAVE_VOLATILE type compat=0
+-.if \compat
+-        movl $\type,UREGS_entry_vector-UREGS_error_code(%rsp)
+-.else
+-        movl $\type|TRAP_regs_partial,\
+-              UREGS_entry_vector-UREGS_error_code(%rsp)
+-.endif
+-        addq $-(UREGS_error_code-UREGS_r15),%rsp
+-        cld
+-        movq %rdi,UREGS_rdi(%rsp)
+-        movq %rsi,UREGS_rsi(%rsp)
+-        movq %rdx,UREGS_rdx(%rsp)
+-        movq %rcx,UREGS_rcx(%rsp)
+-        movq %rax,UREGS_rax(%rsp)
+ .if !\compat
+         movq %r8,UREGS_r8(%rsp)
+         movq %r9,UREGS_r9(%rsp)
+@@ -297,20 +229,17 @@ static always_inline void stac(void)
+ .endif
+         movq %rbx,UREGS_rbx(%rsp)
+         movq %rbp,UREGS_rbp(%rsp)
+-        SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
+-.endm
+-
+-/*
+- * Complete a frame potentially only partially saved.
+- */
+-.macro SAVE_PRESERVED
+-        btrl $_TRAP_regs_partial,UREGS_entry_vector(%rsp)
+-        jnc 987f
++#ifdef CONFIG_FRAME_POINTER
++/* Indicate special exception stack frame by inverting the frame pointer. */
++        leaq UREGS_rbp(%rsp), %rbp
++        notq %rbp
++#endif
++.if !\compat
+         movq %r12,UREGS_r12(%rsp)
+         movq %r13,UREGS_r13(%rsp)
+         movq %r14,UREGS_r14(%rsp)
+         movq %r15,UREGS_r15(%rsp)
+-987:
++.endif
+ .endm
+
+ #define LOAD_ONE_REG(reg, compat) \
+@@ -351,33 +280,13 @@ static always_inline void stac(void)
+  * @compat: R8-R15 don't need reloading
+  */
+ .macro RESTORE_ALL adj=0 compat=0
+-.if !\compat
+-        testl $TRAP_regs_dirty,UREGS_entry_vector(%rsp)
+-.endif
+         LOAD_C_CLOBBERED \compat
+ .if !\compat
+-        jz 987f
+         movq UREGS_r15(%rsp),%r15
+         movq UREGS_r14(%rsp),%r14
+         movq UREGS_r13(%rsp),%r13
+         movq UREGS_r12(%rsp),%r12
+-#ifndef NDEBUG
+-        .subsection 1
+-987:    testl $TRAP_regs_partial,UREGS_entry_vector(%rsp)
+-        jnz 987f
+-        cmpq UREGS_r15(%rsp),%r15
+-        jne 789f
+-        cmpq UREGS_r14(%rsp),%r14
+-        jne 789f
+-        cmpq UREGS_r13(%rsp),%r13
+-        jne 789f
+-        cmpq UREGS_r12(%rsp),%r12
+-        je 987f
+-789:    BUG /* Corruption of partial register state. */
+-        .subsection 0
+-#endif
+ .endif
+-987:
+         LOAD_ONE_REG(bp, \compat)
+         LOAD_ONE_REG(bx, \compat)
+         subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp
+-- 
+2.15.1
+
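Reviewer's note (not part of the port or of the upstream patch): the scheme being deleted above tracked frame completeness in the high bits of cpu_user_regs.entry_vector, and mark_regs_dirty() was how C code requested a full register reload on the exit path. The following is only an illustrative restatement of the removed asm_defns.h definitions in plain C, with cpu_user_regs trimmed to the single field involved; names and values are taken from the deleted hunk, and the snippet is not buildable Xen code.

    /* Illustration of the removed partial-frame flags, restated in plain C. */
    #include <assert.h>
    #include <stdint.h>

    struct cpu_user_regs {
        uint32_t entry_vector;   /* low bits: vector, high bits: frame flags */
    };

    #define _TRAP_regs_partial 16                         /* R12-R15 not saved */
    #define TRAP_regs_partial  (1 << _TRAP_regs_partial)
    #define _TRAP_regs_dirty   17                         /* R12-R15 modified */
    #define TRAP_regs_dirty    (1 << _TRAP_regs_dirty)

    /* Old behaviour: code about to touch R12-R15 in a saved frame had to tag
     * the frame so RESTORE_ALL would reload those registers on exit. */
    static void mark_regs_dirty(struct cpu_user_regs *r)
    {
        assert(!(r->entry_vector & TRAP_regs_partial));   /* frame must be full */
        r->entry_vector |= TRAP_regs_dirty;
    }

Once SAVE_ALL unconditionally stores R12-R15, neither flag has any meaning left, which is why the same patch drops every mark_regs_dirty() call site along with the SAVE_VOLATILE and SAVE_PRESERVED macros.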