[PATCH 2/3] x86/spec-ctrl: Drop SPEC_CTRL_{ENTRY_FROM,EXIT_TO}_HVM
These were written before Spectre/Meltdown went public, and there was
large uncertainty in how the protections would evolve.  As it turns out,
they're very specific to Intel hardware, and not very suitable for AMD.

Expand and drop the macros.  No change at all for VT-x.

For AMD, the only relevant piece of functionality is DO_OVERWRITE_RSB,
although we will soon be adding (different) logic to handle MSR_SPEC_CTRL.

This has a marginal improvement of removing an unconditional pile of
long-nops from the vmentry/exit path.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/hvm/svm/entry.S             |  5 +++--
 xen/arch/x86/hvm/vmx/entry.S             |  8 ++++++--
 xen/arch/x86/include/asm/spec_ctrl_asm.h | 17 ++---------------
 3 files changed, 11 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index e208a4b32ae7..276215d36aff 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -59,7 +59,7 @@ __UNLIKELY_END(nsvm_hap)
         mov VCPUMSR_spec_ctrl_raw(%rax), %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_HVM   /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        /* SPEC_CTRL_EXIT_TO_SVM       (nothing currently) */
 
         pop  %r15
         pop  %r14
@@ -86,7 +86,8 @@ __UNLIKELY_END(nsvm_hap)
 
         GET_CURRENT(bx)
 
-        SPEC_CTRL_ENTRY_FROM_HVM    /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        /* SPEC_CTRL_ENTRY_FROM_SVM    Req: b=curr %rsp=regs/cpuinfo, Clob: ac */
+        ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         stgi
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index 27c8c5ca4943..30139ae58e9d 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -33,7 +33,9 @@ ENTRY(vmx_asm_vmexit_handler)
         movb $1,VCPU_vmx_launched(%rbx)
         mov  %rax,VCPU_hvm_guest_cr2(%rbx)
 
-        SPEC_CTRL_ENTRY_FROM_HVM    /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        /* SPEC_CTRL_ENTRY_FROM_VMX    Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
+        ALTERNATIVE "", DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR_HVM
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         /* Hardware clears MSR_DEBUGCTL on VMExit.  Reinstate it if debugging Xen. */
@@ -80,7 +82,9 @@ UNLIKELY_END(realmode)
         mov VCPUMSR_spec_ctrl_raw(%rax), %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_HVM   /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        /* SPEC_CTRL_EXIT_TO_VMX       Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        ALTERNATIVE "", DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM
+        ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), X86_FEATURE_SC_VERW_HVM
 
         mov  VCPU_hvm_guest_cr2(%rbx),%rax
 
diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index cb34299a865b..18ecfcd70375 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -68,14 +68,14 @@
  *
  * The following ASM fragments implement this algorithm.  See their local
  * comments for further details.
- * - SPEC_CTRL_ENTRY_FROM_HVM
+ * - SPEC_CTRL_ENTRY_FROM_{SVM,VMX} (See appropriate entry.S files)
  * - SPEC_CTRL_ENTRY_FROM_PV
  * - SPEC_CTRL_ENTRY_FROM_INTR
  * - SPEC_CTRL_ENTRY_FROM_INTR_IST
  * - SPEC_CTRL_EXIT_TO_XEN_IST
  * - SPEC_CTRL_EXIT_TO_XEN
  * - SPEC_CTRL_EXIT_TO_PV
- * - SPEC_CTRL_EXIT_TO_HVM
+ * - SPEC_CTRL_EXIT_TO_{SVM,VMX}
  */
 
 .macro DO_OVERWRITE_RSB tmp=rax
@@ -225,12 +225,6 @@
     wrmsr
 .endm
 
-/* Use after a VMEXIT from an HVM guest. */
-#define SPEC_CTRL_ENTRY_FROM_HVM                                        \
-    ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM;           \
-    ALTERNATIVE "", DO_SPEC_CTRL_ENTRY_FROM_HVM,                        \
-        X86_FEATURE_SC_MSR_HVM
-
 /* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
 #define SPEC_CTRL_ENTRY_FROM_PV                                         \
     ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV;            \
@@ -255,13 +249,6 @@
     ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)),           \
         X86_FEATURE_SC_VERW_PV
 
-/* Use when exiting to HVM guest context. */
-#define SPEC_CTRL_EXIT_TO_HVM                                           \
-    ALTERNATIVE "",                                                     \
-        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM;             \
-    ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)),           \
-        X86_FEATURE_SC_VERW_HVM
-
 /*
  * Use in IST interrupt/exception context.  May interrupt Xen or PV context.
  * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
-- 
2.11.0
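
[Editor's note on the "unconditional pile of long-nops" remark: an
ALTERNATIVE whose original text is "" is assembled as padding nops sized
for the replacement, and the replacement is only patched in at boot when
the named feature bit is set.  On AMD hardware, where at this point in the
series X86_FEATURE_SC_MSR_HVM is not set, the old SPEC_CTRL_ENTRY_FROM_HVM
therefore left dead nops on the vmentry/exit path.  The C program below is
a minimal, hypothetical model of that boot-time patching step; struct
alt_site, cpu_has() and apply_alternatives() are illustrative names for
this sketch, not Xen's real interfaces.]

/*
 * Illustration only: a simplified, self-contained model of boot-time
 * alternatives patching.  Names and byte sequences are made up for the
 * sketch and do not come from Xen.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define X86_FEATURE_SC_RSB_HVM  0
#define X86_FEATURE_SC_MSR_HVM  1

struct alt_site {
    uint8_t *site;          /* patch site in the (model) instruction stream */
    const uint8_t *repl;    /* replacement bytes */
    size_t repl_len;
    unsigned int feature;   /* feature bit gating the replacement */
};

static int cpu_has(uint32_t caps, unsigned int feature)
{
    return (caps >> feature) & 1;
}

/*
 * An ALTERNATIVE "", <code>, <feature> site is assembled as enough nops to
 * hold <code>.  At boot, the replacement is copied over the nops only when
 * the feature bit is set; otherwise the nops stay in the hot path.
 */
static void apply_alternatives(struct alt_site *alt, size_t nr, uint32_t caps)
{
    for ( size_t i = 0; i < nr; i++ )
        if ( cpu_has(caps, alt[i].feature) )
            memcpy(alt[i].site, alt[i].repl, alt[i].repl_len);
}

int main(void)
{
    uint8_t msr_site[8];
    const uint8_t fake_msr_seq[8] = { 0x0f, 0x30 };    /* placeholder bytes */

    memset(msr_site, 0x90, sizeof(msr_site));    /* "" original => all nops */

    struct alt_site alts[] = {
        { msr_site, fake_msr_seq, sizeof(fake_msr_seq),
          X86_FEATURE_SC_MSR_HVM },
    };

    /* AMD-like capabilities: SC_RSB_HVM set, SC_MSR_HVM never set. */
    uint32_t caps = 1u << X86_FEATURE_SC_RSB_HVM;

    apply_alternatives(alts, 1, caps);

    /* The site is still all nops, executed on every (model) vmentry/exit. */
    printf("msr_site[0] = %#x (0x90 == nop)\n", msr_site[0]);

    return 0;
}

In this model the patch site stays as nops because the gating feature is
never set, which is the cost the patch avoids: expanding the macros into
the SVM path lets the unused MSR_SPEC_CTRL alternative be dropped entirely
instead of being left as nops.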