[Xen-changelog] [xen staging-4.7] x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE and VMEXIT
commit f666dab271327c0f70d094f0cd44ea423663ba5b
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue May 29 10:32:04 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue May 29 10:32:04 2018 +0200

    x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE and VMEXIT

    In hindsight, using NATIVE and VMEXIT as naming terminology was not
    clever.  A future change wants to split SPEC_CTRL_EXIT_TO_GUEST into PV
    and HVM specific implementations, and using VMEXIT as a term is
    completely wrong.

    Take the opportunity to fix some stale documentation in spec_ctrl_asm.h.
    The IST helpers were missing from the large comment block, and since
    SPEC_CTRL_ENTRY_FROM_INTR_IST was introduced, we've gained a new piece of
    functionality which currently depends on the fine grain control, which
    exists in lieu of livepatching.  Note this in the comment.

    No functional change.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: d9822b8a38114e96e4516dc998f4055249364d5d
    master date: 2018-05-16 12:19:10 +0100
---
 xen/arch/x86/cpu/common.c           |  8 ++++----
 xen/arch/x86/hvm/svm/entry.S        |  4 ++--
 xen/arch/x86/hvm/vmx/entry.S        |  4 ++--
 xen/arch/x86/spec_ctrl.c            | 20 ++++++++++----------
 xen/arch/x86/x86_64/compat/entry.S  |  2 +-
 xen/arch/x86/x86_64/entry.S         |  2 +-
 xen/include/asm-x86/cpufeature.h    |  4 ++--
 xen/include/asm-x86/spec_ctrl_asm.h | 36 +++++++++++++++++++++++++-----------
 8 files changed, 47 insertions(+), 33 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 335d464568..d6dff4ca66 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -420,12 +420,12 @@ void identify_cpu(struct cpuinfo_x86 *c)
 	if (test_bit(X86_FEATURE_SC_MSR,
 		     boot_cpu_data.x86_capability))
 		__set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
-	if (test_bit(X86_FEATURE_RSB_NATIVE,
+	if (test_bit(X86_FEATURE_SC_RSB_PV,
 		     boot_cpu_data.x86_capability))
-		__set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
-	if (test_bit(X86_FEATURE_RSB_VMEXIT,
+		__set_bit(X86_FEATURE_SC_RSB_PV, c->x86_capability);
+	if (test_bit(X86_FEATURE_SC_RSB_HVM,
 		     boot_cpu_data.x86_capability))
-		__set_bit(X86_FEATURE_RSB_VMEXIT, c->x86_capability);
+		__set_bit(X86_FEATURE_SC_RSB_HVM, c->x86_capability);
 	if (test_bit(X86_FEATURE_NO_XPTI, boot_cpu_data.x86_capability))
 		__set_bit(X86_FEATURE_NO_XPTI, c->x86_capability);

diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 289e94639c..d824bcd317 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -81,7 +81,7 @@ UNLIKELY_END(svm_trace)
         mov VCPU_arch_spec_ctrl(%rbx), %eax

         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_HVM   /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */

         pop  %r15
         pop  %r14
@@ -106,7 +106,7 @@ UNLIKELY_END(svm_trace)

         GET_CURRENT(bx)

-        SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        SPEC_CTRL_ENTRY_FROM_HVM    /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */

         mov  VCPU_svm_vmcb(%rbx),%rcx

diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index 7aa0e852ee..f1528e8f9d 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -37,7 +37,7 @@ ENTRY(vmx_asm_vmexit_handler)
         movb $1,VCPU_vmx_launched(%rbx)
         mov  %rax,VCPU_hvm_guest_cr2(%rbx)

-        SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        SPEC_CTRL_ENTRY_FROM_HVM    /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */

         mov  %rsp,%rdi
@@ -72,7 +72,7 @@ UNLIKELY_END(realmode)
         mov VCPU_arch_spec_ctrl(%rbx), %eax

         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_HVM   /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */

         mov  VCPU_hvm_guest_cr2(%rbx),%rax

diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index fc35fe8bab..a67daa2788 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -35,8 +35,8 @@ static enum ind_thunk {
     THUNK_JMP,
 } opt_thunk __initdata = THUNK_DEFAULT;
 static int8_t __initdata opt_ibrs = -1;
-static bool_t __initdata opt_rsb_native = 1;
-static bool_t __initdata opt_rsb_vmexit = 1;
+static bool_t __initdata opt_rsb_pv = 1;
+static bool_t __initdata opt_rsb_hvm = 1;
 bool_t __read_mostly opt_ibpb = 1;
 uint8_t __read_mostly default_xen_spec_ctrl;
 uint8_t __read_mostly default_spec_ctrl_flags;
@@ -69,9 +69,9 @@ static int __init parse_bti(const char *s)
         else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
             opt_ibpb = val;
         else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
-            opt_rsb_native = val;
+            opt_rsb_pv = val;
         else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
-            opt_rsb_vmexit = val;
+            opt_rsb_hvm = val;
         else
             rc = -EINVAL;

@@ -116,8 +116,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
            default_xen_spec_ctrl & SPEC_CTRL_IBRS    ? " IBRS+" :
                                                        " IBRS-"      : "",
            opt_ibpb                                  ? " IBPB"       : "",
-           boot_cpu_has(X86_FEATURE_RSB_NATIVE)      ? " RSB_NATIVE" : "",
-           boot_cpu_has(X86_FEATURE_RSB_VMEXIT)      ? " RSB_VMEXIT" : "");
+           boot_cpu_has(X86_FEATURE_SC_RSB_PV)       ? " RSB_NATIVE" : "",
+           boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB_VMEXIT" : "");

     printk("XPTI: %s\n",
            boot_cpu_has(X86_FEATURE_NO_XPTI) ? "disabled" : "enabled");
@@ -307,9 +307,9 @@ void __init init_speculation_mitigations(void)
      * If a processors speculates to 32bit PV guest kernel mappings, it is
      * speculating in 64bit supervisor mode, and can leak data.
      */
-    if ( opt_rsb_native )
+    if ( opt_rsb_pv )
     {
-        __set_bit(X86_FEATURE_RSB_NATIVE, boot_cpu_data.x86_capability);
+        __set_bit(X86_FEATURE_SC_RSB_PV, boot_cpu_data.x86_capability);
         default_spec_ctrl_flags |= SCF_ist_rsb;
     }

@@ -317,8 +317,8 @@
      * HVM guests can always poison the RSB to point at Xen supervisor
      * mappings.
      */
-    if ( opt_rsb_vmexit )
-        __set_bit(X86_FEATURE_RSB_VMEXIT, boot_cpu_data.x86_capability);
+    if ( opt_rsb_hvm )
+        __set_bit(X86_FEATURE_SC_RSB_HVM, boot_cpu_data.x86_capability);

     /* Check we have hardware IBPB support before using it... */
     if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 40f44001c5..3865225786 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -237,7 +237,7 @@ ENTRY(compat_restore_all_guest)
         mov VCPU_arch_spec_ctrl(%rbx), %eax

         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */

         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq

diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index df265ec7b7..1c4f0149f7 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -78,7 +78,7 @@ restore_all_guest:
         mov %r15d, %eax

         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */

         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)

diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 66efc5b017..e472176218 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -31,8 +31,8 @@
 #define X86_FEATURE_IND_THUNK_JMP ((FSCAPINTS+0)*32+ 12) /* Use IND_THUNK_JMP */
 #define X86_FEATURE_XEN_IBPB      ((FSCAPINTS+0)*32+ 13) /* IBRSB || IBPB */
 #define X86_FEATURE_SC_MSR        ((FSCAPINTS+0)*32+ 14) /* MSR_SPEC_CTRL used by Xen */
-#define X86_FEATURE_RSB_NATIVE    ((FSCAPINTS+0)*32+ 16) /* RSB overwrite needed for native */
-#define X86_FEATURE_RSB_VMEXIT    ((FSCAPINTS+0)*32+ 17) /* RSB overwrite needed for vmexit */
+#define X86_FEATURE_SC_RSB_PV     ((FSCAPINTS+0)*32+ 16) /* RSB overwrite needed for PV */
+#define X86_FEATURE_SC_RSB_HVM    ((FSCAPINTS+0)*32+ 17) /* RSB overwrite needed for HVM */
 #define X86_FEATURE_NO_XPTI       ((FSCAPINTS+0)*32+ 18) /* XPTI mitigation not in use */

 #define cpufeat_word(idx)         ((idx) / 32)

diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index 941aeb7164..b330e20e0e 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -72,11 +72,14 @@
  *
  * The following ASM fragments implement this algorithm.  See their local
  * comments for further details.
- *  - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ *  - SPEC_CTRL_ENTRY_FROM_HVM
  *  - SPEC_CTRL_ENTRY_FROM_PV
  *  - SPEC_CTRL_ENTRY_FROM_INTR
+ *  - SPEC_CTRL_ENTRY_FROM_INTR_IST
+ *  - SPEC_CTRL_EXIT_TO_XEN_IST
  *  - SPEC_CTRL_EXIT_TO_XEN
- *  - SPEC_CTRL_EXIT_TO_GUEST
+ *  - SPEC_CTRL_EXIT_TO_PV
+ *  - SPEC_CTRL_EXIT_TO_HVM
  */

 .macro DO_OVERWRITE_RSB tmp=rax
@@ -117,7 +120,7 @@
     mov %\tmp, %rsp                 /* Restore old %rsp */
 .endm

-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
 /*
  * Requires %rbx=current, %rsp=regs/cpuinfo
  * Clobbers %rax, %rcx, %rdx
@@ -216,23 +219,23 @@
 .endm

 /* Use after a VMEXIT from an HVM guest. */
-#define SPEC_CTRL_ENTRY_FROM_VMEXIT                                     \
+#define SPEC_CTRL_ENTRY_FROM_HVM                                        \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
-        DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT;                       \
+        DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM;                       \
     ALTERNATIVE __stringify(ASM_NOP36),                                 \
-        DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
+        DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR

 /* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
 #define SPEC_CTRL_ENTRY_FROM_PV                                         \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
-        DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE;                       \
+        DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV;                        \
     ALTERNATIVE __stringify(ASM_NOP25),                                 \
         __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR

 /* Use in interrupt/exception context.  May interrupt Xen or PV context. */
 #define SPEC_CTRL_ENTRY_FROM_INTR                                       \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
-        DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE;                       \
+        DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV;                        \
     ALTERNATIVE __stringify(ASM_NOP33),                                 \
         __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR

@@ -241,12 +244,22 @@
     ALTERNATIVE __stringify(ASM_NOP17),                                 \
         DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR

-/* Use when exiting to guest context. */
-#define SPEC_CTRL_EXIT_TO_GUEST                                         \
+/* Use when exiting to PV guest context. */
+#define SPEC_CTRL_EXIT_TO_PV                                            \
     ALTERNATIVE __stringify(ASM_NOP24),                                 \
         DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR

-/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
+/* Use when exiting to HVM guest context. */
+#define SPEC_CTRL_EXIT_TO_HVM                                           \
+    ALTERNATIVE __stringify(ASM_NOP24),                                 \
+        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+
+/*
+ * Use in IST interrupt/exception context.  May interrupt Xen or PV context.
+ * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
+ * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
+ * been reloaded.
+ */
 .macro SPEC_CTRL_ENTRY_FROM_INTR_IST
 /*
  * Requires %rsp=regs, %r14=stack_end
@@ -293,6 +306,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
     UNLIKELY_END(\@_serialise)
 .endm

+/* Use when exiting to Xen in IST context. */
 .macro SPEC_CTRL_EXIT_TO_XEN_IST
 /*
  * Requires %rbx=stack_end
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.7

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
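[Editor's note] For orientation, below is a minimal standalone C sketch, not Xen source and not part of the patch above: the option and feature-bit names mirror the patch, while the parsing and patching plumbing is deliberately simplified and hypothetical. It illustrates the flow the renamed pieces take part in: the "rsb_native"/"rsb_vmexit" sub-options of the "bti=" command line option set opt_rsb_pv/opt_rsb_hvm, which decide whether the SC_RSB_PV/SC_RSB_HVM synthetic feature bits are set; in Xen those bits then drive ALTERNATIVE patching that turns the NOP placeholders into DO_OVERWRITE_RSB in the PV and HVM entry paths.

/* sketch.c -- illustrative only, not Xen code. */
#include <stdbool.h>
#include <stdio.h>

/* Defaults mirror the patch: both RSB overwrites are enabled. */
static bool opt_rsb_pv  = true;   /* controlled by the "rsb_native" sub-option */
static bool opt_rsb_hvm = true;   /* controlled by the "rsb_vmexit" sub-option */

/* Stand-ins for the synthetic feature bits X86_FEATURE_SC_RSB_{PV,HVM}. */
static bool sc_rsb_pv, sc_rsb_hvm;

static void init_speculation_mitigations(void)
{
    /* PV guests can poison the RSB with supervisor-mode return targets. */
    if ( opt_rsb_pv )
        sc_rsb_pv = true;

    /* HVM guests can always poison the RSB to point at Xen mappings. */
    if ( opt_rsb_hvm )
        sc_rsb_hvm = true;
}

int main(void)
{
    init_speculation_mitigations();

    /*
     * In Xen the feature bits feed ALTERNATIVE patching, selecting
     * DO_OVERWRITE_RSB (rather than NOPs) in SPEC_CTRL_ENTRY_FROM_PV
     * and SPEC_CTRL_ENTRY_FROM_HVM respectively.
     */
    printf("SPEC_CTRL_ENTRY_FROM_PV  overwrites RSB: %s\n",
           sc_rsb_pv ? "yes" : "no");
    printf("SPEC_CTRL_ENTRY_FROM_HVM overwrites RSB: %s\n",
           sc_rsb_hvm ? "yes" : "no");
    return 0;
}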