[Xen-devel] [PATCH v3 1/6] x86: NOP out XPTI entry/exit code when it's not in use
Introduce a synthetic feature flag and use alternative instruction
patching to NOP out all XPTI code on the entry/exit paths when the
mitigation is not in use. Having NOPs here is generally cheaper than
using conditional branches.
Also raise the limit on the number of bytes we can patch in one go to
what the encoding in struct alt_instr allows (its length fields are
single bytes): there's no point reducing it below that bound, and
without a check that the limit isn't actually exceeded, such an
artificial boundary is a latent risk.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Tested-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Juergen Gross <jgross@xxxxxxxx>
---
v3: Also patch NMI/#MC paths. Re-base.
v2: Introduce and use ALTERNATIVE_NOP. Re-base.
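
For readers new to the mechanism: at boot, apply_alternatives() walks the
.altinstructions table and, for each entry whose feature bit is set, copies
the replacement text over the original and pads the remainder of the site
with NOPs. A minimal, self-contained C sketch of that loop follows; the
type and names are simplified stand-ins for Xen's struct alt_instr and
apply_alternatives() (the real structure uses relative offsets, and the
real code emits optimal multi-byte NOPs via add_nops()), not the actual
implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct alt_instr (field names approximated). */
struct alt_entry {
    uint8_t *orig;         /* patch site */
    const uint8_t *repl;   /* replacement text */
    uint16_t feature;      /* feature bit enabling the replacement */
    uint8_t orig_len;      /* bytes at the patch site (a single byte) */
    uint8_t repl_len;      /* bytes of replacement, <= orig_len */
};

static bool boot_cpu_has(uint16_t feature) { (void)feature; return true; }

static void apply_alternatives(struct alt_entry *start, struct alt_entry *end)
{
    for ( struct alt_entry *a = start; a < end; a++ )
    {
        if ( !boot_cpu_has(a->feature) )
            continue;
        /* Copy the replacement, then NOP-pad the rest of the site.  With
         * repl_len == 0 (what ALTERNATIVE_NOP records), the whole range
         * becomes NOPs; 0x90 is the single-byte NOP, used for brevity. */
        memcpy(a->orig, a->repl, a->repl_len);
        memset(a->orig + a->repl_len, 0x90, a->orig_len - a->repl_len);
    }
}

int main(void)
{
    uint8_t site[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };   /* pretend code bytes */
    struct alt_entry e = { site, site, 0, sizeof(site), 0 };

    apply_alternatives(&e, &e + 1);
    for ( unsigned int i = 0; i < sizeof(site); i++ )
        printf("%02x ", site[i]);                   /* eight 0x90 NOPs */
    printf("\n");
    return 0;
}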
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -26,7 +26,7 @@
#include <asm/nmi.h>
#include <xen/livepatch.h>
-#define MAX_PATCH_LEN (255-1)
+#define MAX_PATCH_LEN 255
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
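
The new value is exactly the bound the table encoding already imposes: the
per-entry length fields of struct alt_instr are single bytes. A trivial
hedged illustration (the assertion is an editorial aid, not part of the
patch):

#include <stdint.h>

#define MAX_PATCH_LEN 255

/* A u8 length field cannot describe a patch larger than UINT8_MAX bytes,
 * so any smaller #define is purely artificial. */
_Static_assert(MAX_PATCH_LEN == UINT8_MAX,
               "patch size limit matches the u8 field encoding");

int main(void) { return 0; }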
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3709,7 +3709,7 @@ long do_mmu_update(
* to the page lock we hold, its pinned status, and uses on
* this (v)CPU.
*/
- if ( !rc && this_cpu(root_pgt) &&
+ if ( !rc && !cpu_has_no_xpti &&
((page->u.inuse.type_info & PGT_count_mask) >
(1 + !!(page->u.inuse.type_info & PGT_pinned) +
(pagetable_get_pfn(curr->arch.guest_table) == mfn) +
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -169,6 +169,9 @@ static int __init parse_smap_param(const
}
custom_param("smap", parse_smap_param);
+static int8_t __initdata opt_xpti = -1;
+boolean_param("xpti", opt_xpti);
+
bool __read_mostly acpi_disabled;
bool __initdata acpi_force;
static char __initdata acpi_param[10] = "";
@@ -1541,6 +1544,13 @@ void __init noreturn __start_xen(unsigne
cr4_pv32_mask = mmu_cr4_features & XEN_CR4_PV32_BITS;
+ if ( opt_xpti < 0 )
+ opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+ if ( opt_xpti )
+ setup_clear_cpu_cap(X86_FEATURE_NO_XPTI);
+ else
+ setup_force_cpu_cap(X86_FEATURE_NO_XPTI);
+
if ( cpu_has_fsgsbase )
set_in_cr4(X86_CR4_FSGSBASE);
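
The option is a tri-state: -1 means no explicit "xpti=" on the command
line, in which case XPTI defaults to off on AMD hardware (which is not
affected by Meltdown) and to on everywhere else. Note the inverted sense
of the synthetic flag: "feature present" means the mitigation is NOT in
use, so setting the bit is what selects the NOP form of the patched code.
A small sketch of the decision logic, with stubbed helpers standing in for
setup_force_cpu_cap()/setup_clear_cpu_cap():

#include <stdio.h>

enum vendor { VENDOR_AMD, VENDOR_OTHER };

static void force_cap(const char *cap) { printf("force %s\n", cap); }
static void clear_cap(const char *cap) { printf("clear %s\n", cap); }

/* opt_xpti: -1 = no explicit request, 0 = "xpti=0", 1 = "xpti=1". */
static void decide_xpti(int opt_xpti, enum vendor v)
{
    if ( opt_xpti < 0 )
        opt_xpti = v != VENDOR_AMD;     /* default: on unless AMD */

    if ( opt_xpti )
        clear_cap("NO_XPTI");           /* keep the XPTI code live */
    else
        force_cap("NO_XPTI");           /* patching NOPs the XPTI code out */
}

int main(void)
{
    decide_xpti(-1, VENDOR_AMD);    /* default on AMD: NO_XPTI forced */
    decide_xpti(-1, VENDOR_OTHER);  /* default elsewhere: NO_XPTI cleared */
    decide_xpti(1, VENDOR_AMD);     /* explicit xpti=1 overrides */
    return 0;
}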
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -752,8 +752,6 @@ static int clone_mapping(const void *ptr
return 0;
}
-static __read_mostly int8_t opt_xpti = -1;
-boolean_param("xpti", opt_xpti);
DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
static root_pgentry_t common_pgt;
@@ -766,7 +764,7 @@ static int setup_cpu_root_pgt(unsigned i
unsigned int off;
int rc;
- if ( !opt_xpti )
+ if ( cpu_has_no_xpti )
return 0;
rpt = alloc_xen_pagetable();
@@ -1047,9 +1045,6 @@ void __init smp_prepare_cpus(unsigned in
stack_base[0] = stack_start;
- if ( opt_xpti < 0 )
- opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -194,7 +194,7 @@ ENTRY(compat_post_handle_exception)
/* See lstar_enter for entry register state. */
ENTRY(cstar_enter)
- /* sti could live here when we don't switch page tables below. */
+ ALTERNATIVE nop, sti, X86_FEATURE_NO_XPTI
CR4_PV32_RESTORE
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
@@ -209,6 +209,7 @@ ENTRY(cstar_enter)
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
+.Lcstar_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
neg %rcx
jz .Lcstar_cr3_okay
@@ -218,6 +219,8 @@ ENTRY(cstar_enter)
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lcstar_cr3_okay:
sti
+.Lcstar_cr3_end:
+ ALTERNATIVE_NOP .Lcstar_cr3_start, .Lcstar_cr3_end, X86_FEATURE_NO_XPTI
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
movq VCPU_domain(%rbx),%rcx
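
Two patching idioms appear in these entry-path changes: a one-byte nop
placeholder that becomes sti when NO_XPTI is set (interrupts can be
enabled immediately because no page-table switch follows), and an
ALTERNATIVE_NOP over the whole xen_cr3 switch block, including the sti at
its end. A rough C model of the resulting control flow (the helpers are
hypothetical; the real logic is the assembly above):

#include <stdbool.h>
#include <stdio.h>

static void local_irq_enable(void)  { printf("sti\n"); }
static void switch_to_xen_cr3(void) { printf("mov ..., %%cr3\n"); }

/* What the cstar/lstar entry paths effectively do after patching. */
static void guest_entry(bool cpu_has_no_xpti)
{
    if ( cpu_has_no_xpti )
    {
        local_irq_enable();     /* sti patched over the placeholder nop */
        /* ...the cr3-switch block, its sti included, is now all NOPs. */
    }
    else
    {
        switch_to_xen_cr3();    /* interrupts stay off across the switch */
        local_irq_enable();     /* the sti at the end of the block */
    }
}

int main(void)
{
    guest_entry(true);
    guest_entry(false);
    return 0;
}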
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -45,6 +45,7 @@ restore_all_guest:
mov VCPUMSR_spec_ctrl_raw(%rdx), %r15d
/* Copy guest mappings and switch to per-CPU root page table. */
+.Lrag_cr3_start:
mov VCPU_cr3(%rbx), %r9
GET_STACK_END(dx)
mov STACK_CPUINFO_FIELD(pv_cr3)(%rdx), %rdi
@@ -52,7 +53,6 @@ restore_all_guest:
movabs $DIRECTMAP_VIRT_START, %rcx
mov %rdi, %rax
and %rsi, %rdi
- jz .Lrag_keep_cr3
and %r9, %rsi
add %rcx, %rdi
add %rcx, %rsi
@@ -74,7 +74,8 @@ restore_all_guest:
mov %rdi, %cr4
mov %rax, %cr3
mov %rsi, %cr4
-.Lrag_keep_cr3:
+.Lrag_cr3_end:
+ ALTERNATIVE_NOP .Lrag_cr3_start, .Lrag_cr3_end, X86_FEATURE_NO_XPTI
/* Restore stashed SPEC_CTRL value. */
mov %r15d, %eax
@@ -121,6 +122,7 @@ restore_all_xen:
* case we return to late PV exit code (from an NMI or #MC).
*/
GET_STACK_END(bx)
+.Lrax_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rdx
mov STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
test %rdx, %rdx
@@ -136,6 +138,8 @@ UNLIKELY_START(g, exit_cr3)
mov %rax, %cr3
mov %rsi, %cr4
UNLIKELY_END(exit_cr3)
+.Lrax_cr3_end:
+ ALTERNATIVE_NOP .Lrax_cr3_start, .Lrax_cr3_end, X86_FEATURE_NO_XPTI
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
SPEC_CTRL_EXIT_TO_XEN_IST /* Req: %rbx=end, Clob: acd */
@@ -160,7 +164,7 @@ UNLIKELY_END(exit_cr3)
* %ss must be saved into the space left by the trampoline.
*/
ENTRY(lstar_enter)
- /* sti could live here when we don't switch page tables below. */
+ ALTERNATIVE nop, sti, X86_FEATURE_NO_XPTI
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
pushq %r11
@@ -174,6 +178,7 @@ ENTRY(lstar_enter)
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
+.Llstar_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
neg %rcx
jz .Llstar_cr3_okay
@@ -183,6 +188,8 @@ ENTRY(lstar_enter)
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Llstar_cr3_okay:
sti
+.Llstar_cr3_end:
+ ALTERNATIVE_NOP .Llstar_cr3_start, .Llstar_cr3_end, X86_FEATURE_NO_XPTI
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
@@ -265,7 +272,7 @@ process_trap:
jmp test_all_events
ENTRY(sysenter_entry)
- /* sti could live here when we don't switch page tables below. */
+ ALTERNATIVE nop, sti, X86_FEATURE_NO_XPTI
pushq $FLAT_USER_SS
pushq $0
pushfq
@@ -281,6 +288,7 @@ GLOBAL(sysenter_eflags_saved)
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
+.Lsyse_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
neg %rcx
jz .Lsyse_cr3_okay
@@ -290,6 +298,8 @@ GLOBAL(sysenter_eflags_saved)
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lsyse_cr3_okay:
sti
+.Lsyse_cr3_end:
+ ALTERNATIVE_NOP .Lsyse_cr3_start, .Lsyse_cr3_end, X86_FEATURE_NO_XPTI
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
cmpb $0,VCPU_sysenter_disables_events(%rbx)
@@ -331,6 +341,7 @@ ENTRY(int80_direct_trap)
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
GET_STACK_END(bx)
+.Lint80_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
neg %rcx
jz .Lint80_cr3_okay
@@ -340,6 +351,8 @@ ENTRY(int80_direct_trap)
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lint80_cr3_okay:
sti
+.Lint80_cr3_end:
+ ALTERNATIVE_NOP .Lint80_cr3_start, .Lint80_cr3_end, X86_FEATURE_NO_XPTI
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
@@ -539,6 +552,7 @@ ENTRY(common_interrupt)
SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+.Lintr_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
mov %rcx, %r15
neg %rcx
@@ -557,9 +571,14 @@ ENTRY(common_interrupt)
CR4_PV32_RESTORE
movq %rsp,%rdi
callq do_IRQ
+.Lintr_cr3_restore:
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+.Lintr_cr3_end:
jmp ret_from_intr
+ ALTERNATIVE_NOP .Lintr_cr3_restore, .Lintr_cr3_end, X86_FEATURE_NO_XPTI
+ ALTERNATIVE_NOP .Lintr_cr3_start, .Lintr_cr3_okay, X86_FEATURE_NO_XPTI
+
/* No special register assumptions. */
ENTRY(ret_from_intr)
GET_CURRENT(bx)
@@ -581,6 +600,7 @@ GLOBAL(handle_exception)
SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+.Lxcpt_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
mov %rcx, %r15
neg %rcx
@@ -647,7 +667,9 @@ handle_exception_saved:
PERFC_INCR(exceptions, %rax, %rbx)
mov (%rdx, %rax, 8), %rdx
INDIRECT_CALL %rdx
+.Lxcpt_cr3_restore1:
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+.Lxcpt_cr3_end1:
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
leaq VCPU_trap_bounce(%rbx),%rdx
@@ -680,9 +702,17 @@ exception_with_ints_disabled:
rep; movsq # make room for ec/ev
1: movq UREGS_error_code(%rsp),%rax # ec/ev
movq %rax,UREGS_kernel_sizeof(%rsp)
+.Lxcpt_cr3_restore2:
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+.Lxcpt_cr3_end2:
jmp restore_all_xen # return to fixup code
+ ALTERNATIVE_NOP .Lxcpt_cr3_restore1, .Lxcpt_cr3_end1, \
+ X86_FEATURE_NO_XPTI
+ ALTERNATIVE_NOP .Lxcpt_cr3_restore2, .Lxcpt_cr3_end2, \
+ X86_FEATURE_NO_XPTI
+ ALTERNATIVE_NOP .Lxcpt_cr3_start, .Lxcpt_cr3_okay, X86_FEATURE_NO_XPTI
+
/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
xorl %esi,%esi
@@ -798,6 +828,7 @@ handle_ist_exception:
SPEC_CTRL_ENTRY_FROM_INTR_IST /* Req: %rsp=regs, %r14=end, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+.List_cr3_start:
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
mov %rcx, %r15
neg %rcx
@@ -828,10 +859,15 @@ handle_ist_exception:
leaq exception_table(%rip),%rdx
mov (%rdx, %rax, 8), %rdx
INDIRECT_CALL %rdx
+.List_cr3_restore:
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+.List_cr3_end:
cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
jne ret_from_intr
+ ALTERNATIVE_NOP .List_cr3_restore, .List_cr3_end, X86_FEATURE_NO_XPTI
+ ALTERNATIVE_NOP .List_cr3_start, .List_cr3_okay, X86_FEATURE_NO_XPTI
+
/* We want to get straight to the IRET on the NMI exit path. */
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
--- a/xen/include/asm-x86/alternative-asm.h
+++ b/xen/include/asm-x86/alternative-asm.h
@@ -101,6 +101,13 @@
#undef decl_orig
#undef as_true
+/* Macro to replace an entire range by suitable NOPs. */
+.macro ALTERNATIVE_NOP start, end, feature
+ .pushsection .altinstructions, "a", @progbits
+ altinstruction_entry \start, \start, \feature, "\end - \start", 0, 0
+ .popsection
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_ALTERNATIVE_ASM_H_ */
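
The entry this emits has its "replacement" starting at the same address as
the original and carrying zero length, so the patcher's generic copy-and-
pad step degenerates to "write (end - start) NOP bytes at start" whenever
the feature bit is set. Expressed with the simplified alt_entry type from
the sketch near the top of this mail (illustrative names and bit number
only):

#include <stdint.h>
#include <stdio.h>

struct alt_entry {
    uint8_t *orig;
    const uint8_t *repl;
    uint16_t feature;
    uint8_t orig_len;
    uint8_t repl_len;
};

#define X86_FEATURE_NO_XPTI 20          /* illustrative bit number only */

int main(void)
{
    static uint8_t range[16];           /* stands in for a code range from
                                         * .L*_cr3_start to .L*_cr3_end */
    uint8_t *start = range, *end = range + sizeof(range);

    /* What ALTERNATIVE_NOP records: orig == repl, repl_len == 0. */
    struct alt_entry nop_range = {
        .orig     = start,
        .repl     = start,
        .feature  = X86_FEATURE_NO_XPTI,
        .orig_len = (uint8_t)(end - start), /* must fit in a u8, hence
                                             * MAX_PATCH_LEN above */
        .repl_len = 0,
    };

    printf("NOP range of %u bytes\n", nop_range.orig_len);
    return 0;
}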
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -110,6 +110,7 @@
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
#define cpu_has_aperfmperf boot_cpu_has(X86_FEATURE_APERFMPERF)
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
+#define cpu_has_no_xpti boot_cpu_has(X86_FEATURE_NO_XPTI)
enum _cache_type {
CACHE_TYPE_NULL = 0,
--- a/xen/include/asm-x86/cpufeatures.h
+++ b/xen/include/asm-x86/cpufeatures.h
@@ -29,4 +29,5 @@ XEN_CPUFEATURE(XEN_IBPB, (FSCAPIN
XEN_CPUFEATURE(XEN_IBRS_SET, (FSCAPINTS+0)*32+16) /* IBRSB && IRBS set in Xen */
XEN_CPUFEATURE(XEN_IBRS_CLEAR, (FSCAPINTS+0)*32+17) /* IBRSB && IBRS clear in Xen */
XEN_CPUFEATURE(RSB_NATIVE, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for native */
-XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+20) /* RSB overwrite needed for vmexit */
+XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for vmexit */
+XEN_CPUFEATURE(NO_XPTI, (FSCAPINTS+0)*32+20) /* XPTI mitigation not in use */