[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 09/17] x86: create syscall stub for per-domain mapping



The current syscall stub can't be mapped in the per-domain area as
required by XPTI, because the distance for jumping into the common
interrupt handling code would be larger than 2GB. Using just an
indirect jump isn't going to work either, as this would require
mitigations against Spectre.

So use a new trampoline which is no longer unique to a (v)cpu, but
can be mapped into the per-domain area as needed. For addressing the
stack, use the knowledge that the primary stack will be in the page
following the trampoline code, so %rsp can be saved via a %rip
relative access without needing any further register.

For being able to easily switch between per-cpu and per-vcpu stubs add
a macro for the per-cpu stub size and add the prototypes of
[cl]star_enter() to a header.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V3:
- completely new per-vcpu stub containing Spectre mitigation
---
 xen/arch/x86/pv/Makefile           |  1 +
 xen/arch/x86/pv/xpti-stub.S        | 61 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/compat/entry.S |  1 +
 xen/arch/x86/x86_64/entry.S        |  1 +
 xen/arch/x86/x86_64/traps.c        |  3 +-
 xen/include/asm-x86/system.h       |  5 ++++
 6 files changed, 70 insertions(+), 2 deletions(-)
 create mode 100644 xen/arch/x86/pv/xpti-stub.S

diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
index a12e4fbd1a..3f6b5506dc 100644
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -17,3 +17,4 @@ obj-y += xpti.o
 
 obj-bin-y += dom0_build.init.o
 obj-bin-y += gpr_switch.o
+obj-bin-y += xpti-stub.o
diff --git a/xen/arch/x86/pv/xpti-stub.S b/xen/arch/x86/pv/xpti-stub.S
new file mode 100644
index 0000000000..efa1e3f661
--- /dev/null
+++ b/xen/arch/x86/pv/xpti-stub.S
@@ -0,0 +1,61 @@
+/*
+ * Syscall stubs mappable to per-vcpu area in order to mitigate Meltdown attack.
+ * The stack page will be mapped just after the stub page, so its distance
+ * is well known.
+ *
+ * Copyright (c) 2018, Juergen Gross
+ */
+
+        .file "pv/xpti-stub.S"
+
+#include <asm/asm_defns.h>
+#include <public/xen.h>
+
+        .align PAGE_SIZE
+
+        .equ xpti_regs, . + 2 * PAGE_SIZE - CPUINFO_sizeof
+
+ENTRY(xpti_lstar)
+        mov   %rsp, xpti_regs+UREGS_rsp(%rip)
+        lea   xpti_regs+UREGS_rsp(%rip), %rsp
+        movq  $FLAT_KERNEL_SS, 8(%rsp)
+        pushq %r11
+        pushq $FLAT_KERNEL_CS64
+        pushq %rcx
+        pushq $0
+        movl  $TRAP_syscall, 4(%rsp)
+        SAVE_ALL
+        mov   %rsp, %r12
+
+        sti
+
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+        movabsq $lstar_common, %rax
+        INDIRECT_LOCAL_JMP %rax
+
+ENTRY(xpti_cstar)
+        mov   %rsp, xpti_regs+UREGS_rsp(%rip)
+        lea   xpti_regs+UREGS_rsp(%rip), %rsp
+        movq  $FLAT_KERNEL_SS, 8(%rsp)
+        pushq %r11
+        pushq $FLAT_USER_CS32
+        pushq %rcx
+        pushq $0
+        movl  $TRAP_syscall, 4(%rsp)
+        SAVE_ALL
+        movq  %rsp, %r12
+
+        sti
+
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+        movabsq $cstar_common, %rax
+        INDIRECT_LOCAL_JMP %rax
+
+local__x86_indirect_thunk_rax:
+        GEN_INDIRECT_THUNK_BODY rax
+
+        .align PAGE_SIZE
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index eced1475b7..206bc9a05a 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -215,6 +215,7 @@ ENTRY(cstar_enter)
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
+GLOBAL(cstar_common)
         CR4_PV32_RESTORE
         GET_CURRENT(bx)
         movq  VCPU_domain(%rbx),%rcx
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index f067a74b0f..69590d0b17 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -120,6 +120,7 @@ ENTRY(lstar_enter)
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
+GLOBAL(lstar_common)
         GET_CURRENT(bx)
         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
         jz    switch_to_kernel
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 3652f5ff21..bd4d37c2ad 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -291,8 +291,6 @@ static unsigned int write_stub_trampoline(
 }
 
 DEFINE_PER_CPU(struct stubs, stubs);
-void lstar_enter(void);
-void cstar_enter(void);
 
 void subarch_percpu_traps_init(void)
 {
@@ -315,6 +313,7 @@ void subarch_percpu_traps_init(void)
     offset = write_stub_trampoline(stub_page + (stub_va & ~PAGE_MASK),
                                    stub_va, stack_bottom,
                                    (unsigned long)lstar_enter);
+    ASSERT(offset == STUB_TRAMPOLINE_SIZE_PERCPU);
     stub_va += offset;
 
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 8ac170371b..06afc59822 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -230,6 +230,11 @@ static inline int local_irq_is_enabled(void)
 
 void trap_init(void);
 void init_idt_traps(void);
+#define STUB_TRAMPOLINE_SIZE_PERCPU   32
+void lstar_enter(void);
+void cstar_enter(void);
+void xpti_lstar(void);
+void xpti_cstar(void);
 void load_system_tables(void);
 void percpu_traps_init(void);
 void subarch_percpu_traps_init(void);
-- 
2.13.6


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.