
[Xen-devel] [RFC PATCH] xen: get rid of paravirt op adjust_exception_frame



When running as a Xen pv-guest the exception frame on the stack contains
%r11 and %rcx in addition to the data pushed by the processor.
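
On entry to an exception handler the stack then looks roughly like this
(a sketch; the error code slot is only present for exceptions that have
one):

	(%rsp) -> %rcx			<- extra slot, popped by the new prologue
		  %r11			<- extra slot, popped by the new prologue
		  [error code]
		  RIP
		  CS
		  RFLAGS
		  RSP
		  SS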

Instead of having a paravirt op called for each exception type, prepend
the Xen-specific code to each exception entry. When running as a Xen
pv-guest use the exception entry with the prepended instructions,
otherwise use the entry without the Xen-specific code.
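
With CONFIG_XEN_PV enabled the new PV_ENTRY() macro (introduced below in
arch/x86/entry/calling.h) makes each exception entry expand roughly like
this, taking page_fault as an example (alignment inserted by ENTRY() is
omitted):

	ENTRY(_xen_page_fault)		/* entry used when running on Xen */
		pop	%rcx		/* drop the two extra stack slots */
		pop	%r11
		.globl	page_fault
	page_fault:			/* entry used on bare metal       */
		...			/* common exception handling code */

On bare metal pv_idt_prologue is 0, so the IDT gates keep pointing at the
plain symbol; when running on Xen it is set to 3 (the size of the two pop
instructions) and the _xen_ prefixed entry, including the pops, is what
gets registered with the hypervisor.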

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
I'm sure there will be comments regarding how to hide e.g.
pv_idt_prologue, or where to put the definition of the Xen-specific
prologue (for now it is defined in arch/x86/entry/calling.h).

This is the main reason for sending this patch as an RFC for now.

I have verified that the kernel boots successfully both natively and as
Xen dom0 (pv, of course).
---
 arch/x86/entry/calling.h              |  6 ++++++
 arch/x86/entry/entry_64.S             | 17 ++---------------
 arch/x86/entry/entry_64_compat.S      |  3 +--
 arch/x86/include/asm/desc.h           |  7 +++++++
 arch/x86/include/asm/paravirt.h       |  5 -----
 arch/x86/include/asm/paravirt_types.h |  4 ----
 arch/x86/kernel/asm-offsets_64.c      |  1 -
 arch/x86/kernel/paravirt.c            |  3 ---
 arch/x86/xen/enlighten_pv.c           |  8 ++++++--
 arch/x86/xen/irq.c                    |  3 ---
 arch/x86/xen/setup.c                  |  3 ++-
 arch/x86/xen/smp_pv.c                 |  2 +-
 arch/x86/xen/xen-asm_64.S             |  6 ------
 arch/x86/xen/xen-ops.h                |  2 +-
 14 files changed, 26 insertions(+), 44 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 05ed3d393da7..8b315ee49c93 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -227,3 +227,9 @@ For 32-bit we have the following conventions - kernel is built with
 .Lafter_call_\@:
 #endif
 .endm
+
+#ifdef CONFIG_XEN_PV
+#define PV_ENTRY(sym) ENTRY(_xen_##sym); pop %rcx; pop %r11; .globl sym; sym:
+#else
+#define PV_ENTRY(sym) ENTRY(sym)
+#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 607d72c4a485..780ea67fb7ea 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -740,14 +740,13 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
-ENTRY(\sym)
+PV_ENTRY(\sym)
        /* Sanity check */
        .if \shift_ist != -1 && \paranoid == 0
        .error "using shift_ist requires paranoid=1"
        .endif
 
        ASM_CLAC
-       PARAVIRT_ADJUST_EXCEPTION_FRAME
 
        .ifeq \has_error_code
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
@@ -1161,19 +1160,7 @@ ENTRY(error_exit)
 END(error_exit)
 
 /* Runs on exception stack */
-ENTRY(nmi)
-       /*
-        * Fix up the exception frame if we're on Xen.
-        * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
-        * one value to the stack on native, so it may clobber the rdx
-        * scratch slot, but it won't clobber any of the important
-        * slots past it.
-        *
-        * Xen is a different story, because the Xen frame itself overlaps
-        * the "NMI executing" variable.
-        */
-       PARAVIRT_ADJUST_EXCEPTION_FRAME
-
+PV_ENTRY(nmi)
        /*
         * We allow breakpoints in NMIs. If a breakpoint occurs, then
         * the iretq it performs will take us out of NMI context.
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e1721dafbcb1..9fd8c8f6004e 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -290,11 +290,10 @@ END(entry_SYSCALL_compat)
  * edi  arg5
  * ebp  arg6
  */
-ENTRY(entry_INT80_compat)
+PV_ENTRY(entry_INT80_compat)
        /*
         * Interrupts are off on entry.
         */
-       PARAVIRT_ADJUST_EXCEPTION_FRAME
        ASM_CLAC                        /* Do this early to minimize exposure */
        SWAPGS
 
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index d0a21b12dd58..ff19be44877a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -85,9 +85,16 @@ static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
 
 #ifdef CONFIG_X86_64
 
+#ifdef CONFIG_XEN_PV
+extern unsigned int pv_idt_prologue;
+#else
+#define pv_idt_prologue 0
+#endif
+
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
                             unsigned dpl, unsigned ist, unsigned seg)
 {
+       func -= pv_idt_prologue;
        gate->offset_low        = PTR_LOW(func);
        gate->segment           = __KERNEL_CS;
        gate->ist               = ist;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 55fa56fe4e45..e0da3d7d6609 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -962,11 +962,6 @@ extern void default_banner(void);
 #define GET_CR2_INTO_RAX                               \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
 
-#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                        \
-       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
-                 CLBR_NONE,                                            \
-                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
-
 #define USERGS_SYSRET64                                                        \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7465d6fe336f..5027c7e50d67 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -194,10 +194,6 @@ struct pv_irq_ops {
 
        void (*safe_halt)(void);
        void (*halt)(void);
-
-#ifdef CONFIG_X86_64
-       void (*adjust_exception_frame)(void);
-#endif
 };
 
 struct pv_mmu_ops {
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 99332f550c48..cf42206926af 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -20,7 +20,6 @@ static char syscalls_ia32[] = {
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
-       OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
        OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
        OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
        BLANK();
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 3586996fc50d..f88e3bd460ae 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -319,9 +319,6 @@ __visible struct pv_irq_ops pv_irq_ops = {
        .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
-#ifdef CONFIG_X86_64
-       .adjust_exception_frame = paravirt_nop,
-#endif
 };
 
 __visible struct pv_cpu_ops pv_cpu_ops = {
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index f33eef4ebd12..a8cb60b64b86 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -88,6 +88,7 @@
 #include "pmu.h"
 
 void *xen_initial_gdt;
+unsigned int pv_idt_prologue;
 
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 
@@ -620,7 +621,7 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
 
        info->vector = vector;
 
-       addr = gate_offset(*val);
+       addr = gate_offset(*val) + pv_idt_prologue;
 #ifdef CONFIG_X86_64
        /*
         * Look for known traps using IST, and substitute them
@@ -657,7 +658,7 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
                        return 0;
        }
 #endif /* CONFIG_X86_64 */
-       info->address = addr;
+       info->address = addr - pv_idt_prologue;
 
        info->cs = gate_segment(*val);
        info->flags = val->dpl;
@@ -1264,6 +1265,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_cpu_ops = xen_cpu_ops;
+#ifdef CONFIG_X86_64
+       pv_idt_prologue = 3; /* size of pop %rcx; pop %r11; */
+#endif
 
        x86_platform.get_nmi_reason = xen_get_nmi_reason;
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 33e92955e09d..d4eff5676cfa 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -123,9 +123,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
 
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
-#ifdef CONFIG_X86_64
-       .adjust_exception_frame = xen_adjust_exception_frame,
-#endif
 };
 
 void __init xen_init_irq_ops(void)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index a5bf7c451435..3d2c01a902bc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -1015,7 +1015,8 @@ void __init xen_pvmmu_arch_setup(void)
        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_pae_extended_cr3);
 
-       if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
+       if (register_callback(CALLBACKTYPE_event,
+                             xen_hypervisor_callback - pv_idt_prologue) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();
 
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index aae32535f4ec..7bbf52b0939d 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -322,7 +322,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
        ctxt->event_callback_eip    =
-               (unsigned long)xen_hypervisor_callback;
+               (unsigned long)xen_hypervisor_callback - pv_idt_prologue;
        ctxt->failsafe_callback_eip =
                (unsigned long)xen_failsafe_callback;
        ctxt->user_regs.cs = __KERNEL_CS;
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index c3df43141e70..8db45fdba96d 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -22,12 +22,6 @@
 
 #include "xen-asm.h"
 
-ENTRY(xen_adjust_exception_frame)
-       mov 8+0(%rsp), %rcx
-       mov 8+8(%rsp), %r11
-       ret $16
-ENDPROC(xen_adjust_exception_frame)
-
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9a440a42c618..e09253f01806 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -17,6 +17,7 @@ void xen_syscall32_target(void);
 #endif
 
 extern void *xen_initial_gdt;
+extern unsigned int pv_idt_prologue;
 
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
@@ -144,7 +145,6 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 __visible void xen_iret(void);
 __visible void xen_sysret32(void);
 __visible void xen_sysret64(void);
-__visible void xen_adjust_exception_frame(void);
 
 extern int xen_panic_handler_init(void);
 
-- 
2.12.0
