
[Xen-devel] [PATCH 1/6] xen/x86/pvh: Save %rbx in xen_pvh_early_cpu_init()



The x86-64 ABI requires that functions preserve %rbx. When
xen_pvh_early_cpu_init() is executed on the boot CPU it is invoked as a
function, and the 'cpuid' instruction it runs clobbers %rbx. (This is
not a concern on secondary processors, where xen_pvh_early_cpu_init()
is the entry point and therefore does not need to preserve any
registers.)
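
For illustration only (not part of this patch, and the helper name is
made up): a C-callable routine that executes 'cpuid' has to save and
restore %rbx itself, because %rbx is callee-saved in the x86-64 SysV
ABI while 'cpuid' overwrites %eax, %ebx, %ecx and %edx:

        .text
        .globl  cpuid_leaf_edx          /* hypothetical helper */
cpuid_leaf_edx:
        push    %rbx                    /* %rbx is callee-saved */
        mov     %edi, %eax              /* leaf number (first argument) */
        cpuid                           /* clobbers %eax/%ebx/%ecx/%edx */
        mov     %edx, %eax              /* return %edx to the caller */
        pop     %rbx                    /* restore caller's %rbx */
        ret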

Since we cannot access the stack on secondary processors (the PTE's NX
bit will cause a #GP on AMD --- see commit a2ef5dc2c7cb ("x86/xen: Set
EFER.NX and EFER.SCE in PVH guests")), we can't always save %rbx on the
stack. We work around this by creating a new entry point for those
processors --- xen_pvh_early_cpu_init_secondary(). Note that on
secondary CPUs %rbx will be loaded with garbage, which is OK since the
secondary entry point has no caller whose %rbx must be preserved.
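
As a sketch of the resulting control flow (the labels below are
illustrative, not the patch's real symbols): the function-style entry
stashes %rbx just below %rsp (safe here since nothing else touches the
stack between the save and the restore) and falls through into the
bare entry point that secondary CPUs use, so the later reload gets
either the saved value (boot CPU) or harmless garbage (secondaries):

boot_entry:                             /* called as a function (boot CPU) */
        mov     %rbx, -8(%rsp)          /* stash callee-saved %rbx below %rsp */
secondary_entry:                        /* bare entry point (secondary CPUs) */
        /* ... work that clobbers %rbx, e.g. cpuid ... */
        mov     -8(%rsp), %rbx          /* saved value, or garbage on secondaries */
        /* secondaries branch off to their own continuation before this */
        ret                             /* boot-CPU path returns to its caller */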

As a side effect of these changes we don't need to save %rsi anymore,
since we can use %rbx instead of %rsi as a temporary register.

(We could instead stash %rbx in another scratch register such as %r8,
but since we will need a new entry point for 32-bit PVH anyway, we go
with this approach for symmetry.)

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 arch/x86/xen/smp.c      |  3 ++-
 arch/x86/xen/smp.h      |  4 ++++
 arch/x86/xen/xen-head.S | 14 +++++++-------
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8648438..ca7ee1f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -423,7 +423,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
                 * bit set. This means before DS/SS is touched, NX in
                 * EFER must be set. Hence the following assembly glue code.
                 */
-               ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
+               ctxt->user_regs.eip =
+                       (unsigned long)xen_pvh_early_cpu_init_secondary;
                ctxt->user_regs.rdi = cpu;
                ctxt->user_regs.rsi = true;  /* entry == true */
        }
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 963d62a..bf2b43c 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -10,10 +10,14 @@ extern void xen_send_IPI_self(int vector);
 
 #ifdef CONFIG_XEN_PVH
 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
+extern void xen_pvh_early_cpu_init_secondary(int cpu, bool entry);
 #else
 static inline void xen_pvh_early_cpu_init(int cpu, bool entry)
 {
 }
+static inline void xen_pvh_early_cpu_init_secondary(int cpu, bool entry)
+{
+}
 #endif
 
 #endif
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 8afdfcc..b1508a8 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -56,28 +56,28 @@ ENTRY(startup_xen)
  * @entry: true if this is a secondary vcpu coming up on this entry
  *         point, false if this is the boot CPU being initialized for
  *         the first time (%rsi)
- *
- * Note: This is called as a function on the boot CPU, and is the entry point
- *       on the secondary CPU.
  */
 ENTRY(xen_pvh_early_cpu_init)
-       mov     %rsi, %r11
+       mov     %rbx, -8(%rsp)
 
+/* Entry point for secondary CPUs */
+ENTRY(xen_pvh_early_cpu_init_secondary)
        /* Gather features to see if NX implemented. */
        mov     $0x80000001, %eax
        cpuid
-       mov     %edx, %esi
+       mov     %edx, %ebx
 
        mov     $MSR_EFER, %ecx
        rdmsr
        bts     $_EFER_SCE, %eax
 
-       bt      $20, %esi
+       bt      $20, %ebx
        jnc     1f              /* No NX, skip setting it */
        bts     $_EFER_NX, %eax
 1:     wrmsr
+       mov     -8(%rsp), %rbx
 #ifdef CONFIG_SMP
-       cmp     $0, %r11b
+       cmp     $0, %esi
        jne     cpu_bringup_and_idle
 #endif
        ret
-- 
1.8.1.4

