
[Xen-devel] [PATCH] arm: use a per-VCPU stack



All physical CPUs currently share a single hypervisor stack per PCPU and
rely on lazy state switching via the per-CPU curr_vcpu pointer. Give each
VCPU its own stack instead: the guest register frame (struct cpu_info)
lives at the top of that stack, a new assembly helper __context_switch()
saves and restores the callee-saved registers in arch.saved_context, and
the processor id and current VCPU pointer move out of cpu_info into a new
per-PCPU pcpu_info. With no lazy switching left, sync_local_execstate()
becomes a no-op. A new VCPU's saved context is seeded so that the first
switch to it lands in continue_new_vcpu() on its own stack.

---
 xen/arch/arm/asm-offsets.c    |    4 ++
 xen/arch/arm/domain.c         |  115 ++++++++++++----------------------------
 xen/arch/arm/domain_build.c   |    3 +-
 xen/arch/arm/entry.S          |   16 ++++++
 xen/arch/arm/setup.c          |    3 +-
 xen/include/asm-arm/current.h |   41 ++++++++------
 xen/include/asm-arm/domain.h  |   21 +++++++-
 xen/include/asm-arm/regs.h    |    4 +-
 xen/include/asm-arm/system.h  |    2 +
 9 files changed, 104 insertions(+), 105 deletions(-)

diff --git a/xen/arch/arm/asm-offsets.c b/xen/arch/arm/asm-offsets.c
index ee5d5d4..cc1a72a 100644
--- a/xen/arch/arm/asm-offsets.c
+++ b/xen/arch/arm/asm-offsets.c
@@ -7,6 +7,7 @@
 
 #include <xen/config.h>
 #include <xen/types.h>
+#include <xen/sched.h>
 #include <public/xen.h>
 #include <asm/current.h>
 
@@ -65,7 +66,10 @@ void __dummy__(void)
    BLANK();
 
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+
+   OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
 }
+
 /*
  * Local variables:
  * mode: C
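
The OFFSET() entry follows the usual asm-offsets pattern: the build turns
offsetof(struct vcpu, arch.saved_context) into an assembler-visible
constant so entry.S can address the save area without hardcoding the
layout. A minimal standalone sketch of the idea (the stand-in types below
are illustrative, not the real struct vcpu):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the real structures; field names follow the patch. */
    struct saved_context { uint32_t r4, r5, r6, r7, r8, r9, sl, fp, sp, pc; };
    struct arch_vcpu     { struct saved_context saved_context; };
    struct vcpu          { int pad[8]; struct arch_vcpu arch; }; /* layout illustrative */

    int main(void)
    {
        /* Xen's OFFSET() macro emits this value into an assembler fragment;
         * here we just print it. */
        printf("VCPU_arch_saved_context = %zu\n",
               offsetof(struct vcpu, arch.saved_context));
        return 0;
    }
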
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 0b55934..52a3128 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -14,7 +14,7 @@
 #include "gic.h"
 #include "vtimer.h"
 
-DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
+DEFINE_PER_CPU(struct pcpu_info, pcpu_info);
 
 static void continue_idle_domain(struct vcpu *v)
 {
@@ -44,7 +44,7 @@ void idle_loop(void)
 
 static void ctxt_switch_from(struct vcpu *p)
 {
-
+    context_saved(p);
 }
 
 static void ctxt_switch_to(struct vcpu *n)
@@ -52,52 +52,36 @@ static void ctxt_switch_to(struct vcpu *n)
     p2m_load_VTTBR(n->domain);
 }
 
-static void __context_switch(void)
+static void schedule_tail(struct vcpu *prev)
 {
-    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
-    unsigned int          cpu = smp_processor_id();
-    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
-    struct vcpu          *n = current;
-
-    ASSERT(p != n);
-    ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
-
-    if ( !is_idle_vcpu(p) )
-    {
-        memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
-        ctxt_switch_from(p);
-    }
+    /* Re-enable interrupts before restoring state which may fault. */
+    local_irq_enable();
 
-    if ( !is_idle_vcpu(n) )
-    {
-        memcpy(stack_regs, &n->arch.user_regs, CTXT_SWITCH_STACK_BYTES);
-        ctxt_switch_to(n);
-    }
-
-    per_cpu(curr_vcpu, cpu) = n;
+    ctxt_switch_from(prev);
 
+    /* TODO
+       update_runstate_area(current);
+    */
+    ctxt_switch_to(current);
 }
 
-static void schedule_tail(struct vcpu *v)
+static void continue_new_vcpu(struct vcpu *prev)
 {
-    if ( is_idle_vcpu(v) )
-        continue_idle_domain(v);
+    schedule_tail(prev);
+
+    if ( is_idle_vcpu(current) )
+        continue_idle_domain(current);
     else
-        continue_nonidle_domain(v);
+        continue_nonidle_domain(current);
 }
 
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
-    unsigned int cpu = smp_processor_id();
-
     ASSERT(local_irq_is_enabled());
-
-    printk("context switch %d:%d%s -> %d:%d%s\n",
-           prev->domain->domain_id, prev->vcpu_id, is_idle_vcpu(prev) ? " (idle)" : "",
-           next->domain->domain_id, next->vcpu_id, is_idle_vcpu(next) ? " (idle)" : "");
+    ASSERT(prev != next);
+    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));
 
     /* TODO
-       if (prev != next)
        update_runstate_area(prev);
     */
 
@@ -105,60 +89,19 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
     set_current(next);
 
-    if ( (per_cpu(curr_vcpu, cpu) == next) ||
-         (is_idle_vcpu(next) && cpu_online(cpu)) )
-    {
-        local_irq_enable();
-    }
-    else
-    {
-        __context_switch();
-
-        /* Re-enable interrupts before restoring state which may fault. */
-        local_irq_enable();
-    }
-
-    context_saved(prev);
-
-    /* TODO
-       if (prev != next)
-       update_runstate_area(next);
-    */
-
-    schedule_tail(next);
-    BUG();
+    prev = __context_switch(prev, next);
 
+    schedule_tail(prev);
 }
 
 void continue_running(struct vcpu *same)
 {
-    schedule_tail(same);
-    BUG();
-}
-
-int __sync_local_execstate(void)
-{
-    unsigned long flags;
-    int switch_required;
-
-    local_irq_save(flags);
-
-    switch_required = (this_cpu(curr_vcpu) != current);
-
-    if ( switch_required )
-    {
-        ASSERT(current == idle_vcpu[smp_processor_id()]);
-        __context_switch();
-    }
-
-    local_irq_restore(flags);
-
-    return switch_required;
+    /* Nothing to do */
 }
 
 void sync_local_execstate(void)
 {
-    (void)__sync_local_execstate();
+    /* Nothing to do -- no lazy switching */
 }
 
 void startup_cpu_idle_loop(void)
@@ -213,6 +156,18 @@ int vcpu_initialise(struct vcpu *v)
 {
     int rc = 0;
 
+    v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v)));
+    if ( v->arch.stack == NULL )
+        return -ENOMEM;
+
+    v->arch.cpu_info = (struct cpu_info *)(v->arch.stack
+                                           + STACK_SIZE
+                                           - sizeof(struct cpu_info));
+
+    memset(&v->arch.saved_context, 0, sizeof(v->arch.saved_context));
+    v->arch.saved_context.sp = (uint32_t)v->arch.cpu_info;
+    v->arch.saved_context.pc = (uint32_t)continue_new_vcpu;
+
     if ( (rc = vcpu_vgic_init(v)) != 0 )
         return rc;
 
@@ -224,7 +179,7 @@ int vcpu_initialise(struct vcpu *v)
 
 void vcpu_destroy(struct vcpu *v)
 {
-
+    free_xenheap_pages(v->arch.stack, STACK_ORDER);
 }
 
 int arch_domain_create(struct domain *d, unsigned int domcr_flags)
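
The subtle part above is how a freshly created VCPU gets going:
vcpu_initialise() points saved_context.sp at the top of the new stack
(just below cpu_info) and saved_context.pc at continue_new_vcpu, so the
first __context_switch to this VCPU "returns" straight into
continue_new_vcpu on its own stack. The same fake-frame trick, sketched
standalone with POSIX ucontext purely for illustration (none of this is
Xen code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    #define DEMO_STACK_SIZE (64 * 1024)

    static ucontext_t boot_ctx, vcpu_ctx;

    static void continue_new_vcpu_demo(void)
    {
        /* First code a brand-new context runs, by analogy with
         * continue_new_vcpu(). */
        printf("first run on the new stack\n");
        swapcontext(&vcpu_ctx, &boot_ctx);  /* switch away, like a reschedule */
    }

    int main(void)
    {
        getcontext(&vcpu_ctx);
        vcpu_ctx.uc_stack.ss_sp   = malloc(DEMO_STACK_SIZE); /* per-"VCPU" stack */
        vcpu_ctx.uc_stack.ss_size = DEMO_STACK_SIZE;
        vcpu_ctx.uc_link          = &boot_ctx;
        /* Seed the context so the first switch "returns" into a fresh
         * function, as vcpu_initialise() does with saved_context.pc/sp. */
        makecontext(&vcpu_ctx, continue_new_vcpu_demo, 0);

        swapcontext(&boot_ctx, &vcpu_ctx);  /* analogous to __context_switch */
        printf("back on the boot stack\n");
        free(vcpu_ctx.uc_stack.ss_sp);
        return 0;
    }

In the real patch, continue_new_vcpu then runs schedule_tail(prev) to
finish the switch, exactly as a normal reschedule would.
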
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index cbbc0b9..9240209 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -5,6 +5,7 @@
 #include <xen/domain_page.h>
 #include <xen/sched.h>
 #include <asm/irq.h>
+#include <asm/regs.h>
 
 #include "gic.h"
 #include "kernel.h"
@@ -71,7 +72,7 @@ int construct_dom0(struct domain *d)
     int rc;
 
     struct vcpu *v = d->vcpu[0];
-    struct cpu_user_regs *regs = &v->arch.user_regs;
+    struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs;
 
     /* Sanity! */
     BUG_ON(d->domain_id != 0);
diff --git a/xen/arch/arm/entry.S b/xen/arch/arm/entry.S
index 16a8f36..0b9cce5 100644
--- a/xen/arch/arm/entry.S
+++ b/xen/arch/arm/entry.S
@@ -105,3 +105,19 @@ ENTRY(return_to_hypervisor)
        pop {r0-r12}
        add sp, #(UREGS_R8_fiq - UREGS_sp); /* SP, LR, SPSR, PC */
        eret
+
+/*
+ * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
+ *
+ * r0 - prev
+ * r1 - next
+ *
+ * Returns prev in r0
+ */
+ENTRY(__context_switch)
+       add     ip, r0, #VCPU_arch_saved_context
+       stmia   ip!, {r4 - sl, fp, sp, lr}      /* Save register state */
+
+       add     r4, r1, #VCPU_arch_saved_context
+       ldmia   r4, {r4 - sl, fp, sp, pc}       /* Load registers and return */
+
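
Note that __context_switch only saves the AAPCS callee-saved registers
plus sp and lr; everything else is caller-saved, so the C caller already
expects it to be clobbered. The ldmia then loads next's saved lr value
into pc, which both restores the stack and branches back into next's
suspended caller in one instruction. setjmp()/longjmp() lean on the same
ABI guarantee; a standalone analogy (illustration only, not how Xen does
it):

    #include <stdio.h>
    #include <setjmp.h>

    static jmp_buf ctx;

    static void fake_guest(void)
    {
        printf("guest running; switching back\n");
        longjmp(ctx, 1);           /* reload callee-saved regs, sp and pc */
    }

    int main(void)
    {
        if (setjmp(ctx) == 0) {    /* stash callee-saved regs, sp and pc */
            fake_guest();
        }
        printf("resumed at the save point\n");
        return 0;
    }
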
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 4c1d89c..55d3df0 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -42,7 +42,7 @@
 static unsigned int __initdata max_cpus = NR_CPUS;
 
 /* Xen stack for bringing up the first CPU. */
-unsigned char init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE)));
+unsigned char __initdata init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE)));
 
 extern char __init_begin[], __init_end[], __bss_start[];
 
@@ -61,7 +61,6 @@ static void __init init_idle_domain(void)
 {
         scheduler_init();
         set_current(idle_vcpu[0]);
-        this_cpu(curr_vcpu) = current;
         /* TODO: setup_idle_pagetable(); */
 }
 
diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h
index 826efa5..1753e8e 100644
--- a/xen/include/asm-arm/current.h
+++ b/xen/include/asm-arm/current.h
@@ -5,32 +5,44 @@
 #include <xen/percpu.h>
 #include <public/xen.h>
 
+#include <asm/percpu.h>
+
 #ifndef __ASSEMBLY__
 
 struct vcpu;
 
+struct pcpu_info {
+    unsigned int processor_id;
+    struct vcpu *current_vcpu;
+};
+
+DECLARE_PER_CPU(struct pcpu_info, pcpu_info);
+
+static inline struct pcpu_info *get_pcpu_info(void)
+{
+    return &this_cpu(pcpu_info);
+}
+
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned long elr;
-    unsigned int processor_id;
-    struct vcpu *current_vcpu;
-    unsigned long per_cpu_offset;
+    unsigned long pad;
 };
 
 static inline struct cpu_info *get_cpu_info(void)
 {
-        register unsigned long sp asm ("sp");
-        return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + STACK_SIZE - sizeof(struct cpu_info));
+    register unsigned long sp asm ("sp");
+    return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + STACK_SIZE - sizeof(struct cpu_info));
+    //return (struct cpu_info*)(get_pcpu_info()->current_vcpu->arch.stack + STACK_SIZE - sizeof(struct cpu_info));
 }
 
-#define get_current()         (get_cpu_info()->current_vcpu)
-#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
+#define get_current()         (get_pcpu_info()->current_vcpu)
+#define set_current(vcpu)     (get_pcpu_info()->current_vcpu = (vcpu))
 #define current               (get_current())
 
-#define get_processor_id()    (get_cpu_info()->processor_id)
+#define get_processor_id()    (get_pcpu_info()->processor_id)
 #define set_processor_id(id)  do {                                      \
-    struct cpu_info *ci__ = get_cpu_info();                             \
-    ci__->per_cpu_offset = __per_cpu_offset[ci__->processor_id = (id)]; \
+    get_pcpu_info()->processor_id = (id);                               \
 } while (0)
 
 #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
@@ -39,15 +51,8 @@ static inline struct cpu_info *get_cpu_info(void)
     __asm__ __volatile__ (                      \
         "mov sp,%0; b "STR(__fn)      \
         : : "r" (guest_cpu_user_regs()) : "memory" )
-#endif
 
-
-/*
- * Which VCPU's state is currently running on each CPU?
- * This is not necesasrily the same as 'current' as a CPU may be
- * executing a lazy state switch.
- */
-DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
+#endif
 
 #endif /* __ARM_CURRENT_H__ */
 /*
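
get_cpu_info() still works by arithmetic on sp: each stack is STACK_SIZE
bytes and STACK_SIZE-aligned with the cpu_info block at its very top, so
masking sp down to the stack base and adding STACK_SIZE -
sizeof(struct cpu_info) finds the block no matter how deep the stack
currently is. A standalone sketch of that arithmetic (the struct below is
a stand-in, not the real cpu_info):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define STACK_SIZE 4096                   /* power of two, as in Xen */

    struct cpu_info { uint32_t guest_regs[18]; unsigned long elr, pad; };

    static struct cpu_info *cpu_info_of(uintptr_t sp)
    {
        /* Mask sp down to the stack base, then step to the block at the top. */
        return (struct cpu_info *)((sp & ~(uintptr_t)(STACK_SIZE - 1))
                                   + STACK_SIZE - sizeof(struct cpu_info));
    }

    int main(void)
    {
        unsigned char *stack = aligned_alloc(STACK_SIZE, STACK_SIZE);
        uintptr_t sp = (uintptr_t)stack + STACK_SIZE / 2; /* "mid-stack" sp */
        printf("stack base %p -> cpu_info %p\n",
               (void *)stack, (void *)cpu_info_of(sp));
        free(stack);
        return 0;
    }
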
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 3372d14..c1afd19 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -47,7 +47,26 @@ struct arch_domain
 
 struct arch_vcpu
 {
-    struct cpu_user_regs user_regs;
+    struct {
+        uint32_t    r4;
+        uint32_t    r5;
+        uint32_t    r6;
+        uint32_t    r7;
+        uint32_t    r8;
+        uint32_t    r9;
+        uint32_t    sl;
+        uint32_t    fp;
+        uint32_t    sp;
+        uint32_t    pc;
+    } saved_context;
+
+    void *stack;
+
+    /*
+     * Points into ->stack, more convenient than doing pointer arith
+     * all the time.
+     */
+    struct cpu_info *cpu_info;
 
     uint32_t sctlr;
     uint32_t ttbr0, ttbr1, ttbcr;
diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
index ee095bf..54f6ed8 100644
--- a/xen/include/asm-arm/regs.h
+++ b/xen/include/asm-arm/regs.h
@@ -28,9 +28,7 @@
     (diff == 0);                                                              \
 })
 
-#define return_reg(v) ((v)->arch.user_regs.r0)
-
-#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
+#define return_reg(v) ((v)->arch.cpu_info->guest_cpu_user_regs.r0)
 
 #endif /* __ARM_REGS_H__ */
 /*
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index 731d89f..7963ea5 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -191,6 +191,8 @@ static inline int local_fiq_is_enabled(void)
     return !!(flags & PSR_FIQ_MASK);
 }
 
+extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next);
+
 #endif
 /*
  * Local variables:
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

