[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v4 12/25] arm: domain



From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

Domain creation and destruction, vcpu initialization and destruction,
and arch-specific scheduling functions called by common code.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/arm/domain.c        |  253 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/domain.h |   43 +++++++
 2 files changed, 296 insertions(+), 0 deletions(-)
 create mode 100644 xen/arch/arm/domain.c
 create mode 100644 xen/include/asm-arm/domain.h

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
new file mode 100644
index 0000000..d706b5f
--- /dev/null
+++ b/xen/arch/arm/domain.c
@@ -0,0 +1,253 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/softirq.h>
+#include <xen/wait.h>
+#include <xen/errno.h>
+
+#include <asm/current.h>
+#include <asm/regs.h>
+#include <asm/p2m.h>
+#include <asm/irq.h>
+
+DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
+
+/* Tail of a context switch into the idle vcpu: discard the current
+ * stack frame and restart the idle loop on a fresh stack.  Does not
+ * return. */
+static void continue_idle_domain(struct vcpu *v)
+{
+    reset_stack_and_jump(idle_loop);
+}
+
+/* Tail of a context switch into a guest vcpu: reset the stack and
+ * return to the guest via the trap-return path.  Does not return.
+ * NOTE(review): check_wakeup_from_wait() is still stubbed out, so the
+ * wait-queue wakeup path is not yet wired up here. */
+static void continue_nonidle_domain(struct vcpu *v)
+{
+    /* check_wakeup_from_wait(); */
+    reset_stack_and_jump(return_from_trap);
+}
+
+/* Per-cpu idle loop: run pending tasklets and softirqs forever.
+ * Entered via reset_stack_and_jump(), so it must never return. */
+void idle_loop(void)
+{
+    for ( ; ; )
+    {
+        /* TODO: cpu offlining / power management, as on x86:
+         *   if ( cpu_is_offline(smp_processor_id()) )
+         *       play_dead();
+         *   (*pm_idle)();
+         *   BUG();
+         */
+        do_tasklet();
+        do_softirq();
+    }
+}
+
+/* Save the outgoing vcpu's arch state.  Intentionally empty for now:
+ * the general-purpose register frame is saved by __context_switch()
+ * itself, and no other per-vcpu hardware state is switched yet. */
+static void ctxt_switch_from(struct vcpu *p)
+{
+
+}
+
+/* Restore the incoming vcpu's arch state: point the stage-2
+ * translation (VTTBR) at the new domain's p2m. */
+static void ctxt_switch_to(struct vcpu *n)
+{
+    p2m_load_VTTBR(n->domain);
+}
+
+/*
+ * Complete a (possibly lazy) context switch: swap the guest register
+ * frame on this cpu's hypervisor stack from the vcpu that last ran
+ * here (per-cpu curr_vcpu) to 'current'.  Must be called with
+ * interrupts disabled.
+ */
+static void __context_switch(void)
+{
+    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
+    unsigned int          cpu = smp_processor_id();
+    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
+    struct vcpu          *n = current;
+
+    ASSERT(p != n);
+    ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
+
+    if ( !is_idle_vcpu(p) )
+    {
+        /* Stash the outgoing guest's register frame from the stack. */
+        memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
+        ctxt_switch_from(p);
+    }
+
+    if ( !is_idle_vcpu(n) )
+    {
+        /* Install the incoming guest's register frame onto the stack. */
+        memcpy(stack_regs, &n->arch.user_regs, CTXT_SWITCH_STACK_BYTES);
+        ctxt_switch_to(n);
+    }
+
+    per_cpu(curr_vcpu, cpu) = n;
+
+}
+
+/* Finish a context switch by jumping into the new vcpu's execution
+ * context.  Does not return. */
+static void schedule_tail(struct vcpu *v)
+{
+    if ( is_idle_vcpu(v) )
+        continue_idle_domain(v);
+    else
+        continue_nonidle_domain(v);
+}
+
+/*
+ * Switch from vcpu 'prev' to 'next'.  Called by the common scheduler
+ * with interrupts enabled; does not return to the caller (it ends in
+ * schedule_tail(), which jumps onto the new context's stack).
+ */
+void context_switch(struct vcpu *prev, struct vcpu *next)
+{
+    unsigned int cpu = smp_processor_id();
+
+    ASSERT(local_irq_is_enabled());
+
+    printk("context switch %d:%d%s -> %d:%d%s\n",
+           prev->domain->domain_id, prev->vcpu_id,
+           is_idle_vcpu(prev) ? " (idle)" : "",
+           next->domain->domain_id, next->vcpu_id,
+           is_idle_vcpu(next) ? " (idle)" : "");
+
+    /* TODO
+       if (prev != next)
+       update_runstate_area(prev);
+    */
+
+    local_irq_disable();
+
+    set_current(next);
+
+    /* Lazy switch: skip the register swap if this cpu last ran 'next',
+     * or if we are switching to the idle vcpu (its state is only
+     * swapped in later, on demand, by __sync_local_execstate()). */
+    if ( (per_cpu(curr_vcpu, cpu) == next) ||
+         (is_idle_vcpu(next) && cpu_online(cpu)) )
+    {
+        local_irq_enable();
+    }
+    else
+    {
+        __context_switch();
+
+        /* Re-enable interrupts before restoring state which may fault. */
+        local_irq_enable();
+    }
+
+    context_saved(prev);
+
+    /* TODO
+       if (prev != next)
+       update_runstate_area(next);
+    */
+
+    schedule_tail(next);
+    BUG();
+
+}
+
+/* Resume the same vcpu without a real switch (scheduler picked the
+ * vcpu already running).  Does not return. */
+void continue_running(struct vcpu *same)
+{
+    schedule_tail(same);
+    BUG();
+}
+
+/*
+ * If this cpu is lazily running with a stale register context
+ * (per-cpu curr_vcpu != current, possible only while current is the
+ * idle vcpu), complete the deferred context switch now.
+ *
+ * Returns nonzero iff a switch was actually performed.  Callable with
+ * interrupts enabled; the switch itself runs with interrupts off.
+ */
+int __sync_local_execstate(void)
+{
+    unsigned long flags;
+    int switch_required;
+
+    local_irq_save(flags);
+
+    switch_required = (this_cpu(curr_vcpu) != current);
+
+    if ( switch_required )
+    {
+        /* Only the idle vcpu can be running on a stale context. */
+        ASSERT(current == idle_vcpu[smp_processor_id()]);
+        __context_switch();
+    }
+
+    local_irq_restore(flags);
+
+    return switch_required;
+}
+
+/* Void wrapper around __sync_local_execstate() for callers that do
+ * not care whether a deferred switch was performed. */
+void sync_local_execstate(void)
+{
+    (void)__sync_local_execstate();
+}
+
+/* Entry point for a cpu's idle vcpu: jump onto a fresh stack and
+ * enter the idle loop.  Does not return. */
+void startup_cpu_idle_loop(void)
+{
+    struct vcpu *v = current;
+
+    ASSERT(is_idle_vcpu(v));
+    /* TODO
+       cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
+       cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+    */
+
+    reset_stack_and_jump(idle_loop);
+}
+
+/* Allocate and zero a page-sized struct domain from the xenheap.
+ * Returns NULL on allocation failure.  Freed by free_domain_struct(). */
+struct domain *alloc_domain_struct(void)
+{
+    struct domain *d;
+    BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
+    d = alloc_xenheap_pages(0, 0);
+    if ( d != NULL )
+        clear_page(d);
+    return d;
+}
+
+/* Release a struct domain allocated by alloc_domain_struct(). */
+void free_domain_struct(struct domain *d)
+{
+    free_xenheap_page(d);
+}
+
+/* Stub required by common code; page-frame dumping is not implemented
+ * on ARM yet. */
+void dump_pageframe_info(struct domain *d)
+{
+
+}
+
+/* Allocate and zero a page-sized struct vcpu from the xenheap.
+ * Returns NULL on allocation failure.  Freed by free_vcpu_struct(). */
+struct vcpu *alloc_vcpu_struct(void)
+{
+    struct vcpu *v;
+    BUILD_BUG_ON(sizeof(*v) > PAGE_SIZE);
+    v = alloc_xenheap_pages(0, 0);
+    if ( v != NULL )
+        clear_page(v);
+    return v;
+}
+
+/* Release a struct vcpu allocated by alloc_vcpu_struct(). */
+void free_vcpu_struct(struct vcpu *v)
+{
+    free_xenheap_page(v);
+}
+
+/* Arch-specific vcpu initialisation.  Nothing to set up yet; always
+ * succeeds (returns 0). */
+int vcpu_initialise(struct vcpu *v)
+{
+    int rc = 0;
+
+    return rc;
+}
+
+/* Arch-specific vcpu teardown.  Nothing to release yet. */
+void vcpu_destroy(struct vcpu *v)
+{
+
+}
+
+/*
+ * Arch-specific domain creation.  Only sets the vcpu limit for now;
+ * p2m and vgic setup are added by later patches.  Always succeeds.
+ */
+int arch_domain_create(struct domain *d, unsigned int domcr_flags)
+{
+    /* TODO: make the per-domain vcpu limit configurable. */
+    d->max_vcpus = 8;
+
+    return 0;
+}
+
+/* Arch-specific domain teardown.  Placeholders for what later patches
+ * will need to release here. */
+void arch_domain_destroy(struct domain *d)
+{
+    /* p2m_destroy */
+    /* domain_vgic_destroy */
+}
+
+/* Stub required by common code; no arch domain info to dump yet. */
+void arch_dump_domain_info(struct domain *d)
+{
+}
+
+/* Stub required by common code; no arch vcpu info to dump yet. */
+void arch_dump_vcpu_info(struct vcpu *v)
+{
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
new file mode 100644
index 0000000..c226bdf
--- /dev/null
+++ b/xen/include/asm-arm/domain.h
@@ -0,0 +1,43 @@
+#ifndef __ASM_DOMAIN_H__
+#define __ASM_DOMAIN_H__
+
+#include <xen/config.h>
+#include <xen/cache.h>
+#include <asm/page.h>
+#include <asm/p2m.h>
+
+/* An interrupt pending for delivery to a guest. */
+struct pending_irq
+{
+    int irq;
+    struct irq_desc *desc; /* only set if the irq corresponds to a physical irq */
+    uint8_t priority;
+    struct list_head link; /* queued on a per-vcpu/per-domain pending list */
+};
+
+/* Per-domain arch state.  Empty for now; p2m and vgic state are added
+ * by later patches. */
+struct arch_domain
+{
+}  __cacheline_aligned;
+
+/* Per-vcpu arch state: the saved guest register frame plus the ARM
+ * system registers that are context-switched per vcpu. */
+struct arch_vcpu
+{
+    struct cpu_user_regs user_regs;  /* saved guest GP register frame */
+
+    uint32_t sctlr;                  /* System Control Register */
+    uint32_t ttbr0, ttbr1, ttbcr;    /* stage-1 translation table base/control */
+
+}  __cacheline_aligned;
+
+void vcpu_show_execution_state(struct vcpu *);
+void vcpu_show_registers(const struct vcpu *);
+
+#endif /* __ASM_DOMAIN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.