
[Xen-devel] [PATCH 04/16] xen: sched: implement .init_pdata in all schedulers



by moving into it the bits of .alloc_pdata that
perform initializations, leaving in .alloc_pdata
only the actual allocations, when there are any,
which is the case for Credit1 and RTDS.
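
The resulting shape of the two hooks is roughly the
following (a simplified sketch with made-up names, not
the literal Credit1 code):

    /* Hypothetical per-pCPU data, standing in for csched_pcpu & co. */
    struct sched_pcpu {
        struct list_head runq;
    };

    /* Allocation only: no initialization happens here any longer. */
    static void *
    sched_alloc_pdata(const struct scheduler *ops, int cpu)
    {
        struct sched_pcpu *spc = xzalloc(struct sched_pcpu);

        if ( spc == NULL )
            return ERR_PTR(-ENOMEM);

        return spc;
    }

    /* All the actual initialization of that data moves here. */
    static void
    sched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
    {
        struct sched_pcpu *spc = pdata;

        INIT_LIST_HEAD(&spc->runq);
        /* ... plus locks, idlers bookkeeping, etc. */
    }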

On the other hand, Credit2 does not really need any
per-pCPU data allocation, so everything that was
being done in .alloc_pdata is now done in
.init_pdata. And since .alloc_pdata can now be left
undefined, we can just get rid of it.
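
In fact, leaving the hook undefined works because the
common code is only expected to invoke it when it is
actually there, along these lines (a sketch of the
assumed behaviour, not the actual code in schedule.c):

    /* Only ask the scheduler for per-pCPU data if it wants some. */
    if ( ops->alloc_pdata != NULL )
    {
        void *pdata = ops->alloc_pdata(ops, cpu);

        if ( IS_ERR(pdata) )
            return PTR_ERR(pdata);

        per_cpu(schedule_data, cpu).sched_priv = pdata;
    }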

Still for Credit2, the fact that .init_pdata is
called during CPU_STARTING (rather than during
CPU_UP_PREPARE) removes the need for the scheduler
to set up a similar callback itself, which
simplifies the code.
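
The idea is that the generic scheduling code calls the
hook from its own CPU notifier at CPU_STARTING time,
roughly as sketched below (assumed shape, simplified
names; the real call site lives in schedule.c and is
not part of this patch):

    static int generic_cpu_callback(
        struct notifier_block *nfb, unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;
        struct scheduler *sched = per_cpu(scheduler, cpu);

        switch ( action )
        {
        case CPU_STARTING:
            /* The pCPU is coming up: initialize its scheduler data now. */
            if ( sched->init_pdata != NULL )
                sched->init_pdata(sched,
                                  per_cpu(schedule_data, cpu).sched_priv,
                                  cpu);
            break;
        default:
            /* Other phases (CPU_UP_PREPARE, CPU_DEAD, ...) omitted. */
            break;
        }

        return NOTIFY_DONE;
    }

This is essentially what csched2_cpu_starting() and
cpu_credit2_callback() were doing privately, which is
why they can now go away.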

And thanks to that simplification, it is now also
fine to turn the logic that double checks whether a
cpu was (or was not) already initialized into
ASSERT()s, rather than an if() and a BUG_ON().
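
For instance, in what is now csched2_init_pdata(), the
"is this cpu already initialized?" check goes from:

    if ( cpumask_test_cpu(cpu, &prv->initialized) )
    {
        printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
        spin_unlock_irqrestore(&prv->lock, flags);
        return;
    }

to just:

    ASSERT(!cpumask_test_cpu(cpu, &prv->initialized));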

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Meng Xu <mengxu@xxxxxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
---
 xen/common/sched_credit.c  |   20 +++++++++---
 xen/common/sched_credit2.c |   72 +++-----------------------------------------
 xen/common/sched_rt.c      |    9 ++++--
 3 files changed, 26 insertions(+), 75 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 288749f..d4a0f5e 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -526,8 +526,6 @@ static void *
 csched_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     struct csched_pcpu *spc;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-    unsigned long flags;
 
     /* Allocate per-PCPU info */
     spc = xzalloc(struct csched_pcpu);
@@ -540,6 +538,19 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
         return ERR_PTR(-ENOMEM);
     }
 
+    return spc;
+}
+
+static void
+csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
+{
+    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched_pcpu * const spc = pdata;
+    unsigned long flags;
+
+    /* cpu data needs to be allocated, but STILL uninitialized */
+    ASSERT(spc && spc->runq.next == spc->runq.prev && spc->runq.next == NULL);
+
     spin_lock_irqsave(&prv->lock, flags);
 
     /* Initialize/update system-wide config */
@@ -560,16 +571,12 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = prv->runq_sort;
     spc->idle_bias = nr_cpu_ids - 1;
-    if ( per_cpu(schedule_data, cpu).sched_priv == NULL )
-        per_cpu(schedule_data, cpu).sched_priv = spc;
 
     /* Start off idling... */
     BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
     cpumask_set_cpu(cpu, prv->idlers);
 
     spin_unlock_irqrestore(&prv->lock, flags);
-
-    return spc;
 }
 
 #ifndef NDEBUG
@@ -2051,6 +2058,7 @@ static const struct scheduler sched_credit_def = {
     .alloc_vdata    = csched_alloc_vdata,
     .free_vdata     = csched_free_vdata,
     .alloc_pdata    = csched_alloc_pdata,
+    .init_pdata     = csched_init_pdata,
     .free_pdata     = csched_free_pdata,
     .alloc_domdata  = csched_alloc_domdata,
     .free_domdata   = csched_free_domdata,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 36dc9ee..b1642a8 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1968,7 +1968,8 @@ static void deactivate_runqueue(struct csched2_private *prv, int rqi)
     cpumask_clear_cpu(rqi, &prv->active_queues);
 }
 
-static void init_pcpu(const struct scheduler *ops, int cpu)
+static void
+csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     unsigned rqi;
     unsigned long flags;
@@ -1978,12 +1979,7 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    if ( cpumask_test_cpu(cpu, &prv->initialized) )
-    {
-        printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
-        spin_unlock_irqrestore(&prv->lock, flags);
-        return;
-    }
+    ASSERT(!cpumask_test_cpu(cpu, &prv->initialized));
 
     /* Figure out which runqueue to put it in */
     rqi = 0;
@@ -2033,20 +2029,6 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
     return;
 }
 
-static void *
-csched2_alloc_pdata(const struct scheduler *ops, int cpu)
-{
-    /* Check to see if the cpu is online yet */
-    /* Note: cpu 0 doesn't get a STARTING callback */
-    if ( cpu == 0 || cpu_to_socket(cpu) != XEN_INVALID_SOCKET_ID )
-        init_pcpu(ops, cpu);
-    else
-        printk("%s: cpu %d not online yet, deferring initializatgion\n",
-               __func__, cpu);
-
-    return NULL;
-}
-
 static void
 csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
@@ -2058,7 +2040,7 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    BUG_ON(!cpumask_test_cpu(cpu, &prv->initialized));
+    ASSERT(cpumask_test_cpu(cpu, &prv->initialized));
     
     /* Find the old runqueue and remove this cpu from it */
     rqi = prv->runq_map[cpu];
@@ -2096,49 +2078,6 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 }
 
 static int
-csched2_cpu_starting(int cpu)
-{
-    struct scheduler *ops;
-
-    /* Hope this is safe from cpupools switching things around. :-) */
-    ops = per_cpu(scheduler, cpu);
-
-    if ( ops->alloc_pdata == csched2_alloc_pdata )
-        init_pcpu(ops, cpu);
-
-    return NOTIFY_DONE;
-}
-
-static int cpu_credit2_callback(
-    struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (unsigned long)hcpu;
-    int rc = 0;
-
-    switch ( action )
-    {
-    case CPU_STARTING:
-        csched2_cpu_starting(cpu);
-        break;
-    default:
-        break;
-    }
-
-    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
-}
-
-static struct notifier_block cpu_credit2_nfb = {
-    .notifier_call = cpu_credit2_callback
-};
-
-static int
-csched2_global_init(void)
-{
-    register_cpu_notifier(&cpu_credit2_nfb);
-    return 0;
-}
-
-static int
 csched2_init(struct scheduler *ops)
 {
     int i;
@@ -2216,12 +2155,11 @@ static const struct scheduler sched_credit2_def = {
 
     .dump_cpu_state = csched2_dump_pcpu,
     .dump_settings  = csched2_dump,
-    .global_init    = csched2_global_init,
     .init           = csched2_init,
     .deinit         = csched2_deinit,
     .alloc_vdata    = csched2_alloc_vdata,
     .free_vdata     = csched2_free_vdata,
-    .alloc_pdata    = csched2_alloc_pdata,
+    .init_pdata     = csched2_init_pdata,
     .free_pdata     = csched2_free_pdata,
     .alloc_domdata  = csched2_alloc_domdata,
     .free_domdata   = csched2_free_domdata,
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index ac8019f..b6ac3ad 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -649,8 +649,8 @@ rt_deinit(struct scheduler *ops)
  * Point per_cpu spinlock to the global system lock;
  * All cpu have same global system lock
  */
-static void *
-rt_alloc_pdata(const struct scheduler *ops, int cpu)
+static void
+rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
     spinlock_t *old_lock;
@@ -663,7 +663,11 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu)
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
+}
 
+static void *
+rt_alloc_pdata(const struct scheduler *ops, int cpu)
+{
     if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) )
         return ERR_PTR(-ENOMEM);
 
@@ -1395,6 +1399,7 @@ static const struct scheduler sched_rtds_def = {
     .deinit         = rt_deinit,
     .alloc_pdata    = rt_alloc_pdata,
     .free_pdata     = rt_free_pdata,
+    .init_pdata     = rt_init_pdata,
     .alloc_domdata  = rt_alloc_domdata,
     .free_domdata   = rt_free_domdata,
     .init_domain    = rt_dom_init,

