
[Xen-devel] [PATCH 1 of 1] sched: rework locking for dump debugkey.



As in all other code paths, locking should be handled by the
specific schedulers, not in schedule.c.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
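
For reference, here is a minimal sketch (not part of the patch) of the
nested locking pattern each scheduler's dump path now follows: the
IRQ-safe, scheduler-wide private lock is taken first, with the per-CPU
runqueue lock nested inside it. Names follow sched_credit.c; the actual
dump body is elided.

    static void example_dump_pcpu(const struct scheduler *ops, int cpu)
    {
        struct csched_private *prv = CSCHED_PRIV(ops);
        unsigned long flags;

        /* Serialize against changes to domains' scheduling parameters. */
        spin_lock_irqsave(&prv->lock, flags);

        /* One runq per CPU: take that CPU's runqueue lock, nested inside. */
        pcpu_schedule_lock(cpu);

        /* ... walk and print the runqueue for this CPU ... */

        pcpu_schedule_unlock(cpu);
        spin_unlock_irqrestore(&prv->lock, flags);
    }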

diff -r 15ab61865ecb xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Tue Jan 17 12:40:52 2012 +0000
+++ b/xen/common/sched_credit.c Wed Jan 18 15:02:30 2012 +0000
@@ -1451,11 +1451,16 @@ static void
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct list_head *runq, *iter;
+    struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_pcpu *spc;
     struct csched_vcpu *svc;
+    unsigned long flags;
     int loop;
 #define cpustr keyhandler_scratch
 
+    /* Domains' parameters are changed under the csched_priv lock. */
+    spin_lock_irqsave(&prv->lock, flags);
+
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
 
@@ -1464,6 +1469,12 @@ csched_dump_pcpu(const struct scheduler 
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
+    /*
+     * We need the runq lock as well and, as there is one runq per
+     * CPU, this is the correct lock to take for this CPU/runq.
+     */
+    pcpu_schedule_lock(cpu);
+
     /* current VCPU */
     svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
     if ( svc )
@@ -1482,6 +1493,9 @@ csched_dump_pcpu(const struct scheduler 
             csched_dump_vcpu(svc);
         }
     }
+
+    pcpu_schedule_unlock(cpu);
+    spin_unlock_irqrestore(&prv->lock, flags);
 #undef cpustr
 }
 
@@ -1493,7 +1507,7 @@ csched_dump(const struct scheduler *ops)
     int loop;
     unsigned long flags;
 
-    spin_lock_irqsave(&(prv->lock), flags);
+    spin_lock_irqsave(&prv->lock, flags);
 
 #define idlers_buf keyhandler_scratch
 
@@ -1537,12 +1551,14 @@ csched_dump(const struct scheduler *ops)
             svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
 
             printk("\t%3d: ", ++loop);
+            vcpu_schedule_lock(svc->vcpu);
             csched_dump_vcpu(svc);
+            vcpu_schedule_unlock(svc->vcpu);
         }
     }
 #undef idlers_buf
 
-    spin_unlock_irqrestore(&(prv->lock), flags);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static int
diff -r 15ab61865ecb xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Tue Jan 17 12:40:52 2012 +0000
+++ b/xen/common/sched_credit2.c        Wed Jan 18 15:02:30 2012 +0000
@@ -53,8 +53,6 @@
  * credit2 wiki page:
  *  http://wiki.xen.org/wiki/Credit2_Scheduler_Development
  * TODO:
- * + Immediate bug-fixes
- *  - Do per-runqueue, grab proper lock for dump debugkey
  * + Multiple sockets
  *  - Detect cpu layout and make runqueue map, one per L2 (make_runq_map())
  *  - Simple load balancer / runqueue assignment
@@ -1759,12 +1757,16 @@ csched_dump_vcpu(struct csched_vcpu *svc
 static void
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
+    struct csched_private *prv = CSCHED_PRIV(ops);
     struct list_head *runq, *iter;
     struct csched_vcpu *svc;
+    unsigned long flags;
+    spinlock_t *lock;
     int loop;
     char cpustr[100];
 
-    /* FIXME: Do locking properly for access to runqueue structures */
+    /* Domains' parameters are changed under the csched_priv lock. */
+    spin_lock_irqsave(&prv->lock, flags);
 
     runq = &RQD(ops, cpu)->runq;
 
@@ -1773,6 +1775,13 @@ csched_dump_pcpu(const struct scheduler 
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
+    /*
+     * We need the runq lock as well; this is how we reach it for
+     * the requested CPU.
+     */
+    lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spin_lock(lock);
+
     /* current VCPU */
     svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
     if ( svc )
@@ -1791,6 +1800,9 @@ csched_dump_pcpu(const struct scheduler 
             csched_dump_vcpu(svc);
         }
     }
+
+    spin_unlock(lock);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static void
@@ -1798,8 +1810,11 @@ csched_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
     struct csched_private *prv = CSCHED_PRIV(ops);
+    unsigned long flags;
     int i, loop;
 
+    spin_lock_irqsave(&prv->lock, flags);
+
     printk("Active queues: %d\n"
            "\tdefault-weight     = %d\n",
            cpumask_weight(&prv->active_queues),
@@ -1822,7 +1837,6 @@ csched_dump(const struct scheduler *ops)
                fraction);
 
     }
-    /* FIXME: Locking! */
 
     printk("Domain info:\n");
     loop = 0;
@@ -1831,10 +1845,10 @@ csched_dump(const struct scheduler *ops)
         struct csched_dom *sdom;
         sdom = list_entry(iter_sdom, struct csched_dom, sdom_elem);
 
-       printk("\tDomain: %d w %d v %d\n\t", 
-              sdom->dom->domain_id, 
-              sdom->weight, 
-              sdom->nr_vcpus);
+        printk("\tDomain: %d w %d v %d\n\t", 
+               sdom->dom->domain_id, 
+               sdom->weight, 
+               sdom->nr_vcpus);
 
         list_for_each( iter_svc, &sdom->vcpu )
         {
@@ -1842,9 +1856,13 @@ csched_dump(const struct scheduler *ops)
             svc = list_entry(iter_svc, struct csched_vcpu, sdom_elem);
 
             printk("\t%3d: ", ++loop);
+            vcpu_schedule_lock(svc->vcpu);
             csched_dump_vcpu(svc);
+            vcpu_schedule_unlock(svc->vcpu);
         }
     }
+
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 static void activate_runqueue(struct csched_private *prv, int rqi)
diff -r 15ab61865ecb xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Tue Jan 17 12:40:52 2012 +0000
+++ b/xen/common/sched_sedf.c   Wed Jan 18 15:02:30 2012 +0000
@@ -1219,13 +1219,25 @@ static void sedf_dump_domain(struct vcpu
 /* Dumps all domains on the specified cpu */
 static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
 {
+    struct sedf_priv_info *prv = SEDF_PRIV(ops);
     struct list_head      *list, *queue, *tmp;
     struct sedf_vcpu_info *d_inf;
     struct domain         *d;
     struct vcpu    *ed;
+    unsigned long flags;
     int loop = 0;
+
+    /* Domains' parameters are changed under the scheduler lock. */
+    spin_lock_irqsave(&prv->lock, flags);
  
     printk("now=%"PRIu64"\n",NOW());
+
+    /*
+     * We need the runq lock as well and, as there is one runq per
+     * CPU, this is the correct lock to take for this CPU/runq.
+     */
+    pcpu_schedule_lock(i);
+
     queue = RUNQ(i);
     printk("RUNQ rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
@@ -1288,6 +1300,9 @@ static void sedf_dump_cpu_state(const st
         }
     }
     rcu_read_unlock(&domlist_read_lock);
+
+    pcpu_schedule_unlock(i);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }
 
 
diff -r 15ab61865ecb xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Jan 17 12:40:52 2012 +0000
+++ b/xen/common/schedule.c     Wed Jan 18 15:02:30 2012 +0000
@@ -1417,6 +1417,7 @@ void schedule_dump(struct cpupool *c)
     struct scheduler *sched;
     cpumask_t        *cpus;
 
+    /* Proper locking is provided by each scheduler */
     sched = (c == NULL) ? &ops : c->sched;
     cpus = (c == NULL) ? &cpupool_free_cpus : c->cpu_valid;
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
@@ -1424,10 +1425,8 @@ void schedule_dump(struct cpupool *c)
 
     for_each_cpu (i, cpus)
     {
-        pcpu_schedule_lock(i);
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        pcpu_schedule_unlock(i);
     }
 }
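
For completeness, the per-CPU lock helpers used above expand
(approximately; see xen/include/xen/sched-if.h, reproduced here from
memory as an assumption, not verbatim) to taking the spinlock that
schedule_data points at for that CPU. This is the same lock credit2's
csched_dump_pcpu reaches by fetching the pointer directly:

    /* Approximate shape of the helpers; an illustration, not a copy. */
    static inline void pcpu_schedule_lock(int cpu)
    {
        spin_lock(per_cpu(schedule_data, cpu).schedule_lock);
    }

    static inline void pcpu_schedule_unlock(int cpu)
    {
        spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
    }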
 

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-------------------------------------------------------------------
Dario Faggioli, http://retis.sssup.it/people/faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
PhD Candidate, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa (Italy)
