|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC/RFT][PATCH 3 of 3] Introduce proper locking in sedf.
Add a global scheduler lock in sedf, as we have in credit and credit2,
mostly for preventing races while adjusting scheduling parameters. Also,
add runq locking within sedf_adjust_weights since we are touching
scheduling sensitive values in there.
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
diff -r 17653f5c2995 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c Wed Nov 23 15:20:28 2011 +0100
+++ b/xen/common/sched_sedf.c Wed Nov 23 15:36:22 2011 +0100
@@ -61,6 +61,11 @@ struct sedf_dom_info {
struct domain *domain;
};
+struct sedf_priv_info {
+ /* lock for the whole pluggable scheduler, nests inside cpupool_lock */
+ spinlock_t lock;
+};
+
struct sedf_vcpu_info {
struct vcpu *vcpu;
struct list_head list;
@@ -115,6 +120,8 @@ struct sedf_cpu_info {
s_time_t current_slice_expires;
};
+#define SEDF_PRIV(_ops) \
+ ((struct sedf_priv_info *)((_ops)->sched_data))
#define EDOM_INFO(d) ((struct sedf_vcpu_info *)((d)->sched_priv))
#define CPU_INFO(cpu) \
((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
@@ -772,6 +779,31 @@ static struct task_slice sedf_do_extra_s
}
+static int sedf_init(struct scheduler *ops)
+{
+ struct sedf_priv_info *prv;
+
+ prv = xzalloc(struct sedf_priv_info);
+ if ( prv == NULL )
+ return -ENOMEM;
+
+ ops->sched_data = prv;
+ spin_lock_init(&prv->lock);
+
+ return 0;
+}
+
+
+static void sedf_deinit(const struct scheduler *ops)
+{
+ struct sedf_priv_info *prv;
+
+ prv = SEDF_PRIV(ops);
+ if ( prv != NULL )
+ xfree(prv);
+}
+
+
/* Main scheduling function
Reasons for calling this function are:
-timeslice for the current period used up
@@ -1247,11 +1279,18 @@ static void sedf_dump_domain(struct vcpu
/* dumps all domains on the specified cpu */
static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
{
+ struct sedf_priv_info *prv = SEDF_PRIV(ops);
struct list_head *list, *queue, *tmp;
struct sedf_vcpu_info *d_inf;
struct domain *d;
struct vcpu *ed;
int loop = 0;
+ unsigned long flags;
+
+ /* Lock the scheduler, we want to be sure we dump a
+ * consistent set of parameters.
+ */
+ spin_lock_irqsave(&prv->lock, flags);
printk("now=%"PRIu64"\n",NOW());
queue = RUNQ(i);
@@ -1316,6 +1355,8 @@ static void sedf_dump_cpu_state(const st
}
}
rcu_read_unlock(&domlist_read_lock);
+
+ spin_unlock_irqrestore(&prv->lock, flags);
}
@@ -1335,7 +1376,9 @@ static int sedf_adjust_weights(struct cp
return -ENOMEM;
}
- /* Sum across all weights. */
+ /* Sum across all weights. Notice that no runq locking is needed
+ * here: the caller holds sedf_priv_info->lock and we're not changing
+ * anything that is accessed during scheduling. */
rcu_read_lock(&domlist_read_lock);
for_each_domain_in_cpupool( d, c )
{
@@ -1365,7 +1408,9 @@ static int sedf_adjust_weights(struct cp
}
rcu_read_unlock(&domlist_read_lock);
- /* Adjust all slices (and periods) to the new weight. */
+ /* Adjust all slices (and periods) to the new weight. Unlike above, we
+ * need to take the runq lock for the various VCPUs: we're modifying
+ * slice and period which are referenced during scheduling. */
rcu_read_lock(&domlist_read_lock);
for_each_domain_in_cpupool( d, c )
{
@@ -1375,12 +1420,15 @@ static int sedf_adjust_weights(struct cp
continue;
if ( EDOM_INFO(p)->weight )
{
+ /* Interrupts already off */
+ vcpu_schedule_lock(p);
EDOM_INFO(p)->period_orig =
EDOM_INFO(p)->period = WEIGHT_PERIOD;
EDOM_INFO(p)->slice_orig =
EDOM_INFO(p)->slice =
(EDOM_INFO(p)->weight *
(WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
+ vcpu_schedule_unlock(p);
}
}
}
@@ -1396,6 +1444,8 @@ static int sedf_adjust_weights(struct cp
/* set or fetch domain scheduling parameters */
static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct
xen_domctl_scheduler_op *op)
{
+ struct sedf_priv_info *prv = SEDF_PRIV(ops);
+ unsigned long flags;
struct vcpu *v;
int rc = 0;
@@ -1404,11 +1454,12 @@ static int sedf_adjust(const struct sche
p->domain_id, op->u.sedf.period, op->u.sedf.slice,
op->u.sedf.latency, (op->u.sedf.extratime)?"yes":"no");
- /* Serialize everything against runq lock to prevent concurrent update
- * (notice all non-current VCPUs of the domain have been paused in the
- * caller). */
- if ( p == current->domain )
- vcpu_schedule_lock_irq(current);
+ /* Serialize against the pluggable scheduler lock to protect from
+ * concurrent updates. We need to take the runq lock for the VCPUs
+ * as well, since we are touching extraweight, weight, slice and
+ * period. As in sched_credit2.c, runq locks nest inside the
+ * pluggable scheduler lock. */
+ spin_lock_irqsave(&prv->lock, flags);
if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
{
@@ -1427,43 +1478,52 @@ static int sedf_adjust(const struct sche
/* Weight-driven domains with extratime only. */
for_each_vcpu ( p, v )
{
+ /* (Here and everywhere in the following) IRQs are already off,
+ * hence vcpu_schedule_lock() is the one. */
+ vcpu_schedule_lock(v);
EDOM_INFO(v)->extraweight = op->u.sedf.weight;
EDOM_INFO(v)->weight = 0;
EDOM_INFO(v)->slice = 0;
EDOM_INFO(v)->period = WEIGHT_PERIOD;
+ vcpu_schedule_unlock(v);
}
}
else
{
/* Weight-driven domains with real-time execution. */
- for_each_vcpu ( p, v )
+ for_each_vcpu ( p, v ) {
+ vcpu_schedule_lock(v);
EDOM_INFO(v)->weight = op->u.sedf.weight;
+ vcpu_schedule_unlock(v);
+ }
}
}
else
{
+ /*
+ * Sanity checking: note that disabling extra weight requires
+ * that we set a non-zero slice.
+ */
+ if ( (op->u.sedf.period > PERIOD_MAX) ||
+ (op->u.sedf.period < PERIOD_MIN) ||
+ (op->u.sedf.slice > op->u.sedf.period) ||
+ (op->u.sedf.slice < SLICE_MIN) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
/* Time-driven domains. */
for_each_vcpu ( p, v )
{
- /*
- * Sanity checking: note that disabling extra weight requires
- * that we set a non-zero slice.
- */
- if ( (op->u.sedf.period > PERIOD_MAX) ||
- (op->u.sedf.period < PERIOD_MIN) ||
- (op->u.sedf.slice > op->u.sedf.period) ||
- (op->u.sedf.slice < SLICE_MIN) )
- {
- rc = -EINVAL;
- goto out;
- }
-
+ vcpu_schedule_lock(v);
EDOM_INFO(v)->weight = 0;
EDOM_INFO(v)->extraweight = 0;
EDOM_INFO(v)->period_orig =
EDOM_INFO(v)->period = op->u.sedf.period;
EDOM_INFO(v)->slice_orig =
EDOM_INFO(v)->slice = op->u.sedf.slice;
+ vcpu_schedule_unlock(v);
}
}
@@ -1473,11 +1533,13 @@ static int sedf_adjust(const struct sche
for_each_vcpu ( p, v )
{
+ vcpu_schedule_lock(v);
EDOM_INFO(v)->status =
(EDOM_INFO(v)->status &
~EXTRA_AWARE) | (op->u.sedf.extratime & EXTRA_AWARE);
EDOM_INFO(v)->latency = op->u.sedf.latency;
extraq_check(v);
+ vcpu_schedule_unlock(v);
}
}
else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
@@ -1496,17 +1558,19 @@ static int sedf_adjust(const struct sche
}
out:
- if ( p == current->domain )
- vcpu_schedule_unlock_irq(current);
+ spin_unlock_irqrestore(&prv->lock, flags);
PRINT(2,"sedf_adjust_finished with return code %d\n", rc);
return rc;
}
+static struct sedf_priv_info _sedf_priv;
+
const struct scheduler sched_sedf_def = {
- .name = "Simple EDF Scheduler",
- .opt_name = "sedf",
- .sched_id = XEN_SCHEDULER_SEDF,
+ .name = "Simple EDF Scheduler",
+ .opt_name = "sedf",
+ .sched_id = XEN_SCHEDULER_SEDF,
+ .sched_data = &_sedf_priv,
.init_domain = sedf_init_domain,
.destroy_domain = sedf_destroy_domain,
@@ -1520,6 +1584,9 @@ const struct scheduler sched_sedf_def =
.alloc_domdata = sedf_alloc_domdata,
.free_domdata = sedf_free_domdata,
+ .init = sedf_init,
+ .deinit = sedf_deinit,
+
.do_schedule = sedf_do_schedule,
.pick_cpu = sedf_pick_cpu,
.dump_cpu_state = sedf_dump_cpu_state,
--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-------------------------------------------------------------------
Dario Faggioli, http://retis.sssup.it/people/faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
PhD Candidate, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa (Italy)
Attachment:
signature.asc _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |