
[Xen-devel] [PATCH v1 1/5] xen: sched: null: refactor core around vcpu_deassign()



vcpu_deassign() has only one caller: _vcpu_remove().
Let's consolidate the two functions into one.

No functional change intended.

Signed-off-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/common/sched_null.c |   76 ++++++++++++++++++++++-------------------------
 1 file changed, 35 insertions(+), 41 deletions(-)
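For reference, here is a minimal standalone sketch (not the actual Xen code;
the types and helpers below are simplified stand-ins for list_head,
for_each_affinity_balance_step() and friends) of the two-pass waitqueue scan
that this patch moves into vcpu_deassign(): the first pass considers only
waiters that have soft-affinity with the freed pCPU, and the second pass
falls back to hard affinity.

/*
 * Standalone sketch, NOT Xen code: illustrates the two-pass scan of the
 * waitqueue performed when a pCPU is freed.  Pass one restricts the search
 * to waiters with soft-affinity for the CPU; pass two checks hard affinity.
 */
#include <stdbool.h>
#include <stddef.h>

enum balance_step { BALANCE_SOFT_AFFINITY, BALANCE_HARD_AFFINITY };

struct waiter {
    struct waiter *next;
    bool soft_affinity;     /* vcpu has a meaningful soft-affinity mask */
    bool hard_ok[64];       /* hard-affinity bitmap, one flag per pCPU */
    bool soft_ok[64];       /* soft-affinity bitmap, one flag per pCPU */
};

static bool check_affinity(const struct waiter *w, unsigned int cpu,
                           enum balance_step bs)
{
    return bs == BALANCE_SOFT_AFFINITY ? w->soft_ok[cpu] : w->hard_ok[cpu];
}

/* Return the first waiter suitable to run on cpu, soft-affinity first. */
static struct waiter *pick_waiter(struct waiter *waitq, unsigned int cpu)
{
    for ( int bs = BALANCE_SOFT_AFFINITY; bs <= BALANCE_HARD_AFFINITY; bs++ )
        for ( struct waiter *w = waitq; w != NULL; w = w->next )
        {
            /* In the soft pass, vcpus with no soft-affinity are skipped. */
            if ( bs == BALANCE_SOFT_AFFINITY && !w->soft_affinity )
                continue;
            if ( check_affinity(w, cpu, bs) )
                return w;
        }
    return NULL;    /* nobody suitable: cpu stays in cpus_free */
}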

diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 784db71027..f372172c32 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -359,9 +359,14 @@ static void vcpu_assign(struct null_private *prv, struct vcpu *v,
     }
 }
 
-static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
-                          unsigned int cpu)
+static void vcpu_deassign(struct null_private *prv, struct vcpu *v)
 {
+    unsigned int bs;
+    unsigned int cpu = v->processor;
+    struct null_vcpu *wvc;
+
+    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
+
     per_cpu(npc, cpu).vcpu = NULL;
     cpumask_set_cpu(cpu, &prv->cpus_free);
 
@@ -378,6 +383,32 @@ static void vcpu_deassign(struct null_private *prv, struct vcpu *v,
         d.cpu = cpu;
         __trace_var(TRC_SNULL_VCPU_DEASSIGN, 1, sizeof(d), &d);
     }
+
+    spin_lock(&prv->waitq_lock);
+
+    /*
+     * If v is assigned to a pCPU, let's see if there is someone waiting,
+     * suitable to be assigned to it (prioritizing vcpus that have
+     * soft-affinity with cpu).
+     */
+    for_each_affinity_balance_step( bs )
+    {
+        list_for_each_entry( wvc, &prv->waitq, waitq_elem )
+        {
+            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
+                continue;
+
+            if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
+            {
+                list_del_init(&wvc->waitq_elem);
+                vcpu_assign(prv, wvc->vcpu, cpu);
+                cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+                spin_unlock(&prv->waitq_lock);
+                return;
+            }
+        }
+    }
+    spin_unlock(&prv->waitq_lock);
 }
 
 /* Change the scheduler of cpu to us (null). */
@@ -469,43 +500,6 @@ static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v)
     SCHED_STAT_CRANK(vcpu_insert);
 }
 
-static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
-{
-    unsigned int bs;
-    unsigned int cpu = v->processor;
-    struct null_vcpu *wvc;
-
-    ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
-
-    vcpu_deassign(prv, v, cpu);
-
-    spin_lock(&prv->waitq_lock);
-
-    /*
-     * If v is assigned to a pCPU, let's see if there is someone waiting,
-     * suitable to be assigned to it (prioritizing vcpus that have
-     * soft-affinity with cpu).
-     */
-    for_each_affinity_balance_step( bs )
-    {
-        list_for_each_entry( wvc, &prv->waitq, waitq_elem )
-        {
-            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
-                continue;
-
-            if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
-            {
-                list_del_init(&wvc->waitq_elem);
-                vcpu_assign(prv, wvc->vcpu, cpu);
-                cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
-                spin_unlock(&prv->waitq_lock);
-                return;
-            }
-        }
-    }
-    spin_unlock(&prv->waitq_lock);
-}
-
 static void null_vcpu_remove(const struct scheduler *ops, struct vcpu *v)
 {
     struct null_private *prv = null_priv(ops);
@@ -529,7 +523,7 @@ static void null_vcpu_remove(const struct scheduler *ops, struct vcpu *v)
     ASSERT(per_cpu(npc, v->processor).vcpu == v);
     ASSERT(!cpumask_test_cpu(v->processor, &prv->cpus_free));
 
-    _vcpu_remove(prv, v);
+    vcpu_deassign(prv, v);
 
  out:
     vcpu_schedule_unlock_irq(lock, v);
@@ -615,7 +609,7 @@ static void null_vcpu_migrate(const struct scheduler *ops, struct vcpu *v,
      */
     if ( likely(list_empty(&nvc->waitq_elem)) )
     {
-        _vcpu_remove(prv, v);
+        vcpu_deassign(prv, v);
         SCHED_STAT_CRANK(migrate_running);
     }
     else

