
[Xen-devel] [RFC PATCH v1 2/5] tasklet: Add cross CPU feeding of per-cpu tasklets.



Since the per-cpu tasklets are lock-free and run only
within their own CPU context, we need a mechanism for
'tasklet_schedule_on_cpu' to insert tasklets into the
destination CPU's per-cpu list from other CPUs.

We use an IPI mechanism: each CPU has a per-cpu
'feeding' list, protected by a global lock, onto which
other CPUs add tasklets before sending the IPI.
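
In outline, the remote-CPU side condenses to this (a
sketch distilled from the tasklet_enqueue() hunk below):

    /* Sketch: enqueue onto a remote CPU's feeder list. */
    if ( cpu != smp_processor_id() )
    {
        spin_lock_irqsave(&feeder_lock, flags);
        list_add_tail(&t->list, &per_cpu(tasklet_feeder, cpu));
        spin_unlock_irqrestore(&feeder_lock, flags);
        /* IPI the target so it drains its feeder list. */
        on_selected_cpus(cpumask_of(cpu), percpu_tasklet_feed, NULL, 1);
        return;
    }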

When the IPI arrives on the target CPU, it drains its
per-cpu feeding list and moves the tasklets onto its
per-cpu tasklet list. With that in place we can switch
all of the softirq_tasklet users over to the new
mechanism, which also lets us eliminate the
percpu_tasklet scaffolding.
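
The target-CPU side, condensed from the
percpu_tasklet_feed() handler below:

    /* Sketch: IPI handler drains this CPU's feeder list. */
    spin_lock_irqsave(&feeder_lock, flags);
    while ( !list_empty(list) )
    {
        t = list_entry(list->next, struct tasklet, list);
        list_del(&t->list);
        list_add_tail(&t->list, &__get_cpu_var(softirq_list));
    }
    raise_softirq(TASKLET_SOFTIRQ_PERCPU);
    spin_unlock_irqrestore(&feeder_lock, flags);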

Note that scheduling a tasklet on another CPU is
infrequent: it currently happens only during microcode
updates and when doing hypercall_continuation.
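
For illustration, callers keep using the existing API
unchanged; e.g. (with hypothetical names 'my_tasklet',
'my_func' and a target 'cpu'):

    static struct tasklet my_tasklet;

    softirq_tasklet_init(&my_tasklet, my_func, 0);
    /* Now allowed from any CPU, not just 'cpu' itself: */
    tasklet_schedule_on_cpu(&my_tasklet, cpu);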

This could be squashed into "tasklet: Introduce per-cpu
tasklet for softirq." but the author thought the code
would be easier to understand with these parts split
out.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c       |  2 +-
 xen/common/tasklet.c         | 50 +++++++++++++++++++++++++++++++++++++-------
 xen/drivers/passthrough/io.c |  2 +-
 xen/include/xen/tasklet.h    |  2 --
 4 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4b4cad1..94b18ba 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2270,7 +2270,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
         goto fail3;
 
-    percpu_tasklet_init(
+    softirq_tasklet_init(
         &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
         (void(*)(unsigned long))hvm_assert_evtchn_irq,
         (unsigned long)v);
diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c
index 319866f..d8f3cb3 100644
--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -30,8 +30,36 @@ static DEFINE_PER_CPU(struct list_head, softirq_tasklet_list);
 
 /* Protects all lists and tasklet structures. */
 static DEFINE_SPINLOCK(tasklet_lock);
+static DEFINE_SPINLOCK(feeder_lock);
 
 static DEFINE_PER_CPU(struct list_head, softirq_list);
+static DEFINE_PER_CPU(struct list_head, tasklet_feeder);
+
+static void percpu_tasklet_feed(void *arg)
+{
+    unsigned long flags;
+    struct tasklet *t;
+    struct list_head *dst_list;
+    struct list_head *list = &__get_cpu_var(tasklet_feeder);
+
+    spin_lock_irqsave(&feeder_lock, flags);
+
+    if ( list_empty(list) )
+        goto out;
+
+    while ( !list_empty(list) )
+    {
+        t = list_entry(list->next, struct tasklet, list);
+        BUG_ON(!t->is_percpu);
+        list_del(&t->list);
+
+        dst_list = &__get_cpu_var(softirq_list);
+        list_add_tail(&t->list, dst_list);
+    }
+    raise_softirq(TASKLET_SOFTIRQ_PERCPU);
+out:
+    spin_unlock_irqrestore(&feeder_lock, flags);
+}
 
 static void tasklet_enqueue(struct tasklet *t)
 {
@@ -44,7 +72,18 @@ static void tasklet_enqueue(struct tasklet *t)
 
         INIT_LIST_HEAD(&t->list);
         BUG_ON( !t->is_softirq );
-        BUG_ON( cpu != smp_processor_id() ); /* Not implemented yet. */
+
+        if ( cpu != smp_processor_id() )
+        {
+            spin_lock_irqsave(&feeder_lock, flags);
+
+            list = &per_cpu(tasklet_feeder, cpu);
+            list_add_tail(&t->list, list);
+
+            spin_unlock_irqrestore(&feeder_lock, flags);
+            on_selected_cpus(cpumask_of(cpu), percpu_tasklet_feed, NULL, 1);
+            return;
+        }
 
         local_irq_save(flags);
 
@@ -314,13 +353,6 @@ void softirq_tasklet_init(
 {
     tasklet_init(t, func, data);
     t->is_softirq = 1;
-}
-
-void percpu_tasklet_init(
-    struct tasklet *t, void (*func)(unsigned long), unsigned long data)
-{
-    tasklet_init(t, func, data);
-    t->is_softirq = 1;
     t->is_percpu = 1;
 }
 
@@ -335,12 +367,14 @@ static int cpu_callback(
         INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
         INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu));
         INIT_LIST_HEAD(&per_cpu(softirq_list, cpu));
+        INIT_LIST_HEAD(&per_cpu(tasklet_feeder, cpu));
         break;
     case CPU_UP_CANCELED:
     case CPU_DEAD:
         migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_list, cpu));
         migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_tasklet_list, cpu));
         migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_list, cpu));
+        migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_feeder, cpu));
         break;
     default:
         break;
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 740cee5..ef75b94 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -114,7 +114,7 @@ int pt_irq_create_bind(
             spin_unlock(&d->event_lock);
             return -ENOMEM;
         }
-        percpu_tasklet_init(
+        softirq_tasklet_init(
             &hvm_irq_dpci->dirq_tasklet,
             hvm_dirq_assist, (unsigned long)d);
         for ( i = 0; i < NR_HVM_IRQS; i++ )
diff --git a/xen/include/xen/tasklet.h b/xen/include/xen/tasklet.h
index 9497c47..530a5e7 100644
--- a/xen/include/xen/tasklet.h
+++ b/xen/include/xen/tasklet.h
@@ -99,8 +99,6 @@ void tasklet_init(
     struct tasklet *t, void (*func)(unsigned long), unsigned long data);
 void softirq_tasklet_init(
     struct tasklet *t, void (*func)(unsigned long), unsigned long data);
-void percpu_tasklet_init(
-    struct tasklet *t, void (*func)(unsigned long), unsigned long data);
 void tasklet_subsys_init(void);
 
 #endif /* __XEN_TASKLET_H__ */
-- 
1.9.3

