
[Xen-devel] [RFC PATCH V2 07/14] Generalized event channel operations



Add a struct xen_evtchn_ops *eops pointer to struct domain, referencing
the domain's current set of event channel operations. The existing
2-level handlers are renamed with an _l2 suffix and all callers now
dispatch through d->eops.

When a domain is built, the default operation set installed is the
2-level one.
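
To illustrate the indirection, here is a minimal standalone model of the
pattern (toy types and names, not Xen code): common code only ever calls
through the per-domain ops pointer, so a future N-level implementation can
be plugged in by installing a different function set, without touching any
call site.

    /* Toy model of the per-domain ops table; simplified stand-ins only. */
    #include <stdio.h>

    struct domain;

    struct evtchn_ops {                  /* mirrors struct xen_evtchn_ops */
        void (*set_pending)(struct domain *d, int port);
        int  (*is_pending)(struct domain *d, int port);
    };

    struct domain {
        unsigned long pending;           /* stand-in for shared_info bits */
        const struct evtchn_ops *eops;   /* like d->eops in this patch */
    };

    static void l2_set_pending(struct domain *d, int port)
    {
        d->pending |= 1UL << port;
    }

    static int l2_is_pending(struct domain *d, int port)
    {
        return !!(d->pending & (1UL << port));
    }

    static const struct evtchn_ops evtchn_ops_l2 = {
        .set_pending = l2_set_pending,
        .is_pending  = l2_is_pending,
    };

    int main(void)
    {
        /* Default set installed at domain-build time, as evtchn_init()
         * does with xen_evtchn_ops_l2 in this patch. */
        struct domain d = { .pending = 0, .eops = &evtchn_ops_l2 };

        d.eops->set_pending(&d, 3);      /* callers never name the level */
        printf("port 3 pending: %d\n", d.eops->is_pending(&d, 3));
        return 0;
    }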

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/common/event_channel.c |   64 ++++++++++++++++++++++++++++++++------------
 xen/include/xen/sched.h    |    2 ++
 2 files changed, 49 insertions(+), 17 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 1df2b76..e8faf7d 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -51,6 +51,15 @@
 
 #define consumer_is_xen(e) (!!(e)->xen_consumer)
 
+/* An N-level event channel implementation must provide the following operations. */
+struct xen_evtchn_ops {
+    void (*set_pending)(struct vcpu *v, int port);
+    void (*clear_pending)(struct domain *d, int port);
+    int  (*unmask)(unsigned int port);
+    int  (*is_pending)(struct domain *d, int port);
+    int  (*is_masked)(struct domain *d, int port);
+};
+
 /*
  * The function alloc_unbound_xen_event_channel() allows an arbitrary
  * notifier function to be specified. However, very few unique functions
@@ -94,9 +103,6 @@ static uint8_t get_xen_consumer(xen_event_channel_notification_t fn)
 /* Get the notification function for a given Xen-bound event channel. */
 #define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
 
-static void evtchn_set_pending(struct vcpu *v, int port);
-static void evtchn_clear_pending(struct domain *d, int port);
-
 static int virq_is_global(uint32_t virq)
 {
     int rc;
@@ -157,16 +163,25 @@ static int get_free_port(struct domain *d)
     return port;
 }
 
-int evtchn_is_pending(struct domain *d, int port)
+static int evtchn_is_pending_l2(struct domain *d, int port)
 {
     return test_bit(port, &shared_info(d, evtchn_pending));
 }
 
-int evtchn_is_masked(struct domain *d, int port)
+static int evtchn_is_masked_l2(struct domain *d, int port)
 {
     return test_bit(port, &shared_info(d, evtchn_mask));
 }
 
+int evtchn_is_pending(struct domain *d, int port)
+{
+    return d->eops->is_pending(d, port);
+}
+
+int evtchn_is_masked(struct domain *d, int port)
+{
+    return d->eops->is_masked(d, port);
+}
 
 
 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
 {
@@ -258,7 +273,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
      * We may have lost notifications on the remote unbound port. Fix that up
      * here by conservatively always setting a notification on the local port.
      */
-    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+    ld->eops->set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
 
     bind->local_port = lport;
 
@@ -540,7 +555,7 @@ static long __evtchn_close(struct domain *d1, int port1)
     }
 
     /* Clear pending event to avoid unexpected behavior on re-bind. */
-    evtchn_clear_pending(d1, port1);
+    d1->eops->clear_pending(d1, port1);
 
     /* Reset binding to vcpu0 when the channel is freed. */
     chn1->state          = ECS_FREE;
@@ -605,10 +620,10 @@ int evtchn_send(struct domain *d, unsigned int lport)
         if ( consumer_is_xen(rchn) )
             (*xen_notification_fn(rchn))(rvcpu, rport);
         else
-            evtchn_set_pending(rvcpu, rport);
+            rd->eops->set_pending(rvcpu, rport);
         break;
     case ECS_IPI:
-        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+        ld->eops->set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
         break;
     case ECS_UNBOUND:
         /* silently drop the notification */
@@ -623,7 +638,7 @@ out:
     return ret;
 }
 
-static void evtchn_set_pending(struct vcpu *v, int port)
+static void evtchn_set_pending_l2(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
     int vcpuid;
@@ -664,7 +679,7 @@ static void evtchn_set_pending(struct vcpu *v, int port)
     }
 }
 
-static void evtchn_clear_pending(struct domain *d, int port)
+static void evtchn_clear_pending_l2(struct domain *d, int port)
 {
     clear_bit(port, &shared_info(d, evtchn_pending));
 }
@@ -678,6 +693,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
 {
     unsigned long flags;
     int port;
+    struct domain *d = v->domain;
 
     ASSERT(!virq_is_global(virq));
 
@@ -687,7 +703,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
     if ( unlikely(port == 0) )
         goto out;
 
-    evtchn_set_pending(v, port);
+    d->eops->set_pending(v, port);
 
  out:
     spin_unlock_irqrestore(&v->virq_lock, flags);
@@ -716,7 +732,7 @@ static void send_guest_global_virq(struct domain *d, uint32_t virq)
         goto out;
 
     chn = evtchn_from_port(d, port);
-    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+    d->eops->set_pending(d->vcpu[chn->notify_vcpu_id], port);
 
  out:
     spin_unlock_irqrestore(&v->virq_lock, flags);
@@ -740,7 +756,7 @@ void send_guest_pirq(struct domain *d, const struct pirq *pirq)
     }
 
     chn = evtchn_from_port(d, port);
-    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+    d->eops->set_pending(d->vcpu[chn->notify_vcpu_id], port);
 }
 
 static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
@@ -932,7 +948,7 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
 }
 
 
-int evtchn_unmask(unsigned int port)
+static int evtchn_unmask_l2(unsigned int port)
 {
     struct domain *d = current->domain;
     struct vcpu   *v;
@@ -959,6 +975,12 @@ int evtchn_unmask(unsigned int port)
     return 0;
 }
 
+int evtchn_unmask(unsigned int port)
+{
+    struct domain *d = current->domain;
+    return d->eops->unmask(port);
+}
+
 
 static long evtchn_reset(evtchn_reset_t *r)
 {
@@ -1179,12 +1201,19 @@ void notify_via_xen_event_channel(struct domain *ld, int lport)
         rd    = lchn->u.interdomain.remote_dom;
         rport = lchn->u.interdomain.remote_port;
         rchn  = evtchn_from_port(rd, rport);
-        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+        rd->eops->set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
     }
 
     spin_unlock(&ld->event_lock);
 }
 
+static struct xen_evtchn_ops __read_mostly xen_evtchn_ops_l2 = {
+    .set_pending      = evtchn_set_pending_l2,
+    .clear_pending    = evtchn_clear_pending_l2,
+    .unmask           = evtchn_unmask_l2,
+    .is_pending       = evtchn_is_pending_l2,
+    .is_masked        = evtchn_is_masked_l2,
+};
 
 int evtchn_init(struct domain *d)
 {
@@ -1197,6 +1226,7 @@ int evtchn_init(struct domain *d)
 
     spin_lock_init(&d->event_lock);
     d->evtchn_level = EVTCHN_DEFAULT_LEVEL; /* = 2 */
+    d->eops = &xen_evtchn_ops_l2;
     if ( get_free_port(d) != 0 ) {
         free_xenheap_page(d->evtchn);
         return -EINVAL;
@@ -1272,7 +1302,6 @@ void evtchn_move_pirqs(struct vcpu *v)
     spin_unlock(&d->event_lock);
 }
 
-
 static void domain_dump_evtchn_info(struct domain *d)
 {
     unsigned int port;
@@ -1334,6 +1363,7 @@ static void domain_dump_evtchn_info(struct domain *d)
     spin_unlock(&d->event_lock);
 }
 
+
 static void dump_evtchn_info(unsigned char key)
 {
     struct domain *d;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index eae9baf..df3b877 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -69,6 +69,7 @@ extern struct domain *dom0;
 
 #define EVTCHNS_PER_BUCKET 512
 #define NR_EVTCHN_BUCKETS  (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)
+struct xen_evtchn_ops;
 
 struct evtchn
 {
@@ -279,6 +280,7 @@ struct domain
     struct evtchn  **evtchn;
     spinlock_t       event_lock;
     unsigned int     evtchn_level;
+    struct xen_evtchn_ops *eops;
 
     struct grant_table *grant_table;
 
-- 
1.7.10.4
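
A note on the sched.h hunk: only a forward declaration of struct
xen_evtchn_ops is added there, since struct domain stores just a pointer
and a pointer to an incomplete type is sufficient; the full definition
stays private to event_channel.c. A minimal illustration of that pattern
(toy names, not Xen code):

    /* header side: a pointer member only needs a forward declaration */
    struct ops;                  /* like "struct xen_evtchn_ops;" in sched.h */

    struct dom {
        struct ops *eops;        /* complete type not required here */
    };

    /* one .c file holds the full definition, as event_channel.c does */
    struct ops {
        int (*is_pending)(struct dom *d, int port);
    };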

