[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCHv1 4/6] evtchn: use a per-event channel lock for sending events
When sending an event, use a new per-event channel lock to safely validate the event channel state. This new lock must be held when changing event channel state. To avoid having to take the remote event channel locks when sending to an interdomain event channel, the local and remote channel locks are both held when binding or closing an interdomain event channel. This significantly increases the number of events that can be sent from multiple VCPUs. But, struct evtchn increases in size, reducing the number that fit into a single page to 64 (instead of 128). Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx> --- xen/common/event_channel.c | 84 +++++++++++++++++++++++++++++++++++++------- xen/include/xen/sched.h | 1 + 2 files changed, 73 insertions(+), 12 deletions(-) diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index 482c3ac..71747d1 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -139,6 +139,7 @@ static struct evtchn *alloc_evtchn_bucket(struct domain *d, unsigned int port) return NULL; } chn[i].port = port + i; + spin_lock_init(&chn[i].lock); } return chn; } @@ -228,11 +229,15 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc) if ( rc ) goto out; + spin_lock(&chn->lock); + chn->state = ECS_UNBOUND; if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF ) chn->u.unbound.remote_domid = current->domain->domain_id; evtchn_port_init(d, chn); + spin_unlock(&chn->lock); + alloc->port = port; out: @@ -243,6 +248,30 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc) } +static void double_evtchn_lock(struct domain *ld, struct evtchn *lchn, + struct domain *rd, struct evtchn *rchn) +{ + if ( ld < rd || (ld == rd && lchn->port < rchn->port) ) + { + spin_lock(&lchn->lock); + spin_lock(&rchn->lock); + } + else + { + if ( ld != rd || lchn->port != rchn->port ) + spin_lock(&rchn->lock); + spin_lock(&lchn->lock); + } +} + +static void double_evtchn_unlock(struct domain *ld, struct 
evtchn *lchn, + struct domain *rd, struct evtchn *rchn) +{ + spin_unlock(&lchn->lock); + if ( ld != rd || lchn->port != rchn->port ) + spin_unlock(&rchn->lock); +} + static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) { struct evtchn *lchn, *rchn; @@ -285,6 +314,8 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) if ( rc ) goto out; + double_evtchn_lock(ld, lchn, rd, rchn); + lchn->u.interdomain.remote_dom = rd; lchn->u.interdomain.remote_port = rport; lchn->state = ECS_INTERDOMAIN; @@ -300,6 +331,8 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) */ evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn); + double_evtchn_unlock(ld, lchn, rd, rchn); + bind->local_port = lport; out: @@ -340,11 +373,16 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind) ERROR_EXIT(port); chn = evtchn_from_port(d, port); + + spin_lock(&chn->lock); + chn->state = ECS_VIRQ; chn->notify_vcpu_id = vcpu; chn->u.virq = virq; evtchn_port_init(d, chn); + spin_unlock(&chn->lock); + v->virq_to_evtchn[virq] = bind->port = port; out: @@ -371,10 +409,15 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) ERROR_EXIT(port); chn = evtchn_from_port(d, port); + + spin_lock(&chn->lock); + chn->state = ECS_IPI; chn->notify_vcpu_id = vcpu; evtchn_port_init(d, chn); + spin_unlock(&chn->lock); + bind->port = port; out: @@ -449,11 +492,15 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) goto out; } + spin_lock(&chn->lock); + chn->state = ECS_PIRQ; chn->u.pirq.irq = pirq; link_pirq_port(port, chn, v); evtchn_port_init(d, chn); + spin_unlock(&chn->lock); + bind->port = port; #ifdef CONFIG_X86 @@ -467,7 +514,6 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) return rc; } - static long __evtchn_close(struct domain *d1, int port1) { struct domain *d2 = NULL; @@ -574,15 +620,24 @@ static long __evtchn_close(struct domain *d1, int port1) BUG_ON(chn2->state != ECS_INTERDOMAIN); BUG_ON(chn2->u.interdomain.remote_dom != d1); + 
double_evtchn_lock(d1, chn1, d2, chn2); + + free_evtchn(d1, chn1); + chn2->state = ECS_UNBOUND; chn2->u.unbound.remote_domid = d1->domain_id; - break; + + double_evtchn_unlock(d1, chn1, d2, chn2); + + goto out; default: BUG(); } - free_evtchn(chn1); + spin_lock(&chn1->lock); + free_evtchn(d1, chn1); + spin_unlock(&chn1->lock); out: if ( d2 != NULL ) @@ -609,21 +664,18 @@ int evtchn_send(struct domain *ld, unsigned int lport) struct domain *rd; int rport, ret = 0; - spin_lock(&ld->event_lock); - - if ( unlikely(!port_is_valid(ld, lport)) ) - { - spin_unlock(&ld->event_lock); + if ( unlikely(lport >= atomic_read(&ld->valid_evtchns)) ) return -EINVAL; - } lchn = evtchn_from_port(ld, lport); + spin_lock(&lchn->lock); + /* Guest cannot send via a Xen-attached event channel. */ if ( unlikely(consumer_is_xen(lchn)) ) { - spin_unlock(&ld->event_lock); - return -EINVAL; + ret = -EINVAL; + goto out; } ret = xsm_evtchn_send(XSM_HOOK, ld, lchn); @@ -652,7 +704,7 @@ int evtchn_send(struct domain *ld, unsigned int lport) } out: - spin_unlock(&ld->event_lock); + spin_unlock(&lchn->lock); return ret; } @@ -1163,11 +1215,15 @@ int alloc_unbound_xen_event_channel( if ( rc ) goto out; + spin_lock(&chn->lock); + chn->state = ECS_UNBOUND; chn->xen_consumer = get_xen_consumer(notification_fn); chn->notify_vcpu_id = lvcpu; chn->u.unbound.remote_domid = remote_domid; + spin_unlock(&chn->lock); + out: spin_unlock(&ld->event_lock); @@ -1214,6 +1270,8 @@ void notify_via_xen_event_channel(struct domain *ld, int lport) lchn = evtchn_from_port(ld, lport); ASSERT(consumer_is_xen(lchn)); + spin_lock(&lchn->lock); + if ( likely(lchn->state == ECS_INTERDOMAIN) ) { rd = lchn->u.interdomain.remote_dom; @@ -1221,6 +1279,8 @@ void notify_via_xen_event_channel(struct domain *ld, int lport) evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn); } + spin_unlock(&lchn->lock); + spin_unlock(&ld->event_lock); } diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index fab2e08..292e28f 100644 --- 
a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -79,6 +79,7 @@ extern domid_t hardware_domid; struct evtchn { + spinlock_t lock; #define ECS_FREE 0 /* Channel is available for use. */ #define ECS_RESERVED 1 /* Channel is reserved. */ #define ECS_UNBOUND 2 /* Channel is waiting to bind to a remote domain. */ -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |
servers 24x7x365 and backed by RackSpace's Fanatical Support®. |