[PATCH v4 01/10] evtchn: use per-channel lock where possible
Neither evtchn_status() nor domain_dump_evtchn_info() nor
flask_get_peer_sid() need to hold the per-domain lock - they all only
read a single channel's state (at a time, in the dump case).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: New.

--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -968,15 +968,16 @@ int evtchn_status(evtchn_status_t *statu
     if ( d == NULL )
         return -ESRCH;
 
-    spin_lock(&d->event_lock);
-
     if ( !port_is_valid(d, port) )
     {
-        rc = -EINVAL;
-        goto out;
+        rcu_unlock_domain(d);
+        return -EINVAL;
     }
 
     chn = evtchn_from_port(d, port);
+
+    evtchn_read_lock(chn);
+
     if ( consumer_is_xen(chn) )
     {
         rc = -EACCES;
@@ -1021,7 +1022,7 @@ int evtchn_status(evtchn_status_t *statu
     status->vcpu = chn->notify_vcpu_id;
 
  out:
-    spin_unlock(&d->event_lock);
+    evtchn_read_unlock(chn);
     rcu_unlock_domain(d);
 
     return rc;
@@ -1576,22 +1577,32 @@ void evtchn_move_pirqs(struct vcpu *v)
 static void domain_dump_evtchn_info(struct domain *d)
 {
     unsigned int port;
-    int irq;
 
     printk("Event channel information for domain %d:\n"
            "Polling vCPUs: {%*pbl}\n"
            "    port [p/m/s]\n", d->domain_id, d->max_vcpus, d->poll_mask);
 
-    spin_lock(&d->event_lock);
-
     for ( port = 1; port_is_valid(d, port); ++port )
     {
-        const struct evtchn *chn;
+        struct evtchn *chn;
         char *ssid;
 
+        if ( !(port & 0x3f) )
+            process_pending_softirqs();
+
         chn = evtchn_from_port(d, port);
+
+        if ( !evtchn_read_trylock(chn) )
+        {
+            printk("    %4u in flux\n", port);
+            continue;
+        }
+
         if ( chn->state == ECS_FREE )
+        {
+            evtchn_read_unlock(chn);
             continue;
+        }
 
         printk("    %4u [%d/%d/",
                port,
@@ -1601,26 +1612,49 @@ static void domain_dump_evtchn_info(stru
         printk("]: s=%d n=%d x=%d",
                chn->state, chn->notify_vcpu_id, chn->xen_consumer);
 
+        ssid = xsm_show_security_evtchn(d, chn);
+
         switch ( chn->state )
         {
         case ECS_UNBOUND:
             printk(" d=%d", chn->u.unbound.remote_domid);
             break;
+
         case ECS_INTERDOMAIN:
             printk(" d=%d p=%d",
                    chn->u.interdomain.remote_dom->domain_id,
                    chn->u.interdomain.remote_port);
             break;
-        case ECS_PIRQ:
-            irq = domain_pirq_to_irq(d, chn->u.pirq.irq);
-            printk(" p=%d i=%d", chn->u.pirq.irq, irq);
+
+        case ECS_PIRQ: {
+            unsigned int pirq = chn->u.pirq.irq;
+
+            /*
+             * The per-channel locks nest inside the per-domain one, so we
+             * can't acquire the latter without first letting go of the former.
+             */
+            evtchn_read_unlock(chn);
+            chn = NULL;
+            if ( spin_trylock(&d->event_lock) )
+            {
+                int irq = domain_pirq_to_irq(d, pirq);
+
+                spin_unlock(&d->event_lock);
+                printk(" p=%u i=%d", pirq, irq);
+            }
+            else
+                printk(" p=%u i=?", pirq);
             break;
+        }
+
         case ECS_VIRQ:
             printk(" v=%d", chn->u.virq);
             break;
         }
 
-        ssid = xsm_show_security_evtchn(d, chn);
+        if ( chn )
+            evtchn_read_unlock(chn);
+
         if (ssid) {
             printk(" Z=%s\n", ssid);
             xfree(ssid);
@@ -1628,8 +1662,6 @@ static void domain_dump_evtchn_info(stru
             printk("\n");
         }
     }
-
-    spin_unlock(&d->event_lock);
 }
 
 static void dump_evtchn_info(unsigned char key)
--- a/xen/xsm/flask/flask_op.c
+++ b/xen/xsm/flask/flask_op.c
@@ -555,12 +555,13 @@ static int flask_get_peer_sid(struct xen
     struct evtchn *chn;
     struct domain_security_struct *dsec;
 
-    spin_lock(&d->event_lock);
-
     if ( !port_is_valid(d, arg->evtchn) )
-        goto out;
+        return -EINVAL;
 
     chn = evtchn_from_port(d, arg->evtchn);
+
+    evtchn_read_lock(chn);
+
     if ( chn->state != ECS_INTERDOMAIN )
         goto out;
 
@@ -573,7 +574,7 @@ static int flask_get_peer_sid(struct xen
     rv = 0;
 
  out:
-    spin_unlock(&d->event_lock);
+    evtchn_read_unlock(chn);
     return rv;
 }
 
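Note on the helpers used above: evtchn_read_lock(), evtchn_read_trylock() and
evtchn_read_unlock() are taken to be the thin wrappers around the per-channel
lock introduced by the earlier per-channel locking work. A minimal sketch,
assuming struct evtchn carries an rwlock_t member named "lock" (the member
name and exact definitions are an assumption, not part of this patch):

/* Sketch only - assumes struct evtchn has an rwlock_t member named "lock". */
static inline void evtchn_read_lock(struct evtchn *evtchn)
{
    /* Take the per-channel lock for reading the channel's state. */
    read_lock(&evtchn->lock);
}

static inline bool evtchn_read_trylock(struct evtchn *evtchn)
{
    /* Non-blocking variant for contexts that must not spin. */
    return read_trylock(&evtchn->lock);
}

static inline void evtchn_read_unlock(struct evtchn *evtchn)
{
    read_unlock(&evtchn->lock);
}

The dump path deliberately uses the trylock variant: it runs from a keyhandler,
so a channel whose lock is held elsewhere is merely reported as "in flux"
rather than being waited for.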