[xen staging-4.12] evtchn: evtchn_reset() shouldn't succeed with still-open ports
commit 9dda47cb702ccb9663aec9c78ac3fdc3d4076b1c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Sep 22 17:09:59 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Sep 22 17:09:59 2020 +0200

    evtchn: evtchn_reset() shouldn't succeed with still-open ports

    While the function closes all ports, it does so without holding any
    lock, and hence racing requests may be issued causing new ports to get
    opened. This would have been problematic in particular if such a newly
    opened port had a port number above the new implementation limit (i.e.
    when switching from FIFO to 2-level) after the reset, as prior to
    "evtchn: relax port_is_valid()" this could have led to e.g.
    evtchn_close()'s "BUG_ON(!port_is_valid(d2, port2))" to trigger.

    Introduce a counter of active ports and check that it's (still) no
    larger than the number of Xen internally used ones after obtaining the
    necessary lock in evtchn_reset().

    As to the access model of the new {active,xen}_evtchns fields - while
    all writes get done using write_atomic(), reads ought to use
    read_atomic() only when outside of a suitably locked region.

    Note that as of now evtchn_bind_virq() and evtchn_bind_ipi() don't have
    a need to call check_free_port().

    This is part of XSA-343.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
---
 xen/common/event_channel.c | 39 +++++++++++++++++++++++++++++++++------
 xen/include/xen/sched.h    | 10 ++++++++++
 2 files changed, 43 insertions(+), 6 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 08ffe0f063..6a566917b1 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -188,6 +188,8 @@ int evtchn_allocate_port(struct domain *d, evtchn_port_t port)
         write_atomic(&d->valid_evtchns,
                      d->valid_evtchns + EVTCHNS_PER_BUCKET);
     }
 
+    write_atomic(&d->active_evtchns, d->active_evtchns + 1);
+
     return 0;
 }
 
@@ -211,11 +213,26 @@ static int get_free_port(struct domain *d)
     return -ENOSPC;
 }
 
+/*
+ * Check whether a port is still marked free, and if so update the domain
+ * counter accordingly. To be used on function exit paths.
+ */
+static void check_free_port(struct domain *d, evtchn_port_t port)
+{
+    if ( port_is_valid(d, port) &&
+         evtchn_from_port(d, port)->state == ECS_FREE )
+        write_atomic(&d->active_evtchns, d->active_evtchns - 1);
+}
+
 void evtchn_free(struct domain *d, struct evtchn *chn)
 {
     /* Clear pending event to avoid unexpected behavior on re-bind. */
     evtchn_port_clear_pending(d, chn);
 
+    if ( consumer_is_xen(chn) )
+        write_atomic(&d->xen_evtchns, d->xen_evtchns - 1);
+    write_atomic(&d->active_evtchns, d->active_evtchns - 1);
+
     /* Reset binding to vcpu0 when the channel is freed. */
     chn->state          = ECS_FREE;
     chn->notify_vcpu_id = 0;
@@ -258,6 +275,7 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
     alloc->port = port;
 
  out:
+    check_free_port(d, port);
     spin_unlock(&d->event_lock);
     rcu_unlock_domain(d);
 
@@ -351,6 +369,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
     bind->local_port = lport;
 
  out:
+    check_free_port(ld, lport);
     spin_unlock(&ld->event_lock);
     if ( ld != rd )
         spin_unlock(&rd->event_lock);
@@ -488,7 +507,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
     struct domain *d = current->domain;
     struct vcpu   *v = d->vcpu[0];
     struct pirq   *info;
-    int            port, pirq = bind->pirq;
+    int            port = 0, pirq = bind->pirq;
     long           rc;
 
     if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
@@ -536,6 +555,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
     arch_evtchn_bind_pirq(d, pirq);
 
  out:
+    check_free_port(d, port);
     spin_unlock(&d->event_lock);
 
     return rc;
@@ -1011,10 +1031,10 @@ int evtchn_unmask(unsigned int port)
     return 0;
 }
 
-
 int evtchn_reset(struct domain *d)
 {
     unsigned int i;
+    int rc = 0;
 
     if ( d != current->domain && !d->controller_pause_count )
         return -EINVAL;
@@ -1024,7 +1044,9 @@ int evtchn_reset(struct domain *d)
 
     spin_lock(&d->event_lock);
 
-    if ( d->evtchn_fifo )
+    if ( d->active_evtchns > d->xen_evtchns )
+        rc = -EAGAIN;
+    else if ( d->evtchn_fifo )
     {
         /* Switching back to 2-level ABI. */
         evtchn_fifo_destroy(d);
@@ -1033,7 +1055,7 @@ int evtchn_reset(struct domain *d)
 
     spin_unlock(&d->event_lock);
 
-    return 0;
+    return rc;
 }
 
 static long evtchn_set_priority(const struct evtchn_set_priority *set_priority)
@@ -1219,10 +1241,9 @@ int alloc_unbound_xen_event_channel(
 
     spin_lock(&ld->event_lock);
 
-    rc = get_free_port(ld);
+    port = rc = get_free_port(ld);
     if ( rc < 0 )
         goto out;
-    port = rc;
     chn = evtchn_from_port(ld, port);
 
     rc = xsm_evtchn_unbound(XSM_TARGET, ld, chn, remote_domid);
@@ -1238,7 +1259,10 @@ int alloc_unbound_xen_event_channel(
 
     spin_unlock(&chn->lock);
 
+    write_atomic(&ld->xen_evtchns, ld->xen_evtchns + 1);
+
  out:
+    check_free_port(ld, port);
     spin_unlock(&ld->event_lock);
 
     return rc < 0 ? rc : port;
@@ -1314,6 +1338,7 @@ int evtchn_init(struct domain *d, unsigned int max_port)
         return -EINVAL;
     }
     evtchn_from_port(d, 0)->state = ECS_RESERVED;
+    write_atomic(&d->active_evtchns, 0);
 
 #if MAX_VIRT_CPUS > BITS_PER_LONG
     d->poll_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(d->max_vcpus));
@@ -1340,6 +1365,8 @@ void evtchn_destroy(struct domain *d)
     for ( i = 0; port_is_valid(d, i); i++ )
         evtchn_close(d, i, 0);
 
+    ASSERT(!d->active_evtchns);
+
     clear_global_virq_handlers(d);
 
     evtchn_fifo_destroy(d);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 0dea5ecbf5..8d0c817d7d 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -346,6 +346,16 @@ struct domain
     struct evtchn **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
     unsigned int     max_evtchn_port; /* max permitted port number      */
     unsigned int     valid_evtchns;   /* number of allocated event channels */
+    /*
+     * Number of in-use event channels. Writers should use write_atomic().
+     * Readers need to use read_atomic() only when not holding event_lock.
+     */
+    unsigned int     active_evtchns;
+    /*
+     * Number of event channels used internally by Xen (not subject to
+     * EVTCHNOP_reset). Read/write access like for active_evtchns.
+     */
+    unsigned int     xen_evtchns;
     spinlock_t       event_lock;
     const struct evtchn_port_ops *evtchn_port_ops;
     struct evtchn_fifo_domain *evtchn_fifo;
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.12
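
For reference, the counting scheme introduced by the patch can be modelled outside of Xen. The sketch below is only an illustration under assumed stand-ins, not Xen code: a pthread mutex replaces d->event_lock, C11 atomic loads/stores replace read_atomic()/write_atomic(), the port table is a small state array, and every demo_* name is invented for the example. It shows the three pieces the change combines: allocation bumps an active-port counter, exit paths drop the counter again if the port ended up staying free (the check_free_port() idea), and a reset is refused with -EAGAIN while more ports are active than the internally used ones.

/* demo_evtchn.c - standalone model of the counting scheme; not Xen code.
 * Build with:  cc -std=c11 -pthread demo_evtchn.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_NR_PORTS 16

enum demo_state { DEMO_FREE, DEMO_RESERVED };

struct demo_domain {
    pthread_mutex_t event_lock;             /* stand-in for d->event_lock */
    enum demo_state port_state[DEMO_NR_PORTS];
    /*
     * Stand-ins for active_evtchns / xen_evtchns: only ever written while
     * holding event_lock; atomic loads let lock-free readers see whole
     * values, mirroring the write_atomic()/read_atomic() access model.
     */
    atomic_uint active_ports;
    atomic_uint internal_ports;
};

/* Allocate a free port and bump the active counter; caller holds the lock. */
static int demo_alloc_port(struct demo_domain *d)
{
    for ( unsigned int p = 0; p < DEMO_NR_PORTS; p++ )
    {
        if ( d->port_state[p] != DEMO_FREE )
            continue;
        d->port_state[p] = DEMO_RESERVED;
        atomic_store(&d->active_ports, atomic_load(&d->active_ports) + 1);
        return (int)p;
    }
    return -ENOSPC;
}

/*
 * Exit-path helper in the spirit of check_free_port(): if a later step
 * failed and the port was put back to FREE, undo the counter bump before
 * the lock is dropped, so the counter never drifts.
 */
static void demo_check_free_port(struct demo_domain *d, int port)
{
    if ( port >= 0 && port < DEMO_NR_PORTS &&
         d->port_state[port] == DEMO_FREE )
        atomic_store(&d->active_ports, atomic_load(&d->active_ports) - 1);
}

/* Refuse a reset while any port beyond the internal ones is still open. */
static int demo_reset(struct demo_domain *d)
{
    int rc = 0;

    pthread_mutex_lock(&d->event_lock);
    if ( atomic_load(&d->active_ports) > atomic_load(&d->internal_ports) )
        rc = -EAGAIN;                    /* caller closes ports and retries */
    /* else: tear down / switch the ABI here */
    pthread_mutex_unlock(&d->event_lock);

    return rc;
}

int main(void)
{
    struct demo_domain d = { .event_lock = PTHREAD_MUTEX_INITIALIZER };
    int port;

    pthread_mutex_lock(&d.event_lock);
    port = demo_alloc_port(&d);        /* port stays RESERVED ("open") ... */
    demo_check_free_port(&d, port);    /* ... so no rollback happens here  */
    pthread_mutex_unlock(&d.event_lock);

    /* With one open, non-internal port the reset must be refused. */
    printf("reset while port %d is open: %d (-EAGAIN is %d)\n",
           port, demo_reset(&d), -EAGAIN);
    return 0;
}

Running this should report -EAGAIN, mirroring how evtchn_reset() now bails out under the lock until the remaining guest ports have been closed, instead of racing with ports being opened while it tears the table down.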