[Xen-devel] [PATCH RFC 1/4] xen: evtchn: make evtchn_reset() ready for soft reset
We need to close all event channels so the domain performing soft reset
will be able to reopen them. Interdomain channels are, however, special:
we need to keep track of who opened each one, because in the (most
common) case where it was opened by the control domain, the domain being
reset won't be able (or allowed) to re-establish it.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 xen/common/event_channel.c | 52 +++++++++++++++++++++++++++-------------------
 xen/include/xen/event.h    |  3 +++
 xen/include/xen/sched.h    |  1 +
 3 files changed, 35 insertions(+), 21 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index fae242d..3204c74 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -274,11 +274,13 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 
     lchn->u.interdomain.remote_dom  = rd;
     lchn->u.interdomain.remote_port = rport;
+    lchn->u.interdomain.opened_by   = current->domain;
     lchn->state                     = ECS_INTERDOMAIN;
     evtchn_port_init(ld, lchn);
 
     rchn->u.interdomain.remote_dom  = ld;
     rchn->u.interdomain.remote_port = lport;
+    rchn->u.interdomain.opened_by   = current->domain;
     rchn->state                     = ECS_INTERDOMAIN;
 
     /*
@@ -933,26 +935,30 @@ int evtchn_unmask(unsigned int port)
 }
 
 
-static long evtchn_reset(evtchn_reset_t *r)
+void evtchn_reset(struct domain *d, bool_t soft_reset)
 {
-    domid_t dom = r->dom;
-    struct domain *d;
-    int i, rc;
-
-    d = rcu_lock_domain_by_any_id(dom);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = xsm_evtchn_reset(XSM_TARGET, current->domain, d);
-    if ( rc )
-        goto out;
+    int i;
+    struct evtchn *chn;
 
+    /*
+     * ECS_INTERDOMAIN channels with port number suitable for the 2-level ABI
+     * opened by other domains should remain opened as the domain doing soft
+     * reset won't be able to reopen them.
+     * __evtchn_close() also leaves consumer_is_xen() channels open.
+     */
     for ( i = 0; port_is_valid(d, i); i++ )
+    {
+        chn = evtchn_from_port(d, i);
+        if ( !soft_reset ||
+             i >= (BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d)) ||
+             chn->state != ECS_INTERDOMAIN ||
+             chn->u.interdomain.opened_by == d )
             (void)__evtchn_close(d, i);
+    }
 
     spin_lock(&d->event_lock);
 
-    if ( (dom == DOMID_SELF) && d->evtchn_fifo )
+    if ( (d == current->domain) && d->evtchn_fifo )
     {
         /*
          * Guest domain called EVTCHNOP_reset with DOMID_SELF, destroying
@@ -964,13 +970,6 @@ static long evtchn_reset(evtchn_reset_t *r)
     }
 
     spin_unlock(&d->event_lock);
-
-    rc = 0;
-
-out:
-    rcu_unlock_domain(d);
-
-    return rc;
 }
 
 static long evtchn_set_priority(const struct evtchn_set_priority *set_priority)
@@ -1097,9 +1096,20 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     case EVTCHNOP_reset: {
         struct evtchn_reset reset;
+        struct domain *d;
+
         if ( copy_from_guest(&reset, arg, 1) != 0 )
             return -EFAULT;
-        rc = evtchn_reset(&reset);
+
+        d = rcu_lock_domain_by_any_id(reset.dom);
+        if ( d == NULL )
+            return -ESRCH;
+
+        rc = xsm_evtchn_reset(XSM_TARGET, current->domain, d);
+        if ( !rc )
+            evtchn_reset(d, 0);
+
+        rcu_unlock_domain(d);
         break;
     }
 
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 690f865..d0479a6 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -130,6 +130,9 @@ void evtchn_check_pollers(struct domain *d, unsigned int port);
 
 void evtchn_2l_init(struct domain *d);
 
+/* Close all event channels and reset to 2-level ABI */
+void evtchn_reset(struct domain *d, bool_t soft_reset);
+
 /*
  * Low-level event channel port ops.
  */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 80c6f62..13b6b86 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -98,6 +98,7 @@ struct evtchn
         struct {
             evtchn_port_t remote_port;
             struct domain *remote_dom;
+            struct domain *opened_by;
         } interdomain; /* state == ECS_INTERDOMAIN */
         struct {
             u32 irq;
-- 
1.9.3
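
For reviewers, here is a minimal standalone restatement of the keep-or-close
decision the new loop in evtchn_reset() implements. The helper function is
hypothetical (it does not exist in the patch); the condition is taken
verbatim from the hunk above, assuming the usual Xen definitions of
struct domain, struct evtchn, ECS_INTERDOMAIN and BITS_PER_EVTCHN_WORD():

    /*
     * Hypothetical helper, for illustration only: returns 1 when a port
     * should be closed during a reset. On soft reset we spare exactly the
     * interdomain channels that (a) still fit in the 2-level ABI's port
     * range, i.e. port < BITS_PER_EVTCHN_WORD(d)^2, and (b) were opened
     * by some other domain (typically the control domain), since the
     * domain being reset cannot re-establish those itself.
     */
    static bool_t evtchn_should_close(const struct domain *d,
                                      const struct evtchn *chn,
                                      int port, bool_t soft_reset)
    {
        if ( !soft_reset )
            return 1;  /* full reset: close every channel */
        if ( port >= BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d) )
            return 1;  /* unreachable via the 2-level ABI after reset */
        if ( chn->state != ECS_INTERDOMAIN )
            return 1;  /* only interdomain channels are spared */
        /* Keep the channel only if a foreign domain opened it. */
        return chn->u.interdomain.opened_by == d;
    }

A guest invoking the unchanged EVTCHNOP_reset path would still look roughly
like this (a sketch, assuming the standard guest-side hypercall wrapper
HYPERVISOR_event_channel_op()):

    struct evtchn_reset reset = { .dom = DOMID_SELF };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_reset, &reset);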