[Xen-devel] [RFC PATCH V4 16/18] Implement 3-level event channel routines
The 3-level event channel ABI is fully functional at this point, so set the
corresponding bit in the ABI bitmap as well.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/common/event_channel.c |  124 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 108 insertions(+), 16 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 4cf172b..504d769 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -33,7 +33,22 @@
 #include <xsm/xsm.h>
 
 /* A bitmap of supported extended event channel ABIs */
-uint64_t extended_event_channel = EVTCHN_EXTENDED_NONE;
+uint64_t extended_event_channel = (EVTCHN_EXTENDED_NONE |
+                                   EVTCHN_EXTENDED_L3);
+
+static inline const char *evtchn_abi_str(unsigned int abi)
+{
+    switch ( abi )
+    {
+    case EVTCHN_EXTENDED_NONE:
+        return "2-level";
+    case EVTCHN_EXTENDED_L3:
+        return "3-level";
+    default:
+        BUG();
+    }
+    return ""; /* make compiler happy */
+}
 
 #define ERROR_EXIT(_errno)                                          \
     do {                                                            \
@@ -625,10 +640,33 @@ out:
     return ret;
 }
 
+static void __check_vcpu_polling(struct vcpu *v, int port)
+{
+    int vcpuid;
+    struct domain *d = v->domain;
+
+    /* Check if some VCPU might be polling for this event. */
+    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+        return;
+
+    /* Wake any interested (or potentially interested) pollers. */
+    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+          vcpuid < d->max_vcpus;
+          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    {
+        v = d->vcpu[vcpuid];
+        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+             test_and_clear_bit(vcpuid, d->poll_mask) )
+        {
+            v->poll_evtchn = 0;
+            vcpu_unblock(v);
+        }
+    }
+}
+
 static void evtchn_set_pending_l2(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
-    int vcpuid;
 
     /*
      * The following bit operations must happen in strict order.
@@ -647,23 +685,36 @@ static void evtchn_set_pending_l2(struct vcpu *v, int port)
         vcpu_mark_events_pending(v);
     }
 
-    /* Check if some VCPU might be polling for this event. */
-    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+    __check_vcpu_polling(v, port);
+}
+
+static void evtchn_set_pending_l3(struct vcpu *v, int port)
+{
+    struct domain *d = v->domain;
+    unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+    unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+    if ( unlikely(!v->evtchn_pending_sel_l2) )
         return;
 
-    /* Wake any interested (or potentially interested) pollers. */
-    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
-          vcpuid < d->max_vcpus;
-          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+    /*
+     * The following bit operations must happen in strict order.
+     * NB. On x86, the atomic bit operations also act as memory barriers.
+     * There is therefore sufficiently strict ordering for this
+     * architecture -- others may require explicit memory barriers.
+     */
+
+    if ( test_and_set_bit(port, d->evtchn_pending) )
+        return;
+
+    if ( !test_bit(port, d->evtchn_mask) &&
+         !test_and_set_bit(l2bit, v->evtchn_pending_sel_l2) &&
+         !test_and_set_bit(l1bit, &vcpu_info(v, evtchn_pending_sel)) )
     {
-        v = d->vcpu[vcpuid];
-        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
-             test_and_clear_bit(vcpuid, d->poll_mask) )
-        {
-            v->poll_evtchn = 0;
-            vcpu_unblock(v);
-        }
+        vcpu_mark_events_pending(v);
     }
+
+    __check_vcpu_polling(v, port);
 }
 
 static void evtchn_set_pending(struct vcpu *v, int port)
@@ -675,6 +726,9 @@ static void evtchn_set_pending(struct vcpu *v, int port)
     case EVTCHN_EXTENDED_NONE:
         evtchn_set_pending_l2(v, port);
         break;
+    case EVTCHN_EXTENDED_L3:
+        evtchn_set_pending_l3(v, port);
+        break;
     default:
         BUG();
     }
@@ -975,6 +1029,38 @@ static int evtchn_unmask_l2(unsigned int port)
     return 0;
 }
 
+static int evtchn_unmask_l3(unsigned int port)
+{
+    struct domain *d = current->domain;
+    struct vcpu   *v;
+    unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+    unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+    ASSERT(spin_is_locked(&d->event_lock));
+
+    if ( unlikely(!port_is_valid(d, port)) )
+        return -EINVAL;
+
+    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
+
+    if ( unlikely(!v->evtchn_pending_sel_l2) )
+        return -EINVAL;
+
+    /*
+     * These operations must happen in strict order. Based on
+     * include/xen/event.h:evtchn_set_pending().
+     */
+    if ( test_and_clear_bit(port, d->evtchn_mask) &&
+         test_bit         (port, d->evtchn_pending) &&
+         !test_and_set_bit(l2bit, v->evtchn_pending_sel_l2) &&
+         !test_and_set_bit(l1bit, &vcpu_info(v, evtchn_pending_sel)) )
+    {
+        vcpu_mark_events_pending(v);
+    }
+
+    return 0;
+}
+
 int evtchn_unmask(unsigned int port)
 {
     struct domain *d = current->domain;
@@ -985,6 +1071,9 @@ int evtchn_unmask(unsigned int port)
     case EVTCHN_EXTENDED_NONE:
        rc = evtchn_unmask_l2(port);
        break;
+    case EVTCHN_EXTENDED_L3:
+        rc = evtchn_unmask_l3(port);
+        break;
     default:
         BUG();
     }
@@ -1546,8 +1635,11 @@ static void domain_dump_evtchn_info(struct domain *d)
     bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
                          d->poll_mask, d->max_vcpus);
     printk("Event channel information for domain %d:\n"
+           "Using %s event channel ABI\n"
            "Polling vCPUs: {%s}\n"
-           "    port [p/m]\n", d->domain_id, keyhandler_scratch);
+           "    port [p/m]\n",
+           d->domain_id, evtchn_abi_str(d->evtchn_extended),
+           keyhandler_scratch);
 
     spin_lock(&d->event_lock);
--
1.7.10.4
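
For anyone new to the layout, below is a minimal standalone sketch of how a
port number decomposes across the three levels. It is illustrative only and
not part of the patch: it assumes a 64-bit guest where EVTCHN_WORD_BITORDER(d)
evaluates to 6 (one event word covers 2^6 = 64 ports); the WORD_BITORDER
constant and the main() harness are hypothetical stand-ins.

/*
 * Illustrative sketch, not part of the patch: decompose a port number
 * under the 3-level scheme. Assumes 64-bit event words, i.e. the
 * patch's EVTCHN_WORD_BITORDER(d) would evaluate to 6.
 */
#include <stdio.h>

#define WORD_BITORDER 6 /* assumed stand-in for EVTCHN_WORD_BITORDER(d) */

int main(void)
{
    unsigned int port = 100000; /* any port below 64^3 = 262144 */

    /* L3: bit within the shared d->evtchn_pending bitmap (the port itself). */
    unsigned int l3bit = port;
    /* L2: one bit per L3 word, in the per-VCPU evtchn_pending_sel_l2 array. */
    unsigned int l2bit = port >> WORD_BITORDER;
    /* L1: one bit per L2 word, in vcpu_info's evtchn_pending_sel. */
    unsigned int l1bit = port >> (WORD_BITORDER << 1);

    printf("port %u -> l1 bit %u, l2 bit %u, l3 bit %u\n",
           port, l1bit, l2bit, l3bit);
    return 0;
}

A guest scans top-down (the l1 selector, then the selected l2 word, then the
selected l3 word), which is why evtchn_set_pending_l3() sets bits bottom-up
and only calls vcpu_mark_events_pending() when the l1 bit transitions from
clear to set. With 64-bit words this addresses up to 64^3 = 262144 ports,
versus 64^2 = 4096 for the 2-level ABI.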