[Xen-devel] [PATCH 15/16] Infrastructure for manipulating 3-level event channel pages

NOTE: the registration call always fails because other parts of the
code are not yet complete.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/common/event_channel.c | 278 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 278 insertions(+)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 1ce97b0..c448c60 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -26,6 +26,7 @@
 #include <xen/compat.h>
 #include <xen/guest_access.h>
 #include <xen/keyhandler.h>
+#include <xen/paging.h>
 
 #include <asm/current.h>
 #include <public/xen.h>
@@ -1024,6 +1025,258 @@ out:
 }
 
+static long __map_l3_arrays(struct domain *d, xen_pfn_t *pending,
+                            xen_pfn_t *mask, int nr_pages)
+{
+    int rc;
+    void *mapping;
+    struct page_info *pginfo;
+    unsigned long gfn;
+    int pending_count = 0, mask_count = 0;
+
+#define __MAP(src, dst, cnt)                                    \
+    for ( (cnt) = 0; (cnt) < nr_pages; (cnt)++ )                \
+    {                                                           \
+        rc = -EINVAL;                                           \
+        gfn = (src)[(cnt)];                                     \
+        pginfo = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);    \
+        if ( !pginfo )                                          \
+            goto err;                                           \
+        if ( !get_page_type(pginfo, PGT_writable_page) )        \
+        {                                                       \
+            put_page(pginfo);                                   \
+            goto err;                                           \
+        }                                                       \
+        mapping = __map_domain_page_global(pginfo);             \
+        if ( !mapping )                                         \
+        {                                                       \
+            put_page_and_type(pginfo);                          \
+            rc = -ENOMEM;                                       \
+            goto err;                                           \
+        }                                                       \
+        (dst)[(cnt)] = mapping;                                 \
+    }
+
+    __MAP(pending, d->evtchn_pending, pending_count)
+    __MAP(mask, d->evtchn_mask, mask_count)
+#undef __MAP
+
+    rc = 0;
+
+ err:
+    return rc;
+}
+
+static void __unmap_l3_arrays(struct domain *d)
+{
+    int i;
+    unsigned long mfn;
+
+    for ( i = 0; i < EVTCHN_MAX_L3_PAGES; i++ )
+    {
+        if ( d->evtchn_pending[i] != 0 )
+        {
+            mfn = domain_page_map_to_mfn(d->evtchn_pending[i]);
+            unmap_domain_page_global(d->evtchn_pending[i]);
+            put_page_and_type(mfn_to_page(mfn));
+            d->evtchn_pending[i] = 0;
+        }
+        if ( d->evtchn_mask[i] != 0 )
+        {
+            mfn = domain_page_map_to_mfn(d->evtchn_mask[i]);
+            unmap_domain_page_global(d->evtchn_mask[i]);
+            put_page_and_type(mfn_to_page(mfn));
+            d->evtchn_mask[i] = 0;
+        }
+    }
+}
+
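+/*
+ * Map the guest page holding a vcpu's L2 selector word.  The guest
+ * passes a frame number and a byte offset into that frame; the
+ * resulting pointer is stored in v->evtchn_pending_sel_l2 and the
+ * mapping is torn down again in __unmap_l2_selector().
+ */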
+static long __map_l2_selector(struct vcpu *v, unsigned long gfn,
+                              unsigned long off)
+{
+    void *mapping;
+    int rc;
+    struct page_info *page;
+    struct domain *d = v->domain;
+
+    rc = -EINVAL; /* common errno for following operations */
+
+    /* Sanity check: the L2 selector has a maximum size of
+     * sizeof(unsigned long) * 8; this equals the size of the shared
+     * bitmap array of the 2-level event channel ABI. */
+    if ( off + sizeof(unsigned long) * 8 >= PAGE_SIZE )
+        goto out;
+
+    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+    if ( !page )
+        goto out;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        goto out;
+    }
+
+    /* Use a global mapping here, because we need to map the selector
+     * for another vcpu (v != current).  However, this mapping is only
+     * used by v while the guest is running. */
+    mapping = __map_domain_page_global(page);
+
+    if ( mapping == NULL )
+    {
+        put_page_and_type(page);
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    v->evtchn_pending_sel_l2 = mapping + off;
+    rc = 0;
+
+ out:
+    return rc;
+}
+
+static void __unmap_l2_selector(struct vcpu *v)
+{
+    unsigned long mfn;
+
+    if ( v->evtchn_pending_sel_l2 )
+    {
+        mfn = domain_page_map_to_mfn(v->evtchn_pending_sel_l2);
+        unmap_domain_page_global(v->evtchn_pending_sel_l2);
+        put_page_and_type(mfn_to_page(mfn));
+        v->evtchn_pending_sel_l2 = NULL;
+    }
+}
+
+static void __evtchn_unmap_all_3level(struct domain *d)
+{
+    struct vcpu *v;
+    for_each_vcpu ( d, v )
+        __unmap_l2_selector(v);
+    __unmap_l3_arrays(d);
+}
+
+static void __evtchn_setup_bitmap_l3(struct domain *d)
+{
+    struct vcpu *v;
+
+    /* Easy way to set up the 3-level bitmap: move each vcpu's
+     * existing selector down to the next level, then copy the
+     * pending and mask arrays. */
+    for_each_vcpu ( d, v )
+    {
+        memcpy(&v->evtchn_pending_sel_l2[0],
+               &vcpu_info(v, evtchn_pending_sel),
+               sizeof(vcpu_info(v, evtchn_pending_sel)));
+        memset(&vcpu_info(v, evtchn_pending_sel), 0,
+               sizeof(vcpu_info(v, evtchn_pending_sel)));
+        set_bit(0, &vcpu_info(v, evtchn_pending_sel));
+    }
+
+    memcpy(d->evtchn_pending[0], &shared_info(d, evtchn_pending),
+           sizeof(shared_info(d, evtchn_pending)));
+    memcpy(d->evtchn_mask[0], &shared_info(d, evtchn_mask),
+           sizeof(shared_info(d, evtchn_mask)));
+}
+
+static long evtchn_register_3level(evtchn_register_3level_t *arg)
+{
+    struct domain *d = current->domain;
+    struct vcpu *v;
+    int rc = 0;
+    xen_pfn_t evtchn_pending[EVTCHN_MAX_L3_PAGES];
+    xen_pfn_t evtchn_mask[EVTCHN_MAX_L3_PAGES];
+    xen_pfn_t l2sel_mfn = 0;
+    xen_pfn_t l2sel_offset = 0;
+
+    if ( d->evtchn_level == EVTCHN_3_LEVEL )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( arg->nr_vcpus > d->max_vcpus ||
+         arg->nr_pages > EVTCHN_MAX_L3_PAGES )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    memset(evtchn_pending, 0, sizeof(xen_pfn_t) * EVTCHN_MAX_L3_PAGES);
+    memset(evtchn_mask, 0, sizeof(xen_pfn_t) * EVTCHN_MAX_L3_PAGES);
+
+#define __COPY_ARRAY(_d, _s, _nr)                     \
+    do {                                              \
+        if ( copy_from_guest((_d), (_s), (_nr)) )     \
+        {                                             \
+            rc = -EFAULT;                             \
+            goto out;                                 \
+        }                                             \
+    } while (0)
+    __COPY_ARRAY(evtchn_pending, arg->evtchn_pending, arg->nr_pages);
+    __COPY_ARRAY(evtchn_mask, arg->evtchn_mask, arg->nr_pages);
+#undef __COPY_ARRAY
+
+    rc = __map_l3_arrays(d, evtchn_pending, evtchn_mask, arg->nr_pages);
+    if ( rc )
+        goto out;
+
+    for_each_vcpu ( d, v )
+    {
+        int vcpu_id = v->vcpu_id;
+
+        if ( unlikely(copy_from_guest_offset(&l2sel_mfn, arg->l2sel_mfns,
+                                             vcpu_id, 1)) )
+        {
+            rc = -EFAULT;
+            __evtchn_unmap_all_3level(d);
+            goto out;
+        }
+        if ( unlikely(copy_from_guest_offset(&l2sel_offset, arg->l2sel_offsets,
+                                             vcpu_id, 1)) )
+        {
+            rc = -EFAULT;
+            __evtchn_unmap_all_3level(d);
+            goto out;
+        }
+        if ( (rc = __map_l2_selector(v, l2sel_mfn, l2sel_offset)) )
+        {
+            __evtchn_unmap_all_3level(d);
+            goto out;
+        }
+    }
+
+    __evtchn_setup_bitmap_l3(d);
+
+    d->evtchn_level = EVTCHN_3_LEVEL;
+
+    rc = 0;
+
+ out:
+    return rc;
+}
+
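+/*
+ * Dispatcher for the registration call.  Registration is serialised
+ * against other event channel operations by d->event_lock.
+ */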
+static long evtchn_register_nlevel(struct evtchn_register_nlevel *reg)
+{
+    struct domain *d = current->domain;
+    int rc;
+
+    spin_lock(&d->event_lock);
+
+    switch ( reg->level )
+    {
+    case EVTCHN_3_LEVEL:
+        rc = evtchn_register_3level(&reg->u.l3);
+        break;
+    default:
+        rc = -EINVAL;
+    }
+
+    spin_unlock(&d->event_lock);
+
+    return rc;
+}
+
 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     long rc;
@@ -1132,6 +1385,18 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case EVTCHNOP_register_nlevel: {
+        struct evtchn_register_nlevel reg;
+        if ( copy_from_guest(&reg, arg, 1) != 0 )
+            return -EFAULT;
+        rc = evtchn_register_nlevel(&reg);
+
+        /* XXX: always fail this call; the implementation is not yet complete. */
+        rc = -EINVAL;
+
+        break;
+    }
+
     default:
         rc = -ENOSYS;
         break;
@@ -1258,6 +1523,17 @@ int evtchn_init(struct domain *d)
     return 0;
 }
 
+static void evtchn_unmap_nlevel(struct domain *d)
+{
+    switch ( d->evtchn_level )
+    {
+    case EVTCHN_3_LEVEL:
+        __evtchn_unmap_all_3level(d);
+        break;
+    default:
+        break;
+    }
+}
 
 void evtchn_destroy(struct domain *d)
 {
@@ -1286,6 +1562,8 @@ void evtchn_destroy(struct domain *d)
 
     clear_global_virq_handlers(d);
 
+    evtchn_unmap_nlevel(d);
+
     free_xenheap_page(d->evtchn);
 }
-- 
1.7.10.4
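
For review context, the guest side of this call would look roughly like the
sketch below. This is not part of the patch: the field names are inferred
from the handler code above, EVTCHNOP_register_nlevel and struct
evtchn_register_nlevel are assumed to be defined elsewhere in this series,
and alloc_evtchn_gfn(), selector_gfn_for_vcpu() and
selector_offset_for_vcpu() are hypothetical placeholders for whatever page
allocation helpers a guest kernel provides.

/* Guest-side registration sketch -- not part of this patch. */
static int register_3level_evtchn(unsigned int nr_vcpus, unsigned int nr_pages)
{
    struct evtchn_register_nlevel reg;
    static xen_pfn_t pending[EVTCHN_MAX_L3_PAGES];
    static xen_pfn_t mask[EVTCHN_MAX_L3_PAGES];
    static xen_pfn_t l2sel_mfns[MAX_VIRT_CPUS];    /* bound assumed for illustration */
    static xen_pfn_t l2sel_offsets[MAX_VIRT_CPUS];
    unsigned int i;

    /* One pending page and one mask page per level-3 page in use. */
    for ( i = 0; i < nr_pages; i++ )
    {
        pending[i] = alloc_evtchn_gfn();  /* hypothetical helper, returns a GFN */
        mask[i] = alloc_evtchn_gfn();     /* hypothetical helper */
    }

    /* Each vcpu names the frame and byte offset of its L2 selector. */
    for ( i = 0; i < nr_vcpus; i++ )
    {
        l2sel_mfns[i] = selector_gfn_for_vcpu(i);       /* hypothetical */
        l2sel_offsets[i] = selector_offset_for_vcpu(i); /* hypothetical */
    }

    memset(&reg, 0, sizeof(reg));
    reg.level = EVTCHN_3_LEVEL;
    reg.u.l3.nr_vcpus = nr_vcpus;
    reg.u.l3.nr_pages = nr_pages;
    set_xen_guest_handle(reg.u.l3.evtchn_pending, pending);
    set_xen_guest_handle(reg.u.l3.evtchn_mask, mask);
    set_xen_guest_handle(reg.u.l3.l2sel_mfns, l2sel_mfns);
    set_xen_guest_handle(reg.u.l3.l2sel_offsets, l2sel_offsets);

    /* With this patch alone the hypervisor still returns -EINVAL. */
    return HYPERVISOR_event_channel_op(EVTCHNOP_register_nlevel, &reg);
}

Note that despite the l2sel_mfns name, the handler treats the values as
guest frame numbers and resolves them with get_page_from_gfn().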