[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 10/13] xen: introduce xen_event_channel_register_3level



On Thu, Jan 31, 2013 at 02:47:04PM +0000, Wei Liu wrote:
> Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> ---
>  drivers/xen/events.c |   94 
> ++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 94 insertions(+)
> 
> diff --git a/drivers/xen/events.c b/drivers/xen/events.c
> index d953e81..9038211 100644
> --- a/drivers/xen/events.c
> +++ b/drivers/xen/events.c
> @@ -53,6 +53,9 @@
>  
>  /* Helper macro(s) */
>  #define LONG_BITORDER (BITS_PER_LONG == 64 ? 6 : 5)

Can you provide a comment explaining why the value is '6' or '5'?

> +/* event bitmap size: 1 page for 32 bit and 8 pages for 64 bit */
> +#define BITMAP_PG_ORDER (BITS_PER_LONG == 64 ? 3 : 1)
> +#define BITMAP_NR_PAGES (BITMAP_PG_ORDER == 3 ? 8 : 1)

Is there some math behind this? Could you provide a comment explaining
why eight pages are needed on 64-bit versus only one page on
32-bit?

>  
>  /* N-level event channel, starting from 2 */
>  unsigned int evtchn_level = 2;
> @@ -2123,6 +2126,97 @@ void xen_callback_vector(void)
>  void xen_callback_vector(void) {}
>  #endif
>  
> +static int xen_event_channel_register_3level(void)
> +{
> +     evtchn_register_nlevel_t reg;

Please no typedefs.

> +     int i, cpu;
> +     unsigned long *_evtchn_pending = NULL;
> +     unsigned long *_evtchn_mask = NULL;
> +     unsigned long *l2sel_mfns = NULL;
> +     unsigned long *l2sel_offsets = NULL;
> +     int rc;
> +
> +     /* If we come from restore path, we don't need to allocate
> +      * pages.
> +      */
> +     if (!evtchn_pending && !evtchn_mask) {
> +             evtchn_pending =
> +                     (unsigned long *)__get_free_pages(GFP_KERNEL,
> +                                                       BITMAP_PG_ORDER);
> +             evtchn_mask =
> +                     (unsigned long *)__get_free_pages(GFP_KERNEL,
> +                                                       BITMAP_PG_ORDER);
> +             if (!evtchn_pending || !evtchn_mask) {
> +                     free_pages((unsigned long)evtchn_pending, 
> BITMAP_NR_PAGES);
> +                     free_pages((unsigned long)evtchn_mask, BITMAP_NR_PAGES);
> +                     evtchn_pending = NULL;
> +                     evtchn_mask = NULL;
> +                     rc = -ENOMEM;
> +                     goto err;
> +             }
> +     }
> +
> +     rc = -ENOMEM; /* Common error code for following operations */
> +#define __ALLOC_ARRAY(_ptr, _nr)                                     \
> +     do {                                                            \
> +             (_ptr) = kzalloc(sizeof(unsigned long) * (_nr),         \
> +                              GFP_KERNEL);                           \
> +             if (!(_ptr))                                            \
> +                     goto out;                                       \
> +     } while (0)
> +
> +     __ALLOC_ARRAY(_evtchn_pending, BITMAP_NR_PAGES);
> +     __ALLOC_ARRAY(_evtchn_mask, BITMAP_NR_PAGES);
> +     __ALLOC_ARRAY(l2sel_mfns, nr_cpu_ids);
> +     __ALLOC_ARRAY(l2sel_offsets, nr_cpu_ids);
> +#undef __ALLOC_ARRAY
> +
> +     memset(&reg, 0, sizeof(reg));
> +
> +     for (i = 0; i < BITMAP_NR_PAGES; i++) {
> +             unsigned long offset = PAGE_SIZE * i;
> +             _evtchn_pending[i] =
> +                     arbitrary_virt_to_mfn(
> +                             (void *)((unsigned long)evtchn_pending+offset));
> +             _evtchn_mask[i] =
> +                     arbitrary_virt_to_mfn(
> +                             (void *)((unsigned long)evtchn_mask+offset));
> +     }
> +
> +     for_each_possible_cpu(cpu) {
> +             l2sel_mfns[cpu] =
> +                     arbitrary_virt_to_mfn(&per_cpu(evtchn_sel_l2, cpu));
> +             l2sel_offsets[cpu] =
> +                     offset_in_page(&per_cpu(evtchn_sel_l2, cpu));
> +     }
> +
> +     reg.u.l3.nr_pages = BITMAP_NR_PAGES;
> +     reg.u.l3.evtchn_pending = _evtchn_pending;
> +     reg.u.l3.evtchn_mask = _evtchn_mask;
> +
> +     reg.u.l3.nr_vcpus = nr_cpu_ids;
> +     reg.u.l3.l2sel_mfns = l2sel_mfns;
> +     reg.u.l3.l2sel_offsets = l2sel_offsets;
> +
> +     reg.level = 3;
> +
> +     rc = HYPERVISOR_event_channel_op(EVTCHNOP_register_nlevel, &reg);
> +     if (rc) {
> +             free_pages((unsigned long)evtchn_pending, BITMAP_NR_PAGES);
> +             free_pages((unsigned long)evtchn_mask, BITMAP_NR_PAGES);
> +             evtchn_pending = NULL;
> +             evtchn_mask = NULL;
> +     }
> +
> +out:
> +     kfree(_evtchn_pending);
> +     kfree(_evtchn_mask);
> +     kfree(l2sel_mfns);
> +     kfree(l2sel_offsets);

So is it OK to just free these even on success??
> +err:
> +     return rc;
> +}
> +
>  void __init xen_init_IRQ(void)
>  {
>       int i, rc;
> -- 
> 1.7.10.4
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.