
Re: [Xen-devel] [PATCH v5 5/8] xen, common: add the XEN_DOMCTL_memory_mapping hypercall



>>> On 07.04.14 at 01:31, <avanzini.arianna@xxxxxxxxx> wrote:
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -818,6 +818,77 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) 
> u_domctl)
>      }
>      break;
>  
> +    case XEN_DOMCTL_memory_mapping:
> +    {
> +        unsigned long gfn = op->u.memory_mapping.first_gfn;
> +        unsigned long mfn = op->u.memory_mapping.first_mfn;
> +        unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
> +        unsigned long mfn_end = mfn + nr_mfns - 1;
> +        unsigned long gfn_end = gfn + nr_mfns - 1;
> +        int add = op->u.memory_mapping.add_mapping;
> +
> +        ret = -EINVAL;
> +        if ( (mfn_end - 1) < mfn || /* wrap? */
> +             ((mfn | (mfn_end - 1)) >> (paddr_bits - PAGE_SHIFT)) ||
> +             (gfn_end - 1) < gfn ) /* wrap? */

You subtracted 1 from [mg]fn_end above already.
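With nr_mfns == 1 the checks as written would even reject a valid
single-page request (mfn_end == mfn, so "(mfn_end - 1) < mfn" is true for
any non-zero mfn). A rough sketch of what I'd expect instead (untested,
just to illustrate the point):

            ret = -EINVAL;
            if ( mfn_end < mfn ||                               /* wrap? */
                 ((mfn | mfn_end) >> (paddr_bits - PAGE_SHIFT)) ||
                 gfn_end < gfn )                                /* wrap? */
                return ret;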

> +            return ret;
> +
> +        ret = -EPERM;
> +        if ( !iomem_access_permitted(current->domain, mfn, mfn_end) )
> +            return ret;
> +
> +        ret = xsm_iomem_mapping(XSM_HOOK, d, mfn, mfn_end, add);
> +        if ( ret )
> +            return ret;
> +
> +        if ( add )
> +        {
> +            printk(XENLOG_G_INFO
> +                   "memory_map: add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> +                   d->domain_id, gfn, mfn, nr_mfns);
> +            ret = iomem_permit_access(d, mfn, mfn_end);
> +            if ( !ret )
> +            {
> +                ret = map_mmio_regions(d, gfn, gfn_end, mfn);
> +                if ( ret )
> +                {
> +                    printk(XENLOG_G_WARNING
> +                           "memory_map: fail: dom%d gfn=%lx mfn=%lx\n",
> +                           d->domain_id, gfn, mfn);
> +                    if ( iomem_deny_access(d, mfn, mfn_end) &&
> +                         is_hardware_domain(current->domain) )
> +                        printk(XENLOG_ERR
> +                               "memory_map: failed to deny dom%d access "
> +                               "to [%lx,%lx]\n",
> +                               d->domain_id, mfn, mfn_end);
> +                }
> +            }
> +        }
> +        else
> +        {
> +         long unmap_ret;

Hard tab.

> +
> +            printk(XENLOG_G_INFO
> +                   "memory_map: remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> +                   d->domain_id, gfn, mfn, nr_mfns);
> +
> +            unmap_ret = unmap_mmio_regions(d, gfn, gfn_end, mfn);
> +            ret = iomem_deny_access(d, mfn, mfn_end);
> +            /*
> +             * Let an error value returned by iomem_deny_access() prevail on
> +             * the one possibly returned by unmap_mmio_regions().
> +             */

I would omit this comment: it's not that important which of the two
errors we return; what is important is that we don't drop either of the
two possible failures.
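I.e. (if the code following the quoted comment doesn't do so already)
something along the lines of

            unmap_ret = unmap_mmio_regions(d, gfn, gfn_end, mfn);
            ret = iomem_deny_access(d, mfn, mfn_end);
            if ( !ret )
                ret = unmap_ret;    /* don't drop the unmap failure */

is all that's needed, without the explanatory comment.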

> --- /dev/null
> +++ b/xen/include/xen/p2m-common.h
> @@ -0,0 +1,16 @@
> +#ifndef _XEN_P2M_COMMON_H
> +#define _XEN_P2M_COMMON_H
> +
> +/* Map MMIO regions in the p2m: start_gfn and end_gfn is the range

To make things entirely obvious, please add the word "inclusive" here.
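E.g. (wording suggestion only):

 /* Map MMIO regions in the p2m: start_gfn and end_gfn is the range
  * (both inclusive) in the guest physical address space to map,
  * starting from the machine frame number mfn. */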

> + * in the guest physical address space to map, starting from the
> + * machine frame number mfn. */
> +int map_mmio_regions(struct domain *d,
> +                     unsigned long start_gfn,
> +                     unsigned long end_gfn,
> +                     unsigned long mfn);
> +int unmap_mmio_regions(struct domain *d,
> +                       unsigned long start_gfn,
> +                       unsigned long end_gfn,
> +                       unsigned long mfn);
> +
> +#endif /* _XEN_P2M_COMMON_H */


