
Re: [Xen-devel] [PATCH v2 3/4] xen/arm: introduce GNTTABOP_cache_flush



>>> On 03.10.14 at 16:50, <stefano.stabellini@xxxxxxxxxxxxx> wrote:
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -2641,6 +2641,79 @@ do_grant_table_op(
>          }
>          break;
>      }
> +    case GNTTABOP_cache_flush:
> +    {
> +        struct gnttab_cache_flush cflush;
> +        struct domain *d, *owner;
> +        struct page_info *page;
> +        uint64_t mfn;
> +        void *v;
> +
> +        /* currently unimplemented */
> +        if ( count != 1 )
> +            return -ENOSYS;

Didn't you yourself indicate that batching here would be beneficial?
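
If/when you do add it, the usual shape for batching in this file would be
something along these lines (an illustrative sketch only - variable names
and whether you split it into a helper are up to you):

    /* Sketch: process up to 'count' entries, returning early so the
     * caller can arrange a hypercall continuation for the remainder. */
    for ( i = 0; i < count; i++ )
    {
        if ( i && hypercall_preempt_check() )
        {
            rc = i;
            break;
        }

        if ( unlikely(__copy_from_guest(&cflush, uop, 1)) )
        {
            rc = -EFAULT;
            break;
        }

        /* ... flush the single entry as in the code below ... */

        guest_handle_add_offset(uop, 1);
    }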

> +
> +        if ( copy_from_guest(&cflush, uop, 1) )
> +            return -EFAULT;
> +
> +        if ( cflush.offset + cflush.size > PAGE_SIZE )
> +            return -EINVAL;
> +
> +        if ( cflush.size == 0 || cflush.op == 0 )
> +            return 0;
> +
> +        if ( cflush.op & ~(GNTTAB_CACHE_INVAL|GNTTAB_CACHE_CLEAN) )
> +            return -EINVAL;
> +
> +        /* currently unimplemented */
> +        if ( cflush.a.ref != 0 )

You can't check like this - a.ref and a.dev_bus_addr share the same
space in memory, so any dev_bus_addr with a non-zero low half would
wrongly trip this check. You need a flag indicating whether the union
holds a gref or a busaddr.
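
One way to do that (names are only a suggestion) would be a source flag
in the op field, e.g.:

    #define GNTTAB_CACHE_SOURCE_GREF (1u << 31)

    ...
    if ( cflush.op & GNTTAB_CACHE_SOURCE_GREF )
        return -ENOSYS; /* grant-ref based flushes not implemented yet */

    mfn = cflush.a.dev_bus_addr >> PAGE_SHIFT;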

> +            return -ENOSYS;
> +
> +        d = rcu_lock_current_domain();
> +        if ( d == NULL )
> +            return -ESRCH;
> +
> +        mfn = cflush.a.dev_bus_addr >> PAGE_SHIFT;
> +
> +        if ( !mfn_valid(mfn) )
> +        {
> +            gdprintk(XENLOG_G_ERR, "mfn=%llx is not valid\n", mfn);

I continue to question the usefulness of this and other printk()s
here.

> +            rcu_unlock_domain(d);
> +            return -EINVAL;
> +        }
> +
> +        page = mfn_to_page(mfn);
> +        owner = page_get_owner_and_reference(page);
> +        if ( !owner )
> +        {
> +            rcu_unlock_domain(d);
> +            return -EFAULT;
> +        }
> +
> +        spin_lock(&owner->grant_table->lock);
> +
> +        if ( !grant_map_exists(d, owner->grant_table, mfn) )
> +        {
> +            spin_unlock(&owner->grant_table->lock);
> +            put_page(page);
> +            rcu_unlock_domain(d);
> +            gdprintk(XENLOG_G_ERR, "mfn %llx hasn't been granted by %d to %d\n",
> +                    mfn, owner->domain_id, d->domain_id);
> +            return -EINVAL;
> +        }
> +
> +        v = map_domain_page(mfn);
> +        v += cflush.offset;
> +
> +        if ( cflush.op & GNTTAB_CACHE_INVAL )
> +            invalidate_xen_dcache_va_range(v, cflush.size);
> +        if ( cflush.op & GNTTAB_CACHE_CLEAN )
> +            clean_xen_dcache_va_range(v, cflush.size);

I already commented on the ordering of these for v1, and it's still
the wrong way round for the case of both flags being set - invalidating
first risks discarding dirty lines that the subsequent clean was meant
to write back (or at least you didn't explain why it needs to be in
this order).
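
To spell out the order being asked for (using the patch's own helpers,
purely for illustration):

    /* Clean first, so dirty lines get written back before they can be
     * discarded when both flags are set. */
    if ( cflush.op & GNTTAB_CACHE_CLEAN )
        clean_xen_dcache_va_range(v, cflush.size);
    if ( cflush.op & GNTTAB_CACHE_INVAL )
        invalidate_xen_dcache_va_range(v, cflush.size);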

> @@ -574,6 +575,24 @@ struct gnttab_swap_grant_ref {
>  typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
>  DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
>  
> +/*
> + * Issue one or more cache maintenance operations on a portion of a
> + * page granted to the calling domain by a foreign domain.
> + */
> +struct gnttab_cache_flush {
> +    union {
> +        uint64_t dev_bus_addr;
> +        grant_ref_t ref;
> +    } a;
> +    uint32_t offset; /* offset from start of grant */
> +    uint32_t size;   /* size within the grant */

Elsewhere we use uint16_t for offsets and lengths (and we call
lengths "length", not "size"). Please extend an existing interface
in a consistent way.

> +#define GNTTAB_CACHE_CLEAN      (1<<0)
> +#define GNTTAB_CACHE_INVAL      (1<<1)
> +    uint32_t op;
> +};
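
Concretely, with the width and naming adjustments asked for above, the
structure might end up looking like this (a sketch only):

    struct gnttab_cache_flush {
        union {
            uint64_t dev_bus_addr;
            grant_ref_t ref;
        } a;
        uint16_t offset; /* offset from start of grant */
        uint16_t length; /* length within the grant */
    #define GNTTAB_CACHE_CLEAN      (1u<<0)
    #define GNTTAB_CACHE_INVAL      (1u<<1)
        uint32_t op;
    };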

The above adjustment will at the same time make sure the new structure
doesn't end up with different sizes for 32- and 64-bit guests (on x86
at least). Speaking of which - this patch lacks the necessary
adjustment(s) to common/compat/grant_table.c.
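
With identical 32-/64-bit layouts the compat side should reduce to the
usual pattern - roughly (a sketch of the general mechanism, not the
exact hunks needed):

    /* xen/include/xlat.lst - entry name assumed: */
    ?	gnttab_cache_flush		grant_table.h

    /* xen/common/compat/grant_table.c - build-time check that the
     * compat and native layouts really do match, so the new op needs
     * no argument translation: */
    CHECK_gnttab_cache_flush;

plus whatever wiring (if any) compat_grant_table_op() itself needs for
the new sub-op.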

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel