
Re: [Xen-devel] [PATCH for-4.5 v10 15/19] xen/arm: Temporarily disable mem_access for hypervisor access



On Thu, 2014-09-25 at 13:56 +0200, Tamas K Lengyel wrote:
> The guestcopy helpers use the MMU to verify that the given guest has
> read/write access to a given page during hypercalls. As we may have
> custom mem_access permissions set on these pages, we temporarily
> disable them to allow Xen to finish the hypercall. This is
> permissible because mem_access events are only reported when the
> guest itself directly accesses protected memory; x86 behaves the
> same way.

Is this patch new this time around? I don't recall seeing anything along
these lines previously. It seems pretty critical to me; how much testing
did v1..9 undergo?

As per the subthread with Julien, the sort of overhead you are adding to
the non-xenaccess case is unacceptable, especially on such a hot path.
It must be so close to zero as to be unnoticeable, and this is obviously
nowhere near that.
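
To be concrete, this is roughly the shape I would expect the common
case to take (a sketch only, not compiled; "mem_access_enabled" is a
stand-in for whatever per-domain flag the p2m code ends up keeping to
record that no restrictions are set anywhere):

    #include <asm/p2m.h>

    static long temp_disable_mem_access(vaddr_t gva, unsigned long *gfn,
                                        xenmem_access_t *xma)
    {
        struct p2m_domain *p2m = &current->domain->arch.p2m;

        /* Nothing to restore by default. */
        *xma = XENMEM_access_rwx;

        /*
         * Common case: one predictable branch.  No stage-1 walk, no
         * p2m lookup, no locking.  "mem_access_enabled" is hypothetical;
         * substitute however the p2m actually tracks this.
         */
        if ( likely(!p2m->mem_access_enabled) )
            return 0;

        /*
         * Slow path: the gva_to_ipa() + p2m_get_mem_access() +
         * p2m_set_mem_access() sequence from this patch would go here,
         * so only xenaccess users pay for it.
         */
        return 0;
    }

With *xma preset to XENMEM_access_rwx, the existing
temp_reenable_mem_access() then bails out for free on the same
condition.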

I'm afraid that this sort of thing cropping up only now has made me far
less confident about considering this new feature for 4.5. I think it
would be best to take a step back and consider deferring this until 4.6,
so that it can be presented with a proper analysis (i.e. benchmarks)
showing that it isn't hurting things in the normal case.
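
By analysis I mean something as simple as a guest-side microbenchmark
hammering a hypercall whose result is copied back into the guest, so
that the guestcopy path dominates. For example, from a Linux guest
kernel module (a sketch, untested):

    #include <linux/ktime.h>
    #include <linux/math64.h>
    #include <linux/printk.h>
    #include <xen/interface/version.h>
    #include <asm/xen/hypercall.h>

    static void time_guestcopy_path(void)
    {
        xen_extraversion_t extra;
        ktime_t t0, t1;
        int i, iters = 1000000;

        t0 = ktime_get();
        /* XENVER_extraversion copies a small buffer back to the guest. */
        for (i = 0; i < iters; i++)
            HYPERVISOR_xen_version(XENVER_extraversion, &extra);
        t1 = ktime_get();

        pr_info("avg ns/hypercall: %lld\n",
                div_s64(ktime_to_ns(ktime_sub(t1, t0)), iters));
    }

Numbers from an unmodified hypervisor, from one with this series
applied but xenaccess idle, and from one with xenaccess active would
settle the question.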

Ian.

> 
> Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
> ---
>  xen/arch/arm/guestcopy.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 61 insertions(+)
> 
> diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
> index 0173597..4aa041f 100644
> --- a/xen/arch/arm/guestcopy.c
> +++ b/xen/arch/arm/guestcopy.c
> @@ -6,6 +6,43 @@
>  
>  #include <asm/mm.h>
>  #include <asm/guest_access.h>
> +#include <asm/p2m.h>
> +
> +/*
> + * Temporarily disable mem_access permission restrictions.
> + * Note: In the future, events generated by the hypervisor accessing
> + * protected memory regions could be added here.
> + */
> +static long temp_disable_mem_access(vaddr_t gva, unsigned long *gfn,
> +                                    xenmem_access_t *xma)
> +{
> +    long rc;
> +    paddr_t gpa;
> +
> +    rc = gva_to_ipa((vaddr_t) gva, &gpa);
> +    if ( rc < 0 )
> +        return rc;
> +
> +    *gfn = paddr_to_pfn(gpa);
> +
> +    rc = p2m_get_mem_access(current->domain, *gfn, xma);
> +    if ( rc < 0 )
> +        return rc;
> +
> +    if ( *xma != XENMEM_access_rwx )
> +        rc = p2m_set_mem_access(current->domain, *gfn, 1, 0, ~0,
> +                                XENMEM_access_rwx);
> +
> +    return rc;
> +}
> +
> +/* Re-enable mem_access for this page (if in use). */
> +static inline
> +void temp_reenable_mem_access(unsigned long gfn, xenmem_access_t xma)
> +{
> +    if ( xma != XENMEM_access_rwx )
> +        p2m_set_mem_access(current->domain, gfn, 1, 0, ~0, xma);
> +}
>  
>  static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
>                                                unsigned len, int flush_dcache)
> @@ -18,6 +55,11 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
>          void *p;
>          unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
>          struct page_info *page;
> +        unsigned long gfn;
> +        xenmem_access_t xma;
> +
> +        if ( temp_disable_mem_access((vaddr_t) to, &gfn, &xma) < 0 )
> +            return len;
>  
>          page = get_page_from_gva(current->domain, (vaddr_t) to, GV2M_WRITE);
>          if ( page == NULL )
> @@ -34,6 +76,9 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
>          len -= size;
>          from += size;
>          to += size;
> +
> +        temp_reenable_mem_access(gfn, xma);
> +
>          /*
>           * After the first iteration, guest virtual address is correctly
>           * aligned to PAGE_SIZE.
> @@ -65,6 +110,11 @@ unsigned long raw_clear_guest(void *to, unsigned len)
>          void *p;
>          unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
>          struct page_info *page;
> +        unsigned long gfn;
> +        xenmem_access_t xma;
> +
> +        if ( temp_disable_mem_access((vaddr_t) to, &gfn, &xma) < 0 )
> +            return len;
>  
>          page = get_page_from_gva(current->domain, (vaddr_t) to, GV2M_WRITE);
>          if ( page == NULL )
> @@ -78,6 +128,9 @@ unsigned long raw_clear_guest(void *to, unsigned len)
>          put_page(page);
>          len -= size;
>          to += size;
> +
> +        temp_reenable_mem_access(gfn, xma);
> +
>          /*
>           * After the first iteration, guest virtual address is correctly
>           * aligned to PAGE_SIZE.
> @@ -97,6 +150,11 @@ unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned le
>          void *p;
>          unsigned size = min(len, (unsigned)(PAGE_SIZE - offset));
>          struct page_info *page;
> +        unsigned long gfn;
> +        xenmem_access_t xma;
> +
> +        if ( temp_disable_mem_access((vaddr_t) from, &gfn, &xma) < 0 )
> +            return len;
>  
>          page = get_page_from_gva(current->domain, (vaddr_t) from, GV2M_READ);
>          if ( page == NULL )
> @@ -112,6 +170,9 @@ unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned le
>          len -= size;
>          from += size;
>          to += size;
> +
> +        temp_reenable_mem_access(gfn, xma);
> +
>          /*
>           * After the first iteration, guest virtual address is correctly
>           * aligned to PAGE_SIZE.


