Re: [Xen-devel] [PATCH v2 2/2] p2m: split mem_access into separate files



On Fri, Dec 9, 2016 at 12:59 PM, Tamas K Lengyel
<tamas.lengyel@xxxxxxxxxxxx> wrote:
> This patch relocates mem_access components that are currently mixed with p2m
> code into separate files. This better aligns the code with similar subsystems,
> such as mem_sharing and mem_paging, which are already in separate files. There
> are no code changes introduced; the patch is purely mechanical code movement.
>
> On ARM we also relocate the static inline gfn_next_boundary function to p2m.h,
> as the mem_access code needs access to it.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
> Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>

> ---
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> Cc: Julien Grall <julien.grall@xxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
>
> v2: Don't move ARM radix tree functions
>     Include asm/mem_access.h in xen/mem_access.h

Patch ping. I think this only needs an ARM-side ack.
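
For context, the gfn_next_boundary() helper that the patch relocates to p2m.h
is the short static inline below, reproduced verbatim from the removal hunk
further down; it aligns the GFN to the order of the current mapping (or
invalid range) before stepping to the start of the next one:

static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
{
    /*
     * The order corresponds to the order of the mapping (or invalid
     * range) in the page table. So we need to align the GFN before
     * incrementing.
     */
    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));

    return gfn_add(gfn, 1UL << order);
}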

> ---
>  MAINTAINERS                      |   2 +
>  xen/arch/arm/Makefile            |   1 +
>  xen/arch/arm/mem_access.c        | 431 ++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/p2m.c               | 414 +----------------------------------
>  xen/arch/arm/traps.c             |   1 +
>  xen/arch/x86/mm/Makefile         |   1 +
>  xen/arch/x86/mm/mem_access.c     | 462 +++++++++++++++++++++++++++++++++++++++
>  xen/arch/x86/mm/p2m.c            | 421 -----------------------------------
>  xen/arch/x86/vm_event.c          |   3 +-
>  xen/common/mem_access.c          |   2 +-
>  xen/include/asm-arm/mem_access.h |  53 +++++
>  xen/include/asm-arm/p2m.h        |  31 ++-
>  xen/include/asm-x86/mem_access.h |  61 ++++++
>  xen/include/asm-x86/p2m.h        |  24 +-
>  xen/include/xen/mem_access.h     |  67 +++++-
>  xen/include/xen/p2m-common.h     |  52 -----
>  16 files changed, 1089 insertions(+), 937 deletions(-)
>  create mode 100644 xen/arch/arm/mem_access.c
>  create mode 100644 xen/arch/x86/mm/mem_access.c
>  create mode 100644 xen/include/asm-arm/mem_access.h
>  create mode 100644 xen/include/asm-x86/mem_access.h
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index f0d0202..fb26be3 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -402,6 +402,8 @@ S:  Supported
>  F:     tools/tests/xen-access
>  F:     xen/arch/*/monitor.c
>  F:     xen/arch/*/vm_event.c
> +F:     xen/arch/arm/mem_access.c
> +F:     xen/arch/x86/mm/mem_access.c
>  F:     xen/arch/x86/hvm/monitor.c
>  F:     xen/common/mem_access.c
>  F:     xen/common/monitor.c
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index da39d39..b095e8a 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -24,6 +24,7 @@ obj-y += io.o
>  obj-y += irq.o
>  obj-y += kernel.o
>  obj-$(CONFIG_LIVEPATCH) += livepatch.o
> +obj-y += mem_access.o
>  obj-y += mm.o
>  obj-y += monitor.o
>  obj-y += p2m.o
> diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
> new file mode 100644
> index 0000000..a6e5bcd
> --- /dev/null
> +++ b/xen/arch/arm/mem_access.c
> @@ -0,0 +1,431 @@
> +/*
> + * arch/arm/mem_access.c
> + *
> + * Architecture-specific mem_access handling routines
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public
> + * License v2 as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public
> + * License along with this program; If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <xen/config.h>
> +#include <xen/mem_access.h>
> +#include <xen/monitor.h>
> +#include <xen/sched.h>
> +#include <xen/vm_event.h>
> +#include <public/vm_event.h>
> +#include <asm/event.h>
> +
> +static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
> +                                xenmem_access_t *access)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    void *i;
> +    unsigned int index;
> +
> +    static const xenmem_access_t memaccess[] = {
> +#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
> +            ACCESS(n),
> +            ACCESS(r),
> +            ACCESS(w),
> +            ACCESS(rw),
> +            ACCESS(x),
> +            ACCESS(rx),
> +            ACCESS(wx),
> +            ACCESS(rwx),
> +            ACCESS(rx2rw),
> +            ACCESS(n2rwx),
> +#undef ACCESS
> +    };
> +
> +    ASSERT(p2m_is_locked(p2m));
> +
> +    /* If no setting was ever set, just return rwx. */
> +    if ( !p2m->mem_access_enabled )
> +    {
> +        *access = XENMEM_access_rwx;
> +        return 0;
> +    }
> +
> +    /* If request to get default access. */
> +    if ( gfn_eq(gfn, INVALID_GFN) )
> +    {
> +        *access = memaccess[p2m->default_access];
> +        return 0;
> +    }
> +
> +    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
> +
> +    if ( !i )
> +    {
> +        /*
> +         * No setting was found in the Radix tree. Check if the
> +         * entry exists in the page-tables.
> +         */
> +        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL);
> +
> +        if ( mfn_eq(mfn, INVALID_MFN) )
> +            return -ESRCH;
> +
> +        /* If entry exists then its rwx. */
> +        *access = XENMEM_access_rwx;
> +    }
> +    else
> +    {
> +        /* Setting was found in the Radix tree. */
> +        index = radix_tree_ptr_to_int(i);
> +        if ( index >= ARRAY_SIZE(memaccess) )
> +            return -ERANGE;
> +
> +        *access = memaccess[index];
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * If mem_access is in use it might have been the reason why get_page_from_gva
> + * failed to fetch the page, as it uses the MMU for the permission checking.
> + * Only in these cases we do a software-based type check and fetch the page if
> + * we indeed found a conflicting mem_access setting.
> + */
> +struct page_info*
> +p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
> +                                  const struct vcpu *v)
> +{
> +    long rc;
> +    paddr_t ipa;
> +    gfn_t gfn;
> +    mfn_t mfn;
> +    xenmem_access_t xma;
> +    p2m_type_t t;
> +    struct page_info *page = NULL;
> +    struct p2m_domain *p2m = &v->domain->arch.p2m;
> +
> +    rc = gva_to_ipa(gva, &ipa, flag);
> +    if ( rc < 0 )
> +        goto err;
> +
> +    gfn = _gfn(paddr_to_pfn(ipa));
> +
> +    /*
> +     * We do this first as this is faster in the default case when no
> +     * permission is set on the page.
> +     */
> +    rc = __p2m_get_mem_access(v->domain, gfn, &xma);
> +    if ( rc < 0 )
> +        goto err;
> +
> +    /* Let's check if mem_access limited the access. */
> +    switch ( xma )
> +    {
> +    default:
> +    case XENMEM_access_rwx:
> +    case XENMEM_access_rw:
> +        /*
> +         * If mem_access contains no rw perm restrictions at all then the original
> +         * fault was correct.
> +         */
> +        goto err;
> +    case XENMEM_access_n2rwx:
> +    case XENMEM_access_n:
> +    case XENMEM_access_x:
> +        /*
> +         * If no r/w is permitted by mem_access, this was a fault caused by mem_access.
> +         */
> +        break;
> +    case XENMEM_access_wx:
> +    case XENMEM_access_w:
> +        /*
> +         * If this was a read then it was because of mem_access, but if it was
> +         * a write then the original get_page_from_gva fault was correct.
> +         */
> +        if ( flag == GV2M_READ )
> +            break;
> +        else
> +            goto err;
> +    case XENMEM_access_rx2rw:
> +    case XENMEM_access_rx:
> +    case XENMEM_access_r:
> +        /*
> +         * If this was a write then it was because of mem_access, but if it was
> +         * a read then the original get_page_from_gva fault was correct.
> +         */
> +        if ( flag == GV2M_WRITE )
> +            break;
> +        else
> +            goto err;
> +    }
> +
> +    /*
> +     * We had a mem_access permission limiting the access, but the page type
> +     * could also be limiting, so we need to check that as well.
> +     */
> +    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL);
> +    if ( mfn_eq(mfn, INVALID_MFN) )
> +        goto err;
> +
> +    if ( !mfn_valid(mfn_x(mfn)) )
> +        goto err;
> +
> +    /*
> +     * Base type doesn't allow r/w
> +     */
> +    if ( t != p2m_ram_rw )
> +        goto err;
> +
> +    page = mfn_to_page(mfn_x(mfn));
> +
> +    if ( unlikely(!get_page(page, v->domain)) )
> +        page = NULL;
> +
> +err:
> +    return page;
> +}
> +
> +bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
> +{
> +    int rc;
> +    bool_t violation;
> +    xenmem_access_t xma;
> +    vm_event_request_t *req;
> +    struct vcpu *v = current;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
> +
> +    /* Mem_access is not in use. */
> +    if ( !p2m->mem_access_enabled )
> +        return true;
> +
> +    rc = p2m_get_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), &xma);
> +    if ( rc )
> +        return true;
> +
> +    /* Now check for mem_access violation. */
> +    switch ( xma )
> +    {
> +    case XENMEM_access_rwx:
> +        violation = false;
> +        break;
> +    case XENMEM_access_rw:
> +        violation = npfec.insn_fetch;
> +        break;
> +    case XENMEM_access_wx:
> +        violation = npfec.read_access;
> +        break;
> +    case XENMEM_access_rx:
> +    case XENMEM_access_rx2rw:
> +        violation = npfec.write_access;
> +        break;
> +    case XENMEM_access_x:
> +        violation = npfec.read_access || npfec.write_access;
> +        break;
> +    case XENMEM_access_w:
> +        violation = npfec.read_access || npfec.insn_fetch;
> +        break;
> +    case XENMEM_access_r:
> +        violation = npfec.write_access || npfec.insn_fetch;
> +        break;
> +    default:
> +    case XENMEM_access_n:
> +    case XENMEM_access_n2rwx:
> +        violation = true;
> +        break;
> +    }
> +
> +    if ( !violation )
> +        return true;
> +
> +    /* First, handle rx2rw and n2rwx conversion automatically. */
> +    if ( npfec.write_access && xma == XENMEM_access_rx2rw )
> +    {
> +        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
> +                                0, ~0, XENMEM_access_rw, 0);
> +        return false;
> +    }
> +    else if ( xma == XENMEM_access_n2rwx )
> +    {
> +        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
> +                                0, ~0, XENMEM_access_rwx, 0);
> +    }
> +
> +    /* Otherwise, check if there is a vm_event monitor subscriber */
> +    if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
> +    {
> +        /* No listener */
> +        if ( p2m->access_required )
> +        {
> +            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
> +                                  "no vm_event listener VCPU %d, dom %d\n",
> +                                  v->vcpu_id, v->domain->domain_id);
> +            domain_crash(v->domain);
> +        }
> +        else
> +        {
> +            /* n2rwx was already handled */
> +            if ( xma != XENMEM_access_n2rwx )
> +            {
> +                /* A listener is not required, so clear the access
> +                 * restrictions. */
> +                rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
> +                                        0, ~0, XENMEM_access_rwx, 0);
> +            }
> +        }
> +
> +        /* No need to reinject */
> +        return false;
> +    }
> +
> +    req = xzalloc(vm_event_request_t);
> +    if ( req )
> +    {
> +        req->reason = VM_EVENT_REASON_MEM_ACCESS;
> +
> +        /* Send request to mem access subscriber */
> +        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
> +        req->u.mem_access.offset =  gpa & ((1 << PAGE_SHIFT) - 1);
> +        if ( npfec.gla_valid )
> +        {
> +            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> +            req->u.mem_access.gla = gla;
> +
> +            if ( npfec.kind == npfec_kind_with_gla )
> +                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> +            else if ( npfec.kind == npfec_kind_in_gpt )
> +                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
> +        }
> +        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
> +        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
> +        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
> +
> +        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
> +            domain_crash(v->domain);
> +
> +        xfree(req);
> +    }
> +
> +    return false;
> +}
> +
> +/*
> + * Set access type for a region of pfns.
> + * If gfn == INVALID_GFN, sets the default access type.
> + */
> +long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> +                        uint32_t start, uint32_t mask, xenmem_access_t access,
> +                        unsigned int altp2m_idx)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    p2m_access_t a;
> +    unsigned int order;
> +    long rc = 0;
> +
> +    static const p2m_access_t memaccess[] = {
> +#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
> +        ACCESS(n),
> +        ACCESS(r),
> +        ACCESS(w),
> +        ACCESS(rw),
> +        ACCESS(x),
> +        ACCESS(rx),
> +        ACCESS(wx),
> +        ACCESS(rwx),
> +        ACCESS(rx2rw),
> +        ACCESS(n2rwx),
> +#undef ACCESS
> +    };
> +
> +    switch ( access )
> +    {
> +    case 0 ... ARRAY_SIZE(memaccess) - 1:
> +        a = memaccess[access];
> +        break;
> +    case XENMEM_access_default:
> +        a = p2m->default_access;
> +        break;
> +    default:
> +        return -EINVAL;
> +    }
> +
> +    /*
> +     * Flip mem_access_enabled to true when a permission is set, as to prevent
> +     * allocating or inserting super-pages.
> +     */
> +    p2m->mem_access_enabled = true;
> +
> +    /* If request to set default access. */
> +    if ( gfn_eq(gfn, INVALID_GFN) )
> +    {
> +        p2m->default_access = a;
> +        return 0;
> +    }
> +
> +    p2m_write_lock(p2m);
> +
> +    for ( gfn = gfn_add(gfn, start); nr > start;
> +          gfn = gfn_next_boundary(gfn, order) )
> +    {
> +        p2m_type_t t;
> +        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);
> +
> +
> +        if ( !mfn_eq(mfn, INVALID_MFN) )
> +        {
> +            order = 0;
> +            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
> +            if ( rc )
> +                break;
> +        }
> +
> +        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
> +        /* Check for continuation if it is not the last iteration */
> +        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
> +        {
> +            rc = start;
> +            break;
> +        }
> +    }
> +
> +    p2m_write_unlock(p2m);
> +
> +    return rc;
> +}
> +
> +long p2m_set_mem_access_multi(struct domain *d,
> +                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> +                              const XEN_GUEST_HANDLE(const_uint8) access_list,
> +                              uint32_t nr, uint32_t start, uint32_t mask,
> +                              unsigned int altp2m_idx)
> +{
> +    /* Not yet implemented on ARM. */
> +    return -EOPNOTSUPP;
> +}
> +
> +int p2m_get_mem_access(struct domain *d, gfn_t gfn,
> +                       xenmem_access_t *access)
> +{
> +    int ret;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> +    p2m_read_lock(p2m);
> +    ret = __p2m_get_mem_access(d, gfn, access);
> +    p2m_read_unlock(p2m);
> +
> +    return ret;
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 837be1d..4e7ce3d 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -7,6 +7,7 @@
>  #include <xen/vm_event.h>
>  #include <xen/monitor.h>
>  #include <xen/iocap.h>
> +#include <xen/mem_access.h>
>  #include <public/vm_event.h>
>  #include <asm/flushtlb.h>
>  #include <asm/gic.h>
> @@ -58,22 +59,6 @@ static inline bool p2m_is_superpage(lpae_t pte, unsigned int level)
>      return (level < 3) && p2m_mapping(pte);
>  }
>
> -/*
> - * Return the start of the next mapping based on the order of the
> - * current one.
> - */
> -static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
> -{
> -    /*
> -     * The order corresponds to the order of the mapping (or invalid
> -     * range) in the page table. So we need to align the GFN before
> -     * incrementing.
> -     */
> -    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
> -
> -    return gfn_add(gfn, 1UL << order);
> -}
> -
>  static void p2m_flush_tlb(struct p2m_domain *p2m);
>
>  /* Unlock the flush and do a P2M TLB flush if necessary */
> @@ -602,73 +587,6 @@ static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry)
>      return 0;
>  }
>
> -static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
> -                                xenmem_access_t *access)
> -{
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    void *i;
> -    unsigned int index;
> -
> -    static const xenmem_access_t memaccess[] = {
> -#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
> -            ACCESS(n),
> -            ACCESS(r),
> -            ACCESS(w),
> -            ACCESS(rw),
> -            ACCESS(x),
> -            ACCESS(rx),
> -            ACCESS(wx),
> -            ACCESS(rwx),
> -            ACCESS(rx2rw),
> -            ACCESS(n2rwx),
> -#undef ACCESS
> -    };
> -
> -    ASSERT(p2m_is_locked(p2m));
> -
> -    /* If no setting was ever set, just return rwx. */
> -    if ( !p2m->mem_access_enabled )
> -    {
> -        *access = XENMEM_access_rwx;
> -        return 0;
> -    }
> -
> -    /* If request to get default access. */
> -    if ( gfn_eq(gfn, INVALID_GFN) )
> -    {
> -        *access = memaccess[p2m->default_access];
> -        return 0;
> -    }
> -
> -    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
> -
> -    if ( !i )
> -    {
> -        /*
> -         * No setting was found in the Radix tree. Check if the
> -         * entry exists in the page-tables.
> -         */
> -        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL);
> -
> -        if ( mfn_eq(mfn, INVALID_MFN) )
> -            return -ESRCH;
> -
> -        /* If entry exists then its rwx. */
> -        *access = XENMEM_access_rwx;
> -    }
> -    else
> -    {
> -        /* Setting was found in the Radix tree. */
> -        index = radix_tree_ptr_to_int(i);
> -        if ( index >= ARRAY_SIZE(memaccess) )
> -            return -ERANGE;
> -
> -        *access = memaccess[index];
> -    }
> -
> -    return 0;
> -}
> -
>  static int p2m_mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
>                                      p2m_access_t a)
>  {
> @@ -1454,106 +1372,6 @@ mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
>      return p2m_lookup(d, gfn, NULL);
>  }
>
> -/*
> - * If mem_access is in use it might have been the reason why get_page_from_gva
> - * failed to fetch the page, as it uses the MMU for the permission checking.
> - * Only in these cases we do a software-based type check and fetch the page if
> - * we indeed found a conflicting mem_access setting.
> - */
> -static struct page_info*
> -p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
> -                                  const struct vcpu *v)
> -{
> -    long rc;
> -    paddr_t ipa;
> -    gfn_t gfn;
> -    mfn_t mfn;
> -    xenmem_access_t xma;
> -    p2m_type_t t;
> -    struct page_info *page = NULL;
> -    struct p2m_domain *p2m = &v->domain->arch.p2m;
> -
> -    rc = gva_to_ipa(gva, &ipa, flag);
> -    if ( rc < 0 )
> -        goto err;
> -
> -    gfn = _gfn(paddr_to_pfn(ipa));
> -
> -    /*
> -     * We do this first as this is faster in the default case when no
> -     * permission is set on the page.
> -     */
> -    rc = __p2m_get_mem_access(v->domain, gfn, &xma);
> -    if ( rc < 0 )
> -        goto err;
> -
> -    /* Let's check if mem_access limited the access. */
> -    switch ( xma )
> -    {
> -    default:
> -    case XENMEM_access_rwx:
> -    case XENMEM_access_rw:
> -        /*
> -         * If mem_access contains no rw perm restrictions at all then the original
> -         * fault was correct.
> -         */
> -        goto err;
> -    case XENMEM_access_n2rwx:
> -    case XENMEM_access_n:
> -    case XENMEM_access_x:
> -        /*
> -         * If no r/w is permitted by mem_access, this was a fault caused by mem_access.
> -         */
> -        break;
> -    case XENMEM_access_wx:
> -    case XENMEM_access_w:
> -        /*
> -         * If this was a read then it was because of mem_access, but if it was
> -         * a write then the original get_page_from_gva fault was correct.
> -         */
> -        if ( flag == GV2M_READ )
> -            break;
> -        else
> -            goto err;
> -    case XENMEM_access_rx2rw:
> -    case XENMEM_access_rx:
> -    case XENMEM_access_r:
> -        /*
> -         * If this was a write then it was because of mem_access, but if it was
> -         * a read then the original get_page_from_gva fault was correct.
> -         */
> -        if ( flag == GV2M_WRITE )
> -            break;
> -        else
> -            goto err;
> -    }
> -
> -    /*
> -     * We had a mem_access permission limiting the access, but the page type
> -     * could also be limiting, so we need to check that as well.
> -     */
> -    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL);
> -    if ( mfn_eq(mfn, INVALID_MFN) )
> -        goto err;
> -
> -    if ( !mfn_valid(mfn_x(mfn)) )
> -        goto err;
> -
> -    /*
> -     * Base type doesn't allow r/w
> -     */
> -    if ( t != p2m_ram_rw )
> -        goto err;
> -
> -    page = mfn_to_page(mfn_x(mfn));
> -
> -    if ( unlikely(!get_page(page, v->domain)) )
> -        page = NULL;
> -
> -err:
> -    return page;
> -}
> -
>  struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
>                                      unsigned long flags)
>  {
> @@ -1666,236 +1484,6 @@ void __init setup_virt_paging(void)
>      smp_call_function(setup_virt_paging_one, (void *)val, 1);
>  }
>
> -bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
> -{
> -    int rc;
> -    bool_t violation;
> -    xenmem_access_t xma;
> -    vm_event_request_t *req;
> -    struct vcpu *v = current;
> -    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
> -
> -    /* Mem_access is not in use. */
> -    if ( !p2m->mem_access_enabled )
> -        return true;
> -
> -    rc = p2m_get_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), &xma);
> -    if ( rc )
> -        return true;
> -
> -    /* Now check for mem_access violation. */
> -    switch ( xma )
> -    {
> -    case XENMEM_access_rwx:
> -        violation = false;
> -        break;
> -    case XENMEM_access_rw:
> -        violation = npfec.insn_fetch;
> -        break;
> -    case XENMEM_access_wx:
> -        violation = npfec.read_access;
> -        break;
> -    case XENMEM_access_rx:
> -    case XENMEM_access_rx2rw:
> -        violation = npfec.write_access;
> -        break;
> -    case XENMEM_access_x:
> -        violation = npfec.read_access || npfec.write_access;
> -        break;
> -    case XENMEM_access_w:
> -        violation = npfec.read_access || npfec.insn_fetch;
> -        break;
> -    case XENMEM_access_r:
> -        violation = npfec.write_access || npfec.insn_fetch;
> -        break;
> -    default:
> -    case XENMEM_access_n:
> -    case XENMEM_access_n2rwx:
> -        violation = true;
> -        break;
> -    }
> -
> -    if ( !violation )
> -        return true;
> -
> -    /* First, handle rx2rw and n2rwx conversion automatically. */
> -    if ( npfec.write_access && xma == XENMEM_access_rx2rw )
> -    {
> -        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
> -                                0, ~0, XENMEM_access_rw, 0);
> -        return false;
> -    }
> -    else if ( xma == XENMEM_access_n2rwx )
> -    {
> -        rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
> -                                0, ~0, XENMEM_access_rwx, 0);
> -    }
> -
> -    /* Otherwise, check if there is a vm_event monitor subscriber */
> -    if ( !vm_event_check_ring(&v->domain->vm_event->monitor) )
> -    {
> -        /* No listener */
> -        if ( p2m->access_required )
> -        {
> -            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
> -                                  "no vm_event listener VCPU %d, dom %d\n",
> -                                  v->vcpu_id, v->domain->domain_id);
> -            domain_crash(v->domain);
> -        }
> -        else
> -        {
> -            /* n2rwx was already handled */
> -            if ( xma != XENMEM_access_n2rwx )
> -            {
> -                /* A listener is not required, so clear the access
> -                 * restrictions. */
> -                rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
> -                                        0, ~0, XENMEM_access_rwx, 0);
> -            }
> -        }
> -
> -        /* No need to reinject */
> -        return false;
> -    }
> -
> -    req = xzalloc(vm_event_request_t);
> -    if ( req )
> -    {
> -        req->reason = VM_EVENT_REASON_MEM_ACCESS;
> -
> -        /* Send request to mem access subscriber */
> -        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
> -        req->u.mem_access.offset =  gpa & ((1 << PAGE_SHIFT) - 1);
> -        if ( npfec.gla_valid )
> -        {
> -            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> -            req->u.mem_access.gla = gla;
> -
> -            if ( npfec.kind == npfec_kind_with_gla )
> -                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> -            else if ( npfec.kind == npfec_kind_in_gpt )
> -                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
> -        }
> -        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
> -        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
> -        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
> -
> -        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
> -            domain_crash(v->domain);
> -
> -        xfree(req);
> -    }
> -
> -    return false;
> -}
> -
> -/*
> - * Set access type for a region of pfns.
> - * If gfn == INVALID_GFN, sets the default access type.
> - */
> -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> -                        uint32_t start, uint32_t mask, xenmem_access_t access,
> -                        unsigned int altp2m_idx)
> -{
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    p2m_access_t a;
> -    unsigned int order;
> -    long rc = 0;
> -
> -    static const p2m_access_t memaccess[] = {
> -#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
> -        ACCESS(n),
> -        ACCESS(r),
> -        ACCESS(w),
> -        ACCESS(rw),
> -        ACCESS(x),
> -        ACCESS(rx),
> -        ACCESS(wx),
> -        ACCESS(rwx),
> -        ACCESS(rx2rw),
> -        ACCESS(n2rwx),
> -#undef ACCESS
> -    };
> -
> -    switch ( access )
> -    {
> -    case 0 ... ARRAY_SIZE(memaccess) - 1:
> -        a = memaccess[access];
> -        break;
> -    case XENMEM_access_default:
> -        a = p2m->default_access;
> -        break;
> -    default:
> -        return -EINVAL;
> -    }
> -
> -    /*
> -     * Flip mem_access_enabled to true when a permission is set, as to prevent
> -     * allocating or inserting super-pages.
> -     */
> -    p2m->mem_access_enabled = true;
> -
> -    /* If request to set default access. */
> -    if ( gfn_eq(gfn, INVALID_GFN) )
> -    {
> -        p2m->default_access = a;
> -        return 0;
> -    }
> -
> -    p2m_write_lock(p2m);
> -
> -    for ( gfn = gfn_add(gfn, start); nr > start;
> -          gfn = gfn_next_boundary(gfn, order) )
> -    {
> -        p2m_type_t t;
> -        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);
> -
> -
> -        if ( !mfn_eq(mfn, INVALID_MFN) )
> -        {
> -            order = 0;
> -            rc = __p2m_set_entry(p2m, gfn, 0, mfn, t, a);
> -            if ( rc )
> -                break;
> -        }
> -
> -        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
> -        /* Check for continuation if it is not the last iteration */
> -        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
> -        {
> -            rc = start;
> -            break;
> -        }
> -    }
> -
> -    p2m_write_unlock(p2m);
> -
> -    return rc;
> -}
> -
> -long p2m_set_mem_access_multi(struct domain *d,
> -                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> -                              const XEN_GUEST_HANDLE(const_uint8) access_list,
> -                              uint32_t nr, uint32_t start, uint32_t mask,
> -                              unsigned int altp2m_idx)
> -{
> -    /* Not yet implemented on ARM. */
> -    return -EOPNOTSUPP;
> -}
> -
> -int p2m_get_mem_access(struct domain *d, gfn_t gfn,
> -                       xenmem_access_t *access)
> -{
> -    int ret;
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -
> -    p2m_read_lock(p2m);
> -    ret = __p2m_get_mem_access(d, gfn, access);
> -    p2m_read_unlock(p2m);
> -
> -    return ret;
> -}
> -
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index 8ff73fe..f2ea083 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -32,6 +32,7 @@
>  #include <xen/domain_page.h>
>  #include <xen/perfc.h>
>  #include <xen/virtual_region.h>
> +#include <xen/mem_access.h>
>  #include <public/sched.h>
>  #include <public/xen.h>
>  #include <asm/debugger.h>
> diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
> index 9804c3a..e977dd8 100644
> --- a/xen/arch/x86/mm/Makefile
> +++ b/xen/arch/x86/mm/Makefile
> @@ -9,6 +9,7 @@ obj-y += guest_walk_3.o
>  obj-y += guest_walk_4.o
>  obj-y += mem_paging.o
>  obj-y += mem_sharing.o
> +obj-y += mem_access.o
>
>  guest_walk_%.o: guest_walk.c Makefile
>         $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> new file mode 100644
> index 0000000..34a994d
> --- /dev/null
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -0,0 +1,462 @@
> +/******************************************************************************
> + * arch/x86/mm/mem_access.c
> + *
> + * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
> + * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
> + * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
> + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
> + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <xen/guest_access.h> /* copy_from_guest() */
> +#include <xen/mem_access.h>
> +#include <xen/vm_event.h>
> +#include <xen/event.h>
> +#include <public/vm_event.h>
> +#include <asm/p2m.h>
> +#include <asm/altp2m.h>
> +#include <asm/vm_event.h>
> +
> +#include "mm-locks.h"
> +
> +bool p2m_mem_access_emulate_check(struct vcpu *v,
> +                                  const vm_event_response_t *rsp)
> +{
> +    xenmem_access_t access;
> +    bool violation = 1;
> +    const struct vm_event_mem_access *data = &rsp->u.mem_access;
> +
> +    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
> +    {
> +        switch ( access )
> +        {
> +        case XENMEM_access_n:
> +        case XENMEM_access_n2rwx:
> +        default:
> +            violation = data->flags & MEM_ACCESS_RWX;
> +            break;
> +
> +        case XENMEM_access_r:
> +            violation = data->flags & MEM_ACCESS_WX;
> +            break;
> +
> +        case XENMEM_access_w:
> +            violation = data->flags & MEM_ACCESS_RX;
> +            break;
> +
> +        case XENMEM_access_x:
> +            violation = data->flags & MEM_ACCESS_RW;
> +            break;
> +
> +        case XENMEM_access_rx:
> +        case XENMEM_access_rx2rw:
> +            violation = data->flags & MEM_ACCESS_W;
> +            break;
> +
> +        case XENMEM_access_wx:
> +            violation = data->flags & MEM_ACCESS_R;
> +            break;
> +
> +        case XENMEM_access_rw:
> +            violation = data->flags & MEM_ACCESS_X;
> +            break;
> +
> +        case XENMEM_access_rwx:
> +            violation = 0;
> +            break;
> +        }
> +    }
> +
> +    return violation;
> +}
> +
> +bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> +                            struct npfec npfec,
> +                            vm_event_request_t **req_ptr)
> +{
> +    struct vcpu *v = current;
> +    unsigned long gfn = gpa >> PAGE_SHIFT;
> +    struct domain *d = v->domain;
> +    struct p2m_domain *p2m = NULL;
> +    mfn_t mfn;
> +    p2m_type_t p2mt;
> +    p2m_access_t p2ma;
> +    vm_event_request_t *req;
> +    int rc;
> +
> +    if ( altp2m_active(d) )
> +        p2m = p2m_get_altp2m(v);
> +    if ( !p2m )
> +        p2m = p2m_get_hostp2m(d);
> +
> +    /* First, handle rx2rw conversion automatically.
> +     * These calls to p2m->set_entry() must succeed: we have the gfn
> +     * locked and just did a successful get_entry(). */
> +    gfn_lock(p2m, gfn, 0);
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
> +
> +    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
> +    {
> +        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1);
> +        ASSERT(rc == 0);
> +        gfn_unlock(p2m, gfn, 0);
> +        return 1;
> +    }
> +    else if ( p2ma == p2m_access_n2rwx )
> +    {
> +        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
> +        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> +                            p2mt, p2m_access_rwx, -1);
> +        ASSERT(rc == 0);
> +    }
> +    gfn_unlock(p2m, gfn, 0);
> +
> +    /* Otherwise, check if there is a memory event listener, and send the message along */
> +    if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
> +    {
> +        /* No listener */
> +        if ( p2m->access_required )
> +        {
> +            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
> +                                  "no vm_event listener VCPU %d, dom %d\n",
> +                                  v->vcpu_id, d->domain_id);
> +            domain_crash(v->domain);
> +            return 0;
> +        }
> +        else
> +        {
> +            gfn_lock(p2m, gfn, 0);
> +            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
> +            if ( p2ma != p2m_access_n2rwx )
> +            {
> +                /* A listener is not required, so clear the access
> +                 * restrictions.  This set must succeed: we have the
> +                 * gfn locked and just did a successful get_entry(). */
> +                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> +                                    p2mt, p2m_access_rwx, -1);
> +                ASSERT(rc == 0);
> +            }
> +            gfn_unlock(p2m, gfn, 0);
> +            return 1;
> +        }
> +    }
> +
> +    *req_ptr = NULL;
> +    req = xzalloc(vm_event_request_t);
> +    if ( req )
> +    {
> +        *req_ptr = req;
> +
> +        req->reason = VM_EVENT_REASON_MEM_ACCESS;
> +        req->u.mem_access.gfn = gfn;
> +        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
> +        if ( npfec.gla_valid )
> +        {
> +            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> +            req->u.mem_access.gla = gla;
> +
> +            if ( npfec.kind == npfec_kind_with_gla )
> +                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> +            else if ( npfec.kind == npfec_kind_in_gpt )
> +                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
> +        }
> +        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
> +        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
> +        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
> +    }
> +
> +    /* Return whether vCPU pause is required (aka. sync event) */
> +    return (p2ma != p2m_access_n2rwx);
> +}
> +
> +int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
> +                              struct p2m_domain *ap2m, p2m_access_t a,
> +                              gfn_t gfn)
> +{
> +    mfn_t mfn;
> +    p2m_type_t t;
> +    p2m_access_t old_a;
> +    unsigned int page_order;
> +    unsigned long gfn_l = gfn_x(gfn);
> +    int rc;
> +
> +    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
> +
> +    /* Check host p2m if no valid entry in alternate */
> +    if ( !mfn_valid(mfn_x(mfn)) )
> +    {
> +
> +        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
> +                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
> +
> +        rc = -ESRCH;
> +        if ( !mfn_valid(mfn_x(mfn)) || t != p2m_ram_rw )
> +            return rc;
> +
> +        /* If this is a superpage, copy that first */
> +        if ( page_order != PAGE_ORDER_4K )
> +        {
> +            unsigned long mask = ~((1UL << page_order) - 1);
> +            unsigned long gfn2_l = gfn_l & mask;
> +            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
> +
> +            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
> +            if ( rc )
> +                return rc;
> +        }
> +    }
> +
> +    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
> +                         (current->domain != d));
> +}
> +
> +static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
> +                          struct p2m_domain *ap2m, p2m_access_t a,
> +                          gfn_t gfn)
> +{
> +    int rc = 0;
> +
> +    if ( ap2m )
> +    {
> +        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
> +        /* If the corresponding mfn is invalid we will want to just skip it */
> +        if ( rc == -ESRCH )
> +            rc = 0;
> +    }
> +    else
> +    {
> +        mfn_t mfn;
> +        p2m_access_t _a;
> +        p2m_type_t t;
> +        unsigned long gfn_l = gfn_x(gfn);
> +
> +        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
> +        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
> +    }
> +
> +    return rc;
> +}
> +
> +static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
> +                                        xenmem_access_t xaccess,
> +                                        p2m_access_t *paccess)
> +{
> +    static const p2m_access_t memaccess[] = {
> +#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
> +        ACCESS(n),
> +        ACCESS(r),
> +        ACCESS(w),
> +        ACCESS(rw),
> +        ACCESS(x),
> +        ACCESS(rx),
> +        ACCESS(wx),
> +        ACCESS(rwx),
> +        ACCESS(rx2rw),
> +        ACCESS(n2rwx),
> +#undef ACCESS
> +    };
> +
> +    switch ( xaccess )
> +    {
> +    case 0 ... ARRAY_SIZE(memaccess) - 1:
> +        *paccess = memaccess[xaccess];
> +        break;
> +    case XENMEM_access_default:
> +        *paccess = p2m->default_access;
> +        break;
> +    default:
> +        return false;
> +    }
> +
> +    return true;
> +}
> +
> +/*
> + * Set access type for a region of gfns.
> + * If gfn == INVALID_GFN, sets the default access type.
> + */
> +long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> +                        uint32_t start, uint32_t mask, xenmem_access_t access,
> +                        unsigned int altp2m_idx)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
> +    p2m_access_t a;
> +    unsigned long gfn_l;
> +    long rc = 0;
> +
> +    /* altp2m view 0 is treated as the hostp2m */
> +    if ( altp2m_idx )
> +    {
> +        if ( altp2m_idx >= MAX_ALTP2M ||
> +             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
> +            return -EINVAL;
> +
> +        ap2m = d->arch.altp2m_p2m[altp2m_idx];
> +    }
> +
> +    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
> +        return -EINVAL;
> +
> +    /* If request to set default access. */
> +    if ( gfn_eq(gfn, INVALID_GFN) )
> +    {
> +        p2m->default_access = a;
> +        return 0;
> +    }
> +
> +    p2m_lock(p2m);
> +    if ( ap2m )
> +        p2m_lock(ap2m);
> +
> +    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
> +    {
> +        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
> +
> +        if ( rc )
> +            break;
> +
> +        /* Check for continuation if it's not the last iteration. */
> +        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
> +        {
> +            rc = start;
> +            break;
> +        }
> +    }
> +
> +    if ( ap2m )
> +        p2m_unlock(ap2m);
> +    p2m_unlock(p2m);
> +
> +    return rc;
> +}
> +
> +long p2m_set_mem_access_multi(struct domain *d,
> +                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> +                              const XEN_GUEST_HANDLE(const_uint8) access_list,
> +                              uint32_t nr, uint32_t start, uint32_t mask,
> +                              unsigned int altp2m_idx)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
> +    long rc = 0;
> +
> +    /* altp2m view 0 is treated as the hostp2m */
> +    if ( altp2m_idx )
> +    {
> +        if ( altp2m_idx >= MAX_ALTP2M ||
> +             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
> +            return -EINVAL;
> +
> +        ap2m = d->arch.altp2m_p2m[altp2m_idx];
> +    }
> +
> +    p2m_lock(p2m);
> +    if ( ap2m )
> +        p2m_lock(ap2m);
> +
> +    while ( start < nr )
> +    {
> +        p2m_access_t a;
> +        uint8_t access;
> +        uint64_t gfn_l;
> +
> +        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
> +             copy_from_guest_offset(&access, access_list, start, 1) )
> +        {
> +            rc = -EFAULT;
> +            break;
> +        }
> +
> +        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
> +        {
> +            rc = -EINVAL;
> +            break;
> +        }
> +
> +        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
> +
> +        if ( rc )
> +            break;
> +
> +        /* Check for continuation if it's not the last iteration. */
> +        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
> +        {
> +            rc = start;
> +            break;
> +        }
> +    }
> +
> +    if ( ap2m )
> +        p2m_unlock(ap2m);
> +    p2m_unlock(p2m);
> +
> +    return rc;
> +}
> +
> +/*
> + * Get access type for a gfn.
> + * If gfn == INVALID_GFN, gets the default access type.
> + */
> +int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    p2m_type_t t;
> +    p2m_access_t a;
> +    mfn_t mfn;
> +
> +    static const xenmem_access_t memaccess[] = {
> +#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
> +            ACCESS(n),
> +            ACCESS(r),
> +            ACCESS(w),
> +            ACCESS(rw),
> +            ACCESS(x),
> +            ACCESS(rx),
> +            ACCESS(wx),
> +            ACCESS(rwx),
> +            ACCESS(rx2rw),
> +            ACCESS(n2rwx),
> +#undef ACCESS
> +    };
> +
> +    /* If request to get default access. */
> +    if ( gfn_eq(gfn, INVALID_GFN) )
> +    {
> +        *access = memaccess[p2m->default_access];
> +        return 0;
> +    }
> +
> +    gfn_lock(p2m, gfn, 0);
> +    mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
> +    gfn_unlock(p2m, gfn, 0);
> +
> +    if ( mfn_eq(mfn, INVALID_MFN) )
> +        return -ESRCH;
> +
> +    if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
> +        return -ERANGE;
> +
> +    *access =  memaccess[a];
> +    return 0;
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 6a45185..6299d5a 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1589,433 +1589,12 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
>      }
>  }
>
> -bool p2m_mem_access_emulate_check(struct vcpu *v,
> -                                  const vm_event_response_t *rsp)
> -{
> -    xenmem_access_t access;
> -    bool violation = 1;
> -    const struct vm_event_mem_access *data = &rsp->u.mem_access;
> -
> -    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
> -    {
> -        switch ( access )
> -        {
> -        case XENMEM_access_n:
> -        case XENMEM_access_n2rwx:
> -        default:
> -            violation = data->flags & MEM_ACCESS_RWX;
> -            break;
> -
> -        case XENMEM_access_r:
> -            violation = data->flags & MEM_ACCESS_WX;
> -            break;
> -
> -        case XENMEM_access_w:
> -            violation = data->flags & MEM_ACCESS_RX;
> -            break;
> -
> -        case XENMEM_access_x:
> -            violation = data->flags & MEM_ACCESS_RW;
> -            break;
> -
> -        case XENMEM_access_rx:
> -        case XENMEM_access_rx2rw:
> -            violation = data->flags & MEM_ACCESS_W;
> -            break;
> -
> -        case XENMEM_access_wx:
> -            violation = data->flags & MEM_ACCESS_R;
> -            break;
> -
> -        case XENMEM_access_rw:
> -            violation = data->flags & MEM_ACCESS_X;
> -            break;
> -
> -        case XENMEM_access_rwx:
> -            violation = 0;
> -            break;
> -        }
> -    }
> -
> -    return violation;
> -}
> -
>  void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
>  {
>      if ( altp2m_active(v->domain) )
>          p2m_switch_vcpu_altp2m_by_id(v, idx);
>  }
>
> -bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> -                            struct npfec npfec,
> -                            vm_event_request_t **req_ptr)
> -{
> -    struct vcpu *v = current;
> -    unsigned long gfn = gpa >> PAGE_SHIFT;
> -    struct domain *d = v->domain;
> -    struct p2m_domain *p2m = NULL;
> -    mfn_t mfn;
> -    p2m_type_t p2mt;
> -    p2m_access_t p2ma;
> -    vm_event_request_t *req;
> -    int rc;
> -
> -    if ( altp2m_active(d) )
> -        p2m = p2m_get_altp2m(v);
> -    if ( !p2m )
> -        p2m = p2m_get_hostp2m(d);
> -
> -    /* First, handle rx2rw conversion automatically.
> -     * These calls to p2m->set_entry() must succeed: we have the gfn
> -     * locked and just did a successful get_entry(). */
> -    gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
> -
> -    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
> -    {
> -        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1);
> -        ASSERT(rc == 0);
> -        gfn_unlock(p2m, gfn, 0);
> -        return 1;
> -    }
> -    else if ( p2ma == p2m_access_n2rwx )
> -    {
> -        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
> -        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> -                            p2mt, p2m_access_rwx, -1);
> -        ASSERT(rc == 0);
> -    }
> -    gfn_unlock(p2m, gfn, 0);
> -
> -    /* Otherwise, check if there is a memory event listener, and send the message along */
> -    if ( !vm_event_check_ring(&d->vm_event->monitor) || !req_ptr )
> -    {
> -        /* No listener */
> -        if ( p2m->access_required )
> -        {
> -            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
> -                                  "no vm_event listener VCPU %d, dom %d\n",
> -                                  v->vcpu_id, d->domain_id);
> -            domain_crash(v->domain);
> -            return 0;
> -        }
> -        else
> -        {
> -            gfn_lock(p2m, gfn, 0);
> -            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
> -            if ( p2ma != p2m_access_n2rwx )
> -            {
> -                /* A listener is not required, so clear the access
> -                 * restrictions.  This set must succeed: we have the
> -                 * gfn locked and just did a successful get_entry(). */
> -                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> -                                    p2mt, p2m_access_rwx, -1);
> -                ASSERT(rc == 0);
> -            }
> -            gfn_unlock(p2m, gfn, 0);
> -            return 1;
> -        }
> -    }
> -
> -    *req_ptr = NULL;
> -    req = xzalloc(vm_event_request_t);
> -    if ( req )
> -    {
> -        *req_ptr = req;
> -
> -        req->reason = VM_EVENT_REASON_MEM_ACCESS;
> -        req->u.mem_access.gfn = gfn;
> -        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
> -        if ( npfec.gla_valid )
> -        {
> -            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> -            req->u.mem_access.gla = gla;
> -
> -            if ( npfec.kind == npfec_kind_with_gla )
> -                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
> -            else if ( npfec.kind == npfec_kind_in_gpt )
> -                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
> -        }
> -        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
> -        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
> -        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
> -    }
> -
> -    /* Return whether vCPU pause is required (aka. sync event) */
> -    return (p2ma != p2m_access_n2rwx);
> -}
> -
> -static inline
> -int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
> -                              struct p2m_domain *ap2m, p2m_access_t a,
> -                              gfn_t gfn)
> -{
> -    mfn_t mfn;
> -    p2m_type_t t;
> -    p2m_access_t old_a;
> -    unsigned int page_order;
> -    unsigned long gfn_l = gfn_x(gfn);
> -    int rc;
> -
> -    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
> -
> -    /* Check host p2m if no valid entry in alternate */
> -    if ( !mfn_valid(mfn) )
> -    {
> -
> -        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
> -                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
> -
> -        rc = -ESRCH;
> -        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
> -            return rc;
> -
> -        /* If this is a superpage, copy that first */
> -        if ( page_order != PAGE_ORDER_4K )
> -        {
> -            unsigned long mask = ~((1UL << page_order) - 1);
> -            unsigned long gfn2_l = gfn_l & mask;
> -            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
> -
> -            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
> -            if ( rc )
> -                return rc;
> -        }
> -    }
> -
> -    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
> -                         (current->domain != d));
> -}
> -
> -static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
> -                          struct p2m_domain *ap2m, p2m_access_t a,
> -                          gfn_t gfn)
> -{
> -    int rc = 0;
> -
> -    if ( ap2m )
> -    {
> -        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
> -        /* If the corresponding mfn is invalid we will want to just skip it */
> -        if ( rc == -ESRCH )
> -            rc = 0;
> -    }
> -    else
> -    {
> -        mfn_t mfn;
> -        p2m_access_t _a;
> -        p2m_type_t t;
> -        unsigned long gfn_l = gfn_x(gfn);
> -
> -        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
> -        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
> -    }
> -
> -    return rc;
> -}
> -
> -static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
> -                                        xenmem_access_t xaccess,
> -                                        p2m_access_t *paccess)
> -{
> -    static const p2m_access_t memaccess[] = {
> -#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
> -        ACCESS(n),
> -        ACCESS(r),
> -        ACCESS(w),
> -        ACCESS(rw),
> -        ACCESS(x),
> -        ACCESS(rx),
> -        ACCESS(wx),
> -        ACCESS(rwx),
> -        ACCESS(rx2rw),
> -        ACCESS(n2rwx),
> -#undef ACCESS
> -    };
> -
> -    switch ( xaccess )
> -    {
> -    case 0 ... ARRAY_SIZE(memaccess) - 1:
> -        *paccess = memaccess[xaccess];
> -        break;
> -    case XENMEM_access_default:
> -        *paccess = p2m->default_access;
> -        break;
> -    default:
> -        return false;
> -    }
> -
> -    return true;
> -}
> -
> -/*
> - * Set access type for a region of gfns.
> - * If gfn == INVALID_GFN, sets the default access type.
> - */
> -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> -                        uint32_t start, uint32_t mask, xenmem_access_t access,
> -                        unsigned int altp2m_idx)
> -{
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
> -    p2m_access_t a;
> -    unsigned long gfn_l;
> -    long rc = 0;
> -
> -    /* altp2m view 0 is treated as the hostp2m */
> -    if ( altp2m_idx )
> -    {
> -        if ( altp2m_idx >= MAX_ALTP2M ||
> -             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
> -            return -EINVAL;
> -
> -        ap2m = d->arch.altp2m_p2m[altp2m_idx];
> -    }
> -
> -    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
> -        return -EINVAL;
> -
> -    /* If request to set default access. */
> -    if ( gfn_eq(gfn, INVALID_GFN) )
> -    {
> -        p2m->default_access = a;
> -        return 0;
> -    }
> -
> -    p2m_lock(p2m);
> -    if ( ap2m )
> -        p2m_lock(ap2m);
> -
> -    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
> -    {
> -        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
> -
> -        if ( rc )
> -            break;
> -
> -        /* Check for continuation if it's not the last iteration. */
> -        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
> -        {
> -            rc = start;
> -            break;
> -        }
> -    }
> -
> -    if ( ap2m )
> -        p2m_unlock(ap2m);
> -    p2m_unlock(p2m);
> -
> -    return rc;
> -}
> -
> -long p2m_set_mem_access_multi(struct domain *d,
> -                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> -                              const XEN_GUEST_HANDLE(const_uint8) access_list,
> -                              uint32_t nr, uint32_t start, uint32_t mask,
> -                              unsigned int altp2m_idx)
> -{
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
> -    long rc = 0;
> -
> -    /* altp2m view 0 is treated as the hostp2m */
> -    if ( altp2m_idx )
> -    {
> -        if ( altp2m_idx >= MAX_ALTP2M ||
> -             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
> -            return -EINVAL;
> -
> -        ap2m = d->arch.altp2m_p2m[altp2m_idx];
> -    }
> -
> -    p2m_lock(p2m);
> -    if ( ap2m )
> -        p2m_lock(ap2m);
> -
> -    while ( start < nr )
> -    {
> -        p2m_access_t a;
> -        uint8_t access;
> -        uint64_t gfn_l;
> -
> -        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
> -             copy_from_guest_offset(&access, access_list, start, 1) )
> -        {
> -            rc = -EFAULT;
> -            break;
> -        }
> -
> -        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
> -        {
> -            rc = -EINVAL;
> -            break;
> -        }
> -
> -        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
> -
> -        if ( rc )
> -            break;
> -
> -        /* Check for continuation if it's not the last iteration. */
> -        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
> -        {
> -            rc = start;
> -            break;
> -        }
> -    }
> -
> -    if ( ap2m )
> -        p2m_unlock(ap2m);
> -    p2m_unlock(p2m);
> -
> -    return rc;
> -}
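
Illustrative only (local names made up): pfn_list and access_list are
parallel arrays, deliberately read one element per iteration so the
loop can preempt between entries. On the caller's side the pairing
looks like:

    uint64_t pfns[2]     = { 0x1000, 0x1001 };
    uint8_t  accesses[2] = { XENMEM_access_r, XENMEM_access_rx };

    /* Entry i of each array describes one gfn: pfns[i] is to get
     * accesses[i]; nr would be 2 and start 0 on first invocation. */
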
> -
> -/*
> - * Get access type for a gfn.
> - * If gfn == INVALID_GFN, gets the default access type.
> - */
> -int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
> -{
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    p2m_type_t t;
> -    p2m_access_t a;
> -    mfn_t mfn;
> -
> -    static const xenmem_access_t memaccess[] = {
> -#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
> -            ACCESS(n),
> -            ACCESS(r),
> -            ACCESS(w),
> -            ACCESS(rw),
> -            ACCESS(x),
> -            ACCESS(rx),
> -            ACCESS(wx),
> -            ACCESS(rwx),
> -            ACCESS(rx2rw),
> -            ACCESS(n2rwx),
> -#undef ACCESS
> -    };
> -
> -    /* If request to get default access. */
> -    if ( gfn_eq(gfn, INVALID_GFN) )
> -    {
> -        *access = memaccess[p2m->default_access];
> -        return 0;
> -    }
> -
> -    gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
> -    gfn_unlock(p2m, gfn, 0);
> -
> -    if ( mfn_eq(mfn, INVALID_MFN) )
> -        return -ESRCH;
> -
> -    if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
> -        return -ERANGE;
> -
> -    *access =  memaccess[a];
> -    return 0;
> -}
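
Usage sketch (caller hypothetical): INVALID_GFN queries the domain
default, -ESRCH means the gfn has no mapping at all, and -ERANGE flags
an internal access value the public interface cannot express:

    xenmem_access_t xa;
    int rc;

    rc = p2m_get_mem_access(d, INVALID_GFN, &xa);      /* domain default */
    if ( !rc )
        rc = p2m_get_mem_access(d, _gfn(0x1000), &xa); /* one gfn */
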
> -
>  static struct p2m_domain *
>  p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
>  {
> diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
> index 1e88d67..8d8bc4a 100644
> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -18,7 +18,8 @@
>   * License along with this program; If not, see <http://www.gnu.org/licenses/>.
>   */
>
> -#include <asm/p2m.h>
> +#include <xen/sched.h>
> +#include <xen/mem_access.h>
>  #include <asm/vm_event.h>
>
>  /* Implicitly serialized by the domctl lock. */
> diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
> index 565a320..19f63bb 100644
> --- a/xen/common/mem_access.c
> +++ b/xen/common/mem_access.c
> @@ -24,8 +24,8 @@
>  #include <xen/guest_access.h>
>  #include <xen/hypercall.h>
>  #include <xen/vm_event.h>
> +#include <xen/mem_access.h>
>  #include <public/memory.h>
> -#include <asm/p2m.h>
>  #include <xsm/xsm.h>
>
>  int mem_access_memop(unsigned long cmd,
> diff --git a/xen/include/asm-arm/mem_access.h b/xen/include/asm-arm/mem_access.h
> new file mode 100644
> index 0000000..3a155f8
> --- /dev/null
> +++ b/xen/include/asm-arm/mem_access.h
> @@ -0,0 +1,53 @@
> +/*
> + * mem_access.h: architecture specific mem_access handling routines
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef _ASM_ARM_MEM_ACCESS_H
> +#define _ASM_ARM_MEM_ACCESS_H
> +
> +static inline
> +bool p2m_mem_access_emulate_check(struct vcpu *v,
> +                                  const vm_event_response_t *rsp)
> +{
> +    /* Not supported on ARM. */
> +    return 0;
> +}
> +
> +/* vm_event and mem_access are supported on any ARM guest */
> +static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
> +{
> +    return 1;
> +}
> +
> +/*
> + * Send mem event based on the access. Boolean return value indicates if trap
> + * needs to be injected into guest.
> + */
> +bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
> +
> +struct page_info*
> +p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
> +                                  const struct vcpu *v);
> +
> +#endif /* _ASM_ARM_MEM_ACCESS_H */
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index fdb6b47..2b22e9a 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -4,6 +4,7 @@
>  #include <xen/mm.h>
>  #include <xen/radix-tree.h>
>  #include <xen/rwlock.h>
> +#include <xen/mem_access.h>
>  #include <public/vm_event.h> /* for vm_event_response_t */
>  #include <public/memory.h>
>  #include <xen/p2m-common.h>
> @@ -139,14 +140,6 @@ typedef enum {
>                               p2m_to_mask(p2m_map_foreign)))
>
>  static inline
> -bool p2m_mem_access_emulate_check(struct vcpu *v,
> -                                  const vm_event_response_t *rsp)
> -{
> -    /* Not supported on ARM. */
> -    return 0;
> -}
> -
> -static inline
>  void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
>  {
>      /* Not supported on ARM. */
> @@ -343,22 +336,26 @@ static inline int get_page_and_type(struct page_info *page,
>  /* get host p2m table */
>  #define p2m_get_hostp2m(d) (&(d)->arch.p2m)
>
> -/* vm_event and mem_access are supported on any ARM guest */
> -static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
> -{
> -    return 1;
> -}
> -
>  static inline bool_t p2m_vm_event_sanity_check(struct domain *d)
>  {
>      return 1;
>  }
>
>  /*
> - * Send mem event based on the access. Boolean return value indicates if trap
> - * needs to be injected into guest.
> + * Return the start of the next mapping based on the order of the
> + * current one.
>   */
> -bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
> +static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
> +{
> +    /*
> +     * The order corresponds to the order of the mapping (or invalid
> +     * range) in the page table. So we need to align the GFN before
> +     * incrementing.
> +     */
> +    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
> +
> +    return gfn_add(gfn, 1UL << order);
> +}
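
Worked example, since the alignment step is the subtle part: with 4K
pages a 2MB block has order 9, i.e. spans 0x200 gfns. For a gfn in the
middle of the block starting at 0x200:

    gfn_t next = gfn_next_boundary(_gfn(0x201), 9);

    /* 0x201 & ~0x1ff == 0x200, then + 0x200 == 0x400, so
     * gfn_x(next) == 0x400: the first gfn of the following block. */
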
>
>  #endif /* _XEN_P2M_H */
>
> diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
> new file mode 100644
> index 0000000..9f7b409
> --- /dev/null
> +++ b/xen/include/asm-x86/mem_access.h
> @@ -0,0 +1,61 @@
> +/******************************************************************************
> + * include/asm-x86/mem_access.h
> + *
> + * Memory access support.
> + *
> + * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla)
> + * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
> + * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
> + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
> + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ASM_X86_MEM_ACCESS_H__
> +#define __ASM_X86_MEM_ACCESS_H__
> +
> +/*
> + * Setup vm_event request based on the access (gla is -1ull if not available).
> + * Handles the rw2rx conversion. Boolean return value indicates if event type
> + * is synchronous (i.e. requires vCPU pause). If the req_ptr has been populated,
> + * then the caller should use monitor_traps to send the event on the MONITOR
> + * ring. Once the get_gfn* locks have been released, the caller must also
> + * xfree the request.
> + */
> +bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> +                            struct npfec npfec,
> +                            vm_event_request_t **req_ptr);
> +
> +/* Check for emulation and mark vcpu for skipping one instruction
> + * upon rescheduling if required. */
> +bool p2m_mem_access_emulate_check(struct vcpu *v,
> +                                  const vm_event_response_t *rsp);
> +
> +/* Sanity check for mem_access hardware support */
> +static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
> +{
> +    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
> +}
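
So on x86 mem_access remains HVM-with-EPT only; PV and shadow-mode
guests fail this check. An illustrative caller pattern (error value
hypothetical):

    if ( !p2m_mem_access_sanity_check(d) )
        return -EOPNOTSUPP;
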
> +
> +#endif /*__ASM_X86_MEM_ACCESS_H__ */
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 7035860..8964e90 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -29,6 +29,7 @@
>  #include <xen/config.h>
>  #include <xen/paging.h>
>  #include <xen/p2m-common.h>
> +#include <xen/mem_access.h>
>  #include <asm/mem_sharing.h>
>  #include <asm/page.h>    /* for pagetable_t */
>
> @@ -663,29 +664,6 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer);
>  /* Resume normal operation (in case a domain was paused) */
>  void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp);
>
> -/*
> - * Setup vm_event request based on the access (gla is -1ull if not available).
> - * Handles the rw2rx conversion. Boolean return value indicates if event type
> - * is synchronous (i.e. requires vCPU pause). If the req_ptr has been populated,
> - * then the caller should use monitor_traps to send the event on the MONITOR
> - * ring. Once the get_gfn* locks have been released, the caller must also
> - * xfree the request.
> - */
> -bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> -                            struct npfec npfec,
> -                            vm_event_request_t **req_ptr);
> -
> -/* Check for emulation and mark vcpu for skipping one instruction
> - * upon rescheduling if required. */
> -bool p2m_mem_access_emulate_check(struct vcpu *v,
> -                                  const vm_event_response_t *rsp);
> -
> -/* Sanity check for mem_access hardware support */
> -static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
> -{
> -    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
> -}
> -
>  /*
>   * Internal functions, only called by other p2m code
>   */
> diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
> index da36e07..5ab34c1 100644
> --- a/xen/include/xen/mem_access.h
> +++ b/xen/include/xen/mem_access.h
> @@ -19,29 +19,78 @@
>   * along with this program; If not, see <http://www.gnu.org/licenses/>.
>   */
>
> -#ifndef _XEN_ASM_MEM_ACCESS_H
> -#define _XEN_ASM_MEM_ACCESS_H
> +#ifndef _XEN_MEM_ACCESS_H
> +#define _XEN_MEM_ACCESS_H
>
> +#include <xen/types.h>
> +#include <xen/mm.h>
>  #include <public/memory.h>
> -#include <asm/p2m.h>
> +#include <public/vm_event.h>
> +#include <asm/mem_access.h>
>
> -#ifdef CONFIG_HAS_MEM_ACCESS
> +/*
> + * Additional access types, which are used to further restrict
> + * the permissions given by the p2m_type_t memory type.  Violations
> + * caused by p2m_access_t restrictions are sent to the vm_event
> + * interface.
> + *
> + * The access permissions are soft state: when any ambiguous change of page
> + * type or use occurs, or when pages are flushed, swapped, or at any other
> + * convenient time, the access permissions can get reset to the p2m_domain
> + * default.
> + */
> +typedef enum {
> +    /* Code uses bottom three bits with bitmask semantics */
> +    p2m_access_n     = 0, /* No access allowed. */
> +    p2m_access_r     = 1 << 0,
> +    p2m_access_w     = 1 << 1,
> +    p2m_access_x     = 1 << 2,
> +    p2m_access_rw    = p2m_access_r | p2m_access_w,
> +    p2m_access_rx    = p2m_access_r | p2m_access_x,
> +    p2m_access_wx    = p2m_access_w | p2m_access_x,
> +    p2m_access_rwx   = p2m_access_r | p2m_access_w | p2m_access_x,
> +
> +    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
> +    p2m_access_n2rwx = 9, /* Special: page goes from N to RWX on access, *
> +                           * generates an event but does not pause the
> +                           * vcpu */
> +
> +    /* NOTE: Assumed to be only 4 bits right now on x86. */
> +} p2m_access_t;
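
One more aside, on the "bitmask semantics" note above: the three low
bits compose, but the two special values, rx2rw (8) and n2rwx (9), sit
outside the scheme and have to be filtered before any mask test. An
illustrative helper (not part of the patch):

    static inline bool access_includes_write(p2m_access_t a)
    {
        /* rx2rw/n2rwx are sentinels, not bit combinations. */
        if ( a == p2m_access_rx2rw || a == p2m_access_n2rwx )
            return false;

        return a & p2m_access_w;
    }
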
> +
> +/*
> + * Set access type for a region of gfns.
> + * If gfn == INVALID_GFN, sets the default access type.
> + */
> +long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> +                        uint32_t start, uint32_t mask, xenmem_access_t access,
> +                        unsigned int altp2m_idx);
>
> +long p2m_set_mem_access_multi(struct domain *d,
> +                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> +                              const XEN_GUEST_HANDLE(const_uint8) access_list,
> +                              uint32_t nr, uint32_t start, uint32_t mask,
> +                              unsigned int altp2m_idx);
> +
> +/*
> + * Get access type for a gfn.
> + * If gfn == INVALID_GFN, gets the default access type.
> + */
> +int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access);
> +
> +#ifdef CONFIG_HAS_MEM_ACCESS
>  int mem_access_memop(unsigned long cmd,
>                       XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
> -
>  #else
> -
>  static inline
>  int mem_access_memop(unsigned long cmd,
>                       XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
>  {
>      return -ENOSYS;
>  }
> +#endif /* CONFIG_HAS_MEM_ACCESS */
>
> -#endif /* HAS_MEM_ACCESS */
> -
> -#endif /* _XEN_ASM_MEM_ACCESS_H */
> +#endif /* _XEN_MEM_ACCESS_H */
>
>  /*
>   * Local variables:
> diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
> index 3be1e91..8cd5a6b 100644
> --- a/xen/include/xen/p2m-common.h
> +++ b/xen/include/xen/p2m-common.h
> @@ -1,38 +1,6 @@
>  #ifndef _XEN_P2M_COMMON_H
>  #define _XEN_P2M_COMMON_H
>
> -#include <public/vm_event.h>
> -
> -/*
> - * Additional access types, which are used to further restrict
> - * the permissions given by the p2m_type_t memory type.  Violations
> - * caused by p2m_access_t restrictions are sent to the vm_event
> - * interface.
> - *
> - * The access permissions are soft state: when any ambiguous change of page
> - * type or use occurs, or when pages are flushed, swapped, or at any other
> - * convenient time, the access permissions can get reset to the p2m_domain
> - * default.
> - */
> -typedef enum {
> -    /* Code uses bottom three bits with bitmask semantics */
> -    p2m_access_n     = 0, /* No access allowed. */
> -    p2m_access_r     = 1 << 0,
> -    p2m_access_w     = 1 << 1,
> -    p2m_access_x     = 1 << 2,
> -    p2m_access_rw    = p2m_access_r | p2m_access_w,
> -    p2m_access_rx    = p2m_access_r | p2m_access_x,
> -    p2m_access_wx    = p2m_access_w | p2m_access_x,
> -    p2m_access_rwx   = p2m_access_r | p2m_access_w | p2m_access_x,
> -
> -    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
> -    p2m_access_n2rwx = 9, /* Special: page goes from N to RWX on access, *
> -                           * generates an event but does not pause the
> -                           * vcpu */
> -
> -    /* NOTE: Assumed to be only 4 bits right now on x86. */
> -} p2m_access_t;
> -
>  /* Map MMIO regions in the p2m: start_gfn and nr describe the range in
>   * the guest physical address space to map, starting from the machine
>   * frame number mfn. */
> @@ -45,24 +13,4 @@ int unmap_mmio_regions(struct domain *d,
>                         unsigned long nr,
>                         mfn_t mfn);
>
> -/*
> - * Set access type for a region of gfns.
> - * If gfn == INVALID_GFN, sets the default access type.
> - */
> -long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> -                        uint32_t start, uint32_t mask, xenmem_access_t access,
> -                        unsigned int altp2m_idx);
> -
> -long p2m_set_mem_access_multi(struct domain *d,
> -                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> -                              const XEN_GUEST_HANDLE(const_uint8) access_list,
> -                              uint32_t nr, uint32_t start, uint32_t mask,
> -                              unsigned int altp2m_idx);
> -
> -/*
> - * Get access type for a gfn.
> - * If gfn == INVALID_GFN, gets the default access type.
> - */
> -int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access);
> -
>  #endif /* _XEN_P2M_COMMON_H */
> --
> 2.10.2
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel