
Re: [Xen-devel] [PATCH 1/5] arm: shared_info page allocation and mapping



On Thu, 2012-02-23 at 17:13 +0000, Stefano Stabellini wrote:
> Allocate the shared_info page at domain creation.
> 
> Implement arch_memory_op, only for XENMEM_add_to_physmap with space ==
> XENMAPSPACE_shared_info, so that the guest can map the shared_info page.
> 
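Just to check I follow the guest side of this: with the patch in place a guest would map the shared info page with a single XENMEM_add_to_physmap call, something like the sketch below (Linux-style hypercall wrapper and header names assumed, untested):

#include <xen/interface/xen.h>      /* DOMID_SELF */
#include <xen/interface/memory.h>   /* XENMEM_add_to_physmap, XENMAPSPACE_shared_info */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() */

/* Ask Xen to place the shared_info frame at guest pfn 'gpfn'. */
static int map_shared_info(unsigned long gpfn)
{
    struct xen_add_to_physmap xatp = {
        .domid = DOMID_SELF,
        .space = XENMAPSPACE_shared_info,
        .idx   = 0,          /* only idx 0 is handled by this patch */
        .gpfn  = gpfn,
    };

    return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
}
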
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
>  xen/arch/arm/domain.c     |    8 ++++
>  xen/arch/arm/mm.c         |   98 +++++++++++++++++++++++++++++++++++++++++++--
>  xen/arch/arm/p2m.c        |   15 ++++++-
>  xen/include/asm-arm/mm.h  |    4 ++
>  xen/include/asm-arm/p2m.h |    2 +
>  5 files changed, 122 insertions(+), 5 deletions(-)
> 
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 0b55934..1e5cca5 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -235,6 +235,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
>      if ( (rc = p2m_init(d)) != 0 )
>          goto fail;
>  
> +    rc = -ENOMEM;
> +     if ( (d->shared_info = alloc_xenheap_pages(0, MEMF_bits(32))) == NULL )
> +             goto fail;
> +
> +     clear_page(d->shared_info);
> +     share_xen_page_with_guest(
> +                     virt_to_page(d->shared_info), d, XENSHARE_writable);

You seem to have some hard tabs here.
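FWIW, with Xen's usual four-space indent that hunk would read as below (indentation only, nothing else changed):

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, MEMF_bits(32))) == NULL )
        goto fail;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);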

> +
>      d->max_vcpus = 8;
>  
>      if ( (rc = domain_vgic_init(d)) != 0 )
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index a0f39eb..5f4fd6a 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -25,8 +25,11 @@
>  #include <xen/mm.h>
>  #include <xen/preempt.h>
>  #include <xen/errno.h>
> +#include <xen/guest_access.h>
>  #include <asm/page.h>
>  #include <asm/current.h>
> +#include <public/memory.h>
> +#include <xen/sched.h>
>  
>  struct domain *dom_xen, *dom_io;
>  
> @@ -323,17 +326,104 @@ void arch_dump_shared_mem_info(void)
>  {
>  }
>  
> -long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
> +int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
>  {
> +    ASSERT(0);
>      return -ENOSYS;
>  }
>  
> -int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
> +void share_xen_page_with_guest(struct page_info *page,
> +                          struct domain *d, int readonly)
>  {
> -    ASSERT(0);
> -    return -ENOSYS;
> +    if ( page_get_owner(page) == d )
> +        return;
> +
> +    spin_lock(&d->page_alloc_lock);
> +
> +    /* The incremented type count pins as writable or read-only. */
> +    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
> +    page->u.inuse.type_info |= PGT_validated | 1;
> +
> +    page_set_owner(page, d);
> +    wmb(); /* install valid domain ptr before updating refcnt. */
> +    ASSERT((page->count_info & ~PGC_xen_heap) == 0);
> +
> +    /* Only add to the allocation list if the domain isn't dying. */
> +    if ( !d->is_dying )
> +    {
> +        page->count_info |= PGC_allocated | 1;
> +        if ( unlikely(d->xenheap_pages++ == 0) )
> +            get_knownalive_domain(d);
> +        page_list_add_tail(page, &d->xenpage_list);
> +    }
> +
> +    spin_unlock(&d->page_alloc_lock);
> +}
> +
> +static int xenmem_add_to_physmap_once(
> +    struct domain *d,
> +    const struct xen_add_to_physmap *xatp)
> +{
> +    unsigned long mfn = 0;
> +    int rc;
> +
> +    switch ( xatp->space )
> +    {
> +        case XENMAPSPACE_shared_info:
> +            if ( xatp->idx == 0 )
> +                mfn = virt_to_mfn(d->shared_info);
> +            break;
> +        default:
> +                     return -ENOSYS;

Another whitespace snafu (hard tabs again).

> +    }
> +
> +    domain_lock(d);
> +
> +    /* Map at new location. */
> +    rc = guest_physmap_add_page(d, xatp->gpfn, mfn);
> +
> +    domain_unlock(d);
> +
> +    return rc;
> +}
> +
> +static int xenmem_add_to_physmap(struct domain *d,
> +                                 struct xen_add_to_physmap *xatp)
> +{
> +    return xenmem_add_to_physmap_once(d, xatp);
>  }
>  
> +long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
> +{
> +    int rc;
> +
> +    switch ( op )
> +    {
> +    case XENMEM_add_to_physmap:
> +    {
> +        struct xen_add_to_physmap xatp;
> +        struct domain *d;
> +
> +        if ( copy_from_guest(&xatp, arg, 1) )
> +            return -EFAULT;
> +
> +        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
> +        if ( rc != 0 )
> +            return rc;
> +
> +        rc = xenmem_add_to_physmap(d, &xatp);
> +
> +        rcu_unlock_domain(d);
> +
> +        return rc;
> +    }
> +
> +    default:
> +        return -ENOSYS;
> +    }
> +
> +    return 0;
> +}
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 14614fd..6ee1b5f 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -118,7 +118,12 @@ static int create_p2m_entries(struct domain *d,
>          }
>          /* else: third already valid */
>  
> -        BUG_ON(third[third_table_offset(addr)].p2m.valid);
> +        if ( third[third_table_offset(addr)].p2m.valid )
> +             {
> +                     /* p2m entry already present */
> +                     free_domheap_page(
> +                                     mfn_to_page(third[third_table_offset(addr)].p2m.base));
> +             }

Guess what ;) More hard tabs.

>  
>          /* Allocate a new RAM page and attach */
>          if (alloc)
> @@ -172,6 +177,14 @@ int map_mmio_regions(struct domain *d,
>      return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr);
>  }
>  
> +int guest_physmap_add_page(struct domain *d, unsigned long gpfn,
> +             unsigned long mfn)
> +{
> +    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
> +                                       (gpfn + 1) << PAGE_SHIFT,
> +                                                       mfn << PAGE_SHIFT);

Not sure if this is a hardspace issue or just a strange way of laying it
out?
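Either way, something along these lines would be easier on the eye (indentation only, no functional change intended):

    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
                              (gpfn + 1) << PAGE_SHIFT,
                              mfn << PAGE_SHIFT);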

> +}
> +
>  int p2m_alloc_table(struct domain *d)
>  {
>      struct p2m_domain *p2m = &d->arch.p2m;
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index bfc0f76..56ab9415 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -78,6 +78,10 @@ struct page_info
>  #define _PGT_pinned       PG_shift(5)
>  #define PGT_pinned        PG_mask(1, 5)
>  
> + /* Has this page been validated for use as its current type? */
> +#define _PGT_validated    PG_shift(6)
> +#define PGT_validated     PG_mask(1, 6)
> +
>   /* Count of uses of this frame as its current type. */
>  #define PGT_count_width   PG_shift(9)
>  #define PGT_count_mask    ((1UL<<PGT_count_width)-1)
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index aec52f7..b1d42a8 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -39,6 +39,8 @@ int p2m_populate_ram(struct domain *d, paddr_t start, paddr_t end);
>   * address maddr. */
>  int map_mmio_regions(struct domain *d, paddr_t start_gaddr,
>                       paddr_t end_gaddr, paddr_t maddr);
> +int guest_physmap_add_page(struct domain *d, unsigned long gpfn,
> +             unsigned long mfn);
>  
>  unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn);
>  


