[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [RFC PATCH 14/16]: PVH xen: add xenmem_add_foreign_to_pmap()



At 18:11 -0800 on 11 Jan (1357927863), Mukesh Rathor wrote:
> In this patch, I add a new function, xenmem_add_foreign_to_pmap(), to
> map pages from foreign guest into current dom0 for domU creation.
> Also, some minor grant changes for PVH.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> 
> diff -r 2c894340b16f -r 47d2e652bd4d xen/arch/x86/mm.c
> --- a/xen/arch/x86/mm.c       Fri Jan 11 16:43:02 2013 -0800
> +++ b/xen/arch/x86/mm.c       Fri Jan 11 16:45:36 2013 -0800
> @@ -2608,7 +2608,7 @@ static struct domain *get_pg_owner(domid
>          goto out;
>      }
>  
> -    if ( unlikely(paging_mode_translate(curr)) )
> +    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
>      {
>          MEM_LOG("Cannot mix foreign mappings with translated domains");
>          goto out;
> @@ -3734,16 +3734,34 @@ static int replace_grant_p2m_mapping(
>      old_mfn = get_gfn(d, gfn, &type);
>      if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
>      {
> -        put_gfn(d, gfn);
>          gdprintk(XENLOG_WARNING,
> -                 "replace_grant_p2m_mapping: old mapping invalid (type %d, 
> mfn %lx, frame %lx)\n",
> +                 "replace_grant_p2m_mapping: old mapping invalid "
> +                 "(type %d, mfn %lx, frame %lx)\n",
>                   type, mfn_x(old_mfn), frame);
> -        return GNTST_general_error;
> +        goto out_err;
>      }
>      guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
>  
> +    /* PVH: Because we free the existing mfn in XENMEM_add_to_physmap during
> +     * grant map, we undo that here so the guest P2M (EPT/NPT) is consistent 
> */
> +    if ( is_pvh_domain(d) ) {
> +        struct page_info *page = alloc_domheap_page(d, 0);

I think this belongs in the previous patch, with the other gnttab stuff.

> +
> +     if ( page == NULL ) {
> +         gdprintk(XENLOG_ERR, "Unable to alloc domheap page\n");
> +            goto out_err;
> +     }

Your indentation is messed up here. 

> +     if ( guest_physmap_add_page(d, gfn, page_to_mfn(page), 0) != 0 ) {
> +         gdprintk(XENLOG_ERR, "Unable to add mfn to replace grant\n");
> +            goto out_err;
> +        }
> +    }
>      put_gfn(d, gfn);
>      return GNTST_okay;
> +
> +out_err:
> +    put_gfn(d, gfn);
> +    return GNTST_general_error;
>  }
>  
>  int replace_grant_host_mapping(
> @@ -4143,7 +4161,7 @@ long do_update_descriptor(u64 pa, u64 de
>      page = get_page_from_gfn(dom, gmfn, NULL, P2M_ALLOC);
>      if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
>           !page ||
> -         !check_descriptor(dom, &d) )
> +         (!is_pvh_domain(dom) && !check_descriptor(dom, &d)) )
>      {
>          if ( page )
>              put_page(page);
> @@ -4217,7 +4235,81 @@ static int handle_iomem_range(unsigned l
>      return 0;
>  }
>  
> -static int xenmem_add_to_physmap_once(
> +/* add frames from foreign domain to current domain's physmap. Similar to 
> + * XENMAPSPACE_gmfn but the frame is foreign being mapped into current,
> + * and is not removed from foreign domain. 
> + * Usage: libxl on pvh dom0 creating a guest and doing privcmd_ioctl_mmap
> + * Returns: 0 ==> success
> + */
> +static long noinline xenmem_add_foreign_to_pmap( domid_t foreign_domid, 
> +                                        unsigned long fgmfn, unsigned long 
> gpfn)
> +{
> +    unsigned long rc=0, prev_mfn, mfn = 0;
> +    struct domain *fdom, *currd = current->domain;
> +    p2m_type_t p2mt, p2mt_prev;
> +
> +    if ( (fdom = get_pg_owner(foreign_domid)) == NULL ) {
> +        return -EPERM;
> +    }
> +
> +    mfn = mfn_x(get_gfn_query_unlocked(fdom, fgmfn, &p2mt));

I don't think you can use _unlocked lookups here - you need to hold on
to the foreign pfn at least until you can get_page() the underlying mfn
to be sure it's not freed underfoot. 

> +    /* qemu, running on PVH dom0, mapping hvm domain's io pages during 
> domain 
> +     * creation, doesn't have mfns in the HAP table */
> +    if ( !mfn_valid(mfn) && p2m_is_mmio(p2mt) ) {

This test should be for == p2m_mmio_direct; we don't want to try mapping
p2m_mmio_dm areas.

> +        if (!is_hvm_domain(fdom)) {
> +            printk("mmio type for non-hvm domain. fd:%d fgmfn:%lx 
> gpfn:%lx\n",
> +                   foreign_domid, fgmfn, gpfn);
> +            return -EINVAL;
> +        }
> +        mfn = fgmfn;     /* map 1 to 1 */

Surely not -- you want to map the _actual_ MMIO range, right, not just
whatever GFN-address the foreign domain mapped it at?

> diff -r 2c894340b16f -r 47d2e652bd4d xen/common/grant_table.c
> --- a/xen/common/grant_table.c        Fri Jan 11 16:43:02 2013 -0800
> +++ b/xen/common/grant_table.c        Fri Jan 11 16:45:36 2013 -0800
> @@ -529,7 +529,7 @@ static void mapcount(
>   * addr is _either_ a host virtual address, or the address of the pte to
>   * update, as indicated by the GNTMAP_contains_pte flag.
>   */
> -static void
> +static noinline void
>  __gnttab_map_grant_ref(
>      struct gnttab_map_grant_ref *op)
>  {
> @@ -745,7 +745,7 @@ __gnttab_map_grant_ref(
>  
>      double_gt_lock(lgt, rgt);
>  
> -    if ( !is_hvm_domain(ld) && need_iommu(ld) )
> +    if ( !is_hvm_or_pvh_domain(ld) && need_iommu(ld) )

These seem like they're part of the previous patch too. 

Tim.

>      {
>          unsigned int wrc, rdc;
>          int err = 0;
> @@ -956,7 +956,7 @@ __gnttab_unmap_common(
>              act->pin -= GNTPIN_hstw_inc;
>      }
>  
> -    if ( !is_hvm_domain(ld) && need_iommu(ld) )
> +    if ( !is_hvm_or_pvh_domain(ld) && need_iommu(ld) )
>      {
>          unsigned int wrc, rdc;
>          int err = 0;
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.