
[Xen-devel] [PATCH 14/18 V2 RESEND]: PVH xen: add xenmem_add_foreign_to_pmap()



In this patch, a new function, xenmem_add_foreign_to_pmap(), is added
to map pages from a foreign guest into the current domain (dom0) for
domU creation. Also, allow XENMEM_remove_from_physmap to remove
p2m_map_foreign pages. Note that on this path we must release the
refcount that was taken during the map phase.
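
To illustrate the intended use from dom0 userspace: the map is triggered by
the toolstack mapping a foreign guest's page through privcmd, and the later
munmap is what eventually reaches XENMEM_remove_from_physmap and releases the
refcount taken at map time. A minimal sketch using the existing libxenctrl
API (illustration only, not part of this patch; map_one_foreign_page is a
hypothetical helper, error handling trimmed, domid/gfn are placeholders):

    /* Map one page of a foreign guest on a PVH dom0, touch it, unmap it. */
    #include <string.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    int map_one_foreign_page(uint32_t domid, xen_pfn_t gfn)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        void *va;

        if ( !xch )
            return -1;

        /* Goes through privcmd_ioctl_mmap; with this series a PVH dom0
         * ends up in xenmem_add_foreign_to_pmap(), which takes a refcount
         * on the foreign mfn. */
        va = xc_map_foreign_pages(xch, domid, PROT_READ | PROT_WRITE,
                                  &gfn, 1);
        if ( !va )
        {
            xc_interface_close(xch);
            return -1;
        }

        memset(va, 0, XC_PAGE_SIZE);   /* the guest page is writable here */

        /* Unmap: on PVH dom0 this results in XENMEM_remove_from_physmap,
         * which drops the map-time refcount. */
        munmap(va, XC_PAGE_SIZE);

        xc_interface_close(xch);
        return 0;
    }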

Changes in V2:
  - Move the XENMEM_remove_from_physmap changes here instead of the previous patch.
  - Move the grant changes from this patch to one of the next patches.
  - In xenmem_add_foreign_to_pmap(), do a locked get_gfn.
  - Fail the mapping when qemu tries to map pages for memory that is not there.

Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
 xen/arch/x86/mm.c   |   74 ++++++++++++++++++++++++++++++++++++++++++++++++--
 xen/common/memory.c |   44 +++++++++++++++++++++++++++---
 2 files changed, 110 insertions(+), 8 deletions(-)
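
Note for reviewers: the unmap request that dom0's privcmd layer sends for each
gfn is a plain XENMEM_remove_from_physmap. A rough kernel-side sketch of that
call (assuming Linux's HYPERVISOR_memory_op wrapper; unmap_one_gfn is a
hypothetical helper and the exact plumbing in a PVH dom0 kernel may differ):

    /* Hand one gfn of the current domain's physmap back to Xen; with this
     * patch, a p2m_map_foreign entry is torn down and the refcount taken in
     * xenmem_add_foreign_to_pmap() is dropped. */
    #include <xen/interface/xen.h>
    #include <xen/interface/memory.h>
    #include <asm/xen/hypercall.h>

    static int unmap_one_gfn(xen_pfn_t gpfn)
    {
        struct xen_remove_from_physmap xrfp = {
            .domid = DOMID_SELF,
            .gpfn  = gpfn,
        };

        return HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrfp);
    }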

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6603752..dbac811 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2656,7 +2656,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
@@ -4192,7 +4192,7 @@ long do_update_descriptor(u64 pa, u64 desc)
     page = get_page_from_gfn(dom, gmfn, NULL, P2M_ALLOC);
     if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
          !page ||
-         !check_descriptor(dom, &d) )
+         (!is_pvh_domain(dom) && !check_descriptor(dom, &d)) )
     {
         if ( page )
             put_page(page);
@@ -4266,6 +4266,66 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
+/*
+ * Add frames from a foreign domain to the current domain's physmap. Similar
+ * to XENMAPSPACE_gmfn, but the frame is foreign, is mapped into the current
+ * domain, and is not removed from the foreign domain.
+ * Usage: libxl on PVH dom0 creating a guest and doing privcmd_ioctl_mmap.
+ * Side effect: the mfn for fgfn is refcounted so it is not lost while
+ *              mapped here. The refcount is released in do_memory_op()
+ *              via XENMEM_remove_from_physmap.
+ * Returns: 0 ==> success
+ */
+static int xenmem_add_foreign_to_pmap(domid_t foreign_domid, 
+                                      unsigned long fgfn, unsigned long gpfn)
+{
+    p2m_type_t p2mt, p2mt_prev;
+    int rc = -EINVAL;
+    unsigned long prev_mfn, mfn = 0;
+    struct domain *fdom, *currd = current->domain;
+
+    if ( (fdom = get_pg_owner(foreign_domid)) == NULL )
+        return -EPERM;
+
+    mfn = mfn_x(get_gfn_query(fdom, fgfn, &p2mt));
+    if ( !mfn_valid(mfn) || !p2m_is_valid(p2mt) )
+        goto out_rc;
+
+    if ( !get_page_from_pagenr(mfn, fdom) )
+        goto out_rc;
+
+    /* Remove previously mapped page if it is present. */
+    prev_mfn = mfn_x(get_gfn(currd, gpfn, &p2mt_prev));
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot */
+            guest_physmap_remove_page(currd, gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(currd, gpfn);
+    }
+    put_gfn(currd, gpfn);
+
+    /* Create the new mapping. Can't use guest_physmap_add_page() because it
+     * would update the m2p table, which would then map mfn -> gpfn of dom0
+     * and not fgfn of domU.
+     */
+    if ( set_foreign_p2m_entry(currd, gpfn, _mfn(mfn)) == 0 )
+    {
+        printk("set_foreign_p2m_entry failed. gpfn:%lx mfn:%lx fgfn:%lx\n",
+               gpfn, mfn, fgfn);
+        put_page(mfn_to_page(mfn));
+        goto out_rc;
+    }
+    rc = 0;
+
+out_rc:
+    put_gfn(fdom, fgfn);
+    put_pg_owner(fdom);
+    return rc;
+}
+
 static int xenmem_add_to_physmap_once(
     struct domain *d,
     const struct xen_add_to_physmap *xatp,
@@ -4328,6 +4388,14 @@ static int xenmem_add_to_physmap_once(
             page = mfn_to_page(mfn);
             break;
         }
+
+        case XENMAPSPACE_gmfn_foreign:
+        {
+            rc = xenmem_add_foreign_to_pmap(foreign_domid, xatp->idx, 
+                                            xatp->gpfn);
+            return rc;
+        }
+
         default:
             break;
     }
@@ -4425,7 +4493,7 @@ static int xenmem_add_to_physmap(struct domain *d,
     return xenmem_add_to_physmap_once(d, xatp, -1);
 }
 
-static noinline int xenmem_add_to_physmap_range(struct domain *d,
+static int xenmem_add_to_physmap_range(struct domain *d,
                                        struct xen_add_to_physmap_range *xatpr)
 {
     int rc;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 68501d1..91a56b6 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -675,9 +675,12 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case XENMEM_remove_from_physmap:
     {
+        unsigned long argmfn, foreign_mfn = INVALID_MFN;
         struct xen_remove_from_physmap xrfp;
         struct page_info *page;
-        struct domain *d;
+        struct domain *d, *foreign_dom = NULL;
+        p2m_type_t p2mt, tp;
+        int valid_pvh_pg, is_curr_pvh = is_pvh_vcpu(current);
 
         if ( copy_from_guest(&xrfp, arg, 1) )
             return -EFAULT;
@@ -695,14 +698,45 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         domain_lock(d);
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
-        if ( page )
+        /* PVH note: if PVH, the gfn could be mapped to an mfn from a foreign
+         * domain by the user space tool during domain creation. We need to
+         * check for that, free it up from the p2m, and release the refcount
+         * on it. In such a case, page would be NULL. */
+
+        page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
+        valid_pvh_pg = is_curr_pvh && 
+                       (p2m_is_mmio(p2mt) || p2m_is_foreign(p2mt));
+
+        if ( page || valid_pvh_pg )
         {
-            guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
-            put_page(page);
+            argmfn = page ? page_to_mfn(page) : INVALID_MFN;
+
+            if ( is_curr_pvh && p2m_is_foreign(p2mt) )
+            {
+                foreign_mfn = mfn_x(get_gfn_query_unlocked(d, xrfp.gpfn, &tp));
+                foreign_dom = page_get_owner(mfn_to_page(foreign_mfn));
+                ASSERT(p2m_is_mmio(tp) || p2m_is_foreign(tp));
+            }
+
+            guest_physmap_remove_page(d, xrfp.gpfn, argmfn, 0);
+            if ( page )
+                put_page(page);
+
+            /* If pages were mapped from a foreign domain via
+             * xenmem_add_foreign_to_pmap(), we must drop the refcount here. */
+            if ( is_curr_pvh && p2m_is_foreign(p2mt) )
+            {
+                ASSERT( d != foreign_dom );
+                put_page(mfn_to_page(foreign_mfn));
+            }
         }
         else
+        {
+            if ( is_curr_pvh )
+                gdprintk(XENLOG_WARNING, "%s: Domain:%u gmfn:%lx invalid\n",
+                         __func__, current->domain->domain_id, xrfp.gpfn);
             rc = -ENOENT;
+        }
 
         domain_unlock(d);
 
-- 
1.7.2.3


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

