[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v5 2/4] xen/arm: introduce a generic p2m walker and use it in p2m_lookup

Introduce p2m_walker, a generic walker for stage-2 (p2m) page tables that
calls a caller-supplied function on each leaf entry covering a guest
physical address range, and reimplement p2m_lookup in terms of it.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>


Changes in v5:
- align tests;
- comment p2m_walker;
- fix return codes in p2m_walker;
- handle superpages in p2m_walker;
- rename _p2m_lookup to p2m_lookup_f.
---
 xen/arch/arm/p2m.c |  131 +++++++++++++++++++++++++++++++++++++++++----------
 1 files changed, 105 insertions(+), 26 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 307c6d4..a9ceacf 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -31,48 +31,127 @@ void p2m_load_VTTBR(struct domain *d)
 }
 
 /*
- * Lookup the MFN corresponding to a domain's PFN.
+ * d: domain p2m to walk
+ * paddr: the guest start physical address
+ * order: page order
+ * func: function to call for each 2-stage lpae_t entry found
+ * arg: opaque pointer to pass to func
  *
- * There are no processor functions to do a stage 2 only lookup therefore we
- * do a a software walk.
  */
-paddr_t p2m_lookup(struct domain *d, paddr_t paddr)
+static int p2m_walker(struct domain *d, paddr_t paddr, unsigned int order,
+                      int (*func)(lpae_t *pte, void *arg, int level), void* 
arg)
 {
+    lpae_t *first = NULL, *second = NULL, *third = NULL;
     struct p2m_domain *p2m = &d->arch.p2m;
-    lpae_t pte, *first = NULL, *second = NULL, *third = NULL;
-    paddr_t maddr = INVALID_PADDR;
+    int rc = -EFAULT, level = 1;
+    unsigned long cur_first_offset = ~0, cur_second_offset = ~0;
+    paddr_t pend = paddr + (1UL << order);
 
     spin_lock(&p2m->lock);
 
     first = __map_domain_page(p2m->first_level);
 
-    pte = first[first_table_offset(paddr)];
-    if ( !pte.p2m.valid || !pte.p2m.table )
-        goto done;
+    if ( !first ||
+         !first[first_table_offset(paddr)].p2m.valid )
+        goto err;
 
-    second = map_domain_page(pte.p2m.base);
-    pte = second[second_table_offset(paddr)];
-    if ( !pte.p2m.valid || !pte.p2m.table )
-        goto done;
+    if ( !first[first_table_offset(paddr)].p2m.table )
+    {
+        rc = func(&first[first_table_offset(paddr)], arg, level);
+        if ( rc != 0 )
+            goto err;
+        paddr += FIRST_SIZE;
+    }
 
-    third = map_domain_page(pte.p2m.base);
-    pte = third[third_table_offset(paddr)];
+    while ( paddr < pend )
+    {
+        rc = -EFAULT;
+        level = 1;
 
-    /* This bit must be one in the level 3 entry */
-    if ( !pte.p2m.table )
-        pte.bits = 0;
+        if ( cur_first_offset != first_table_offset(paddr) )
+        {
+            if (second) unmap_domain_page(second);
+            second = 
map_domain_page(first[first_table_offset(paddr)].p2m.base);
+            cur_first_offset = first_table_offset(paddr);
+        }
+        level++;
+        if ( !second ||
+             !second[second_table_offset(paddr)].p2m.valid )
+            goto err;
+        if ( !second[second_table_offset(paddr)].p2m.table )
+        {
+            rc = func(&first[first_table_offset(paddr)], arg, level);
+            if ( rc != 0 )
+                goto err;
+            paddr += SECOND_SIZE;
+        }
 
-done:
-    if ( pte.p2m.valid )
-        maddr = (pte.bits & PADDR_MASK & PAGE_MASK) | (paddr & ~PAGE_MASK);
+        if ( cur_second_offset != second_table_offset(paddr) )
+        {
+            if (third) unmap_domain_page(third);
+            third = 
map_domain_page(second[second_table_offset(paddr)].p2m.base);
+            cur_second_offset = second_table_offset(paddr);
+        }
+        level++;
+        if ( !third ||
+             !third[third_table_offset(paddr)].p2m.valid )
+            goto err;
 
-    if (third) unmap_domain_page(third);
-    if (second) unmap_domain_page(second);
-    if (first) unmap_domain_page(first);
+        rc = func(&third[third_table_offset(paddr)], arg, level);
+        if ( rc != 0 )
+            goto err;
+
+        paddr += PAGE_SIZE;
+    }
+
+    rc = 0;
+
+err:
+    if ( third ) unmap_domain_page(third);
+    if ( second ) unmap_domain_page(second);
+    if ( first ) unmap_domain_page(first);
 
     spin_unlock(&p2m->lock);
 
-    return maddr;
+    return rc;
+}
+
+struct p2m_lookup_t {
+    paddr_t paddr;
+    paddr_t maddr;
+};
+
+static int p2m_lookup_f(lpae_t *ptep, void *arg, int level)
+{
+    lpae_t pte;
+    struct p2m_lookup_t *p2m = (struct p2m_lookup_t *)arg;
+    ASSERT(level == 3);
+
+    pte = *ptep;
+
+    /* This bit must be one in the level 3 entry */
+    if ( !pte.p2m.table || !pte.p2m.valid )
+        return -EFAULT;
+
+    p2m->maddr = (pte.bits & PADDR_MASK & PAGE_MASK) |
+        (p2m->paddr & ~PAGE_MASK);
+    return 0;
+}
+/*
+ * Lookup the MFN corresponding to a domain's PFN.
+ *
+ * There are no processor functions to do a stage 2 only lookup therefore we
+ * do a a software walk.
+ */
+paddr_t p2m_lookup(struct domain *d, paddr_t paddr)
+{
+    struct p2m_lookup_t p2m;
+    p2m.paddr = paddr;
+    p2m.maddr = INVALID_PADDR;
+
+    p2m_walker(d, paddr, 0, p2m_lookup_f, &p2m);
+
+    return p2m.maddr;
 }
 
 int guest_physmap_mark_populate_on_demand(struct domain *d,
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.