
[Xen-devel] [RFC 14/16] xen/arm: p2m: Extend p2m_get_entry to return the value of bit[0] (valid bit)



With the recent changes, a P2M entry may be populated but not valid. In
some situations, it would be useful to know whether the entry has been
made available to the guest in order to perform a specific action. So
extend p2m_get_entry to return the value of bit[0] (the valid bit).
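
As an illustration (not part of this patch), a caller could use the new
output to tell a populated-but-invalid entry apart from one that has
been made available to the guest. The helper below is only a sketch and
its name is made up for the example:

    /* Hypothetical helper, for illustration only. */
    static bool p2m_gfn_is_valid(struct p2m_domain *p2m, gfn_t gfn)
    {
        p2m_type_t t;
        bool valid;
        mfn_t mfn;

        p2m_read_lock(p2m);
        mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL, &valid);
        p2m_read_unlock(p2m);

        /* The entry may be populated (an MFN was found) yet not valid. */
        return !mfn_eq(mfn, INVALID_MFN) && valid;
    }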

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/arm/mem_access.c |  6 +++---
 xen/arch/arm/p2m.c        | 20 ++++++++++++++++----
 xen/include/asm-arm/p2m.h |  3 ++-
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index 9239bdf323..f434510b2a 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -70,7 +70,7 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
          * No setting was found in the Radix tree. Check if the
          * entry exists in the page-tables.
          */
-        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL);
+        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL, NULL);
 
         if ( mfn_eq(mfn, INVALID_MFN) )
             return -ESRCH;
@@ -199,7 +199,7 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
      * We had a mem_access permission limiting the access, but the page type
      * could also be limiting, so we need to check that as well.
      */
-    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL);
+    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL, NULL);
     if ( mfn_eq(mfn, INVALID_MFN) )
         goto err;
 
@@ -405,7 +405,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
           gfn = gfn_next_boundary(gfn, order) )
     {
         p2m_type_t t;
-        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);
+        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order, NULL);
 
 
         if ( !mfn_eq(mfn, INVALID_MFN) )
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 12b459924b..df6b48d73b 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -306,10 +306,14 @@ static int p2m_next_level(struct p2m_domain *p2m, bool read_only,
  *
  * If the entry is not present, INVALID_MFN will be returned and the
  * page_order will be set according to the order of the invalid range.
+ *
+ * valid will contain the value of bit[0] (i.e. the valid bit) of the
+ * entry.
  */
 mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
                     p2m_type_t *t, p2m_access_t *a,
-                    unsigned int *page_order)
+                    unsigned int *page_order,
+                    bool *valid)
 {
     paddr_t addr = gfn_to_gaddr(gfn);
     unsigned int level = 0;
@@ -317,6 +321,7 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
     int rc;
     mfn_t mfn = INVALID_MFN;
     p2m_type_t _t;
+    bool _valid;
 
     /* Convenience aliases */
     const unsigned int offsets[4] = {
@@ -334,6 +339,10 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
 
     *t = p2m_invalid;
 
+    /* Allow valid to be NULL */
+    valid = valid ?: &_valid;
+    *valid = false;
+
     /* XXX: Check if the mapping is lower than the mapped gfn */
 
     /* This gfn is higher than the highest the p2m map currently holds */
@@ -379,6 +388,9 @@ mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
          * to the GFN.
          */
         mfn = mfn_add(mfn, gfn_x(gfn) & ((1UL << level_orders[level]) - 1));
+
+        if ( valid )
+            *valid = lpae_is_valid(entry);
     }
 
 out_unmap:
@@ -397,7 +409,7 @@ mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     p2m_read_lock(p2m);
-    mfn = p2m_get_entry(p2m, gfn, t, NULL, NULL);
+    mfn = p2m_get_entry(p2m, gfn, t, NULL, NULL, NULL);
     p2m_read_unlock(p2m);
 
     return mfn;
@@ -1464,7 +1476,7 @@ int relinquish_p2m_mapping(struct domain *d)
     for ( ; gfn_x(start) < gfn_x(end);
           start = gfn_next_boundary(start, order) )
     {
-        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
+        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
 
         count++;
         /*
@@ -1527,7 +1539,7 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 
     for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
     {
-        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
+        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
 
         next_gfn = gfn_next_boundary(start, order);
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index d7afa2bbe8..92213dd1ab 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -211,7 +211,8 @@ mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);
  */
 mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
                     p2m_type_t *t, p2m_access_t *a,
-                    unsigned int *page_order);
+                    unsigned int *page_order,
+                    bool *valid);
 
 /*
  * Direct set a p2m entry: only for use by the P2M code.
-- 
2.11.0

