
[Xen-devel] [PATCH v5 05/18] x86/mem_sharing: drop flags from mem_sharing_unshare_page

All callers pass 0 in for the flags argument, so drop it from the mem_sharing_unshare_page() wrapper and hardcode 0 in its call to __mem_sharing_unshare_page().

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
Reviewed-by: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            | 2 +-
 xen/arch/x86/mm/p2m.c             | 5 ++---
 xen/common/memory.c               | 2 +-
 xen/include/asm-x86/mem_sharing.h | 8 +++-----
 4 files changed, 7 insertions(+), 10 deletions(-)
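
For reference, a condensed view of the live wrapper (as opposed to the
!MEM_SHARING stub further down) as it reads after this patch; this simply
restates the asm-x86/mem_sharing.h hunk below, with the flags parameter
gone from the callers' interface and 0 passed through to
__mem_sharing_unshare_page() unconditionally:

    static inline int mem_sharing_unshare_page(struct domain *d,
                                               unsigned long gfn)
    {
        int rc = __mem_sharing_unshare_page(d, gfn, 0);

        /* Callers only ever need to handle -ENOMEM; anything else is a bug. */
        BUG_ON(rc && (rc != -ENOMEM));

        return rc;
    }

__mem_sharing_unshare_page() keeps its flags parameter, presumably because
internal callers in mem_sharing.c still pass non-zero flags; only the
zero-flags wrapper loses the argument.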

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 651998e783..55bf7353c9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1898,7 +1898,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);
+        sharing_enomem = mem_sharing_unshare_page(currd, gfn);
         rc = 1;
         goto out_put_gfn;
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index fdeb742707..43dd46a32f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -515,7 +515,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
          * Try to unshare. If we fail, communicate ENOMEM without
          * sleeping.
          */
-        if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
+        if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 )
             mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
     }
@@ -899,8 +899,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
         {
             /* Do an unshare to cleanly take care of all corner cases. */
             int rc;
-            rc = mem_sharing_unshare_page(p2m->domain,
-                                          gfn_x(gfn_add(gfn, i)), 0);
+            rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)));
             if ( rc )
             {
                 p2m_unlock(p2m);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 309e872edf..c7d2bac452 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -352,7 +352,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
          * might be the only one using this shared page, and we need to
          * trigger proper cleanup. Once done, this is like any other page.
          */
-        rc = mem_sharing_unshare_page(d, gmfn, 0);
+        rc = mem_sharing_unshare_page(d, gmfn);
         if ( rc )
         {
             mem_sharing_notify_enomem(d, gmfn, false);
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index af2a1038b5..cf7848709f 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -69,10 +69,9 @@ int __mem_sharing_unshare_page(struct domain *d,
                                uint16_t flags);
 
 static inline int mem_sharing_unshare_page(struct domain *d,
-                                           unsigned long gfn,
-                                           uint16_t flags)
+                                           unsigned long gfn)
 {
-    int rc = __mem_sharing_unshare_page(d, gfn, flags);
+    int rc = __mem_sharing_unshare_page(d, gfn, 0);
     BUG_ON(rc && (rc != -ENOMEM));
     return rc;
 }
@@ -115,8 +114,7 @@ static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
     return 0;
 }
 
-static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
-                                           uint16_t flags)
+static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn)
 {
     ASSERT_UNREACHABLE();
     return -EOPNOTSUPP;
-- 
2.20.1

