[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen master] x86/mm: update log-dirty bitmap when manipulating P2M



commit 98fccdf0ac7c1a909b54dd5beeba54a0a23f756c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Dec 11 09:50:30 2025 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Dec 11 09:50:30 2025 +0100

    x86/mm: update log-dirty bitmap when manipulating P2M
    
    Just like for PV guests MMU_MACHPHYS_UPDATE implies marking of the
    respective page as dirty, additions to a HVM guest's P2M should do so.
    
    For HVM the opposite is also true: Pages being removed from the P2M are
    no longer dirty at their prior GFN; there's no point in telling the tool
    stack to try and copy that page, when this will fail anyway (until
    perhaps a new page gets placed there). Introduce paging_mark_pfn_clean()
    (intentionally without a paging_mark_clean() counterpart) to handle
    this. Note that while there is an earlier call to set_gpfn_from_mfn() in
    guest_physmap_add_entry(), there's little reason to mark the page
    clean there when later in the function it'll be marked dirty. This is
    even more so given that at this point it's only the M2P that gets
    updated, with the P2M still left unchanged.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/include/asm/paging.h |  3 ++-
 xen/arch/x86/mm/p2m-pod.c         |  3 +++
 xen/arch/x86/mm/p2m.c             |  8 ++++++++
 xen/arch/x86/mm/paging.c          | 34 +++++++++++++++++++++++-----------
 4 files changed, 36 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/include/asm/paging.h 
b/xen/arch/x86/include/asm/paging.h
index 0c06b0a7a8..72d7c077e4 100644
--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -153,8 +153,9 @@ void paging_log_dirty_init(struct domain *d, const struct 
log_dirty_ops *ops);
 
 /* mark a page as dirty */
 void paging_mark_dirty(struct domain *d, mfn_t gmfn);
-/* mark a page as dirty with taking guest pfn as parameter */
+/* mark a page as dirty/clean with taking guest pfn as parameter */
 void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn);
+void paging_mark_pfn_clean(struct domain *d, pfn_t pfn);
 
 /* is this guest page dirty? 
  * This is called from inside paging code, with the paging lock held. */
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 22dde913cc..7a0bebd2d3 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -655,7 +655,10 @@ decrease_reservation(struct domain *d, gfn_t gfn, unsigned 
int order)
             }
             p2m_tlb_flush_sync(p2m);
             for ( j = 0; j < n; ++j )
+            {
                 set_gpfn_from_mfn(mfn_x(mfn) + j, INVALID_M2P_ENTRY);
+                paging_mark_pfn_clean(d, _pfn(gfn_x(gfn) + i + j));
+            }
             p2m_pod_cache_add(p2m, page, cur_order);
 
             ioreq_request_mapcache_invalidate(d);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e2a00a0efd..2433230ac7 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -562,7 +562,10 @@ p2m_remove_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t 
mfn,
         {
             p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL);
             if ( !p2m_is_special(t) && !p2m_is_shared(t) )
+            {
                 set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
+                paging_mark_pfn_clean(p2m->domain, _pfn(gfn_x(gfn) + i));
+            }
         }
     }
 
@@ -753,8 +756,11 @@ p2m_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
         if ( !p2m_is_grant(t) )
         {
             for ( i = 0; i < (1UL << page_order); i++ )
+            {
                 set_gpfn_from_mfn(mfn_x(mfn_add(mfn, i)),
                                   gfn_x(gfn_add(gfn, i)));
+                paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn) + i));
+            }
         }
     }
     else
@@ -1121,6 +1127,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned 
long gfn_l,
                 {
                     ASSERT(mfn_valid(mfn_add(omfn, i)));
                     set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
+                    paging_mark_pfn_clean(d, _pfn(gfn_x(gfn) + i));
 
                     ioreq_request_mapcache_invalidate(d);
                 }
@@ -1142,6 +1149,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned 
long gfn_l,
         {
             ASSERT(mfn_valid(mfn_add(omfn, i)));
             set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
+            paging_mark_pfn_clean(d, _pfn(gfn_x(gfn) + i));
         }
 
         ioreq_request_mapcache_invalidate(d);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 65455a6867..2396f81ad5 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -249,7 +249,7 @@ static int paging_log_dirty_disable(struct domain *d, bool 
resuming)
 }
 
 /* Mark a page as dirty, with taking guest pfn as parameter */
-void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
+static void set_pfn_logdirty(struct domain *d, pfn_t pfn, bool dirty)
 {
     bool changed;
     mfn_t mfn, *l4, *l3, *l2;
@@ -260,7 +260,7 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
         return;
 
     /* Shared MFNs should NEVER be marked dirty */
-    BUG_ON(paging_mode_translate(d) && SHARED_M2P(pfn_x(pfn)));
+    BUG_ON(dirty && paging_mode_translate(d) && SHARED_M2P(pfn_x(pfn)));
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the
@@ -281,14 +281,15 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
 
     if ( unlikely(mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
     {
-         d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d);
+         if ( dirty )
+             d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d);
          if ( unlikely(mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
              goto out;
     }
 
     l4 = paging_map_log_dirty_bitmap(d);
     mfn = l4[i4];
-    if ( mfn_eq(mfn, INVALID_MFN) )
+    if ( mfn_eq(mfn, INVALID_MFN) && dirty )
         l4[i4] = mfn = paging_new_log_dirty_node(d);
     unmap_domain_page(l4);
     if ( mfn_eq(mfn, INVALID_MFN) )
@@ -296,7 +297,7 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
 
     l3 = map_domain_page(mfn);
     mfn = l3[i3];
-    if ( mfn_eq(mfn, INVALID_MFN) )
+    if ( mfn_eq(mfn, INVALID_MFN) && dirty )
         l3[i3] = mfn = paging_new_log_dirty_node(d);
     unmap_domain_page(l3);
     if ( mfn_eq(mfn, INVALID_MFN) )
@@ -304,21 +305,22 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
 
     l2 = map_domain_page(mfn);
     mfn = l2[i2];
-    if ( mfn_eq(mfn, INVALID_MFN) )
+    if ( mfn_eq(mfn, INVALID_MFN) && dirty )
         l2[i2] = mfn = paging_new_log_dirty_leaf(d);
     unmap_domain_page(l2);
     if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l1 = map_domain_page(mfn);
-    changed = !__test_and_set_bit(i1, l1);
+    changed = dirty ? !__test_and_set_bit(i1, l1)
+                    : __test_and_clear_bit(i1, l1);
     unmap_domain_page(l1);
     if ( changed )
     {
         PAGING_DEBUG(LOGDIRTY,
-                     "d%d: marked mfn %" PRI_mfn " (pfn %" PRI_pfn ")\n",
-                     d->domain_id, mfn_x(mfn), pfn_x(pfn));
-        d->arch.paging.log_dirty.dirty_count++;
+                     "%pd: marked mfn %" PRI_mfn " (pfn %" PRI_pfn ") %s\n",
+                     d, mfn_x(mfn), pfn_x(pfn), dirty ? "dirty" : "clean");
+        d->arch.paging.log_dirty.dirty_count += dirty ? 1 : -1;
     }
 
 out:
@@ -327,6 +329,16 @@ out:
     return;
 }
 
+void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
+{
+    set_pfn_logdirty(d, pfn, true);
+}
+
+void paging_mark_pfn_clean(struct domain *d, pfn_t pfn)
+{
+    set_pfn_logdirty(d, pfn, false);
+}
+
 /* Mark a page as dirty */
 void paging_mark_dirty(struct domain *d, mfn_t gmfn)
 {
@@ -339,7 +351,7 @@ void paging_mark_dirty(struct domain *d, mfn_t gmfn)
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
 
-    paging_mark_pfn_dirty(d, pfn);
+    set_pfn_logdirty(d, pfn, true);
 }
 
 #ifdef CONFIG_SHADOW_PAGING
--
generated by git-patchbot for /home/xen/git/xen.git#master



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.