
[xen staging-4.19] xen/mm: remove aliasing of PGC_need_scrub over PGC_allocated



commit 49a4deb467caea989d81f3692ed19a406ef6ee83
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Wed Feb 11 12:22:15 2026 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 11 12:22:15 2026 +0100

    xen/mm: remove aliasing of PGC_need_scrub over PGC_allocated
    
    Future changes will care about the state of the PGC_need_scrub flag even
    when pages have PGC_allocated set, and hence it's no longer possible to
    alias the two flags.  Also add PGC_need_scrub to the set of preserved
    flags, so it's not dropped by assign_pages().

    No functional change intended, although the page counter on x86 loses a
    bit.
    
    Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: cbb484d008e1c7982e0038b1331ff59f94201be5
    master date: 2026-02-05 08:53:07 +0100
---
 xen/arch/arm/include/asm/mm.h | 10 +++-------
 xen/arch/ppc/include/asm/mm.h | 10 +++-------
 xen/arch/x86/include/asm/mm.h | 18 +++++++-----------
 xen/common/page_alloc.c       |  6 ++++--
 4 files changed, 17 insertions(+), 27 deletions(-)
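
The per-arch hunks below all follow the same pattern: PGC_need_scrub gets its
own flag bit via the PG_shift()/PG_mask() helpers instead of reusing
PGC_allocated's bit.  As a rough standalone sketch of that layout (assuming
the usual PG_shift(idx) == BITS_PER_LONG - (idx) and PG_mask(x, idx) ==
x << PG_shift(idx) definitions from these headers; this is not the Xen build
itself):

    #include <stdio.h>

    #define BITS_PER_LONG   (sizeof(unsigned long) * 8)
    #define PG_shift(idx)   (BITS_PER_LONG - (idx))
    #define PG_mask(x, idx) (x ## UL << PG_shift(idx))

    /* Flag values as on x86 after this patch (illustrative). */
    #define PGC_allocated   PG_mask(1, 1)   /* top bit */
    #define PGC_need_scrub  PG_mask(1, 7)   /* now a dedicated bit */

    int main(void)
    {
        /* Before the patch, PGC_need_scrub was #defined to PGC_allocated,
         * i.e. the two masks were literally the same bit. */
        printf("PGC_allocated  = %#lx\n", PGC_allocated);
        printf("PGC_need_scrub = %#lx\n", PGC_need_scrub);
        printf("share a bit?     %s\n",
               (PGC_allocated & PGC_need_scrub) ? "yes" : "no");
        return 0;
    }

With distinct bits, a page can be marked both allocated and in need of
scrubbing, which is what the assign_pages() change at the end of the patch
relies on.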

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 48538b5337..ed71cf9bca 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -145,6 +145,9 @@ struct page_info
 #else
 #define PGC_static     0
 #endif
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub   PG_shift(5)
+#define PGC_need_scrub    PG_mask(1, 5)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken       PG_shift(7)
@@ -164,13 +167,6 @@ struct page_info
 #define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub   _PGC_allocated
-#define PGC_need_scrub    PGC_allocated
-
 #ifdef CONFIG_ARM_32
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
 #define is_xen_heap_mfn(mfn) ({                                 \
diff --git a/xen/arch/ppc/include/asm/mm.h b/xen/arch/ppc/include/asm/mm.h
index a433936076..9b654945de 100644
--- a/xen/arch/ppc/include/asm/mm.h
+++ b/xen/arch/ppc/include/asm/mm.h
@@ -58,6 +58,9 @@ static inline struct page_info *virt_to_page(const void *v)
 /* Page is Xen heap? */
 #define _PGC_xen_heap     PG_shift(2)
 #define PGC_xen_heap      PG_mask(1, 2)
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub   PG_shift(3)
+#define PGC_need_scrub    PG_mask(1, 3)
 /* Page is broken? */
 #define _PGC_broken       PG_shift(7)
 #define PGC_broken        PG_mask(1, 7)
@@ -76,13 +79,6 @@ static inline struct page_info *virt_to_page(const void *v)
 #define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub   _PGC_allocated
-#define PGC_need_scrub    PGC_allocated
-
 #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
 #define is_xen_heap_mfn(mfn) \
     (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 98b66edaca..52d2d5ff60 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -83,29 +83,25 @@
 #define PGC_state_offlined  PG_mask(2, 6)
 #define PGC_state_free      PG_mask(3, 6)
 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub   PG_shift(7)
+#define PGC_need_scrub    PG_mask(1, 7)
 #ifdef CONFIG_SHADOW_PAGING
  /* Set when a page table page has been shadowed. */
-#define _PGC_shadowed_pt  PG_shift(7)
-#define PGC_shadowed_pt   PG_mask(1, 7)
+#define _PGC_shadowed_pt  PG_shift(8)
+#define PGC_shadowed_pt   PG_mask(1, 8)
 #else
 #define PGC_shadowed_pt   0
 #endif
 
 /* Count of references to this frame. */
 #if PGC_shadowed_pt
-#define PGC_count_width   PG_shift(7)
+#define PGC_count_width   PG_shift(8)
 #else
-#define PGC_count_width   PG_shift(6)
+#define PGC_count_width   PG_shift(7)
 #endif
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub   _PGC_allocated
-#define PGC_need_scrub    PGC_allocated
-
 #ifndef CONFIG_BIGMEM
 /*
  * This definition is solely for the use in struct page_info (and
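
A concrete consequence of the x86 hunk above, assuming the usual
PG_shift(idx) == BITS_PER_LONG - (idx): on a 64-bit build without
CONFIG_SHADOW_PAGING, PGC_count_width drops from PG_shift(6) = 58 to
PG_shift(7) = 57 (and from 57 to 56 with shadow paging), so PGC_count_mask
shrinks from 2^58 - 1 to 2^57 - 1.  That is the one bit the commit message
says the x86 page counter loses.
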
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index bbb8578459..4304c3dbd4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2364,7 +2364,8 @@ int assign_pages(
 
         for ( i = 0; i < nr; i++ )
         {
-            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_static)));
+            ASSERT(!(pg[i].count_info &
+                     ~(PGC_extra | PGC_static | PGC_need_scrub)));
             if ( pg[i].count_info & PGC_extra )
                 extra_pages++;
         }
@@ -2424,7 +2425,8 @@ int assign_pages(
         page_set_owner(&pg[i], d);
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
-            (pg[i].count_info & (PGC_extra | PGC_static)) | PGC_allocated | 1;
+            (pg[i].count_info & (PGC_extra | PGC_static | PGC_need_scrub)) |
+            PGC_allocated | 1;
 
         page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
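
The assign_pages() hunks are where the new flag matters in practice: the
preserve mask now carries PGC_need_scrub across the assignment, so a dirty
page keeps its "needs scrubbing" marker after being handed to a domain.  A
minimal sketch of that masking, with made-up bit positions rather than the
real asm/mm.h values:

    #include <assert.h>

    /* Illustrative bit positions only; the real ones come from asm/mm.h. */
    #define PGC_allocated   (1UL << 63)
    #define PGC_extra       (1UL << 62)
    #define PGC_static      (1UL << 61)
    #define PGC_need_scrub  (1UL << 60)

    /* Same shape as the count_info update in the hunk above. */
    static unsigned long assign_one(unsigned long count_info)
    {
        return (count_info & (PGC_extra | PGC_static | PGC_need_scrub)) |
               PGC_allocated | 1;
    }

    int main(void)
    {
        unsigned long ci = assign_one(PGC_need_scrub);

        assert(ci & PGC_need_scrub);  /* scrub marker survives assignment */
        assert(ci & PGC_allocated);   /* page is now allocated */
        assert((ci & 0xfful) == 1);   /* low bits: refcount set to 1 */
        return 0;
    }

With the old aliasing, PGC_need_scrub and PGC_allocated were the same bit, so
there was no way to express "allocated but still to be scrubbed"; the future
work mentioned in the commit message depends on that distinction.
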
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.19