|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v1 9/9] mm: Make sure pages are scrubbed
Add a debug Kconfig option that will make the page allocator verify
that pages that were supposed to be scrubbed are, in fact, clean.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
xen/Kconfig.debug | 7 +++++++
xen/common/page_alloc.c | 33 +++++++++++++++++++++++++++++++++
2 files changed, 40 insertions(+), 0 deletions(-)
diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 8139564..35b86ae 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -113,6 +113,13 @@ config DEVICE_TREE_DEBUG
logged in the Xen ring buffer.
If unsure, say N here.
+config SCRUB_DEBUG
+ bool "Page scrubbing test"
+ default y
+ ---help---
+ Verify that pages that need to be scrubbed before being allocated to
+ a guest are indeed scrubbed.
+
endif # DEBUG || EXPERT
endmenu
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7dfbd8c..8140446 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -685,6 +685,29 @@ static void check_low_mem_virq(void)
}
}
+#ifdef CONFIG_SCRUB_DEBUG
+#define PAGE_POISON 0xbad0bad0bad0bad0ULL
+static void poison_one_page(struct page_info *pg)
+{
+ mfn_t mfn = _mfn(page_to_mfn(pg));
+ uint64_t *ptr;
+
+ ptr = map_domain_page(mfn);
+ *ptr = PAGE_POISON;
+ unmap_domain_page(ptr);
+}
+
+static void check_one_page(struct page_info *pg)
+{
+ mfn_t mfn = _mfn(page_to_mfn(pg));
+ uint64_t *ptr;
+
+ ptr = map_domain_page(mfn);
+ ASSERT(*ptr != PAGE_POISON);
+ unmap_domain_page(ptr);
+}
+#endif /* CONFIG_SCRUB_DEBUG */
+
static void check_and_stop_scrub(struct page_info *head)
{
if ( head->u.free.scrub_state & PAGE_SCRUBBING )
@@ -905,6 +928,11 @@ static struct page_info *alloc_heap_pages(
* guest can control its own visibility of/through the cache.
*/
flush_page_to_ram(page_to_mfn(&pg[i]));
+
+#ifdef CONFIG_SCRUB_DEBUG
+ if ( d && !is_idle_domain(d) )
+ check_one_page(&pg[i]);
+#endif
}
spin_unlock(&heap_lock);
@@ -1285,6 +1313,11 @@ static void free_heap_pages(
{
pg->count_info |= PGC_need_scrub;
node_need_scrub[node] += (1UL << order);
+
+#ifdef CONFIG_SCRUB_DEBUG
+ for ( i = 0; i < (1 << order); i++ )
+ poison_one_page(&pg[i]);
+#endif
}
merge_chunks(pg, node, zone, order, 0);
--
1.7.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |