[Xen-devel] [PATCH v2 9/9] mm: Make sure pages are scrubbed
Add a debug Kconfig option that will make the page allocator verify
that pages that were supposed to be scrubbed are, in fact, clean.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
Changes in v2:
* Check full page for having been scrubbed
Note that poison_one_page() may be unnecessary, since the chances that a dirty
page happens to start with 0xc2c2c2c2c2c2c2c2 are fairly low.
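For illustration only (not part of the patch): a self-contained userspace
sketch of the poison/verify scheme. MOCK_PAGE_SIZE, the malloc()'ed buffer
and main() are stand-ins, and the helpers take a plain pointer instead of a
struct page_info *, but the logic mirrors what the hunks below add to
page_alloc.c.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MOCK_PAGE_SIZE     4096
#define SCRUB_BYTE_PATTERN 0xc2c2c2c2c2c2c2c2ULL

/* Poison only the first word: cheap, and enough to catch a dirty page
 * that happened to contain the scrub pattern already. */
static void poison_one_page(uint64_t *page)
{
    page[0] = ~SCRUB_BYTE_PATTERN;
}

/* A debug-build scrub fills the whole page with the pattern byte. */
static void scrub_one_page(uint64_t *page)
{
    memset(page, SCRUB_BYTE_PATTERN & 0xff, MOCK_PAGE_SIZE);
}

/* At allocation time, assert that every word still carries the pattern. */
static void check_one_page(const uint64_t *page)
{
    unsigned int i;

    for ( i = 0; i < MOCK_PAGE_SIZE / sizeof(*page); i++ )
        assert(page[i] == SCRUB_BYTE_PATTERN);
}

int main(void)
{
    uint64_t *page = malloc(MOCK_PAGE_SIZE);

    if ( !page )
        return 1;

    poison_one_page(page);  /* done when the page is freed dirty */
    scrub_one_page(page);   /* what the scrubber is expected to do */
    check_one_page(page);   /* would assert-fail if the scrub were skipped */

    free(page);
    return 0;
}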
xen/Kconfig.debug | 7 ++++++
xen/common/page_alloc.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 55 insertions(+), 1 deletions(-)
diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 689f297..f3bf9a9 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -114,6 +114,13 @@ config DEVICE_TREE_DEBUG
logged in the Xen ring buffer.
If unsure, say N here.
+config SCRUB_DEBUG
+ bool "Page scrubbing test"
+ default DEBUG
+ ---help---
+ Verify that pages that need to be scrubbed before being allocated to
+ a guest are indeed scrubbed.
+
endif # DEBUG || EXPERT
endmenu
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 8273102..b82aa51 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -699,6 +699,31 @@ static void page_list_add_scrub(struct page_info *pg, unsigned int node,
page_list_add(pg, &heap(node, zone, order));
}
+#define SCRUB_BYTE_PATTERN 0xc2c2c2c2c2c2c2c2
+#ifdef CONFIG_SCRUB_DEBUG
+static void poison_one_page(struct page_info *pg)
+{
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+
+    ptr = map_domain_page(mfn);
+    *ptr = ~SCRUB_BYTE_PATTERN;
+    unmap_domain_page(ptr);
+}
+
+static void check_one_page(struct page_info *pg)
+{
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+    unsigned i;
+
+    ptr = map_domain_page(mfn);
+    for ( i = 0; i < PAGE_SIZE / sizeof (*ptr); i++ )
+        ASSERT(ptr[i] == SCRUB_BYTE_PATTERN);
+    unmap_domain_page(ptr);
+}
+#endif /* CONFIG_SCRUB_DEBUG */
+
static void check_and_stop_scrub(struct page_info *head)
{
if ( head->u.free.scrub_state & PAGE_SCRUBBING )
@@ -913,6 +938,11 @@ static struct page_info *alloc_heap_pages(
* guest can control its own visibility of/through the cache.
*/
flush_page_to_ram(page_to_mfn(&pg[i]));
+
+#ifdef CONFIG_SCRUB_DEBUG
+        if ( d && !is_idle_domain(d) )
+            check_one_page(&pg[i]);
+#endif
}
spin_unlock(&heap_lock);
@@ -1294,6 +1324,11 @@ static void free_heap_pages(
{
pg->count_info |= PGC_need_scrub;
node_need_scrub[node] += (1UL << order);
+
+#ifdef CONFIG_SCRUB_DEBUG
+        for ( i = 0; i < (1 << order); i++ )
+            poison_one_page(&pg[i]);
+#endif
}
pg = merge_chunks(pg, node, zone, order, false);
@@ -1590,6 +1625,14 @@ static void init_heap_pages(
nr_pages -= n;
}
+#ifdef CONFIG_SCRUB_DEBUG
+        /*
+         * These pages get into heap and are allocated to dom0 before
+         * boot scrub happens.
+         * Not scrubbing them here will cause failure in check_one_page().
+         */
+        scrub_one_page(pg + i);
+#endif
free_heap_pages(pg + i, 0, 0);
}
}
@@ -2123,6 +2166,9 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
{
BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
arch_free_heap_page(d, &pg[i]);
+#ifdef CONFIG_SCRUB_DEBUG
+            scrub_one_page(&pg[i]);
+#endif
}
drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
@@ -2226,7 +2272,8 @@ void scrub_one_page(struct page_info *pg)
#ifndef NDEBUG
/* Avoid callers relying on allocations returning zeroed pages. */
-    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
+    unmap_domain_page(memset(__map_domain_page(pg),
+                             SCRUB_BYTE_PATTERN & 0xff, PAGE_SIZE));
#else
/* For a production build, clear_page() is the fastest way to scrub. */
clear_domain_page(_mfn(page_to_mfn(pg)));
--
1.7.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel