[Xen-changelog] [xen master] mm: Don't scrub pages while holding heap lock in alloc_heap_pages()
commit 307c3be3ccb2853e061112d28f00cf57a8f7ca05
Author: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
AuthorDate: Wed Sep 6 11:33:52 2017 -0400
Commit: Wei Liu <wei.liu2@xxxxxxxxxx>
CommitDate: Thu Sep 7 16:16:49 2017 +0100
mm: Don't scrub pages while holding heap lock in alloc_heap_pages()
Instead, preserve PGC_need_scrub bit when setting PGC_state_inuse
state while still under the lock and clear those pages later.
Note that we still need to grab the lock when clearing PGC_need_scrub
bit since count_info might be updated during MCE handling in
mark_page_offline().
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
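
[Editorial note: the following is a minimal standalone sketch of the locking pattern the commit message describes (do the expensive scrub outside the lock, but keep every flag and counter update under it). It uses pthreads and made-up names (fake_page, NEED_SCRUB_FLAG, fake_heap_lock, fake_need_scrub_count, alloc_and_scrub()) rather than Xen's real page_info, heap_lock, PGC_need_scrub or node_need_scrub[], so treat it as an illustration of the idea, not as the patched code itself.]

/*
 * Minimal sketch only: illustrative names, pthreads instead of Xen's
 * spinlocks, and a plain array instead of the real page allocator state.
 */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define NEED_SCRUB_FLAG 0x1u                /* stand-in for PGC_need_scrub */

struct fake_page {
    uint32_t flags;                         /* stand-in for count_info */
    unsigned char data[4096];
};

static pthread_mutex_t fake_heap_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long fake_need_scrub_count; /* stand-in for node_need_scrub[] */

static void alloc_and_scrub(struct fake_page *pg, unsigned int nr)
{
    unsigned int i, dirty_cnt = 0;

    for ( i = 0; i < nr; i++ )
    {
        if ( !(pg[i].flags & NEED_SCRUB_FLAG) )
            continue;

        /* Expensive work (the scrub) is done without holding the lock. */
        memset(pg[i].data, 0, sizeof(pg[i].data));
        dirty_cnt++;

        /*
         * The flags word may be touched concurrently by other code paths
         * (in Xen: mark_page_offline() during MCE handling), so clearing
         * the bit still happens under the lock.
         */
        pthread_mutex_lock(&fake_heap_lock);
        pg[i].flags &= ~NEED_SCRUB_FLAG;
        pthread_mutex_unlock(&fake_heap_lock);
    }

    /* Batch the global counter update into one short critical section. */
    if ( dirty_cnt )
    {
        pthread_mutex_lock(&fake_heap_lock);
        fake_need_scrub_count -= dirty_cnt;
        pthread_mutex_unlock(&fake_heap_lock);
    }
}

int main(void)
{
    static struct fake_page pages[2];

    pages[0].flags = NEED_SCRUB_FLAG;
    fake_need_scrub_count = 1;

    alloc_and_scrub(pages, 2);

    return fake_need_scrub_count ? 1 : 0;   /* 0 on success */
}

As in the patch below, the counter decrement is batched into a single critical section at the end rather than taken once per page, keeping the number of lock acquisitions small.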
---
xen/common/page_alloc.c | 43 +++++++++++++++++++++++++++++++++----------
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index dbad1e1..b5243fc 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -860,6 +860,7 @@ static struct page_info *alloc_heap_pages(
     struct page_info *pg;
     bool need_tlbflush = false;
     uint32_t tlbflush_timestamp = 0;
+    unsigned int dirty_cnt = 0;
 
     /* Make sure there are enough bits in memflags for nodeID. */
     BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
@@ -953,14 +954,11 @@ static struct page_info *alloc_heap_pages(
         /* Reference count must continuously be zero for free pages. */
         BUG_ON((pg[i].count_info & ~PGC_need_scrub) != PGC_state_free);
 
-        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
-        {
-            if ( !(memflags & MEMF_no_scrub) )
-                scrub_one_page(&pg[i]);
-            node_need_scrub[node]--;
-        }
+        /* PGC_need_scrub can only be set if first_dirty is valid */
+        ASSERT(first_dirty != INVALID_DIRTY_IDX || !(pg[i].count_info & PGC_need_scrub));
 
-        pg[i].count_info = PGC_state_inuse;
+        /* Preserve PGC_need_scrub so we can check it after lock is dropped. */
+        pg[i].count_info = PGC_state_inuse | (pg[i].count_info & PGC_need_scrub);
 
         if ( !(memflags & MEMF_no_tlbflush) )
             accumulate_tlbflush(&need_tlbflush, &pg[i],
@@ -974,13 +972,38 @@ static struct page_info *alloc_heap_pages(
          * guest can control its own visibility of/through the cache.
          */
         flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
-
-        if ( !(memflags & MEMF_no_scrub) )
-            check_one_page(&pg[i]);
     }
 
     spin_unlock(&heap_lock);
 
+    if ( first_dirty != INVALID_DIRTY_IDX ||
+         (scrub_debug && !(memflags & MEMF_no_scrub)) )
+    {
+        for ( i = 0; i < (1U << order); i++ )
+        {
+            if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
+            {
+                if ( !(memflags & MEMF_no_scrub) )
+                    scrub_one_page(&pg[i]);
+
+                dirty_cnt++;
+
+                spin_lock(&heap_lock);
+                pg[i].count_info &= ~PGC_need_scrub;
+                spin_unlock(&heap_lock);
+            }
+            else if ( !(memflags & MEMF_no_scrub) )
+                check_one_page(&pg[i]);
+        }
+
+        if ( dirty_cnt )
+        {
+            spin_lock(&heap_lock);
+            node_need_scrub[node] -= dirty_cnt;
+            spin_unlock(&heap_lock);
+        }
+    }
+
     if ( need_tlbflush )
         filtered_flush_tlb_mask(tlbflush_timestamp);
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog