[Xen-devel] [PATCH v1 2/9] mm: Place unscrubbed pages at the end of pagelist
... so that it is easy to find pages that need to be scrubbed (such pages are
now marked with the _PGC_need_scrub bit).
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
xen/common/page_alloc.c | 103 +++++++++++++++++++++++++++++++++++++---------
xen/include/asm-arm/mm.h | 4 ++
xen/include/asm-x86/mm.h | 4 ++
3 files changed, 91 insertions(+), 20 deletions(-)
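
The idea is easiest to see in isolation, so here is a minimal standalone
sketch of the invariant the patch maintains. This is plain C, not Xen code:
struct chunk, struct list and the helpers below are hypothetical stand-ins
for page_info, page_list_head, page_list_add()/page_list_add_tail() and
scrub_one_page(). Clean chunks sit at the head of each free list and
unscrubbed ones at the tail, so a scrubber only has to walk in from the
tail and can stop at the first clean chunk it meets.

/*
 * Illustrative sketch only -- not part of the patch, and not Xen's
 * page_list API.  Models "dirty frees go to the tail, clean frees to
 * the head; scrub from the tail".
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct chunk {
    struct chunk *prev, *next;
    bool need_scrub;                 /* models _PGC_need_scrub */
    char data[16];                   /* stand-in for page contents */
};

struct list {
    struct chunk *head, *tail;
};

/* Insert at the head (clean chunks), like page_list_add(). */
static void list_add(struct list *l, struct chunk *c)
{
    c->prev = NULL;
    c->next = l->head;
    if ( l->head )
        l->head->prev = c;
    else
        l->tail = c;
    l->head = c;
}

/* Insert at the tail (dirty chunks), like page_list_add_tail(). */
static void list_add_tail(struct list *l, struct chunk *c)
{
    c->next = NULL;
    c->prev = l->tail;
    if ( l->tail )
        l->tail->next = c;
    else
        l->head = c;
    l->tail = c;
}

/* Unlink the tail chunk; caller guarantees the list is non-empty. */
static struct chunk *list_del_tail(struct list *l)
{
    struct chunk *c = l->tail;

    l->tail = c->prev;
    if ( l->tail )
        l->tail->next = NULL;
    else
        l->head = NULL;
    return c;
}

/* Mirrors the free_heap_pages() change: dirty frees go to the tail. */
static void free_chunk(struct list *l, struct chunk *c, bool need_scrub)
{
    c->need_scrub = need_scrub;
    if ( need_scrub )
        list_add_tail(l, c);
    else
        list_add(l, c);
}

/* Mirrors scrub_free_pages(): dirty chunks cluster at the tail. */
static void scrub_from_tail(struct list *l)
{
    while ( l->tail && l->tail->need_scrub )
    {
        struct chunk *c = list_del_tail(l);

        memset(c->data, 0, sizeof(c->data)); /* models scrub_one_page() */
        c->need_scrub = false;
        list_add(l, c);                      /* now clean: back to head */
    }
}

int main(void)
{
    struct list l = { NULL, NULL };
    struct chunk a = { .data = "clean" }, b = { .data = "secret" };

    free_chunk(&l, &a, false);   /* clean: head of the list */
    free_chunk(&l, &b, true);    /* dirty: tail of the list */
    scrub_from_tail(&l);         /* touches only the dirty tail */

    printf("still dirty: %d\n", l.head->need_scrub || l.tail->need_scrub);
    return 0;
}

Because every dirty free is a tail insert and merge_chunks() re-inserts a
still-dirty result at the tail, the invariant survives merging, which is
what lets scrub_free_pages() below stop at the first clean page it finds
from the end of each list.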
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7931903..a28eb38 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -383,6 +383,8 @@ typedef struct page_list_head
heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
#define heap(node, zone, order) ((*_heap[node])[zone][order])
+static unsigned long node_need_scrub[MAX_NUMNODES];
+
static unsigned long *avail[MAX_NUMNODES];
static long total_avail_pages;
@@ -807,7 +809,7 @@ static struct page_info *alloc_heap_pages(
while ( j != order )
{
PFN_ORDER(pg) = --j;
- page_list_add_tail(pg, &heap(node, zone, j));
+ page_list_add(pg, &heap(node, zone, j));
pg += 1 << j;
}
@@ -827,6 +829,8 @@ static struct page_info *alloc_heap_pages(
BUG_ON(pg[i].count_info != PGC_state_free);
pg[i].count_info = PGC_state_inuse;
+ BUG_ON(test_bit(_PGC_need_scrub, &pg[i].count_info));
+
if ( !(memflags & MEMF_no_tlbflush) )
accumulate_tlbflush(&need_tlbflush, &pg[i],
&tlbflush_timestamp);
@@ -856,6 +860,7 @@ static int reserve_offlined_page(struct page_info *head)
int zone = page_to_zone(head), i, head_order = PFN_ORDER(head), count = 0;
struct page_info *cur_head;
int cur_order;
+ bool_t need_scrub = !!test_bit(_PGC_need_scrub, &head->count_info);
ASSERT(spin_is_locked(&heap_lock));
@@ -897,7 +902,13 @@ static int reserve_offlined_page(struct page_info *head)
{
merge:
/* We don't consider merging outside the head_order. */
- page_list_add_tail(cur_head, &heap(node, zone, cur_order));
+ if ( need_scrub )
+ {
+ cur_head->count_info |= PGC_need_scrub;
+ page_list_add_tail(cur_head, &heap(node, zone, cur_order));
+ }
+ else
+ page_list_add(cur_head, &heap(node, zone, cur_order));
PFN_ORDER(cur_head) = cur_order;
cur_head += (1 << cur_order);
break;
@@ -925,7 +936,7 @@ static int reserve_offlined_page(struct page_info *head)
}
static bool_t can_merge(struct page_info *buddy, unsigned int node,
- unsigned int order)
+ unsigned int order, bool_t need_scrub)
{
if ( !mfn_valid(_mfn(page_to_mfn(buddy))) ||
!page_state_is(buddy, free) ||
@@ -933,12 +944,18 @@ static bool_t can_merge(struct page_info *buddy, unsigned int node,
(phys_to_nid(page_to_maddr(buddy)) != node) )
return 0;
+ if ( need_scrub !=
+ !!test_bit(_PGC_need_scrub, &buddy->count_info) )
+ return 0;
+
return 1;
}
static void merge_chunks(struct page_info *pg, unsigned int node,
unsigned int zone, unsigned int order)
{
+ bool_t need_scrub = !!test_bit(_PGC_need_scrub, &pg->count_info);
+
ASSERT(spin_is_locked(&heap_lock));
/* Merge chunks as far as possible. */
@@ -951,9 +968,10 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
{
/* Merge with predecessor block? */
buddy = pg - mask;
- if ( !can_merge(buddy, node, order) )
+ if ( !can_merge(buddy, node, order, need_scrub) )
break;
+ pg->count_info &= ~PGC_need_scrub;
pg = buddy;
page_list_del(pg, &heap(node, zone, order));
}
@@ -961,9 +979,10 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
{
/* Merge with successor block? */
buddy = pg + mask;
- if ( !can_merge(buddy, node, order) )
+ if ( !can_merge(buddy, node, order, need_scrub) )
break;
+ buddy->count_info &= ~PGC_need_scrub;
page_list_del(buddy, &heap(node, zone, order));
}
@@ -971,12 +990,54 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
}
PFN_ORDER(pg) = order;
- page_list_add_tail(pg, &heap(node, zone, order));
+ if ( need_scrub )
+ {
+ pg->count_info |= PGC_need_scrub;
+ page_list_add_tail(pg, &heap(node, zone, order));
+ }
+ else
+ page_list_add(pg, &heap(node, zone, order));
+}
+
+static void scrub_free_pages(unsigned int node)
+{
+ struct page_info *pg;
+ unsigned int i, zone;
+ int order;
+
+ ASSERT(spin_is_locked(&heap_lock));
+
+ if ( !node_need_scrub[node] )
+ return;
+
+ for ( zone = 0; zone < NR_ZONES; zone++ )
+ {
+ for ( order = MAX_ORDER; order >= 0; order-- )
+ {
+ while ( !page_list_empty(&heap(node, zone, order)) )
+ {
+ /* Unscrubbed pages are always at the end of the list. */
+ pg = page_list_last(&heap(node, zone, order));
+ if ( !test_bit(_PGC_need_scrub, &pg->count_info) )
+ break;
+
+ for ( i = 0; i < (1UL << order); i++)
+ scrub_one_page(&pg[i]);
+
+ pg->count_info &= ~PGC_need_scrub;
+
+ page_list_del(pg, &heap(node, zone, order));
+ merge_chunks(pg, node, zone, order);
+
+ node_need_scrub[node] -= (1UL << order);
+ }
+ }
+ }
}
/* Free 2^@order set of pages. */
static void free_heap_pages(
- struct page_info *pg, unsigned int order)
+ struct page_info *pg, unsigned int order, bool_t need_scrub)
{
unsigned long mfn = page_to_mfn(pg);
unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
@@ -1025,11 +1086,20 @@ static void free_heap_pages(
midsize_alloc_zone_pages = max(
midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
+ if ( need_scrub && !tainted )
+ {
+ pg->count_info |= PGC_need_scrub;
+ node_need_scrub[node] += (1UL << order);
+ }
+
merge_chunks(pg, node, zone, order);
if ( tainted )
reserve_offlined_page(pg);
+ if ( need_scrub )
+ scrub_free_pages(node);
+
spin_unlock(&heap_lock);
}
@@ -1250,7 +1320,7 @@ unsigned int online_page(unsigned long mfn, uint32_t *status)
spin_unlock(&heap_lock);
if ( (y & PGC_state) == PGC_state_offlined )
- free_heap_pages(pg, 0);
+ free_heap_pages(pg, 0, 0);
return ret;
}
@@ -1319,7 +1389,7 @@ static void init_heap_pages(
nr_pages -= n;
}
- free_heap_pages(pg+i, 0);
+ free_heap_pages(pg + i, 0, 0);
}
}
@@ -1646,7 +1716,7 @@ void free_xenheap_pages(void *v, unsigned int order)
memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
- free_heap_pages(virt_to_page(v), order);
+ free_heap_pages(virt_to_page(v), order, 0);
}
#else
@@ -1700,12 +1770,9 @@ void free_xenheap_pages(void *v, unsigned int order)
pg = virt_to_page(v);
for ( i = 0; i < (1u << order); i++ )
- {
- scrub_one_page(&pg[i]);
pg[i].count_info &= ~PGC_xen_heap;
- }
- free_heap_pages(pg, order);
+ free_heap_pages(pg, order, 1);
}
#endif
@@ -1814,7 +1881,7 @@ struct page_info *alloc_domheap_pages(
if ( d && !(memflags & MEMF_no_owner) &&
assign_pages(d, pg, order, memflags) )
{
- free_heap_pages(pg, order);
+ free_heap_pages(pg, order, 0);
return NULL;
}
@@ -1882,11 +1949,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
scrub = 1;
}
- if ( unlikely(scrub) )
- for ( i = 0; i < (1 << order); i++ )
- scrub_one_page(&pg[i]);
-
- free_heap_pages(pg, order);
+ free_heap_pages(pg, order, scrub);
}
if ( drop_dom_ref )
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 60ccbf3..52a03a0 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -113,6 +113,10 @@ struct page_info
#define PGC_count_width PG_shift(9)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
+/* Page needs to be scrubbed */
+#define _PGC_need_scrub PG_shift(10)
+#define PGC_need_scrub PG_mask(1, 10)
+
extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
extern vaddr_t xenheap_virt_end;
#ifdef CONFIG_ARM_64
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index e22603c..f3d4443 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -233,6 +233,10 @@ struct page_info
#define PGC_count_width PG_shift(9)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
+/* Page needs to be scrubbed */
+#define _PGC_need_scrub PG_shift(10)
+#define PGC_need_scrub PG_mask(1, 10)
+
struct spage_info
{
unsigned long type_info;
--
1.7.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel