[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v3 1/9] mm: Separate free page chunk merging into its own routine
This is needed for subsequent changes to memory scrubbing. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> --- Changes in v3: * Simplify merge_and_free_buddy() (and drop can_merge()) xen/common/page_alloc.c | 74 ++++++++++++++++++++++++++--------------------- 1 files changed, 41 insertions(+), 33 deletions(-) diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index 9e41fb4..6fe55ee 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -919,11 +919,50 @@ static int reserve_offlined_page(struct page_info *head) return count; } +/* Returns new buddy head. */ +static struct page_info * +merge_and_free_buddy(struct page_info *pg, unsigned int node, + unsigned int zone, unsigned int order) +{ + ASSERT(spin_is_locked(&heap_lock)); + + /* Merge chunks as far as possible. */ + while ( order < MAX_ORDER ) + { + unsigned long mask = 1UL << order; + struct page_info *buddy; + + if ( (page_to_mfn(pg) & mask) ) + buddy = pg - mask; /* Merge with predecessor block. */ + else + buddy = pg + mask; /* Merge with successor block. */ + + if ( !mfn_valid(_mfn(page_to_mfn(buddy))) || + !page_state_is(buddy, free) || + (PFN_ORDER(buddy) != order) || + (phys_to_nid(page_to_maddr(buddy)) != node) ) + break; + + page_list_del(buddy, &heap(node, zone, order)); + + /* Adjust current buddy head if we merged backwards. */ + if ( buddy < pg ) + pg = buddy; + + order++; + } + + PFN_ORDER(pg) = order; + page_list_add_tail(pg, &heap(node, zone, order)); + + return pg; +} + /* Free 2^@order set of pages. */ static void free_heap_pages( struct page_info *pg, unsigned int order) { - unsigned long mask, mfn = page_to_mfn(pg); + unsigned long mfn = page_to_mfn(pg); unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0; unsigned int zone = page_to_zone(pg); @@ -970,38 +1009,7 @@ static void free_heap_pages( midsize_alloc_zone_pages = max( midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC); - /* Merge chunks as far as possible. */ - while ( order < MAX_ORDER ) - { - mask = 1UL << order; - - if ( (page_to_mfn(pg) & mask) ) - { - /* Merge with predecessor block? */ - if ( !mfn_valid(_mfn(page_to_mfn(pg-mask))) || - !page_state_is(pg-mask, free) || - (PFN_ORDER(pg-mask) != order) || - (phys_to_nid(page_to_maddr(pg-mask)) != node) ) - break; - pg -= mask; - page_list_del(pg, &heap(node, zone, order)); - } - else - { - /* Merge with successor block? */ - if ( !mfn_valid(_mfn(page_to_mfn(pg+mask))) || - !page_state_is(pg+mask, free) || - (PFN_ORDER(pg+mask) != order) || - (phys_to_nid(page_to_maddr(pg+mask)) != node) ) - break; - page_list_del(pg + mask, &heap(node, zone, order)); - } - - order++; - } - - PFN_ORDER(pg) = order; - page_list_add_tail(pg, &heap(node, zone, order)); + pg = merge_and_free_buddy(pg, node, zone, order); if ( tainted ) reserve_offlined_page(pg); -- 1.7.1 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |