[PATCH v2 14/19] xen: make avail_domheap_pages() static
Function avail_domheap_pages() is only invoked by get_outstanding_claims(),
so it can be inlined into its sole caller. Move avail_heap_pages() up so no
forward declaration is needed ahead of get_outstanding_claims().

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v1 -> v2:
- let avail_domheap_pages() be inlined into its sole caller
- move avail_heap_pages() up
---
 xen/common/page_alloc.c | 51 ++++++++++++++++++-----------------------
 xen/include/xen/mm.h    |  1 -
 2 files changed, 22 insertions(+), 30 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index cc2ad4423a..5803a1ef4e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -488,6 +488,27 @@ static long total_avail_pages;
 static DEFINE_SPINLOCK(heap_lock);
 static long outstanding_claims; /* total outstanding claims by all domains */
 
+static unsigned long avail_heap_pages(
+    unsigned int zone_lo, unsigned int zone_hi, unsigned int node)
+{
+    unsigned int i, zone;
+    unsigned long free_pages = 0;
+
+    if ( zone_hi >= NR_ZONES )
+        zone_hi = NR_ZONES - 1;
+
+    for_each_online_node(i)
+    {
+        if ( !avail[i] )
+            continue;
+        for ( zone = zone_lo; zone <= zone_hi; zone++ )
+            if ( (node == -1) || (node == i) )
+                free_pages += avail[i][zone];
+    }
+
+    return free_pages;
+}
+
 unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
 {
     ASSERT(rspin_is_locked(&d->page_alloc_lock));
@@ -584,7 +605,7 @@ void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages)
 {
     spin_lock(&heap_lock);
     *outstanding_pages = outstanding_claims;
-    *free_pages = avail_domheap_pages();
+    *free_pages = avail_heap_pages(MEMZONE_XEN + 1, NR_ZONES - 1, -1);
     spin_unlock(&heap_lock);
 }
 
@@ -1964,27 +1985,6 @@ static void init_heap_pages(
     }
 }
 
-static unsigned long avail_heap_pages(
-    unsigned int zone_lo, unsigned int zone_hi, unsigned int node)
-{
-    unsigned int i, zone;
-    unsigned long free_pages = 0;
-
-    if ( zone_hi >= NR_ZONES )
-        zone_hi = NR_ZONES - 1;
-
-    for_each_online_node(i)
-    {
-        if ( !avail[i] )
-            continue;
-        for ( zone = zone_lo; zone <= zone_hi; zone++ )
-            if ( (node == -1) || (node == i) )
-                free_pages += avail[i][zone];
-    }
-
-    return free_pages;
-}
-
 /*************************
  * COLORED SIDE-ALLOCATOR *
@@ -2795,13 +2795,6 @@ unsigned long avail_domheap_pages_region(
     return avail_heap_pages(zone_lo, zone_hi, node);
 }
 
-unsigned long avail_domheap_pages(void)
-{
-    return avail_heap_pages(MEMZONE_XEN + 1,
-                            NR_ZONES - 1,
-                            -1);
-}
-
 unsigned long avail_node_heap_pages(unsigned int nodeid)
 {
     return avail_heap_pages(MEMZONE_XEN, NR_ZONES -1, nodeid);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index ae1c48a615..eda57486cf 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -140,7 +140,6 @@ struct page_info *alloc_domheap_pages(
 void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages_region(
     unsigned int node, unsigned int min_width, unsigned int max_width);
-unsigned long avail_domheap_pages(void);
 unsigned long avail_node_heap_pages(unsigned int nodeid);
 #define alloc_domheap_page(d,f) (alloc_domheap_pages(d,0,f))
 #define free_domheap_page(p) (free_domheap_pages(p,0))
-- 
2.34.1
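
For illustration only: with avail_domheap_pages() removed, code that wants the
domheap free-page total can go through get_outstanding_claims(), which
snapshots both values under heap_lock. A minimal, hypothetical sketch --
report_domheap_free() and its message format are assumptions for this example,
not part of the patch:

    /* Hypothetical caller: read free and claimed page counts via the
     * remaining accessor instead of the dropped avail_domheap_pages(). */
    static void report_domheap_free(void)
    {
        uint64_t free_pages, outstanding;

        /* Both values are taken in one critical section inside the callee. */
        get_outstanding_claims(&free_pages, &outstanding);
        printk("domheap: %"PRIu64" pages free, %"PRIu64" pages claimed\n",
               free_pages, outstanding);
    }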