[Xen-devel] [RFC PATCH 1/2] xen: page_alloc: introduce alloc_domheap_pages_nodemask()
Introduce alloc_domheap_pages_nodemask() so that callers can specify which
node(s) to allocate memory from even when 'd == NULL'. When 'd' is non-NULL,
the domain's node_affinity still takes precedence and the passed-in nodemask
is ignored; alloc_domheap_pages() becomes a thin wrapper that passes
node_online_map, so its behaviour is unchanged.
Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
---
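[ Usage sketch, not part of the commit message: the caller below is
  hypothetical and not part of this series; it only illustrates what the
  new parameter allows. node_set() and NODE_MASK_NONE are the existing
  helpers from xen/nodemask.h. ]

    /*
     * Hypothetical caller: allocate a single (order-0) anonymous page,
     * i.e. with d == NULL, restricted to node 0. Without this patch such
     * an allocation always falls back to node_online_map.
     */
    nodemask_t mask = NODE_MASK_NONE;
    struct page_info *pg;

    node_set(0, mask);
    pg = alloc_domheap_pages_nodemask(NULL, 0, 0, mask);
    if ( pg == NULL )
        /* No free memory on node 0 within the allowed zones. */;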
 xen/common/page_alloc.c |   25 +++++++++++++++++--------
 xen/include/xen/mm.h    |    4 ++++
 2 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 601319c..85e8188 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -561,16 +561,18 @@ static void check_low_mem_virq(void)
 static struct page_info *alloc_heap_pages(
     unsigned int zone_lo, unsigned int zone_hi,
     unsigned int order, unsigned int memflags,
-    struct domain *d)
+    struct domain *d, nodemask_t nodemask)
 {
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
     struct page_info *pg;
-    nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
     bool_t need_tlbflush = 0;
     uint32_t tlbflush_timestamp = 0;
 
+    if ( d != NULL )
+        nodemask = d->node_affinity;
+
     if ( node == NUMA_NO_NODE )
     {
         memflags &= ~MEMF_exact_node;
@@ -1338,7 +1340,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     ASSERT(!in_irq());
 
     pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN,
-                          order, memflags, NULL);
+                          order, memflags, NULL, node_online_map);
     if ( unlikely(pg == NULL) )
         return NULL;
 
@@ -1490,9 +1492,9 @@ int assign_pages(
     return -1;
 }
 
-
-struct page_info *alloc_domheap_pages(
-    struct domain *d, unsigned int order, unsigned int memflags)
+struct page_info *alloc_domheap_pages_nodemask(
+    struct domain *d, unsigned int order, unsigned int memflags,
+    nodemask_t nodemask)
 {
     struct page_info *pg = NULL;
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
@@ -1505,12 +1507,13 @@ struct page_info *alloc_domheap_pages(
         return NULL;
 
     if ( dma_bitsize && ((dma_zone = bits_to_zone(dma_bitsize)) < zone_hi) )
-        pg = alloc_heap_pages(dma_zone + 1, zone_hi, order, memflags, d);
+        pg = alloc_heap_pages(dma_zone + 1, zone_hi, order, memflags, d,
+                              nodemask);
 
     if ( (pg == NULL) &&
          ((memflags & MEMF_no_dma) ||
           ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi, order,
-                                  memflags, d)) == NULL)) )
+                                  memflags, d, nodemask)) == NULL)) )
         return NULL;
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
@@ -1522,6 +1525,12 @@
     return pg;
 }
 
+struct page_info *alloc_domheap_pages(
+    struct domain *d, unsigned int order, unsigned int memflags)
+{
+    return alloc_domheap_pages_nodemask(d, order, memflags, node_online_map);
+}
+
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
     struct domain *d = page_get_owner(pg);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b183189..9df1137 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -30,6 +30,7 @@
 #include <xen/types.h>
 #include <xen/list.h>
+#include <xen/nodemask.h>
 #include <xen/spinlock.h>
 
 struct domain;
@@ -65,6 +66,9 @@ void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages);
 void init_domheap_pages(paddr_t ps, paddr_t pe);
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags);
+struct page_info *alloc_domheap_pages_nodemask(
+    struct domain *d, unsigned int order, unsigned int memflags,
+    nodemask_t nodemask);
 void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages_region(
     unsigned int node, unsigned int min_width, unsigned int max_width);
--
1.7.10.4