[PATCH 06/11] xen/page_alloc: Hook per-node claims to alloc_heap_pages()
Extend the claim checks in alloc_heap_pages() to cover exact-node claims. The
logic is slightly more involved, so move it into an auxiliary function.

Exact-node claims are also checked against global claims, so that both kinds
of claim can coexist in the same system.
Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
---
xen/common/page_alloc.c | 44 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 41 insertions(+), 3 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7fe574b29407..cfaa64d3b858 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -991,6 +991,46 @@ static void init_free_page_fields(struct page_info *pg)
     page_set_owner(pg, NULL);
 }
 
+/*
+ * Determine whether a heap allocation is allowed after considering all
+ * outstanding claims in the system.
+ *
+ * Exact-node allocations must also take into account global claims!
+ *
+ * e.g.:
+ * Consider a domain for which the toolstack issued a non-exact claim of 75%
+ * of host memory, and another domain for which the toolstack tries to issue
+ * an exact-node claim of 50% of host memory. If the exact-node claim didn't
+ * take non-exact claims into account too, we would overallocate, which is
+ * exactly what claims are meant to prevent.
+ */
+static bool can_alloc(struct domain *d, unsigned int memflags,
+                      unsigned long request)
+{
+    nodeid_t node = (memflags & MEMF_exact_node) ? MEMF_get_node(memflags) :
+                                                   NUMA_NO_NODE;
+
+    if ( outstanding_claims + request <= total_avail_pages )
+    {
+        if ( node == NUMA_NO_NODE )
+            return true;
+
+        if ( pernode_oc[node] + request <= pernode_avail_pages[node] )
+            return true;
+    }
+
+    /*
+     * Not enough unclaimed memory. Only allow the allocation if it's already
+     * claimed on the right node. d->claim_node == NUMA_NO_NODE if the claim
+     * isn't on an exact node.
+     *
+     * Only refcounted allocations attributed to a domain may have been
+     * claimed.
+     */
+
+    return d && d->claim_node == node && d->outstanding_pages >= request &&
+           !(memflags & MEMF_no_refcount);
+}
+
 /* Allocate 2^@order contiguous pages. */
 static struct page_info *alloc_heap_pages(
     unsigned int zone_lo, unsigned int zone_hi,
@@ -1021,9 +1061,7 @@ static struct page_info *alloc_heap_pages(
      * Claimed memory is considered unavailable unless the request
      * is made by a domain with sufficient unclaimed pages.
      */
-    if ( (outstanding_claims + request > total_avail_pages) &&
-          ((memflags & MEMF_no_refcount) ||
-           !d || d->outstanding_pages < request) )
+    if ( !can_alloc(d, memflags, request) )
     {
         spin_unlock(&heap_lock);
         return NULL;
--
2.48.1
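
For reference (not part of the patch): below is a minimal, self-contained
sketch of the claim accounting described in the can_alloc() comment above.
The names claim_fits, pernode_claims, pernode_avail and NO_NODE are
hypothetical and only model the 75% global / 50% exact-node example; the
real bookkeeping lives in outstanding_claims, pernode_oc[] and
pernode_avail_pages[] inside xen/common/page_alloc.c.

/*
 * Hypothetical standalone model of claim admission; not Xen code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NODES    2
#define NO_NODE  (-1)

static long total_avail_pages = 1000;            /* whole host             */
static long outstanding_claims;                  /* sum of all claims      */
static long pernode_avail[NODES] = { 500, 500 }; /* free pages per node    */
static long pernode_claims[NODES];               /* exact-node claims      */

/* Would a new claim of 'request' pages on 'node' fit? */
static bool claim_fits(int node, long request)
{
    /* Every claim, exact-node or not, must fit under the global total. */
    if ( outstanding_claims + request > total_avail_pages )
        return false;

    /* Exact-node claims must additionally fit on the target node. */
    if ( node != NO_NODE &&
         pernode_claims[node] + request > pernode_avail[node] )
        return false;

    return true;
}

int main(void)
{
    /* Domain A: non-exact claim of 75% of the host. */
    long a = total_avail_pages * 3 / 4;
    printf("global claim of %ld: %s\n", a,
           claim_fits(NO_NODE, a) ? "ok" : "rejected");
    outstanding_claims += a;

    /* Domain B: exact-node claim of 50% of the host on node 0. */
    long b = total_avail_pages / 2;
    printf("exact-node claim of %ld: %s\n", b,
           claim_fits(0, b) ? "ok" : "rejected");

    return 0;
}

Running this accepts the 75% global claim and rejects the subsequent 50%
exact-node claim, since together they would exceed total host memory, which
is the overallocation scenario the commit message describes.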