|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v6 4/7] xen/mm: Split outstanding claims into global and node totals
Replace d->outstanding_pages with d->global_claims and add
d->node_claims as the aggregate of the domain's node-specific claims.
Keep the allocator hot path efficient and report the combined claims
state using the two new fields.
No functional change.
Signed-off-by: Bernhard Kaindl <bernhard.kaindl@xxxxxxxxxx>
---
xen/common/domctl.c | 2 +-
xen/common/page_alloc.c | 33 ++++++++++++++++-----------------
xen/include/xen/sched.h | 7 +++++--
3 files changed, 22 insertions(+), 20 deletions(-)
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 93738931c575..2cc5a4ff32fd 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -97,7 +97,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
info->tot_pages = domain_tot_pages(d);
info->max_pages = d->max_pages;
- info->outstanding_pages = d->outstanding_pages;
+ info->outstanding_pages = d->global_claims + d->node_claims;
#ifdef CONFIG_MEM_SHARING
info->shr_pages = atomic_read(&d->shr_pages);
#endif
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 28c424072d25..ee4942f93373 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -535,11 +535,11 @@ unsigned long domain_adjust_tot_pages(struct domain *d,
long pages)
static unsigned long deduct_global_claims(struct domain *d,
unsigned long reduction)
{
- reduction = min(reduction, d->outstanding_pages + 0UL);
+ reduction = min(reduction, d->global_claims + 0UL);
ASSERT(reduction <= outstanding_claims);
outstanding_claims -= reduction;
- d->outstanding_pages -= reduction;
+ d->global_claims -= reduction;
return reduction;
}
@@ -552,8 +552,7 @@ int domain_set_outstanding_pages(struct domain *d, unsigned
long pages)
/*
* Two locks are needed here:
* - d->page_alloc_lock: protects accesses to d->{tot,max,extra}_pages.
- * - heap_lock: protects accesses to d->outstanding_pages,
total_avail_pages
- * and outstanding_claims.
+ * - heap_lock: protects accesses to the claims and avail_pages state.
*/
nrspin_lock(&d->page_alloc_lock);
spin_lock(&heap_lock);
@@ -561,13 +560,13 @@ int domain_set_outstanding_pages(struct domain *d,
unsigned long pages)
/* pages==0 means "unset" the claim. */
if ( pages == 0 )
{
- deduct_global_claims(d, d->outstanding_pages);
+ deduct_global_claims(d, d->global_claims);
ret = 0;
goto out;
}
- /* only one active claim per domain please */
- if ( d->outstanding_pages )
+ /* Only one active claim per domain: reject if any global or node claim exists */
+ if ( d->global_claims || d->node_claims )
{
ret = -EINVAL;
goto out;
@@ -594,8 +593,8 @@ int domain_set_outstanding_pages(struct domain *d, unsigned
long pages)
goto out;
/* yay, claim fits in available memory, stake the claim, success! */
- d->outstanding_pages = claim;
- outstanding_claims += d->outstanding_pages;
+ d->global_claims = claim;
+ outstanding_claims += claim;
ret = 0;
out:
@@ -891,7 +890,7 @@ static bool claims_permit_request(const struct domain *d,
unsigned int memflags,
unsigned long requested_pages)
{
- unsigned long unclaimed_pages;
+ unsigned long unclaimed_pages, applicable_claims;
ASSERT(spin_is_locked(&heap_lock));
ASSERT(avail_pages >= competing_claims);
@@ -910,11 +909,13 @@ static bool claims_permit_request(const struct domain *d,
if ( !d || (memflags & MEMF_no_refcount) )
return false;
+ applicable_claims = d->global_claims;
+
/*
* Allow the request to proceed when combination of unclaimed pages and the
* claims held by the domain cover the shortfall for the requested_pages.
*/
- return requested_pages <= unclaimed_pages + d->outstanding_pages;
+ return requested_pages <= unclaimed_pages + applicable_claims;
}
static struct page_info *get_free_buddy(unsigned int zone_lo,
@@ -1112,18 +1113,16 @@ static struct page_info *alloc_heap_pages(
total_avail_pages -= request;
ASSERT(total_avail_pages >= 0);
- if ( d && d->outstanding_pages && !(memflags & MEMF_no_refcount) )
+ if ( d && d->global_claims && !(memflags & MEMF_no_refcount) )
{
/*
* Adjust claims in the same locked region where total_avail_pages is
* adjusted, not doing so would lead to a window where the amount of
* free memory (avail - claimed) would be incorrect.
*
- * Note that by adjusting the claimed amount here it's possible for
- * pages to fail to be assigned to the claiming domain while already
- * having been subtracted from d->outstanding_pages. Such claimed
- * amount is then lost, as the pages that fail to be assigned to the
- * domain are freed without replenishing the claim. This is fine given
+ * Note, after redeeming claims for the allocation here, assign_pages()
+ * could fail. The domain loses the redeemed claims, as the not-assigned
+ * pages are freed without replenishing the claim. This is fine given
* claims are only to be used during physmap population as part of
* domain build, and any failure in assign_pages() there will result in
* the domain being destroyed before creation is finished. Losing part
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 212c7d765c3e..2995c99aa34a 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -413,8 +413,11 @@ struct domain
unsigned int tot_pages;
unsigned int xenheap_pages; /* pages allocated from Xen heap */
- /* Pages claimed but not possessed, protected by global heap_lock. */
- unsigned int outstanding_pages;
+
+ /* All claims are protected by the heap_lock */
+ unsigned int global_claims; /* Global host-level claims */
+ unsigned int node_claims; /* Sum of the node-specific claims */
+
unsigned int max_pages; /* maximum value for
domain_tot_pages() */
unsigned int extra_pages; /* pages not included in
domain_tot_pages() */
--
2.39.5
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |