Re: [Xen-devel] [RFC/PATCH] XENMEM_claim_pages (subop of an existing) hypercall
>>> On 13.11.12 at 23:23, Dan Magenheimer <dan.magenheimer@xxxxxxxxxx> wrote:
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -454,7 +454,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
> (j * (1UL << exch.out.extent_order)));
>
> spin_lock(&d->page_alloc_lock);
> - d->tot_pages -= dec_count;
> + domain_decrease_tot_pages(d, dec_count);
> drop_dom_ref = (dec_count && !d->tot_pages);
> spin_unlock(&d->page_alloc_lock);
>
> @@ -685,6 +685,19 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> break;
> }
>
> + case XENMEM_claim_pages:
> +
> + rc = rcu_lock_target_domain_by_id(domid, &d);
Where does "domid" come from?
> + if ( rc )
> + return rc;
> +
> + rc = domain_set_unclaimed_pages(d, reservation.nr_extents,
> + reservation.mem_flags);
Where do you initialize "reservation"?
And if you're indeed intending to re-use struct
xen_memory_reservation for this sub-function, then you either
ought to honor the other fields, or bail on them not being zero.
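Both questions would go away if the sub-op copied its argument in up
front. Something along these lines, perhaps (just a sketch, assuming
struct xen_memory_reservation really is what you want to re-use;
nr_extents and mem_flags are consumed, so only the remaining fields
get the must-be-zero treatment):

    case XENMEM_claim_pages:
        if ( copy_from_guest(&reservation, arg, 1) )
            return -EFAULT;

        /* Bail on the fields this sub-op does not consume. */
        if ( reservation.extent_order != 0 ||
             !guest_handle_is_null(reservation.extent_start) )
            return -EINVAL;

        rc = rcu_lock_target_domain_by_id(reservation.domid, &d);
        if ( rc )
            return rc;

        rc = domain_set_unclaimed_pages(d, reservation.nr_extents,
                                        reservation.mem_flags);

        rcu_unlock_domain(d);
        break;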
> +
> + rcu_unlock_domain(d);
> +
> + break;
> +
> default:
> rc = arch_memory_op(op, arg);
> break;
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -238,6 +238,90 @@ static long midsize_alloc_zone_pages;
> #define MIDSIZE_ALLOC_FRAC 128
>
> static DEFINE_SPINLOCK(heap_lock);
> +static long total_unclaimed_pages; /* total outstanding claims by all domains */
> +
> +unsigned long domain_increase_tot_pages(struct domain *d, unsigned long pages)
> +{
> + long dom_before, dom_after, dom_claimed, sys_before, sys_after;
> +
> + ASSERT(spin_is_locked(&d->page_alloc_lock));
> + ASSERT(!spin_is_locked(&heap_lock));
> + spin_lock(&heap_lock);
> + d->tot_pages += pages;
> + if ( d->unclaimed_pages )
> + {
> + dom_before = d->unclaimed_pages;
> + dom_after = dom_before - pages;
> + if ( (dom_before > 0) && (dom_after < 0) )
> + dom_claimed = 0;
> + else
> + dom_claimed = dom_after;
> + sys_before = total_unclaimed_pages;
> + sys_after = sys_before - (dom_before - dom_claimed);
> + BUG_ON( (sys_before > 0) && (sys_after < 0) );
> + total_unclaimed_pages = sys_after;
> + d->unclaimed_pages = dom_claimed;
> + }
> + spin_unlock(&heap_lock);
> + return d->tot_pages;
> +}
> +
> +unsigned long domain_decrease_tot_pages(struct domain *d, unsigned long pages)
> +{
> + ASSERT(spin_is_locked(&d->page_alloc_lock));
> + ASSERT(!spin_is_locked(&heap_lock));
> + spin_lock(&heap_lock);
> + d->tot_pages -= pages;
> + if ( d->unclaimed_pages )
> + {
> + d->unclaimed_pages += pages;
> + total_unclaimed_pages += pages;
> + }
> + spin_unlock(&heap_lock);
> + return d->tot_pages;
> +}
> +
> +int domain_set_unclaimed_pages(struct domain *d, unsigned long pages,
> + unsigned long flags)
> +{
> + int ret = -ENOMEM;
> + unsigned long avail_pages;
> +
> + ASSERT(!spin_is_locked(&d->page_alloc_lock));
> + ASSERT(!spin_is_locked(&heap_lock));
> + spin_lock(&d->page_alloc_lock);
> + spin_lock(&heap_lock);
> + /* only one active claim per domain please */
> + if ( d->unclaimed_pages )
> + goto out;
> + avail_pages = total_avail_pages;
> + if ( !(flags & XENMEM_CLAIMF_free_only) )
> + avail_pages += tmem_freeable_pages();
> + avail_pages -= total_unclaimed_pages;
> + if ( pages < avail_pages )
> + {
> + /* ok if domain has allocated some pages before making a claim */
> + d->unclaimed_pages = pages - d->tot_pages;
> + total_unclaimed_pages += total_unclaimed_pages;
I think you mean "total_unclaimed_pages += d->unclaimed_pages;" here.
> + ret = 0;
> + }
> +out:
> + spin_unlock(&heap_lock);
> + spin_unlock(&d->page_alloc_lock);
> + return ret;
> +}
> +
> +void domain_reset_unclaimed_pages(struct domain *d)
> +{
> + ASSERT(!spin_is_locked(&d->page_alloc_lock));
> + ASSERT(!spin_is_locked(&heap_lock));
> + spin_lock(&d->page_alloc_lock);
> + spin_lock(&heap_lock);
> + total_unclaimed_pages -= d->unclaimed_pages;
> + d->unclaimed_pages = 0;
> + spin_unlock(&heap_lock);
> + spin_unlock(&d->page_alloc_lock);
What is the point of acquiring d->page_alloc_lock here if
d->unclaimed_pages is always manipulated with heap_lock
held anyway?
Also, you mention the need to use this function at least
during domain shutdown, but there is no user of it throughout
the patch.
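For completeness, the caller I would expect is a one-liner on the
teardown path, e.g. (illustrative placement only - whether
domain_kill() or domain_relinquish_resources() is the right hook is
your call):

    /* on the domain teardown path, before its pages get freed: */
    domain_reset_unclaimed_pages(d);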
> +}
>
> static unsigned long init_node_heap(int node, unsigned long mfn,
> unsigned long nr, bool_t *use_tail)
> @@ -442,6 +526,11 @@ static struct page_info *alloc_heap_pages(
>
> spin_lock(&heap_lock);
>
> + /* Claimed memory is considered not available */
> + if ( total_unclaimed_pages + request >
> + total_avail_pages + tmem_freeable_pages() )
So how would a domain having an active claim get to allocate
its memory if total_unclaimed_pages == total_avail_pages +
tmem_freeable_pages()?
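One way out would be to discount the allocating domain's own claim in
this check, along these lines (a sketch, assuming the struct domain *d
passed to alloc_heap_pages() is in scope here and may be NULL for
anonymous allocations):

    /* Claimed memory is unavailable to everyone but the claimant. */
    if ( total_unclaimed_pages - (d ? d->unclaimed_pages : 0) + request >
         total_avail_pages + tmem_freeable_pages() )
        goto not_found;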
> + goto not_found;
> +
> /*
> * TMEM: When available memory is scarce due to tmem absorbing it, allow
> * only mid-size allocations to avoid worst of fragmentation issues.
There are also a few uses of tabs for indentation that would
need cleaning up.
Finally, assuming we really want/need this, shouldn't there be a way
for claims to expire (e.g. in case of tool stack problems), so that
they don't indefinitely prevent (perhaps large) portions of memory
from being used for other purposes (e.g. when a domain can't get
fully cleaned up)?
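Even something crude would do - e.g. stamp the claim when it is made
and retire it once stale (a rough sketch only; the field name and the
timeout are made up, and the locking would need care given the
ASSERT()s in the helpers above):

    /* in domain_set_unclaimed_pages(), once the claim is accepted: */
    d->claim_expires = NOW() + SECONDS(CLAIM_TIMEOUT_SECS);

    /* wherever claims are consulted, retire stale ones first: */
    if ( d->unclaimed_pages && NOW() > d->claim_expires )
        domain_reset_unclaimed_pages(d);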
Jan