Re: [Xen-devel] [PATCH 2/2] use return value of domain_adjust_tot_pages() where feasible
On 27/11/13 08:26, Jan Beulich wrote:
> This is generally cheaper than re-reading ->tot_pages.
>
> While doing so I also noticed an improper use (lacking error handling)
> of get_domain() as well as missing ->is_dying checks in the memory
> sharing code, which the patch fixes at once. In the course of doing
> this I further noticed other error paths there pointlessly calling
> put_page() et al with ->page_alloc_lock still held; that ordering is
> also being reversed.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
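
A quick illustration of the pattern for anyone skimming the diff: the
hunks below all rely on domain_adjust_tot_pages() returning the updated
total (as the new code implies), so the caller can test the return value
rather than re-reading ->tot_pages afterwards. A minimal standalone
sketch, with hypothetical stand-ins rather than the real Xen code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct domain and its reference count. */
struct dom {
    long tot_pages;
    long refcnt;
};

/* Modelled on domain_adjust_tot_pages(): apply delta, return new total. */
static long adjust_tot_pages(struct dom *d, long delta)
{
    d->tot_pages += delta;
    return d->tot_pages;
}

int main(void)
{
    struct dom d = { .tot_pages = 1, .refcnt = 1 };

    /*
     * Old: adjust_tot_pages(&d, -1); drop = (d.tot_pages == 0);
     * New: a single call; no second read of the field.
     */
    bool drop_dom_ref = !adjust_tot_pages(&d, -1);

    if ( drop_dom_ref )
        d.refcnt--;                       /* stand-in for put_domain() */

    printf("tot_pages=%ld refcnt=%ld\n", d.tot_pages, d.refcnt);
    return 0;
}
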
>
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -611,9 +611,16 @@ static int page_make_sharable(struct dom
> struct page_info *page,
> int expected_refcnt)
> {
> - int drop_dom_ref;
> + bool_t drop_dom_ref;
> +
> spin_lock(&d->page_alloc_lock);
>
> + if ( d->is_dying )
> + {
> + spin_unlock(&d->page_alloc_lock);
> + return -EBUSY;
> + }
> +
> /* Change page type and count atomically */
> if ( !get_page_and_type(page, d, PGT_shared_page) )
> {
> @@ -624,8 +631,8 @@ static int page_make_sharable(struct dom
> /* Check it wasn't already sharable and undo if it was */
> if ( (page->u.inuse.type_info & PGT_count_mask) != 1 )
> {
> - put_page_and_type(page);
> spin_unlock(&d->page_alloc_lock);
> + put_page_and_type(page);
> return -EEXIST;
> }
>
> @@ -633,15 +640,14 @@ static int page_make_sharable(struct dom
> * the second from get_page_and_type at the top of this function */
> if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
> {
> + spin_unlock(&d->page_alloc_lock);
> /* Return type count back to zero */
> put_page_and_type(page);
> - spin_unlock(&d->page_alloc_lock);
> return -E2BIG;
> }
>
> page_set_owner(page, dom_cow);
> - domain_adjust_tot_pages(d, -1);
> - drop_dom_ref = (d->tot_pages == 0);
> + drop_dom_ref = !domain_adjust_tot_pages(d, -1);
> page_list_del(page, &d->page_list);
> spin_unlock(&d->page_alloc_lock);
>
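
The reordering in the two error paths above is pure lock-scope hygiene:
release the lock first, then drop the references, so the potentially
heavier put_page_and_type() work never runs under ->page_alloc_lock. A
simplified sketch of the resulting shape, using hypothetical pthread
stubs rather than the real Xen primitives:

#include <errno.h>
#include <pthread.h>

struct page { int type_count; };

static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for put_page_and_type(): the work we keep outside the lock. */
static void put_page_and_type_stub(struct page *pg)
{
    pg->type_count--;
}

static int error_path(struct page *pg)
{
    pthread_mutex_lock(&page_alloc_lock);

    /* ... a check made under the lock has failed ... */

    pthread_mutex_unlock(&page_alloc_lock);  /* release the lock first, */
    put_page_and_type_stub(pg);              /* then drop the reference */
    return -EEXIST;
}

int main(void)
{
    struct page pg = { .type_count = 1 };
    return error_path(&pg) == -EEXIST ? 0 : 1;
}
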
> @@ -659,6 +665,13 @@ static int page_make_private(struct doma
>
> spin_lock(&d->page_alloc_lock);
>
> + if ( d->is_dying )
> + {
> + spin_unlock(&d->page_alloc_lock);
> + put_page(page);
> + return -EBUSY;
> + }
> +
> /* We can only change the type if count is one */
> /* Because we are locking pages individually, we need to drop
> * the lock here, while the page is typed. We cannot risk the
> @@ -666,8 +679,8 @@ static int page_make_private(struct doma
> expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
> if ( page->u.inuse.type_info != expected_type )
> {
> - put_page(page);
> spin_unlock(&d->page_alloc_lock);
> + put_page(page);
> return -EEXIST;
> }
>
> @@ -682,7 +695,7 @@ static int page_make_private(struct doma
> page_set_owner(page, d);
>
> if ( domain_adjust_tot_pages(d, 1) == 1 )
> - get_domain(d);
> + get_knownalive_domain(d);
> page_list_add_tail(page, &d->page_list);
> spin_unlock(&d->page_alloc_lock);
>
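
Worth spelling out why the get_domain() -> get_knownalive_domain()
switch above is the right fix: get_domain() can fail for a dying
domain, so ignoring its return value silently loses that failure,
whereas after the new ->is_dying check under the lock the domain is
known to be alive. A toy model with hypothetical stubs, not the real
Xen refcounting:

#include <assert.h>
#include <stdbool.h>

struct dom { bool is_dying; unsigned int refcnt; };

/* Toy get_domain(): can fail, so its result must not be ignored. */
static bool get_domain_stub(struct dom *d)
{
    if ( d->is_dying )
        return false;
    d->refcnt++;
    return true;
}

/* Toy get_knownalive_domain(): only valid once dying is ruled out. */
static void get_knownalive_domain_stub(struct dom *d)
{
    assert(!d->is_dying);
    d->refcnt++;
}

int main(void)
{
    struct dom d = { .is_dying = false, .refcnt = 1 };
    get_knownalive_domain_stub(&d);   /* safe: liveness already checked */
    return get_domain_stub(&d) ? 0 : 1;
}
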
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -475,8 +475,8 @@ static long memory_exchange(XEN_GUEST_HA
> (j * (1UL << exch.out.extent_order)));
>
> spin_lock(&d->page_alloc_lock);
> - domain_adjust_tot_pages(d, -dec_count);
> - drop_dom_ref = (dec_count && !d->tot_pages);
> + drop_dom_ref = (dec_count &&
> + !domain_adjust_tot_pages(d, -dec_count));
> spin_unlock(&d->page_alloc_lock);
>
> if ( drop_dom_ref )
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1516,8 +1516,9 @@ struct page_info *alloc_domheap_pages(
>
> void free_domheap_pages(struct page_info *pg, unsigned int order)
> {
> - int i, drop_dom_ref;
> struct domain *d = page_get_owner(pg);
> + unsigned int i;
> + bool_t drop_dom_ref;
>
> ASSERT(!in_irq());
>
> @@ -1545,8 +1546,7 @@ void free_domheap_pages(struct page_info
> page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
> }
>
> - domain_adjust_tot_pages(d, -(1 << order));
> - drop_dom_ref = (d->tot_pages == 0);
> + drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
>
> spin_unlock_recursive(&d->page_alloc_lock);
>
>
>