[Xen-devel] [PATCH 2/3] xen/arm: implement domain_relinquish_resources
put_page on every entry in xenpage_list and page_list

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/domain.c |   47 ++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 17713f8..3e9a690 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -9,6 +9,7 @@
 #include <xen/grant_table.h>
 
 #include <asm/current.h>
+#include <asm/event.h>
 #include <asm/regs.h>
 #include <asm/p2m.h>
 #include <asm/irq.h>
@@ -517,11 +518,51 @@ void arch_vcpu_reset(struct vcpu *v)
     vcpu_end_shutdown_deferral(v);
 }
 
+static int relinquish_memory(struct domain *d, struct page_list_head *list)
+{
+    struct page_info *page, *tmp;
+    int ret = 0;
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    page_list_for_each_safe( page, tmp, list )
+    {
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+            /* Couldn't get a reference -- someone is freeing this page. */
+            BUG();
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+        put_page(page);
+
+        if ( hypercall_preempt_check() )
+        {
+            ret = -EAGAIN;
+            goto out;
+        }
+    }
+
+  out:
+    spin_unlock_recursive(&d->page_alloc_lock);
+    return ret;
+}
+
 int domain_relinquish_resources(struct domain *d)
 {
-    /* XXX teardown pagetables, free pages etc */
-    ASSERT(0);
-    return 0;
+    int ret = 0;
+
+    ret = relinquish_memory(d, &d->xenpage_list);
+    if ( ret )
+        return ret;
+
+    ret = relinquish_memory(d, &d->page_list);
+    if ( ret )
+        return ret;
+
+    return ret;
}
 
 void arch_dump_domain_info(struct domain *d)
-- 
1.7.2.5
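
For context on the -EAGAIN path above: relinquish_memory() can bail out
mid-list when hypercall_preempt_check() fires, and it relies on its caller
restarting the whole teardown. Below is a minimal sketch of that retry
protocol, assuming a caller shaped like domain_kill() in common code. The
function name kill_domain_sketch and the in-place loop are illustrative
only; the real call site propagates -EAGAIN up through the hypercall layer
so the domctl gets reissued, rather than spinning inside the hypervisor.

    /* Illustrative sketch, not part of the patch. */
    static int kill_domain_sketch(struct domain *d)
    {
        int rc;

        do {
            /* Tear down as much as we can before preemption kicks in. */
            rc = domain_relinquish_resources(d);
            /*
             * -EAGAIN: hypercall_preempt_check() fired mid-list.  Pages
             * whose last reference was dropped have been unlinked from
             * the list, so every pass makes forward progress and the
             * loop terminates.
             */
        } while ( rc == -EAGAIN );

        return rc; /* 0 on success, or a genuine error */
    }

Restarting from the head of the list is safe because put_page() on a
page's final reference ends up in free_domheap_page(), which unlinks the
page from the domain's list under the same page_alloc_lock -- which is
exactly why the patch takes that lock recursively.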