[PATCH v12 1/6] xen: do not free reserved memory into heap
Pages used as guest RAM for a static domain shall be reserved to that
domain only. So, to prevent such reserved pages from being used for any
other purpose, they shall not be freed back to the heap, even when the
last reference is dropped.

This commit introduces a new helper, free_domstatic_page, to free static
pages at runtime; free_staticmem_pages will now be called by it at
runtime, so drop the __init flag.

Wrapping the function declarations (free_staticmem_pages,
free_domstatic_page, etc.) in #ifdef CONFIG_STATIC_MEMORY is redundant,
so remove it here.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
---
v12 changes:
- no change
---
v11 changes:
- print the message ahead of the assertion; it should also use a
  XENLOG_G_* log level
---
v10 changes:
- let Arm keep #define PGC_static 0 private, with the generic fallback
  remaining in page_alloc.c
- change ASSERT(d) to ASSERT_UNREACHABLE() to be more robust looking
  forward, and also add a printk() to log the problem
- mention the removal of #ifdef CONFIG_STATIC_MEMORY in the commit
  message
---
v9 changes:
- move free_domheap_page into the else-condition
- regarding scrubbing static pages, the domain-dying case and
  opt_scrub_domheap both do not apply to static pages
- as unowned static pages don't make their way to free_domstatic_page at
  the moment, remove the else-condition and add ASSERT(d) at the top of
  the function
---
v8 changes:
- introduce new helper free_domstatic_page
- let put_page call free_domstatic_page for a static page, when the last
  ref drops
- #define PGC_static to zero when !CONFIG_STATIC_MEMORY, as it is used
  outside page_alloc.c
---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse
  function acquire_staticmem_pages
---
v6 changes:
- adapt to PGC_static
- remove #ifdef around function declarations
---
v5 changes:
- in order to avoid stub functions, #define PGC_staticmem to non-zero
  only when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set
  count_info to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
---
v4 changes:
- no changes
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY
  case
- move the change to free_heap_pages() to cover other potential call
  sites
- fix the indentation
---
v2 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  6 +++++-
 xen/arch/arm/mm.c             |  5 ++++-
 xen/common/page_alloc.c       | 40 ++++++++++++++++++++++++++++++++---
 xen/include/xen/mm.h          |  3 +--
 4 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index da25251cda..749fbefa0c 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -121,9 +121,13 @@ struct page_info
 /* Page is Xen heap? */
 #define _PGC_xen_heap     PG_shift(2)
 #define PGC_xen_heap      PG_mask(1, 2)
- /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
 #define _PGC_static       PG_shift(3)
 #define PGC_static        PG_mask(1, 3)
+#else
+#define PGC_static        0
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken       PG_shift(7)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index c81c706c8b..7f5b317d3e 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1496,7 +1496,10 @@ void put_page(struct page_info *page)
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
     {
-        free_domheap_page(page);
+        if ( unlikely(nx & PGC_static) )
+            free_domstatic_page(page);
+        else
+            free_domheap_page(page);
     }
 }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index bfd4150be7..0c50dee4c5 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2694,12 +2694,14 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
-                                 bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+                          bool need_scrub)
 {
     mfn_t mfn = page_to_mfn(pg);
     unsigned long i;
 
+    spin_lock(&heap_lock);
+
     for ( i = 0; i < nr_mfns; i++ )
     {
         mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2710,9 +2712,41 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
             scrub_one_page(pg);
         }
 
-        /* In case initializing page of static memory, mark it PGC_static. */
         pg[i].count_info |= PGC_static;
     }
+
+    spin_unlock(&heap_lock);
+}
+
+void free_domstatic_page(struct page_info *page)
+{
+    struct domain *d = page_get_owner(page);
+    bool drop_dom_ref;
+
+    if ( unlikely(!d) )
+    {
+        printk(XENLOG_G_ERR
+               "The about-to-free static page %"PRI_mfn" must be owned by a domain\n",
+               mfn_x(page_to_mfn(page)));
+        ASSERT_UNREACHABLE();
+        return;
+    }
+
+    ASSERT_ALLOC_CONTEXT();
+
+    /* NB. May recursively lock from relinquish_memory(). */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    arch_free_heap_page(d, page);
+
+    drop_dom_ref = !domain_adjust_tot_pages(d, -1);
+
+    spin_unlock_recursive(&d->page_alloc_lock);
+
+    free_staticmem_pages(page, 1, scrub_debug);
+
+    if ( drop_dom_ref )
+        put_domain(d);
 }
 
 /*
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 35b065146f..deadf4b2a1 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,13 +85,12 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
-#ifdef CONFIG_STATIC_MEMORY
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub);
+void free_domstatic_page(struct page_info *page);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
                             unsigned int memflags);
-#endif
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
-- 
2.25.1
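
For readers new to the static-memory work, below is a minimal sketch of the
dispatch this patch adds, and of why the PGC_static == 0 fallback avoids stub
functions. PGC_static, free_domstatic_page() and free_domheap_page() are the
real names used in the patch above; the wrapper function and everything else
in the sketch are hypothetical scaffolding, assuming Xen's usual mm headers
are in scope:

    /*
     * Illustrative sketch only -- mirrors the dispatch added to Arm's
     * put_page() above; last_ref_dropped() is a hypothetical wrapper,
     * not code from the patch.
     */
    static void last_ref_dropped(struct page_info *page)
    {
        if ( unlikely(page->count_info & PGC_static) )
            /* Reserved page: mark it free again, but keep it out of the
             * heap so it stays bound to its static domain. */
            free_domstatic_page(page);
        else
            /* Ordinary domheap page: return it to the heap allocator. */
            free_domheap_page(page);
    }

When !CONFIG_STATIC_MEMORY, PGC_static is #defined to 0, so the condition
above is a compile-time constant false; the compiler drops the call to
free_domstatic_page(), which is why its declaration can stay outside any
#ifdef without needing a stub definition.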