|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC XEN PATCH v4 28/41] xen/pmem: release PMEM pages on HVM domain destruction
A new step RELMEM_pmem is added and taken before RELMEM_xen to release
all PMEM pages mapped to an HVM domain.
Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/arch/x86/domain.c | 32 ++++++++++++++++++++++++++++----
xen/arch/x86/mm.c | 9 +++++++--
xen/common/pmem.c | 10 ++++++++++
xen/include/asm-x86/domain.h | 1 +
xen/include/xen/pmem.h | 6 ++++++
5 files changed, 52 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e1bf2d9e9d..613a8b4250 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1801,11 +1801,15 @@ static int relinquish_memory(
{
struct page_info *page;
unsigned long x, y;
+ bool is_pmem_list = (list == &d->pmem_page_list);
int ret = 0;
/* Use a recursive lock, as we may enter 'free_domheap_page'. */
spin_lock_recursive(&d->page_alloc_lock);
+ if ( is_pmem_list )
+ spin_lock(&d->pmem_lock);
+
while ( (page = page_list_remove_head(list)) )
{
/* Grab a reference to the page so it won't disappear from under us. */
@@ -1887,8 +1891,9 @@ static int relinquish_memory(
}
}
- /* Put the page on the list and /then/ potentially free it. */
- page_list_add_tail(page, &d->arch.relmem_list);
+ if ( !is_pmem_list )
+ /* Put the page on the list and /then/ potentially free it. */
+ page_list_add_tail(page, &d->arch.relmem_list);
put_page(page);
if ( hypercall_preempt_check() )
@@ -1898,10 +1903,13 @@ static int relinquish_memory(
}
}
- /* list is empty at this point. */
- page_list_move(list, &d->arch.relmem_list);
+ if ( !is_pmem_list )
+ /* list is empty at this point. */
+ page_list_move(list, &d->arch.relmem_list);
out:
+ if ( is_pmem_list )
+ spin_unlock(&d->pmem_lock);
spin_unlock_recursive(&d->page_alloc_lock);
return ret;
}
@@ -1968,13 +1976,29 @@ int domain_relinquish_resources(struct domain *d)
return ret;
}
+#ifndef CONFIG_NVDIMM_PMEM
d->arch.relmem = RELMEM_xen;
+#else
+ d->arch.relmem = RELMEM_pmem;
+#endif
spin_lock(&d->page_alloc_lock);
page_list_splice(&d->arch.relmem_list, &d->page_list);
INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
spin_unlock(&d->page_alloc_lock);
+#ifdef CONFIG_NVDIMM_PMEM
+ /* Fallthrough. Relinquish every page of PMEM. */
+ case RELMEM_pmem:
+ if ( is_hvm_domain(d) )
+ {
+ ret = relinquish_memory(d, &d->pmem_page_list, ~0UL);
+ if ( ret )
+ return ret;
+ }
+ d->arch.relmem = RELMEM_xen;
+#endif
+
/* Fallthrough. Relinquish every page of memory. */
case RELMEM_xen:
ret = relinquish_memory(d, &d->xenpage_list, ~0UL);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9a224cf1bb..9386e88eb1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -106,6 +106,7 @@
#include <xen/efi.h>
#include <xen/grant_table.h>
#include <xen/hypercall.h>
+#include <xen/pmem.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/page.h>
@@ -2306,8 +2307,12 @@ void put_page(struct page_info *page)
if ( unlikely((nx & PGC_count_mask) == 0) )
{
- if ( !is_pmem_page(page) /* PMEM page is not allocated from Xen heap. */
- && cleanup_page_cacheattr(page) == 0 )
+#ifdef CONFIG_NVDIMM_PMEM
+ if ( is_pmem_page(page) )
+ pmem_page_cleanup(page);
+ else
+#endif
+ if ( cleanup_page_cacheattr(page) == 0 )
free_domheap_page(page);
else
gdprintk(XENLOG_WARNING,
diff --git a/xen/common/pmem.c b/xen/common/pmem.c
index d2c5518329..a0d23cdfbe 100644
--- a/xen/common/pmem.c
+++ b/xen/common/pmem.c
@@ -733,6 +733,16 @@ int pmem_populate(struct xen_pmem_map_args *args)
return rc;
}
+void pmem_page_cleanup(struct page_info *page)
+{
+ ASSERT(is_pmem_page(page));
+ ASSERT((page->count_info & PGC_count_mask) == 0);
+
+ page->count_info = PGC_pmem_page | PGC_state_free;
+ page_set_owner(page, NULL);
+ set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
+}
+
int __init pmem_dom0_setup_permission(struct domain *d)
{
struct list_head *cur;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f69911918e..e6f575244d 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -305,6 +305,7 @@ struct arch_domain
enum {
RELMEM_not_started,
RELMEM_shared,
+ RELMEM_pmem,
RELMEM_xen,
RELMEM_l4,
RELMEM_l3,
diff --git a/xen/include/xen/pmem.h b/xen/include/xen/pmem.h
index 2dab90530b..dfbc412065 100644
--- a/xen/include/xen/pmem.h
+++ b/xen/include/xen/pmem.h
@@ -21,6 +21,7 @@
#ifdef CONFIG_NVDIMM_PMEM
#include <public/sysctl.h>
+#include <xen/mm.h>
#include <xen/types.h>
int pmem_register(unsigned long smfn, unsigned long emfn, unsigned int pxm);
@@ -46,6 +47,7 @@ struct xen_pmem_map_args {
};
int pmem_populate(struct xen_pmem_map_args *args);
+void pmem_page_cleanup(struct page_info *page);
#else /* !CONFIG_X86 */
@@ -64,6 +66,10 @@ static inline int pmem_populate(...)
return -ENOSYS;
}
+static inline void pmem_page_cleanup(...)
+{
+}
+
#endif /* CONFIG_X86 */
#endif /* CONFIG_NVDIMM_PMEM */
--
2.15.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.