[xen staging] xen: retrieve reserved pages on populate_physmap
commit 172015009a6acc20357a2698800e13058ba1d3db
Author: Penny Zheng <Penny.Zheng@xxxxxxx>
AuthorDate: Tue Sep 6 15:39:19 2022 +0800
Commit: Julien Grall <jgrall@xxxxxxxxxx>
CommitDate: Tue Sep 6 18:03:07 2022 +0100
xen: retrieve reserved pages on populate_physmap
When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list to make sure that the
domain's RAM stays restricted to the statically configured memory regions.
This commit also introduces a new helper, acquire_reserved_page, to make
that work.
Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
---
xen/common/memory.c | 23 +++++++++++++++
xen/common/page_alloc.c | 74 +++++++++++++++++++++++++++++++++++++++----------
xen/include/xen/mm.h | 1 +
3 files changed, 84 insertions(+), 14 deletions(-)
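
For illustration only (not part of this commit): the new branch is what a
toolstack or in-guest balloon driver ends up exercising when it repopulates
memory for a static domain via XENMEM_populate_physmap. A minimal
toolstack-side sketch, assuming libxenctrl's
xc_domain_populate_physmap_exact() is available, could look like the
following; the extent order has to be 0, matching the check added to
populate_physmap() below.

    #include <xenctrl.h>

    /* Repopulate a single GFN of a static domain with one order-0 page. */
    static int repopulate_one_page(xc_interface *xch, uint32_t domid,
                                   xen_pfn_t gpfn)
    {
        /*
         * For a domain using static memory the hypervisor now serves this
         * request from d->resv_page_list; non-zero orders are rejected.
         */
        return xc_domain_populate_physmap_exact(xch, domid,
                                                1 /* nr_extents */,
                                                0 /* extent_order */,
                                                0 /* mem_flags */,
                                                &gpfn);
    }
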
diff --git a/xen/common/memory.c b/xen/common/memory.c
index bc89442ba5..ae8163a738 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
mfn = _mfn(gpfn);
}
+ else if ( is_domain_using_staticmem(d) )
+ {
+ /*
+ * No easy way to guarantee the retrieved pages are contiguous,
+ * so forbid non-zero-order requests here.
+ */
+ if ( a->extent_order != 0 )
+ {
+ gdprintk(XENLOG_WARNING,
+ "Cannot allocate static order-%u pages for %pd\n",
+ a->extent_order, d);
+ goto out;
+ }
+
+ mfn = acquire_reserved_page(d, a->memflags);
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ {
+ gdprintk(XENLOG_WARNING,
+ "%pd: failed to retrieve a reserved page\n",
+ d);
+ goto out;
+ }
+ }
else
{
page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 18d34d1b69..62afb07bc6 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2755,9 +2755,8 @@ void free_domstatic_page(struct page_info *page)
put_domain(d);
}
-static bool __init prepare_staticmem_pages(struct page_info *pg,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool prepare_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ unsigned int memflags)
{
bool need_tlbflush = false;
uint32_t tlbflush_timestamp = 0;
@@ -2838,6 +2837,25 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
return pg;
}
+static int assign_domstatic_pages(struct domain *d, struct page_info *pg,
+ unsigned int nr_mfns, unsigned int memflags)
+{
+ if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
+ {
+ /*
+ * Respective handling omitted here because right now
+ * acquired static memory is only for domain's RAM.
+ */
+ ASSERT_UNREACHABLE();
+ return -EINVAL;
+ }
+
+ if ( assign_pages(pg, nr_mfns, d, memflags) )
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
* then assign them to one specific domain #d.
@@ -2853,17 +2871,7 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
if ( !pg )
return -ENOENT;
- if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
- {
- /*
- * Respective handling omitted here because right now
- * acquired static memory is only for guest RAM.
- */
- ASSERT_UNREACHABLE();
- return -EINVAL;
- }
-
- if ( assign_pages(pg, nr_mfns, d, memflags) )
+ if ( assign_domstatic_pages(d, pg, nr_mfns, memflags) )
{
unprepare_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
return -EINVAL;
@@ -2871,6 +2879,44 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
return 0;
}
+
+/*
+ * Acquire a page from the reserved page list (resv_page_list) when
+ * populating memory for a static domain at runtime.
+ */
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+ struct page_info *page;
+
+ ASSERT_ALLOC_CONTEXT();
+
+ /* Acquire a page from reserved page list(resv_page_list). */
+ spin_lock(&d->page_alloc_lock);
+ page = page_list_remove_head(&d->resv_page_list);
+ spin_unlock(&d->page_alloc_lock);
+ if ( unlikely(!page) )
+ return INVALID_MFN;
+
+ if ( !prepare_staticmem_pages(page, 1, memflags) )
+ goto fail;
+
+ if ( assign_domstatic_pages(d, page, 1, memflags) )
+ goto fail_assign;
+
+ return page_to_mfn(page);
+
+ fail_assign:
+ /*
+ * The page was never accessible by the domain. So scrubbing can be
+ * skipped
+ */
+ unprepare_staticmem_pages(page, 1, false);
+ fail:
+ spin_lock(&d->page_alloc_lock);
+ page_list_add_tail(page, &d->resv_page_list);
+ spin_unlock(&d->page_alloc_lock);
+ return INVALID_MFN;
+}
#endif
/*
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 93db3c4418..a925028ab3 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -198,6 +198,7 @@ struct npfec {
#else
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
extern struct domain *dom_xen, *dom_io;
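
The counterpart to acquire_reserved_page() is the freeing path: when a
static domain releases memory, the page is expected to be parked on
d->resv_page_list (handled elsewhere in the static-memory series) rather
than returned to the heap, and populate_physmap now hands it back out from
there, so the domain's RAM never leaves its statically configured regions.
A stand-alone model of that park/reacquire pattern (ordinary C, not
hypervisor code; all names are illustrative) is:

    #include <pthread.h>
    #include <stdio.h>

    struct page {
        struct page *next;
        unsigned long mfn;
    };

    /* Models d->resv_page_list and d->page_alloc_lock. */
    static struct page *resv_head;
    static pthread_mutex_t resv_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Models the freeing side: park a released page on the reserved list. */
    static void reserve_page(struct page *pg)
    {
        pthread_mutex_lock(&resv_lock);
        pg->next = resv_head;
        resv_head = pg;
        pthread_mutex_unlock(&resv_lock);
    }

    /* Models acquire_reserved_page(): take one page back, or NULL if empty. */
    static struct page *acquire_page(void)
    {
        struct page *pg;

        pthread_mutex_lock(&resv_lock);
        pg = resv_head;
        if ( pg )
            resv_head = pg->next;
        pthread_mutex_unlock(&resv_lock);

        return pg;
    }

    int main(void)
    {
        struct page p = { .next = NULL, .mfn = 0x1234 };

        reserve_page(&p);                       /* domain releases the page */
        printf("reacquired mfn %#lx\n", acquire_page()->mfn);

        return 0;
    }
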
--
generated by git-patchbot for /home/xen/git/xen.git#staging