|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v10 7/9] xen: introduce prepare_staticmem_pages
Later, we want to use acquire_domstatic_pages() for populating memory
for static domains at runtime; however, it involves a lot of pointless
work (checking mfn_valid(), scrubbing the free part, cleaning the
cache, ...) considering we know the page is valid and belongs to the guest.
This commit splits acquire_staticmem_pages() into two parts, and
introduces prepare_staticmem_pages() to bypass all the "pointless work".
Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Julien Grall <jgrall@xxxxxxxxxx>
---
v10 changes:
- no change
---
v9 changes:
- no change
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- new commit
---
xen/common/page_alloc.c | 61 ++++++++++++++++++++++++-----------------
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 25521af600..0ee697705c 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2751,26 +2751,13 @@ void free_domstatic_page(struct page_info *page)
put_domain(d);
}
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+ unsigned long nr_mfns,
+ unsigned int memflags)
{
bool need_tlbflush = false;
uint32_t tlbflush_timestamp = 0;
unsigned long i;
- struct page_info *pg;
-
- ASSERT(nr_mfns);
- for ( i = 0; i < nr_mfns; i++ )
- if ( !mfn_valid(mfn_add(smfn, i)) )
- return NULL;
-
- pg = mfn_to_page(smfn);
spin_lock(&heap_lock);
@@ -2781,7 +2768,7 @@ static struct page_info * __init
acquire_staticmem_pages(mfn_t smfn,
{
printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
- i, mfn_x(smfn) + i,
+ i, mfn_x(page_to_mfn(pg)) + i,
pg[i].count_info, pg[i].tlbflush_timestamp);
goto out_err;
}
@@ -2805,6 +2792,38 @@ static struct page_info * __init
acquire_staticmem_pages(mfn_t smfn,
if ( need_tlbflush )
filtered_flush_tlb_mask(tlbflush_timestamp);
+ return true;
+
+ out_err:
+ while ( i-- )
+ pg[i].count_info = PGC_static | PGC_state_free;
+
+ spin_unlock(&heap_lock);
+
+ return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+ unsigned long i;
+ struct page_info *pg;
+
+ ASSERT(nr_mfns);
+ for ( i = 0; i < nr_mfns; i++ )
+ if ( !mfn_valid(mfn_add(smfn, i)) )
+ return NULL;
+
+ pg = mfn_to_page(smfn);
+ if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+ return NULL;
+
/*
* Ensure cache and RAM are consistent for platforms where the guest
* can control its own visibility of/through the cache.
@@ -2813,14 +2832,6 @@ static struct page_info * __init
acquire_staticmem_pages(mfn_t smfn,
flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
return pg;
-
- out_err:
- while ( i-- )
- pg[i].count_info = PGC_static | PGC_state_free;
-
- spin_unlock(&heap_lock);
-
- return NULL;
}
/*
--
2.25.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |