[PATCH v1 3/6] xen/riscv: construct the P2M pages pool for guests
Implement p2m_set_allocation() to construct the p2m pages pool for
guests based on the required number of pages.
This is implemented by:
- Adding a `struct paging_domain` to `struct arch_domain`. It contains
  a freelist, a counter variable and a spinlock to track the free p2m
  pages and the total number of pages in the p2m pages pool.
- Adding a helper `p2m_set_allocation` to set the p2m pages pool size.
  This helper should be called before allocating memory for a guest;
  it is called from domain_p2m_set_allocation(), which is part of the
  common dom0less code. A sketch of the expected calling pattern
  follows below.
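For context, the intended calling pattern is to take d->arch.paging.lock
around the helper and retry when it requests preemption. Below is a
minimal, illustrative sketch of such a caller; the wrapper name
set_p2m_pool_size() and the retry via process_pending_softirqs() are
assumptions made only for illustration and are not part of this patch
(it also assumes p2m_set_allocation() is declared in a header visible
to the caller):

/* Illustrative only; needs <xen/sched.h> and <xen/softirq.h>. */
static int set_p2m_pool_size(struct domain *d, unsigned long pages)
{
    bool preempted;
    int rc;

    do {
        preempted = false;

        /* p2m_set_allocation() requires d->arch.paging.lock to be held. */
        spin_lock(&d->arch.paging.lock);
        rc = p2m_set_allocation(d, pages, &preempted);
        spin_unlock(&d->arch.paging.lock);

        /*
         * Outside of a hypercall continuation, drain pending softirqs
         * and retry instead of propagating -ERESTART.
         */
        if ( preempted )
            process_pending_softirqs();
    } while ( preempted );

    return rc;
}

In a hypercall path the -ERESTART/preempted outcome would instead be
propagated so the operation can be continued later.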
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
xen/arch/riscv/include/asm/domain.h | 10 +++++
xen/arch/riscv/p2m.c                | 67 +++++++++++++++++++++++++++++
2 files changed, 77 insertions(+)
diff --git a/xen/arch/riscv/include/asm/domain.h b/xen/arch/riscv/include/asm/domain.h
index 48be90a395..b818127f9f 100644
--- a/xen/arch/riscv/include/asm/domain.h
+++ b/xen/arch/riscv/include/asm/domain.h
@@ -2,6 +2,8 @@
#ifndef ASM__RISCV__DOMAIN_H
#define ASM__RISCV__DOMAIN_H
+#include <xen/mm.h>
+#include <xen/spinlock.h>
#include <xen/xmalloc.h>
#include <public/hvm/params.h>
@@ -18,12 +20,20 @@ struct arch_vcpu_io {
struct arch_vcpu {
};
+struct paging_domain {
+    spinlock_t lock;
+    /* Free P2M pages from the pre-allocated P2M pool */
+    struct page_list_head p2m_freelist;
+    /* Number of pages from the pre-allocated P2M pool */
+    unsigned long p2m_total_pages;
+};
struct arch_domain {
struct hvm_domain hvm;
struct p2m_domain p2m;
+    struct paging_domain paging;
};
#include <xen/sched.h>
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index ad4beef8f9..a890870391 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -1,4 +1,12 @@
#include <xen/domain_page.h>
+/*
+ * general_preempt_check() from xen/sched.h uses
+ * local_events_need_delivery(), which is declared in <asm/event.h>.
+ * Thereby <xen/event.h> needs to be included here before xen/sched.h.
+ *
+ * Shouldn't xen/event.h be included in <xen/sched.h>?
+ */
+#include <xen/event.h>
#include <xen/iommu.h>
#include <xen/lib.h>
#include <xen/mm.h>
@@ -133,7 +141,9 @@ int p2m_init(struct domain *d)
int rc;
rwlock_init(&p2m->lock);
+    spin_lock_init(&d->arch.paging.lock);
INIT_PAGE_LIST_HEAD(&p2m->pages);
+    INIT_PAGE_LIST_HEAD(&d->arch.paging.p2m_freelist);
p2m->max_mapped_gfn = _gfn(0);
p2m->lowest_mapped_gfn = _gfn(ULONG_MAX);
@@ -166,3 +176,60 @@ int p2m_init(struct domain *d)
return 0;
}
+
+/*
+ * Set the pool of pages to the required number of pages.
+ * Returns 0 for success, non-zero for failure.
+ * Call with d->arch.paging.lock held.
+ */
+int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted)
+{
+    struct page_info *pg;
+
+    ASSERT(spin_is_locked(&d->arch.paging.lock));
+
+    for ( ; ; )
+    {
+        if ( d->arch.paging.p2m_total_pages < pages )
+        {
+            /* Need to allocate more memory from domheap */
+            pg = alloc_domheap_page(d, MEMF_no_owner);
+            if ( pg == NULL )
+            {
+                printk(XENLOG_ERR "Failed to allocate P2M pages.\n");
+                return -ENOMEM;
+            }
+            ACCESS_ONCE(d->arch.paging.p2m_total_pages) =
+                d->arch.paging.p2m_total_pages + 1;
+            page_list_add_tail(pg, &d->arch.paging.p2m_freelist);
+        }
+        else if ( d->arch.paging.p2m_total_pages > pages )
+        {
+            /* Need to return memory to domheap */
+            pg = page_list_remove_head(&d->arch.paging.p2m_freelist);
+            if ( pg )
+            {
+                ACCESS_ONCE(d->arch.paging.p2m_total_pages) =
+                    d->arch.paging.p2m_total_pages - 1;
+                free_domheap_page(pg);
+            }
+            else
+            {
+                printk(XENLOG_ERR
+                       "Failed to free P2M pages, P2M freelist is empty.\n");
+                return -ENOMEM;
+            }
+        }
+        else
+            break;
+
+        /* Check to see if we need to yield and try again */
+        if ( preempted && general_preempt_check() )
+        {
+            *preempted = true;
+            return -ERESTART;
+        }
+    }
+
+    return 0;
+}
--
2.49.0