[PATCH v1 2/6] xen/riscv: introduce things necessary for p2m initialization
Introduce the following:
- p2m_domain structure, which describes per-p2m-table state.
- An update of the arch_domain structure to embed the structure mentioned
  above.
- p2m_get_hostp2m() to retrieve a domain's p2m structure.
- p2m_write_lock() and p2m_is_write_locked().
- p2m_init() to initialize the p2m:
  - allocate the p2m table using p2m_alloc_table().
  - initialize the lock primitive necessary to protect updates to the p2m.
- The following functions to implement p2m_alloc_table():
  - p2m_allocate_root() to allocate the p2m root table, using the newly
    introduced helpers p2m_get_clean_page() and clear_and_clean_page().
  - p2m_force_tlb_flush_sync() to flush TLBs after the p2m table is
    allocated and before it is used. (This isn't necessary at the current
    stage of development, but once a VMID marked as unused can be reused
    by a new domain, an unflushed TLB could otherwise contain stale
    translations.)
- Implementations of maddr_to_page() and page_to_maddr().
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
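Note (below the fold, so it won't end up in the commit message): the call
sites are not wired up by this patch. A minimal sketch of the intended
usage is given below; arch_domain_create() as the caller and the
modify_p2m_example() helper are assumptions for illustration only.

    int arch_domain_create(struct domain *d,
                           struct xen_domctl_createdomain *config,
                           unsigned int flags)
    {
        int rc;

        /* Set up the rwlock, the type radix tree, and the root table. */
        rc = p2m_init(d);
        if ( rc )
            return rc;

        return 0;
    }

    /*
     * Updates to the p2m are made under the write lock; the TLB flush is
     * deferred until p2m_write_unlock(), which only flushes when
     * p2m->need_flush was set by the update.
     */
    void modify_p2m_example(struct domain *d)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        p2m_write_lock(p2m);
        /* ... modify p2m entries, setting p2m->need_flush as needed ... */
        p2m_write_unlock(p2m);
    }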
xen/arch/riscv/Makefile | 1 +
xen/arch/riscv/include/asm/domain.h | 6 +
xen/arch/riscv/include/asm/mm.h | 4 +
xen/arch/riscv/include/asm/p2m.h | 76 +++++++++++++
xen/arch/riscv/p2m.c | 168 ++++++++++++++++++++++++++++
5 files changed, 255 insertions(+)
create mode 100644 xen/arch/riscv/p2m.c
diff --git a/xen/arch/riscv/Makefile b/xen/arch/riscv/Makefile
index d882c57528..87c5e7e7f2 100644
--- a/xen/arch/riscv/Makefile
+++ b/xen/arch/riscv/Makefile
@@ -5,6 +5,7 @@ obj-y += entry.o
obj-y += intc.o
obj-y += mm.o
obj-y += pt.o
+obj-y += p2m.o
obj-$(CONFIG_RISCV_64) += riscv64/
obj-y += sbi.o
obj-y += setup.o
diff --git a/xen/arch/riscv/include/asm/domain.h b/xen/arch/riscv/include/asm/domain.h
index c3d965a559..48be90a395 100644
--- a/xen/arch/riscv/include/asm/domain.h
+++ b/xen/arch/riscv/include/asm/domain.h
@@ -5,6 +5,8 @@
#include <xen/xmalloc.h>
#include <public/hvm/params.h>
+#include <asm/p2m.h>
+
struct hvm_domain
{
uint64_t params[HVM_NR_PARAMS];
@@ -16,8 +18,12 @@ struct arch_vcpu_io {
struct arch_vcpu {
};
+
struct arch_domain {
struct hvm_domain hvm;
+
+ struct p2m_domain p2m;
+
};
#include <xen/sched.h>
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 01bbd92a06..972ec45448 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -149,6 +149,10 @@ extern struct page_info *frametable_virt_start;
#define mfn_to_page(mfn) (frametable_virt_start + mfn_x(mfn))
#define page_to_mfn(pg) _mfn((pg) - frametable_virt_start)
+/* Convert between machine addresses and page-info structures. */
+#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
+#define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg)))
+
static inline void *page_to_virt(const struct page_info *pg)
{
return mfn_to_virt(mfn_x(page_to_mfn(pg)));
diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index 28f57a74f2..8b46210768 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -3,11 +3,73 @@
#define ASM__RISCV__P2M_H
#include <xen/errno.h>
+#include <xen/mem_access.h>
+#include <xen/mm.h>
+#include <xen/radix-tree.h>
+#include <xen/rwlock.h>
+#include <xen/types.h>
#include <asm/page-bits.h>
#define paddr_bits PADDR_BITS
+/* Get host p2m table */
+#define p2m_get_hostp2m(d) (&(d)->arch.p2m)
+
+/* Per-p2m-table state */
+struct p2m_domain {
+ /*
+ * Lock that protects updates to the p2m.
+ */
+ rwlock_t lock;
+
+ /* Page containing root p2m table */
+ struct page_info *root;
+
+ /* Pages used to construct the p2m */
+ struct page_list_head pages;
+
+ /* Value programmed into the hgatp CSR: root table PPN and translation mode */
+ paddr_t hgatp;
+
+ /*
+ * P2M updates may require TLBs to be flushed (invalidated).
+ *
+ * Flushes may be deferred by setting 'need_flush' and then flushing
+ * when the p2m write lock is released.
+ *
+ * If an immediate flush is required (e.g., if a superpage is
+ * shattered), call p2m_tlb_flush_sync().
+ */
+ bool need_flush;
+
+ /* Indicate if it is required to clean the cache when writing an entry */
+ bool clean_pte;
+
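+ /*
+ * Radix tree to store the p2m type of each page, as only 2 bits per
+ * page are available in the PTE for this purpose.
+ */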
+ struct radix_tree_root p2m_type;
+
+ /*
+ * Default P2M access type for each page in the domain: new pages,
+ * swapped in pages, cleared pages, and pages that are ambiguously
+ * retyped get this access type. See definition of p2m_access_t.
+ */
+ p2m_access_t default_access;
+
+ /* Highest guest frame that's ever been mapped in the p2m */
+ gfn_t max_mapped_gfn;
+
+ /*
+ * Lowest mapped gfn in the p2m. When releasing mapped gfn's in a
+ * preemptible manner this is updated to track where to
+ * resume the search. Apart from during teardown this can only
+ * decrease.
+ */
+ gfn_t lowest_mapped_gfn;
+
+ /* Back pointer to domain */
+ struct domain *domain;
+};
+
/*
* List of possible type for each page in the p2m entry.
* The number of available bit per page in the pte for this purpose is 2 bits.
@@ -93,6 +155,20 @@ static inline void p2m_altp2m_check(struct vcpu *v,
uint16_t idx)
/* Not supported on RISCV. */
}
+int p2m_init(struct domain *d);
+
+static inline void p2m_write_lock(struct p2m_domain *p2m)
+{
+ write_lock(&p2m->lock);
+}
+
+void p2m_write_unlock(struct p2m_domain *p2m);
+
+static inline int p2m_is_write_locked(struct p2m_domain *p2m)
+{
+ return rw_is_write_locked(&p2m->lock);
+}
+
#endif /* ASM__RISCV__P2M_H */
/*
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
new file mode 100644
index 0000000000..ad4beef8f9
--- /dev/null
+++ b/xen/arch/riscv/p2m.c
@@ -0,0 +1,168 @@
+#include <xen/domain_page.h>
+#include <xen/iommu.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <xen/pfn.h>
+#include <xen/rwlock.h>
+#include <xen/sched.h>
+#include <xen/spinlock.h>
+
+#include <asm/page.h>
+#include <asm/p2m.h>
+
+/*
+ * Force a synchronous P2M TLB flush.
+ *
+ * Must be called with the p2m lock held.
+ *
+ * TODO: add support for flushing only the TLB entries associated with a VMID.
+ */
+static void p2m_force_tlb_flush_sync(struct p2m_domain *p2m)
+{
+ ASSERT(p2m_is_write_locked(p2m));
+
+ /*
+ * TODO: shouldn't this flush be done for each physical CPU?
+ * If yes, then the SBI call sbi_remote_hfence_gvma() could
+ * be used for that.
+ */
+#if defined(__riscv_h)
+ asm volatile ( "hfence.gvma" ::: "memory" );
+#else
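+ /* Raw encoding of "hfence.gvma x0, x0" for toolchains built without H-extension support */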
+ asm volatile ( ".insn r 0x73, 0x0, 0x31, x0, x0, x0" ::: "memory" );
+#endif
+
+ p2m->need_flush = false;
+}
+
+static void p2m_tlb_flush_sync(struct p2m_domain *p2m)
+{
+ if ( p2m->need_flush )
+ p2m_force_tlb_flush_sync(p2m);
+}
+
+/* Unlock the p2m and do a P2M TLB flush if necessary */
+void p2m_write_unlock(struct p2m_domain *p2m)
+{
+ /*
+ * The final flush is done with the P2M write lock taken to avoid
+ * someone else modifying the P2M before the TLB invalidation has
+ * completed.
+ */
+ p2m_tlb_flush_sync(p2m);
+
+ write_unlock(&p2m->lock);
+}
+
+static void clear_and_clean_page(struct page_info *page)
+{
+ void *p = __map_domain_page(page);
+
+ clear_page(p);
+ unmap_domain_page(p);
+}
+
+static struct page_info *p2m_get_clean_page(struct domain *d)
+{
+ struct page_info *page;
+
+ /*
+ * As explained in Section 18.5.1 of the Privileged Architecture Spec
+ * (version 20240411), for the paged virtual-memory schemes (Sv32x4,
+ * Sv39x4, Sv48x4, and Sv57x4) the root page table is 16 KiB and must
+ * be aligned to a 16-KiB boundary, hence the order-2 (four 4-KiB
+ * pages) allocation below.
+ */
+ page = alloc_domheap_pages(NULL, 2, 0);
+ if ( page == NULL )
+ return NULL;
+
+ clear_and_clean_page(page);
+
+ return page;
+}
+
+static struct page_info *p2m_allocate_root(struct domain *d)
+{
+ return p2m_get_clean_page(d);
+}
+
+static unsigned long hgatp_from_page_info(struct page_info *page_info)
+{
+ unsigned long ppn;
+ unsigned long hgatp_mode;
+
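+ /*
+ * hgatp layout (RV64): MODE in bits [63:60], VMID in bits [57:44],
+ * and the PPN of the root page table in bits [43:0].
+ */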
+ ppn = PFN_DOWN(page_to_maddr(page_info)) & HGATP_PPN;
+
+ /* ASID (VMID) not supported yet */
+
+#if RV_STAGE1_MODE == SATP_MODE_SV39
+ hgatp_mode = HGATP_MODE_SV39X4;
+#elif RV_STAGE1_MODE == SATP_MODE_SV48
+ hgatp_mode = HGATP_MODE_SV48X4;
+#else
+ #error "add HGATP_MODE"
+#endif
+
+ return ppn | (hgatp_mode << HGATP_MODE_SHIFT);
+}
+
+static int p2m_alloc_table(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ p2m->root = p2m_allocate_root(d);
+ if ( !p2m->root )
+ return -ENOMEM;
+
+ p2m->hgatp = hgatp_from_page_info(p2m->root);
+
+ /*
+ * Make sure that all TLBs corresponding to the new VMID are flushed
+ * before using it.
+ */
+ p2m_write_lock(p2m);
+ p2m_force_tlb_flush_sync(p2m);
+ p2m_write_unlock(p2m);
+
+ return 0;
+}
+
+int p2m_init(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ int rc;
+
+ rwlock_init(&p2m->lock);
+ INIT_PAGE_LIST_HEAD(&p2m->pages);
+
+ p2m->max_mapped_gfn = _gfn(0);
+ p2m->lowest_mapped_gfn = _gfn(ULONG_MAX);
+
+ p2m->default_access = p2m_access_rwx;
+
+ radix_tree_init(&p2m->p2m_type);
+
+#ifdef CONFIG_HAS_PASSTHROUGH
+ /*
+ * Some IOMMUs don't support coherent PT walk. When the p2m is
+ * shared with the CPU, Xen has to make sure that the PT changes have
+ * reached the memory.
+ */
+ p2m->clean_pte = is_iommu_enabled(d) &&
+ !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
+#else
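+ /* No IOMMU support compiled in; always cleaning is the conservative default */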
+ p2m->clean_pte = true;
+#endif
+
+ /*
+ * "Trivial" initialisation is now complete. Set the backpointer so
+ * p2m_teardown() and friends know to do something.
+ */
+ p2m->domain = d;
+
+ rc = p2m_alloc_table(d);
+ if ( rc )
+ return rc;
+
+ return 0;
+}
--
2.49.0