[xen master] xen/riscv: implement function to map memory in guest p2m
commit 96553849a32e5240e444db09b92c559480faffa0
Author: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
AuthorDate: Tue Dec 16 17:55:19 2025 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Dec 18 14:24:17 2025 +0100
xen/riscv: implement function to map memory in guest p2m
Implement map_regions_p2mt() to map a region in the guest p2m with
a specific p2m type. The memory attributes will be derived from the
p2m type. This function is used in dom0less common code.
To implement it, introduce:
- p2m_write_(un)lock() to ensure safe concurrent updates to the P2M.
As part of this change, introduce p2m_tlb_flush_sync() and
p2m_tlb_flush().
- A stub for p2m_set_range() to map a range of GFNs to MFNs.
- p2m_insert_mapping().
- p2m_is_write_locked().
Call map_regions_p2mt() directly from guest_physmap_add_page(),
making guest_physmap_add_entry() unnecessary, and drop it.
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
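
For illustration only (not part of the commit), a hedged caller-side sketch in C
of the new interface; the gfn/mfn/nr values and the surrounding context are
assumptions, not taken from the patch:

    /* Map `nr` contiguous RAM frames into the guest p2m. */
    int rc = map_regions_p2mt(d, gfn, nr, mfn, p2m_ram_rw);

    if ( rc )
        return rc;

    /*
     * The reworked wrapper forwards to map_regions_p2mt(); an order-9
     * call maps BIT(9, UL) == 512 frames starting at gfn/mfn.
     */
    rc = guest_physmap_add_page(d, gfn, mfn, 9);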
---
xen/arch/riscv/include/asm/p2m.h | 31 +++++++++++++++------
xen/arch/riscv/p2m.c | 60 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 82 insertions(+), 9 deletions(-)
diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index 1c89838408..9acd6a64a8 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -128,21 +128,22 @@ static inline int guest_physmap_mark_populate_on_demand(struct domain *d,
return -EOPNOTSUPP;
}
-static inline int guest_physmap_add_entry(struct domain *d,
- gfn_t gfn, mfn_t mfn,
- unsigned long page_order,
- p2m_type_t t)
-{
- BUG_ON("unimplemented");
- return -EINVAL;
-}
+/*
+ * Map a region in the guest's hostp2m p2m with a specific p2m type.
+ * The memory attributes will be derived from the p2m type.
+ */
+int map_regions_p2mt(struct domain *d,
+ gfn_t gfn,
+ unsigned long nr,
+ mfn_t mfn,
+ p2m_type_t p2mt);
/* Untyped version for RAM only, for compatibility */
static inline int __must_check
guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
unsigned int page_order)
{
- return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+ return map_regions_p2mt(d, gfn, BIT(page_order, UL), mfn, p2m_ram_rw);
}
static inline mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
@@ -166,6 +167,18 @@ unsigned char get_max_supported_mode(void);
int p2m_init(struct domain *d);
+static inline void p2m_write_lock(struct p2m_domain *p2m)
+{
+ write_lock(&p2m->lock);
+}
+
+void p2m_write_unlock(struct p2m_domain *p2m);
+
+static inline bool p2m_is_write_locked(struct p2m_domain *p2m)
+{
+ return rw_is_write_locked(&p2m->lock);
+}
+
unsigned long construct_hgatp(const struct p2m_domain *p2m, uint16_t vmid);
#endif /* ASM__RISCV__P2M_H */
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index d14757331f..8bb197f9b3 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -110,6 +110,41 @@ void __init guest_mm_init(void)
local_hfence_gvma_all();
}
+/*
+ * Force a synchronous P2M TLB flush.
+ *
+ * Must be called with the p2m lock held.
+ */
+static void p2m_tlb_flush(struct p2m_domain *p2m)
+{
+ const struct domain *d = p2m->domain;
+
+ ASSERT(p2m_is_write_locked(p2m));
+
+ p2m->need_flush = false;
+
+ sbi_remote_hfence_gvma(d->dirty_cpumask, 0, 0);
+}
+
+void p2m_tlb_flush_sync(struct p2m_domain *p2m)
+{
+ if ( p2m->need_flush )
+ p2m_tlb_flush(p2m);
+}
+
+/* Unlock the P2M and do a P2M TLB flush if necessary */
+void p2m_write_unlock(struct p2m_domain *p2m)
+{
+ /*
+ * The final flush is done with the P2M write lock taken to avoid
+ * someone else modifying the P2M before the TLB invalidation has
+ * completed.
+ */
+ p2m_tlb_flush_sync(p2m);
+
+ write_unlock(&p2m->lock);
+}
+
static void clear_and_clean_page(struct page_info *page, bool clean_dcache)
{
void *p = __map_domain_page(page);
@@ -238,3 +273,28 @@ int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted)
return rc;
}
+
+static int p2m_set_range(struct p2m_domain *p2m,
+ gfn_t sgfn,
+ unsigned long nr,
+ mfn_t smfn,
+ p2m_type_t t)
+{
+ return -EOPNOTSUPP;
+}
+
+int map_regions_p2mt(struct domain *d,
+ gfn_t gfn,
+ unsigned long nr,
+ mfn_t mfn,
+ p2m_type_t p2mt)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ int rc;
+
+ p2m_write_lock(p2m);
+ rc = p2m_set_range(p2m, gfn, nr, mfn, p2mt);
+ p2m_write_unlock(p2m);
+
+ return rc;
+}
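
As a reading aid, here is a hedged sketch (the helper name is hypothetical and
not part of the patch) of how an update path holding the write lock is expected
to cooperate with the deferred flush performed by p2m_write_unlock():

    /* Hypothetical update helper, shown only to illustrate need_flush. */
    static int example_p2m_update(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn)
    {
        ASSERT(p2m_is_write_locked(p2m));

        /* ... write the stage-2 page-table entry for gfn -> mfn ... */

        /*
         * Stale translations may now be cached; p2m_write_unlock() will
         * issue the remote HFENCE.GVMA via p2m_tlb_flush_sync() before
         * releasing the lock.
         */
        p2m->need_flush = true;

        return 0;
    }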
--
generated by git-patchbot for /home/xen/git/xen.git#master