|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] xen/riscv: Implement p2m_pte_from_mfn() and support PBMT configuration
commit ff14053983b005d5de92b31574fda0fe86bf1842
Author: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
AuthorDate: Tue Dec 16 17:55:22 2025 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Dec 18 14:25:00 2025 +0100
xen/riscv: Implement p2m_pte_from_mfn() and support PBMT configuration
This patch adds the initial logic for constructing PTEs from MFNs in the
RISC-V
p2m subsystem. It includes:
- Implementation of p2m_pte_from_mfn(): Generates a valid PTE using the
given MFN, p2m_type_t, including permission encoding and PBMT attribute
setup.
- New helper p2m_set_permission(): Encodes access rights (r, w, x) into the
PTE based on both p2m type and access permissions.
- p2m_set_type(): Stores the p2m type in PTE's bits. The storage of types,
which don't fit PTE bits, will be implemented separately later.
- Add detection of Svade extension to properly handle a possible page-fault
if A and D bits aren't set.
PBMT type encoding support:
- Introduces an enum pbmt_type_t to represent the PBMT field values.
- Maps types like p2m_mmio_direct_dev to p2m_mmio_direct_io, others default
to pbmt_pma.
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/riscv/cpufeature.c | 1 +
xen/arch/riscv/include/asm/cpufeature.h | 1 +
xen/arch/riscv/include/asm/page.h | 8 +++
xen/arch/riscv/p2m.c | 112 ++++++++++++++++++++++++++++++--
4 files changed, 118 insertions(+), 4 deletions(-)
diff --git a/xen/arch/riscv/cpufeature.c b/xen/arch/riscv/cpufeature.c
index b846a106a3..02b68aeaa4 100644
--- a/xen/arch/riscv/cpufeature.c
+++ b/xen/arch/riscv/cpufeature.c
@@ -138,6 +138,7 @@ const struct riscv_isa_ext_data __initconst riscv_isa_ext[]
= {
RISCV_ISA_EXT_DATA(zbs),
RISCV_ISA_EXT_DATA(smaia),
RISCV_ISA_EXT_DATA(ssaia),
+ RISCV_ISA_EXT_DATA(svade),
RISCV_ISA_EXT_DATA(svpbmt),
};
diff --git a/xen/arch/riscv/include/asm/cpufeature.h
b/xen/arch/riscv/include/asm/cpufeature.h
index d42d7b294e..b696160388 100644
--- a/xen/arch/riscv/include/asm/cpufeature.h
+++ b/xen/arch/riscv/include/asm/cpufeature.h
@@ -37,6 +37,7 @@ enum riscv_isa_ext_id {
RISCV_ISA_EXT_zbs,
RISCV_ISA_EXT_smaia,
RISCV_ISA_EXT_ssaia,
+ RISCV_ISA_EXT_svade,
RISCV_ISA_EXT_svpbmt,
RISCV_ISA_EXT_MAX
};
diff --git a/xen/arch/riscv/include/asm/page.h
b/xen/arch/riscv/include/asm/page.h
index 1fc5998a0b..b7cd61df8d 100644
--- a/xen/arch/riscv/include/asm/page.h
+++ b/xen/arch/riscv/include/asm/page.h
@@ -73,6 +73,14 @@
#define PTE_SMALL BIT(10, UL)
#define PTE_POPULATE BIT(11, UL)
+enum pbmt_type {
+ pbmt_pma,
+ pbmt_nc,
+ pbmt_io,
+ pbmt_rsvd,
+ pbmt_count,
+};
+
#define PTE_ACCESS_MASK (PTE_READABLE | PTE_WRITABLE | PTE_EXECUTABLE)
#define PTE_PBMT_MASK (PTE_PBMT_NOCACHE | PTE_PBMT_IO)
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index 9ab7262d70..48e75b1867 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -11,6 +11,7 @@
#include <xen/sections.h>
#include <xen/xvmalloc.h>
+#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/flushtlb.h>
#include <asm/p2m.h>
@@ -356,6 +357,18 @@ int p2m_set_allocation(struct domain *d, unsigned long
pages, bool *preempted)
return rc;
}
+static int p2m_set_type(pte_t *pte, p2m_type_t t)
+{
+ int rc = 0;
+
+ if ( t > p2m_first_external )
+ panic("unimplemeted\n");
+ else
+ pte->pte |= MASK_INSR(t, P2M_TYPE_PTE_BITS_MASK);
+
+ return rc;
+}
+
static p2m_type_t p2m_get_type(const pte_t pte)
{
p2m_type_t type = MASK_EXTR(pte.pte, P2M_TYPE_PTE_BITS_MASK);
@@ -386,11 +399,102 @@ static inline void p2m_clean_pte(pte_t *p, bool
clean_cache)
p2m_write_pte(p, pte, clean_cache);
}
-static pte_t p2m_pte_from_mfn(mfn_t mfn, p2m_type_t t)
+static void p2m_set_permission(pte_t *e, p2m_type_t t)
{
- panic("%s: hasn't been implemented yet\n", __func__);
+ e->pte &= ~PTE_ACCESS_MASK;
+
+ e->pte |= PTE_USER;
+
+ /*
+ * Two schemes to manage the A and D bits are defined:
+ * • The Svade extension: when a virtual page is accessed and the A bit
+ * is clear, or is written and the D bit is clear, a page-fault
+ * exception is raised.
+ * • When the Svade extension is not implemented, the following scheme
+ * applies.
+ * When a virtual page is accessed and the A bit is clear, the PTE is
+ * updated to set the A bit. When the virtual page is written and the
+ * D bit is clear, the PTE is updated to set the D bit. When G-stage
+ * address translation is in use and is not Bare, the G-stage virtual
+ * pages may be accessed or written by implicit accesses to VS-level
+ * memory management data structures, such as page tables.
+ * Thereby, to avoid a page-fault when Svade is available, it is
+ * necessary to set A and D bits.
+ *
+ * TODO: For now, it's fine to simply set the A/D bits, since OpenSBI
+ * delegates page faults to a lower privilege mode and so OpenSBI
+ * isn't expected to handle page-faults that occurred in lower modes.
+ * By setting the A/D bits here, page faults that would otherwise
+ * be generated due to unset A/D bits will not occur in Xen.
+ *
+ * Currently, Xen on RISC-V does not make use of the information
+ * that could be obtained from handling such page faults, which
+ * could otherwise be useful for several use cases such as demand
+ * paging, cache-flushing optimizations, memory access tracking, etc.
+ *
+ * To support the more general case and the optimizations mentioned
+ * above, it would be better to stop setting the A/D bits here and
+ * instead handle page faults that occur due to unset A/D bits.
+ */
+ if ( riscv_isa_extension_available(NULL, RISCV_ISA_EXT_svade) )
+ e->pte |= PTE_ACCESSED | PTE_DIRTY;
+
+ switch ( t )
+ {
+ case p2m_map_foreign_rw:
+ case p2m_mmio_direct_io:
+ e->pte |= PTE_READABLE | PTE_WRITABLE;
+ break;
+
+ case p2m_ram_rw:
+ e->pte |= PTE_ACCESS_MASK;
+ break;
+
+ case p2m_invalid:
+ e->pte &= ~PTE_VALID;
+ break;
+
+ case p2m_map_foreign_ro:
+ e->pte |= PTE_READABLE;
+ break;
+
+ default:
+ ASSERT_UNREACHABLE();
+ break;
+ }
+}
+
+static pte_t p2m_pte_from_mfn(mfn_t mfn, p2m_type_t t, bool is_table)
+{
+ pte_t e = (pte_t) { PTE_VALID };
+
+ pte_set_mfn(&e, mfn);
+
+ ASSERT(!(mfn_to_maddr(mfn) & ~PADDR_MASK) || mfn_eq(mfn, INVALID_MFN));
+
+ if ( !is_table )
+ {
+ switch ( t )
+ {
+ case p2m_mmio_direct_io:
+ e.pte |= PTE_PBMT_IO;
+ break;
+
+ default:
+ break;
+ }
+
+ p2m_set_permission(&e, t);
+ p2m_set_type(&e, t);
+ }
+ else
+ /*
+ * According to the spec and table "Encoding of PTE R/W/X fields":
+ * X=W=R=0 -> Pointer to next level of page table.
+ */
+ e.pte &= ~PTE_ACCESS_MASK;
- return (pte_t) { .pte = 0 };
+ return e;
}
#define P2M_TABLE_MAP_NONE 0
@@ -645,7 +749,7 @@ static int p2m_set_entry(struct p2m_domain *p2m,
p2m_clean_pte(entry, p2m->clean_dcache);
else
{
- pte_t pte = p2m_pte_from_mfn(mfn, t);
+ pte_t pte = p2m_pte_from_mfn(mfn, t, false);
p2m_write_pte(entry, pte, p2m->clean_dcache);
--
generated by git-patchbot for /home/xen/git/xen.git#staging
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |