
[xen master] xen/riscv: implement p2m_next_level()



commit af87a609c79d30950150e8a3adecee8beff3169e
Author:     Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
AuthorDate: Tue Dec 16 17:55:23 2025 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Dec 18 14:25:16 2025 +0100

    xen/riscv: implement p2m_next_level()
    
    Implement the p2m_next_level() function, which walks the RISC-V p2m
    (physical-to-machine) page table hierarchy and, where necessary,
    dynamically allocates intermediate page tables.
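    
    As an illustration, a caller might drive the walk roughly as follows.
    This is a hedged sketch only: P2M_ROOT_LEVEL, p2m_root_table(),
    p2m_index() and target_level are assumed names for the example, not
    part of this patch; actual callers are introduced separately.
    
        pte_t *table = p2m_root_table(p2m);      /* hypothetical helper */
        unsigned int level;
    
        for ( level = P2M_ROOT_LEVEL; level > target_level; level-- )
        {
            int rc = p2m_next_level(p2m, true /* alloc_tbl */, level, &table,
                                    p2m_index(level, gfn) /* hypothetical */);
    
            if ( rc == P2M_TABLE_MAP_NOMEM )
                return -ENOMEM;  /* intermediate table allocation failed */
    
            if ( rc == P2M_TABLE_MAP_NONE || rc == P2M_TABLE_SUPER_PAGE )
                break;  /* hole (with !alloc_tbl) or existing superpage */
    
            /* P2M_TABLE_NORMAL: *table now maps the next-level table. */
        }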
    
    To support this, the following helpers are introduced:
    - page_to_p2m_table(): Constructs non-leaf PTEs pointing to next-level page
      tables with the correct attributes (see the sketch after this list).
    - p2m_alloc_page(): Allocates page table pages, supporting both hardware and
      guest domains.
    - p2m_create_table(): Allocates and initializes a new page table page and
      installs it into the hierarchy.
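    
    As a rough idea of what page_to_p2m_table() boils down to (stand-alone
    sketch with local names, not this patch's code): per the RISC-V
    privileged spec, a PTE with V=1 and R=W=X=0 is a pointer to the
    next-level table, so no permission or p2m type bits get set.
    
        #include <stdint.h>
    
        #define PTE_V         (1UL << 0)  /* valid bit */
        #define PTE_PPN_SHIFT 10          /* PPN field starts at bit 10 */
    
        /* V=1 with R/W/X clear marks a non-leaf pointer to the next level. */
        static inline uint64_t make_table_pte(uint64_t mfn)
        {
            return (mfn << PTE_PPN_SHIFT) | PTE_V;
        }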
    
    Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/riscv/include/asm/paging.h |  2 +
 xen/arch/riscv/p2m.c                | 77 ++++++++++++++++++++++++++++++++++++-
 xen/arch/riscv/paging.c             | 13 +++++++
 3 files changed, 90 insertions(+), 2 deletions(-)

diff --git a/xen/arch/riscv/include/asm/paging.h b/xen/arch/riscv/include/asm/paging.h
index fe462be223..c1d225d02b 100644
--- a/xen/arch/riscv/include/asm/paging.h
+++ b/xen/arch/riscv/include/asm/paging.h
@@ -15,4 +15,6 @@ int paging_refill_from_domheap(struct domain *d, unsigned int nr_pages);
 
 void paging_free_page(struct domain *d, struct page_info *pg);
 
+struct page_info *paging_alloc_page(struct domain *d);
+
 #endif /* ASM_RISCV_PAGING_H */
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index 48e75b1867..9e1a660316 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -357,6 +357,19 @@ int p2m_set_allocation(struct domain *d, unsigned long pages, bool *preempted)
     return rc;
 }
 
+static struct page_info *p2m_alloc_page(struct p2m_domain *p2m)
+{
+    struct page_info *pg = paging_alloc_page(p2m->domain);
+
+    if ( pg )
+    {
+        page_list_add(pg, &p2m->pages);
+        clear_and_clean_page(pg, p2m->clean_dcache);
+    }
+
+    return pg;
+}
+
 static int p2m_set_type(pte_t *pte, p2m_type_t t)
 {
     int rc = 0;
@@ -497,6 +510,33 @@ static pte_t p2m_pte_from_mfn(mfn_t mfn, p2m_type_t t, bool is_table)
     return e;
 }
 
+/* Generate a non-leaf table entry with the correct attributes. */
+static pte_t page_to_p2m_table(const struct page_info *page)
+{
+    /*
+     * p2m_invalid is ignored inside p2m_pte_from_mfn(), since is_table is
+     * true and no p2m_type_t applies to PTEs which describe an
+     * intermediate table.
+     */
+     */
+    return p2m_pte_from_mfn(page_to_mfn(page), p2m_invalid, true);
+}
+
+/* Allocate a new page table page and hook it in via the given entry. */
+static int p2m_create_table(struct p2m_domain *p2m, pte_t *entry)
+{
+    struct page_info *page;
+
+    ASSERT(!pte_is_valid(*entry));
+
+    page = p2m_alloc_page(p2m);
+    if ( page == NULL )
+        return -ENOMEM;
+
+    p2m_write_pte(entry, page_to_p2m_table(page), p2m->clean_dcache);
+
+    return 0;
+}
+
 #define P2M_TABLE_MAP_NONE 0
 #define P2M_TABLE_MAP_NOMEM 1
 #define P2M_TABLE_SUPER_PAGE 2
@@ -521,9 +561,42 @@ static int p2m_next_level(struct p2m_domain *p2m, bool alloc_tbl,
                           unsigned int level, pte_t **table,
                           unsigned int offset)
 {
-    panic("%s: hasn't been implemented yet\n", __func__);
+    pte_t *entry;
+    mfn_t mfn;
+
+    /* p2m_next_level() is never called at the last level. */
+    ASSERT(level != 0);
+
+    entry = *table + offset;
+
+    if ( !pte_is_valid(*entry) )
+    {
+        int ret;
+
+        if ( !alloc_tbl )
+            return P2M_TABLE_MAP_NONE;
+
+        ret = p2m_create_table(p2m, entry);
+        if ( ret )
+            return P2M_TABLE_MAP_NOMEM;
+    }
+
+    if ( pte_is_mapping(*entry) )
+        return P2M_TABLE_SUPER_PAGE;
+
+    mfn = mfn_from_pte(*entry);
+
+    unmap_domain_page(*table);
+
+    /*
+     * TODO: There's an inefficiency here:
+     *       In p2m_create_table(), the new page is mapped in order to
+     *       clear it; that mapping is then torn down again, only to be
+     *       re-established here.
+     */
+    *table = map_domain_page(mfn);
 
-    return P2M_TABLE_MAP_NONE;
+    return P2M_TABLE_NORMAL;
 }
 
 static void p2m_put_foreign_page(struct page_info *pg)
diff --git a/xen/arch/riscv/paging.c b/xen/arch/riscv/paging.c
index 09631c9894..76a203edbb 100644
--- a/xen/arch/riscv/paging.c
+++ b/xen/arch/riscv/paging.c
@@ -117,6 +117,19 @@ void paging_free_page(struct domain *d, struct page_info *pg)
     spin_unlock(&d->arch.paging.lock);
 }
 
+struct page_info *paging_alloc_page(struct domain *d)
+{
+    struct page_info *pg;
+
+    spin_lock(&d->arch.paging.lock);
+    pg = page_list_remove_head(&d->arch.paging.freelist);
+    if ( pg )
+        d->arch.paging.total_pages--;
+    spin_unlock(&d->arch.paging.lock);
+
+    return pg;
+}
+
 /* Domain paging struct initialization. */
 int paging_domain_init(struct domain *d)
 {
--
generated by git-patchbot for /home/xen/git/xen.git#master