
[PATCH 20/22] xen/arm64: mm: Use per-pCPU page-tables



From: Julien Grall <jgrall@xxxxxxxxxx>

At the moment, on Arm64, all pCPUs share the same set of page-tables.

In a follow-up patch, we will make it possible to remove the
direct map, and therefore a mapcache will be necessary.
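
As an illustration (a sketch under the assumption that the direct map
is gone; not part of this patch), accessing an arbitrary domain page
would then go through the mapcache interfaces rather than the direct
map:

    /*
     * Illustrative only: map_domain_page()/unmap_domain_page() are the
     * existing common interfaces; the Arm64 mapcache implementation
     * itself is introduced separately.
     */
    static void zero_domain_page(mfn_t mfn)
    {
        /* Create a temporary, per-pCPU mapping of the page... */
        void *p = map_domain_page(mfn);

        clear_page(p);

        /* ... and tear it down once we are done. */
        unmap_domain_page(p);
    }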

While we have plenty of spare virtual address space to reserve a
region for each pCPU, with shared page-tables those temporary
mappings (e.g. guest memory) would still be accessible by every pCPU.

In order to improve our security posture, it would be better if
those mappings were only accessible by the pCPU that created them.

In addition, per-pCPU page-tables open the way to a per-domain
mapping area.

Arm32 already uses per-pCPU page-tables, so most of the code can
be re-used. Arm64 doesn't yet support the mapcache, so a stub is
provided (moved to its own header asm/domain_page.h).
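
Thanks to the stub, the common CPU bring-up path does not need an
#ifdef around the mapcache initialisation. A simplified, illustrative
caller (the error handling here is only a sketch):

    /*
     * On Arm64 the stub is a no-op returning true; on Arm32 it sets up
     * the per-pCPU domheap mappings.
     */
    if ( !init_domheap_mappings(cpu) )
    {
        printk("CPU%u: Unable to set up the domheap page-tables\n", cpu);
        return -ENOMEM;
    }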

Take the opportunity to fix a typo in a comment that is modified.

Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
---
 xen/arch/arm/domain_page.c             |  2 ++
 xen/arch/arm/include/asm/arm32/mm.h    |  8 -----
 xen/arch/arm/include/asm/domain_page.h | 13 ++++++++
 xen/arch/arm/include/asm/mm.h          |  5 +++
 xen/arch/arm/mm.c                      | 42 +++++++-------------------
 xen/arch/arm/setup.c                   |  1 +
 6 files changed, 32 insertions(+), 39 deletions(-)
 create mode 100644 xen/arch/arm/include/asm/domain_page.h

diff --git a/xen/arch/arm/domain_page.c b/xen/arch/arm/domain_page.c
index b7c02c919064..4540b3c5f24c 100644
--- a/xen/arch/arm/domain_page.c
+++ b/xen/arch/arm/domain_page.c
@@ -3,6 +3,8 @@
 #include <xen/pmap.h>
 #include <xen/vmap.h>
 
+#include <asm/domain_page.h>
+
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef virt_to_mfn
 #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
diff --git a/xen/arch/arm/include/asm/arm32/mm.h b/xen/arch/arm/include/asm/arm32/mm.h
index 8bfc906e7178..6b039d9ceaa2 100644
--- a/xen/arch/arm/include/asm/arm32/mm.h
+++ b/xen/arch/arm/include/asm/arm32/mm.h
@@ -1,12 +1,6 @@
 #ifndef __ARM_ARM32_MM_H__
 #define __ARM_ARM32_MM_H__
 
-#include <xen/percpu.h>
-
-#include <asm/lpae.h>
-
-DECLARE_PER_CPU(lpae_t *, xen_pgtable);
-
 /*
  * Only a limited amount of RAM, called xenheap, is always mapped on ARM32.
  * For convenience always return false.
@@ -16,8 +10,6 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
     return false;
 }
 
-bool init_domheap_mappings(unsigned int cpu);
-
 #endif /* __ARM_ARM32_MM_H__ */
 
 /*
diff --git a/xen/arch/arm/include/asm/domain_page.h b/xen/arch/arm/include/asm/domain_page.h
new file mode 100644
index 000000000000..e9f52685e2ec
--- /dev/null
+++ b/xen/arch/arm/include/asm/domain_page.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ARM_DOMAIN_PAGE_H__
+#define __ASM_ARM_DOMAIN_PAGE_H__
+
+#ifdef CONFIG_ARCH_MAP_DOMAIN_PAGE
+bool init_domheap_mappings(unsigned int cpu);
+#else
+static inline bool init_domheap_mappings(unsigned int cpu)
+{
+    return true;
+}
+#endif
+
+#endif /* __ASM_ARM_DOMAIN_PAGE_H__ */
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 2366928d71aa..7a2c775f9562 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -2,6 +2,9 @@
 #define __ARCH_ARM_MM__
 
 #include <xen/kernel.h>
+#include <xen/percpu.h>
+
+#include <asm/lpae.h>
 #include <asm/page.h>
 #include <public/xen.h>
 #include <xen/pdx.h>
@@ -14,6 +17,8 @@
 # error "unknown ARM variant"
 #endif
 
+DECLARE_PER_CPU(lpae_t *, xen_pgtable);
+
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
 
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 4e208f7d20c8..2af751af9003 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -24,6 +24,7 @@
 
 #include <xsm/xsm.h>
 
+#include <asm/domain_page.h>
 #include <asm/fixmap.h>
 #include <asm/setup.h>
 
@@ -90,20 +91,19 @@ DEFINE_BOOT_PAGE_TABLE(boot_third);
  * xen_second, xen_fixmap and xen_xenmap are always shared between all
  * PCPUs.
  */
+/* Per-CPU pagetable pages */
+/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
+DEFINE_PER_CPU(lpae_t *, xen_pgtable);
+
+/* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */
+static DEFINE_PAGE_TABLE(cpu0_pgtable);
+#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
 
 #ifdef CONFIG_ARM_64
 #define HYP_PT_ROOT_LEVEL 0
-static DEFINE_PAGE_TABLE(xen_pgtable);
 static DEFINE_PAGE_TABLE(xen_first);
-#define THIS_CPU_PGTABLE xen_pgtable
 #else
 #define HYP_PT_ROOT_LEVEL 1
-/* Per-CPU pagetable pages */
-/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
-DEFINE_PER_CPU(lpae_t *, xen_pgtable);
-#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
-/* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */
-static DEFINE_PAGE_TABLE(cpu0_pgtable);
 #endif
 
 /* Common pagetable leaves */
@@ -481,14 +481,13 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
 
     phys_offset = boot_phys_offset;
 
+    p = cpu0_pgtable;
+
 #ifdef CONFIG_ARM_64
-    p = (void *) xen_pgtable;
     p[0] = pte_of_xenaddr((uintptr_t)xen_first);
     p[0].pt.table = 1;
     p[0].pt.xn = 0;
     p = (void *) xen_first;
-#else
-    p = (void *) cpu0_pgtable;
 #endif
 
     /* Map xen second level page-table */
@@ -527,19 +526,13 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
     pte.pt.table = 1;
     xen_second[second_table_offset(FIXMAP_ADDR(0))] = pte;
 
-#ifdef CONFIG_ARM_64
-    ttbr = (uintptr_t) xen_pgtable + phys_offset;
-#else
     ttbr = (uintptr_t) cpu0_pgtable + phys_offset;
-#endif
 
     switch_ttbr(ttbr);
 
     xen_pt_enforce_wnx();
 
-#ifdef CONFIG_ARM_32
     per_cpu(xen_pgtable, 0) = cpu0_pgtable;
-#endif
 }
 
 static void clear_boot_pagetables(void)
@@ -557,18 +550,6 @@ static void clear_boot_pagetables(void)
     clear_table(boot_third);
 }
 
-#ifdef CONFIG_ARM_64
-int init_secondary_pagetables(int cpu)
-{
-    clear_boot_pagetables();
-
-    /* Set init_ttbr for this CPU coming up. All CPus share a single setof
-     * pagetables, but rewrite it each time for consistency with 32 bit. */
-    init_ttbr = (uintptr_t) xen_pgtable + phys_offset;
-    clean_dcache(init_ttbr);
-    return 0;
-}
-#else
 int init_secondary_pagetables(int cpu)
 {
     lpae_t *root = alloc_xenheap_page();
@@ -599,7 +580,6 @@ int init_secondary_pagetables(int cpu)
 
     return 0;
 }
-#endif
 
 /* MMU setup for secondary CPUS (which already have paging enabled) */
 void mmu_init_secondary_cpu(void)
@@ -1089,7 +1069,7 @@ static int xen_pt_update(unsigned long virt,
     unsigned long left = nr_mfns;
 
     /*
-     * For arm32, page-tables are different on each CPUs. Yet, they share
+     * Page-tables are different on each CPU. Yet, they share
      * some common mappings. It is assumed that only common mappings
      * will be modified with this function.
      *
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 2311726f5ddd..88d9d90fb5ad 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -39,6 +39,7 @@
 #include <asm/gic.h>
 #include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
+#include <asm/domain_page.h>
 #include <asm/platform.h>
 #include <asm/procinfo.h>
 #include <asm/setup.h>
-- 
2.38.1
