
[Xen-devel] [PATCH 12/45] xen: arm64: PTE handling
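
Move write_pte() out of the common asm-arm/page.h into new
per-subarchitecture headers, asm-arm/arm32/page.h and
asm-arm/arm64/page.h, which page.h now pulls in based on
CONFIG_ARM_32/CONFIG_ARM_64. The arm32 variant keeps the STRD-based
sequence (a 64-bit single-copy atomic store on LPAE-capable CPUs); the
arm64 variant writes the entry with a plain STR and cleans the
cacheline to the PoC with "dc cvac".

As the comment above write_pte() notes, the helper only orders the
store and cleans the cacheline; resynchronising the instruction stream
is left to the caller. A minimal sketch of such a caller (hypothetical,
not part of this patch, assuming the isb() helper from asm/system.h)
might look like:

    /* Hypothetical wrapper: write_pte() publishes the new entry, but
     * the ISB needed when a text mapping changes is the caller's job. */
    static inline void write_text_pte(lpae_t *p, lpae_t pte)
    {
        write_pte(p, pte);
        isb();
    }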



Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/include/asm-arm/arm32/page.h |   38 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/page.h |   33 +++++++++++++++++++++++++++++++++
 xen/include/asm-arm/page.h       |   28 ++++++++--------------------
 3 files changed, 79 insertions(+), 20 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/page.h
 create mode 100644 xen/include/asm-arm/arm64/page.h

diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
new file mode 100644
index 0000000..edf324c
--- /dev/null
+++ b/xen/include/asm-arm/arm32/page.h
@@ -0,0 +1,38 @@
+#ifndef __ARM_ARM32_PAGE_H__
+#define __ARM_ARM32_PAGE_H__
+
+#ifndef __ASSEMBLY__
+
+/* Write a pagetable entry.
+ *
+ * If the table entry is changing a text mapping, it is the responsibility
+ * of the caller to issue an ISB after write_pte.
+ */
+static inline void write_pte(lpae_t *p, lpae_t pte)
+{
+    asm volatile (
+        /* Ensure any writes have completed with the old mappings. */
+        "dsb;"
+        /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
+        "strd %0, %H0, [%1];"
+        "dsb;"
+        /* Push this cacheline to the PoC so the rest of the system sees it. */
+        STORE_CP32(1, DCCMVAC)
+        /* Ensure that the data flush is completed before proceeding */
+        "dsb;"
+        : : "r" (pte.bits), "r" (p) : "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARM_ARM32_PAGE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
new file mode 100644
index 0000000..6053bc2
--- /dev/null
+++ b/xen/include/asm-arm/arm64/page.h
@@ -0,0 +1,33 @@
+#ifndef __ARM_ARM64_PAGE_H__
+#define __ARM_ARM64_PAGE_H__
+
+#ifndef __ASSEMBLY__
+
+/* Write a pagetable entry */
+static inline void write_pte(lpae_t *p, lpae_t pte)
+{
+    asm volatile (
+        /* Ensure any writes have completed with the old mappings. */
+        "dsb sy;"
+        "str %0, [%1];"         /* Write the entry */
+        "dsb sy;"
+        /* Push this cacheline to the PoC so the rest of the system sees it. */
+        "dc cvac, %1;"
+        /* Ensure that the data flush is completed before proceeding */
+        "dsb sy;"
+        : : "r" (pte.bits), "r" (p) : "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARM_ARM64_PAGE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 1f6577d..f1a2fe2 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -230,26 +230,6 @@ static inline lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr)
     return e;
 }
 
-/* Write a pagetable entry.
- *
- * If the table entry is changing a text mapping, it is responsibility
- * of the caller to issue an ISB after write_pte.
- */
-static inline void write_pte(lpae_t *p, lpae_t pte)
-{
-    asm volatile (
-        /* Ensure any writes have completed with the old mappings. */
-        "dsb;"
-        /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
-        "strd %0, %H0, [%1];"
-        "dsb;"
-        /* Push this cacheline to the PoC so the rest of the system sees it. */
-        STORE_CP32(1, DCCMVAC)
-        /* Ensure that the data flush is completed before proceeding */
-        "dsb;"
-        : : "r" (pte.bits), "r" (p) : "memory");
-}
-
 /* Architectural minimum cacheline size is 4 32-bit words. */
 #define MIN_CACHELINE_BYTES 16
 /* Actual cacheline size on the boot CPU. */
@@ -405,6 +385,14 @@ static inline int gva_to_ipa(uint32_t va, paddr_t *paddr)
 
 #endif /* __ASSEMBLY__ */
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/page.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/page.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 /* These numbers add up to a 39-bit input address space.  The  ARMv7-A
  * architecture actually specifies a 40-bit input address space for the p2m,
  * with an 8K (1024-entry) top-level table. */
-- 
1.7.2.5

