
[Xen-devel] [PATCH] xen: arm: flush TLB on all CPUs when setting or clearing fixmaps



These mappings are global and therefore need flushing on all processors. Add
flush_all_xen_data_tlb_range_va, which accomplishes this.

Also update the comments in the other flush_xen_*_tlb functions to mention
that they operate on the local processor only.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/arch/arm/mm.c                |    4 ++--
 xen/include/asm-arm/arm32/page.h |   36 ++++++++++++++++++++++++++++++------
 xen/include/asm-arm/arm64/page.h |   35 +++++++++++++++++++++++++++++------
 3 files changed, 61 insertions(+), 14 deletions(-)
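
For context, a minimal sketch (not part of the patch) of the sort of caller this
change matters for. The fixmap lives in Xen's global address space, so a mapping
installed by one processor can be dereferenced by any other; if the stale
translation is only invalidated locally, another processor may keep using the old
mapping. The slot name FIXMAP_MISC, the attribute index BUFFERABLE and the helper
name below are illustrative only:

    /*
     * Illustrative only: map an arbitrary MFN through a fixmap slot, read
     * one byte, and tear the mapping down again.  Because the fixmap is a
     * global (per-hypervisor, not per-CPU) mapping, the TLB maintenance
     * done by set_fixmap()/clear_fixmap() must be broadcast to all
     * processors, which is what flush_all_xen_data_tlb_range_va() provides.
     */
    static unsigned char peek_mfn_first_byte(unsigned long mfn)
    {
        unsigned char val;

        set_fixmap(FIXMAP_MISC, mfn, BUFFERABLE);   /* install the mapping */
        val = *(volatile unsigned char *)FIXMAP_ADDR(FIXMAP_MISC);
        clear_fixmap(FIXMAP_MISC);                  /* and remove it again */

        return val;
    }
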

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 35af1ad..cddb174 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -234,7 +234,7 @@ void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
     pte.pt.ai = attributes;
     pte.pt.xn = 1;
     write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
-    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+    flush_all_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
 /* Remove a mapping from a fixmap entry */
@@ -242,7 +242,7 @@ void clear_fixmap(unsigned map)
 {
     lpae_t pte = {0};
     write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
-    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+    flush_all_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
 #ifdef CONFIG_DOMAIN_PAGE
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index cf12a89..533b253 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -23,7 +23,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
 #define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
 
 /*
- * Flush all hypervisor mappings from the TLB and branch predictor.
+ * Flush all hypervisor mappings from the TLB and branch predictor of
+ * the local processor.
+ *
  * This is needed after changing Xen code mappings.
  *
  * The caller needs to issue the necessary DSB and D-cache flushes
@@ -43,8 +45,9 @@ static inline void flush_xen_text_tlb(void)
 }
 
 /*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush all hypervisor mappings from the data TLB of the local
+ * processor. This is not sufficient when changing code mappings or
+ * for self modifying code.
  */
 static inline void flush_xen_data_tlb(void)
 {
@@ -57,10 +60,12 @@ static inline void flush_xen_data_tlb(void)
 }
 
 /*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
  */
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+static inline void flush_xen_data_tlb_range_va(unsigned long va,
+                                               unsigned long size)
 {
     unsigned long end = va + size;
     dsb(); /* Ensure preceding are visible */
@@ -73,6 +78,25 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long s
     isb();
 }
 
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB on all
+ * processors in the inner-shareable domain. This is not sufficient
+ * when changing code mappings or for self modifying code.
+ */
+static inline void flush_all_xen_data_tlb_range_va(unsigned long va,
+                                                   unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(); /* Ensure preceding are visible */
+    while ( va < end ) {
+        asm volatile(STORE_CP32(0, TLBIMVAHIS)
+                     : : "r" (va) : "memory");
+        va += PAGE_SIZE;
+    }
+    dsb(); /* Ensure completion of the TLB flush */
+    isb();
+}
+
 /* Ask the MMU to translate a VA for us */
 static inline uint64_t __va_to_par(vaddr_t va)
 {
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 9551f90..42023cc 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -18,7 +18,8 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
 #define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
 
 /*
- * Flush all hypervisor mappings from the TLB
+ * Flush all hypervisor mappings from the TLB of the local processor.
+ *
  * This is needed after changing Xen code mappings.
  *
  * The caller needs to issue the necessary DSB and D-cache flushes
@@ -36,8 +37,9 @@ static inline void flush_xen_text_tlb(void)
 }
 
 /*
- * Flush all hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush all hypervisor mappings from the data TLB of the local
+ * processor. This is not sufficient when changing code mappings or
+ * for self modifying code.
  */
 static inline void flush_xen_data_tlb(void)
 {
@@ -50,10 +52,12 @@ static inline void flush_xen_data_tlb(void)
 }
 
 /*
- * Flush a range of VA's hypervisor mappings from the data TLB. This is not
- * sufficient when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
  */
-static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
+static inline void flush_xen_data_tlb_range_va(unsigned long va,
+                                               unsigned long size)
 {
     unsigned long end = va + size;
     dsb(); /* Ensure preceding are visible */
@@ -66,6 +70,25 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long s
     isb();
 }
 
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB of all
+ * processors in the inner-shareable domain. This is not sufficient
+ * when changing code mappings or for self modifying code.
+ */
+static inline void flush_all_xen_data_tlb_range_va(unsigned long va,
+                                                   unsigned long size)
+{
+    unsigned long end = va + size;
+    dsb(); /* Ensure preceding are visible */
+    while ( va < end ) {
+        asm volatile("tlbi vae2is, %0;"
+                     : : "r" (va>>PAGE_SHIFT) : "memory");
+        va += PAGE_SIZE;
+    }
+    dsb(); /* Ensure completion of the TLB flush */
+    isb();
+}
+
 /* Ask the MMU to translate a VA for us */
 static inline uint64_t __va_to_par(vaddr_t va)
 {
-- 
1.7.10.4

