[Xen-devel] [PATCH 6/7] xen/arm: flush D-cache and I-cache when appropriate
- invalidate TLB after setting WXN;
- flush D-cache and I-cache after relocation;
- flush D-cache after writing to smp_up_cpu (see the sketch after this list);
- flush TLB before changing HTTBR;
- flush I-cache after changing HTTBR;
- flush I-cache and branch predictor after writing Xen text ptes.
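To make the smp_up_cpu item above concrete: the writer has to complete its
store, clean the flag's cacheline to the point of coherency (the secondary
may still be polling with its MMU and caches off), and only then issue SEV.
A minimal sketch, assuming ARMv7 and GCC inline asm; wake_flag and
publish_and_wake are illustrative names, not the actual Xen symbols:

    /* Hypothetical sketch: publish a flag to a CPU polling with caches
     * disabled, so the value must reach the point of coherency (PoC)
     * before the wake-up event is sent. */
    static volatile unsigned long wake_flag;    /* stands in for smp_up_cpu */

    static void publish_and_wake(unsigned long cpuid)
    {
        wake_flag = cpuid;                          /* may sit in our D-cache */
        asm volatile ("dsb" ::: "memory");          /* complete the store */
        asm volatile ("mcr p15, 0, %0, c7, c10, 1"  /* DCCMVAC: clean to PoC */
                      : : "r" (&wake_flag) : "memory");
        asm volatile ("dsb; isb; sev");             /* then wake the waiter */
    }

This is the same ordering the patch applies in make_cpus_ready and __cpu_up,
via the flush_xen_dcache_va helper introduced below.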
Changes in v2:
- fix a wrong comment;
- add a comment to describe why we need a DSB at the beginning of
write_pte (see the annotated sketch after this list);
- do not issue ISB within write_pte, call isb() afterwards whenever
appropriate;
- issue DSB after DCCMVAC in write_pte to make sure that the data flush
is completed before proceeding;
- make flush_xen_dcache_va take a void* as argument;
- introduce flush_xen_dcache_va_range.
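The barrier placement in write_pte that the notes above refer to can be
summarized in an annotated sketch (same instruction ordering as the patch;
the type and function names here are illustrative stand-ins, not the
patch's own):

    #include <stdint.h>

    typedef struct { uint64_t bits; } pte_sketch_t;  /* stands in for lpae_t */

    static inline void write_pte_sketch(pte_sketch_t *entry, pte_sketch_t pte)
    {
        asm volatile (
            /* DSB: writes made under the old mapping must complete
             * before the entry changes underneath them. */
            "dsb;"
            /* STRD: 64-bit store, atomic on CPUs with LPAE, so a page
             * walk never sees a half-written entry. */
            "strd %0, %H0, [%1];"
            "dsb;"
            /* DCCMVAC: clean the cacheline to the PoC so observers that
             * do not snoop our D-cache see the new entry. */
            "mcr p15, 0, %1, c7, c10, 1;"
            /* DSB: the clean has completed before we proceed; any ISB
             * needed for text mappings is left to the caller. */
            "dsb;"
            : : "r" (pte.bits), "r" (entry) : "memory");
    }

Keeping the ISB out of write_pte and in the callers avoids paying for a
pipeline flush on every pagetable write when only data mappings changed.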
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
xen/arch/arm/head.S | 9 +++++++
xen/arch/arm/mm.c | 18 +++++++++++++-
xen/arch/arm/setup.c | 6 ++++++
xen/arch/arm/smpboot.c | 2 +
xen/include/asm-arm/page.h | 51 +++++++++++++++++++++++++++++++++++++++++--
5 files changed, 81 insertions(+), 5 deletions(-)
diff --git a/xen/arch/arm/head.S b/xen/arch/arm/head.S
index 3fe6412..4de4d95 100644
--- a/xen/arch/arm/head.S
+++ b/xen/arch/arm/head.S
@@ -278,8 +278,15 @@ paging:
ldr r4, =boot_httbr /* VA of HTTBR value stashed by CPU 0 */
add r4, r4, r10 /* PA of it */
ldrd r4, r5, [r4] /* Actual value */
+ dsb
+ mcr CP32(r0, TLBIALLH) /* Flush hypervisor TLB */
+ dsb
+ isb
mcrr CP64(r4, r5, HTTBR)
+ dsb
+ isb
mcr CP32(r0, TLBIALLH) /* Flush hypervisor TLB */
+ mcr CP32(r0, ICIALLU) /* Flush I-cache */
mcr CP32(r0, BPIALL) /* Flush branch predictor */
dsb /* Ensure completion of TLB+BP flush */
isb
@@ -292,6 +299,8 @@ paging:
teq r2, #0
bne 1b
dsb
+ mcr CP32(r0, DCCMVAC) /* Flush D-cache */
+ dsb
/* Here, the non-boot CPUs must wait again -- they're now running on
* the boot CPU's pagetables so it's safe for the boot CPU to
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d0cd2c9..3d25153 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -32,6 +32,7 @@
#include <public/memory.h>
#include <xen/sched.h>
+int __read_mostly cacheline_bytes = MIN_CACHELINE_BYTES;
struct domain *dom_xen, *dom_io;
/* Static start-of-day pagetables that we use before the allocators are up */
@@ -244,10 +245,17 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
/* Change pagetables to the copy in the relocated Xen */
boot_httbr = (unsigned long) xen_pgtable + phys_offset;
+ flush_xen_dcache_va(&boot_httbr);
+ flush_xen_dcache_va_range((void*)dest_va, _end - _start);
+ isb();
+ flush_xen_text_tlb();
+
asm volatile (
STORE_CP64(0, HTTBR) /* Change translation base */
"dsb;" /* Ensure visibility of HTTBR update */
+ "isb;"
STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */
+ STORE_CP32(0, ICIALLU) /* Flush I-cache */
STORE_CP32(0, BPIALL) /* Flush branch predictor */
"dsb;" /* Ensure completion of TLB+BP flush */
"isb;"
@@ -256,6 +264,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
/* Undo the temporary map */
pte.bits = 0;
write_pte(xen_second + second_table_offset(dest_va), pte);
+ isb();
flush_xen_text_tlb();
/* Link in the fixmap pagetable */
@@ -291,11 +300,14 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
>> PAGE_SHIFT);
pte.pt.table = 1;
write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
- /* Have changed a mapping used for .text. Flush everything for safety. */
- flush_xen_text_tlb();
+ /* ISB is needed because we changed the text mappings */
+ isb();
/* From now on, no mapping may be both writable and executable. */
WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+ isb();
+ /* Flush everything after setting WXN bit. */
+ flush_xen_text_tlb();
}
/* MMU setup for secondary CPUS (which already have paging enabled) */
@@ -303,6 +315,8 @@ void __cpuinit mmu_init_secondary_cpu(void)
{
/* From now on, no mapping may be both writable and executable. */
WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+ isb();
+ flush_xen_text_tlb();
}
/* Create Xen's mappings of memory.
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 6fbcb81..a579a56 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -185,6 +185,12 @@ void __init start_xen(unsigned long boot_phys_offset,
size_t fdt_size;
int cpus, i;
+ /* Low 3 bits of CCSIDR are log2(cacheline size in words) - 2 */
+ WRITE_CP32(0, CSSELR); /* Select the L1 data cache */
+ cacheline_bytes = 1U << (4 + (READ_CP32(CCSIDR) & 0x7));
+ if ( cacheline_bytes < MIN_CACHELINE_BYTES )
+ panic("CPU has preposterously small cache lines");
+
fdt = (void *)BOOT_MISC_VIRT_START
+ (atag_paddr & ((1 << SECOND_SHIFT) - 1));
fdt_size = device_tree_early_init(fdt);
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index c0750c0..f4fd512 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -105,6 +105,7 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
/* Tell the next CPU to get ready */
/* TODO: handle boards where CPUIDs are not contiguous */
*gate = i;
+ flush_xen_dcache_va(gate);
asm volatile("dsb; isb; sev");
/* And wait for it to respond */
while ( ready_cpus < i )
@@ -201,6 +202,7 @@ int __cpu_up(unsigned int cpu)
/* Unblock the CPU. It should be waiting in the loop in head.S
* for an event to arrive when smp_up_cpu matches its cpuid. */
smp_up_cpu = cpu;
+ flush_xen_dcache_va(&smp_up_cpu);
asm volatile("dsb; isb; sev");
while ( !cpu_online(cpu) )
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 9511c45..1b1d556 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -228,27 +228,72 @@ static inline lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr)
return e;
}
-/* Write a pagetable entry */
+/* Write a pagetable entry.
+ *
+ * If the table entry is changing a text mapping, it is the
+ * responsibility of the caller to issue an ISB after write_pte.
+ */
static inline void write_pte(lpae_t *p, lpae_t pte)
{
asm volatile (
+ /* Ensure any writes have completed with the old mappings. */
+ "dsb;"
/* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
"strd %0, %H0, [%1];"
+ "dsb;"
/* Push this cacheline to the PoC so the rest of the system sees it. */
STORE_CP32(1, DCCMVAC)
+ /* Ensure that the data flush is completed before proceeding */
+ "dsb;"
: : "r" (pte.bits), "r" (p) : "memory");
}
+
+#define MIN_CACHELINE_BYTES 32
+extern int cacheline_bytes;
+
+/* Function for flushing medium-sized areas.
+ * If 'size' is large enough we might want to use model-specific
+ * full-cache flushes. */
+static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
+{
+ void *end;
+ dsb(); /* So the CPU issues all writes to the range */
+ for ( end = p + size; p < end; p += cacheline_bytes )
+ WRITE_CP32((uint32_t) p, DCCMVAC);
+ dsb(); /* So we know the flushes happen before continuing */
+}
+
+
+/* Macro for flushing a single small item. The predicate is always
+ * compile-time constant so this will compile down to 3 instructions in
+ * the common case. Make sure to call it with the correct type of
+ * pointer! */
+#define flush_xen_dcache_va(p) do { \
+ typeof(p) _p = (p); \
+ if ( (sizeof *_p) > MIN_CACHELINE_BYTES ) \
+ flush_xen_dcache_va_range(_p, sizeof *_p); \
+ else \
+ asm volatile ( \
+ "dsb;" /* Finish all earlier writes */ \
+ STORE_CP32(0, DCCMVAC) \
+ "dsb;" /* Finish flush before continuing */ \
+ : : "r" (_p), "m" (*_p)); \
+} while (0)
+
+
/*
* Flush all hypervisor mappings from the TLB and branch predictor.
- * This is needed after changing Xen code mappings.
+ * This is needed after changing Xen code mappings.
+ *
+ * The caller needs to issue the necessary barriers before calling this function.
*/
static inline void flush_xen_text_tlb(void)
{
register unsigned long r0 asm ("r0");
asm volatile (
- "dsb;" /* Ensure visibility of PTE writes */
STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */
+ STORE_CP32(0, ICIALLU) /* Flush I-cache */
STORE_CP32(0, BPIALL) /* Flush branch predictor */
"dsb;" /* Ensure completion of TLB+BP flush */
"isb;"
--
1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel