[Xen-devel] [PATCH 6/7] xen/arm: flush D-cache and I-cache when appropriate
- invalidate tlb after setting WXN;
- flush D-cache and I-cache after relocation;
- flush D-cache after writing to smp_up_cpu;
- flush TLB before changing HTTBR;
- flush I-cache after changing HTTBR;
- flush I-cache and branch predictor after writing Xen text ptes.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/head.S        |    9 +++++++++
 xen/arch/arm/mm.c          |   14 +++++++++++++-
 xen/arch/arm/smpboot.c     |    2 ++
 xen/include/asm-arm/page.h |   14 ++++++++++++++
 4 files changed, 38 insertions(+), 1 deletions(-)

diff --git a/xen/arch/arm/head.S b/xen/arch/arm/head.S
index 39c4774..4c420ac 100644
--- a/xen/arch/arm/head.S
+++ b/xen/arch/arm/head.S
@@ -274,8 +274,15 @@ paging:
         ldr   r4, =boot_httbr        /* VA of HTTBR value stashed by CPU 0 */
         add   r4, r4, r10            /* PA of it */
         ldrd  r4, r5, [r4]           /* Actual value */
+        dsb
+        mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
+        dsb
+        isb
         mcrr  CP64(r4, r5, HTTBR)
+        dsb
+        isb
         mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
+        mcr   CP32(r0, ICIALLU)      /* Flush I-cache */
         mcr   CP32(r0, BPIALL)       /* Flush branch predictor */
         dsb                          /* Ensure completion of TLB+BP flush */
         isb
@@ -288,6 +295,8 @@ paging:
         teq   r2, #0
         bne   1b
         dsb
+        mcr   CP32(r0, DCCMVAC)      /* flush D-Cache */
+        isb
 
         /* Here, the non-boot CPUs must wait again -- they're now running on
          * the boot CPU's pagetables so it's safe for the boot CPU to
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d0cd2c9..37e49c8 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -211,6 +211,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     unsigned long dest_va;
     lpae_t pte, *p;
     int i;
+    unsigned long cacheline_size = READ_CP32(CCSIDR);
 
     /* Map the destination in the boot misc area. */
     dest_va = BOOT_MISC_VIRT_START;
@@ -244,10 +245,18 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 
     /* Change pagetables to the copy in the relocated Xen */
     boot_httbr = (unsigned long) xen_pgtable + phys_offset;
+    flush_xen_dcache_va((unsigned long)&boot_httbr);
+    for ( i = 0; i < _end - _start; i += cacheline_size )
+        flush_xen_dcache_va(dest_va + i);
+    flush_xen_text_tlb();
+
     asm volatile (
+        "dsb;"                 /* Ensure visibility of HTTBR update */
         STORE_CP64(0, HTTBR)   /* Change translation base */
         "dsb;"                 /* Ensure visibility of HTTBR update */
+        "isb;"
         STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */
+        STORE_CP32(0, ICIALLU)  /* Flush I-cache */
         STORE_CP32(0, BPIALL)  /* Flush branch predictor */
         "dsb;"                 /* Ensure completion of TLB+BP flush */
         "isb;"
@@ -292,10 +301,11 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     pte.pt.table = 1;
     write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
     /* Have changed a mapping used for .text. Flush everything for safety. */
-    flush_xen_text_tlb();
 
     /* From now on, no mapping may be both writable and executable. */
     WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+    isb();
+    flush_xen_text_tlb();
 }
 
 /* MMU setup for secondary CPUS (which already have paging enabled) */
@@ -303,6 +313,8 @@ void __cpuinit mmu_init_secondary_cpu(void)
 {
     /* From now on, no mapping may be both writable and executable. */
     WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+    isb();
+    flush_xen_text_tlb();
 }
 
 /* Create Xen's mappings of memory.
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index c0750c0..767e553 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -105,6 +105,7 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
         /* Tell the next CPU to get ready */
         /* TODO: handle boards where CPUIDs are not contiguous */
         *gate = i;
+        flush_xen_dcache_va((unsigned long)gate);
         asm volatile("dsb; isb; sev");
         /* And wait for it to respond */
         while ( ready_cpus < i )
@@ -201,6 +202,7 @@ int __cpu_up(unsigned int cpu)
     /* Unblock the CPU.  It should be waiting in the loop in head.S
      * for an event to arrive when smp_up_cpu matches its cpuid. */
     smp_up_cpu = cpu;
+    flush_xen_dcache_va((unsigned long)&smp_up_cpu);
     asm volatile("dsb; isb; sev");
 
     while ( !cpu_online(cpu) )
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 9511c45..7d70d8c 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -232,13 +232,26 @@ static inline lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr)
 static inline void write_pte(lpae_t *p, lpae_t pte)
 {
     asm volatile (
+        "dsb;"
         /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */
         "strd %0, %H0, [%1];"
+        "dsb;"
         /* Push this cacheline to the PoC so the rest of the system sees it. */
         STORE_CP32(1, DCCMVAC)
+        "isb;"
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+static inline void flush_xen_dcache_va(unsigned long va)
+{
+    register unsigned long r0 asm ("r0") = va;
+    asm volatile (
+        "dsb;"
+        STORE_CP32(0, DCCMVAC)
+        "isb;"
+        : : "r" (r0) : "memory");
+}
+
 /*
  * Flush all hypervisor mappings from the TLB and branch predictor.
  * This is needed after changing Xen code mappings.
@@ -249,6 +262,7 @@ static inline void flush_xen_text_tlb(void)
     asm volatile (
         "dsb;"                        /* Ensure visibility of PTE writes */
         STORE_CP32(0, TLBIALLH)       /* Flush hypervisor TLB */
+        STORE_CP32(0, ICIALLU)        /* Flush I-cache */
         STORE_CP32(0, BPIALL)        /* Flush branch predictor */
         "dsb;"                        /* Ensure completion of TLB+BP flush */
         "isb;"
-- 
1.7.2.5
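
For readers following along: the loop added to setup_pagetables() cleans the
relocated Xen image to the Point of Coherency one cache line at a time before
the HTTBR switch, and the smp_up_cpu/gate writes get the same clean-by-MVA
treatment so that secondary CPUs, still running with caches disabled, read the
updated values from memory. A minimal standalone sketch of that clean-by-range
pattern, assuming ARMv7 and a fixed 64-byte line size (the patch itself derives
the size from CCSIDR); flush_range and clean_dcache_line are illustrative
names, not part of the patch:

/*
 * Illustrative sketch (not part of the patch): clean a byte range from
 * the D-cache to the Point of Coherency on ARMv7, one line at a time,
 * as setup_pagetables() does for the relocated Xen image before
 * switching HTTBR.  CACHELINE_BYTES is an assumed constant here.
 */
#define CACHELINE_BYTES 64UL

static inline void clean_dcache_line(unsigned long va)
{
    /* DCCMVAC: clean data cache line by MVA to the PoC */
    asm volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" (va) : "memory");
}

static void flush_range(unsigned long start, unsigned long end)
{
    unsigned long va;

    asm volatile ("dsb" : : : "memory");      /* order the prior writes */
    for ( va = start & ~(CACHELINE_BYTES - 1); va < end;
          va += CACHELINE_BYTES )
        clean_dcache_line(va);
    asm volatile ("dsb; isb" : : : "memory"); /* wait for completion */
}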
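
The barriers added to write_pte() follow the same choreography in miniature: a
DSB to complete earlier stores, the 64-bit STRD (atomic on LPAE-capable CPUs),
another DSB so the write completes before its cache line is cleaned, DCCMVAC to
push the line to the PoC, and a final ISB. A minimal annotated sketch of that
sequence, using raw ARMv7 MCR encodings in place of Xen's STORE_CP32() macro;
pte_write_and_clean is an illustrative name:

#include <stdint.h>

/*
 * Illustrative sketch of the barrier pattern the patch adds to
 * write_pte(); not Xen's actual implementation.
 */
static inline void pte_write_and_clean(uint64_t *p, uint64_t pte)
{
    asm volatile (
        "dsb;"                        /* complete earlier stores first */
        "strd %0, %H0, [%1];"         /* atomic 64-bit PTE write (LPAE) */
        "dsb;"                        /* store visible before the clean */
        "mcr p15, 0, %1, c7, c10, 1;" /* DCCMVAC: clean line to PoC */
        "isb;"                        /* resynchronise this CPU */
        : : "r" (pte), "r" (p) : "memory");
}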