[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 09/13] powerpc/mm: replace batch->active with in_lazy_mmu_mode()



The generic lazy_mmu layer now tracks whether a task is in lazy MMU
mode. As a result we no longer need to track whether the per-CPU TLB
batch struct is active - we know it is if in_lazy_mmu_mode() returns
true.

Signed-off-by: Kevin Brodsky <kevin.brodsky@xxxxxxx>
---
 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 9 ---------
 arch/powerpc/mm/book3s64/hash_tlb.c                | 2 +-
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 623a8a8b2d0e..bbc54690d374 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -12,7 +12,6 @@
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
-       int                     active;
        unsigned long           index;
        struct mm_struct        *mm;
        real_pte_t              pte[PPC64_TLB_BATCH_NR];
@@ -26,8 +25,6 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch;
-
        if (radix_enabled())
                return;
        /*
@@ -35,8 +32,6 @@ static inline void arch_enter_lazy_mmu_mode(void)
         * operating on kernel page tables.
         */
        preempt_disable();
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
-       batch->active = 1;
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
@@ -51,14 +46,10 @@ static inline void arch_flush_lazy_mmu_mode(void)
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch;
-
        if (radix_enabled())
                return;
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
 
        arch_flush_lazy_mmu_mode();
-       batch->active = 0;
        preempt_enable();
 }
 
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 787f7a0e27f0..72b83f582b6d 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -100,7 +100,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return.
         */
-       if (!batch->active) {
+       if (!in_lazy_mmu_mode()) {
                flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
                put_cpu_var(ppc64_tlb_batch);
                return;
-- 
2.47.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.