[PATCH v10 2/3] x86/tlb: allow disabling the TLB clock
The TLB clock is helpful when running Xen on bare metal because when doing
a TLB flush each CPU is IPI'ed and can keep a timestamp of the last flush.

This is not the case, however, when Xen is running virtualized and the
underlying hypervisor provides mechanisms to assist in performing TLB
flushes: Xen itself, for example, offers an HVMOP_flush_tlbs hypercall in
order to perform a TLB flush without having to IPI each CPU. When using
such mechanisms it's no longer possible to keep a timestamp of the flushes
on each CPU, as they are performed by the underlying hypervisor.

Offer a boolean in order to signal Xen that the timestamped TLB clock
shouldn't be used. This avoids keeping the timestamps of the flushes, and
also forces NEED_FLUSH to always return true.

No functional change intended, as this change doesn't introduce any user
that disables the timestamped TLB clock.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Wei Liu <wl@xxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/flushtlb.c        | 19 +++++++++++++------
 xen/include/asm-x86/flushtlb.h | 17 ++++++++++++++++-
 2 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 7d261aef32..e1b1e98c23 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -33,6 +33,9 @@ u32 tlbflush_clock = 1U;
 DEFINE_PER_CPU(u32, tlbflush_time);
 
+/* Signals whether the TLB flush clock is in use. */
+bool __read_mostly tlb_clk_enabled = true;
+
 /*
  * pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value.
  *
@@ -83,12 +86,13 @@ static void post_flush(u32 t)
 static void do_tlb_flush(void)
 {
     unsigned long flags, cr4;
-    u32 t;
+    u32 t = 0;
 
     /* This non-reentrant function is sometimes called in interrupt context. */
     local_irq_save(flags);
 
-    t = pre_flush();
+    if ( tlb_clk_enabled )
+        t = pre_flush();
 
     if ( use_invpcid )
         invpcid_flush_all();
@@ -100,7 +104,8 @@ static void do_tlb_flush(void)
     else
         write_cr3(read_cr3());
 
-    post_flush(t);
+    if ( tlb_clk_enabled )
+        post_flush(t);
 
     local_irq_restore(flags);
 }
@@ -108,7 +113,7 @@ static void do_tlb_flush(void)
 void switch_cr3_cr4(unsigned long cr3, unsigned long cr4)
 {
     unsigned long flags, old_cr4;
-    u32 t;
+    u32 t = 0;
 
     /* Throughout this function we make this assumption: */
     ASSERT(!(cr4 & X86_CR4_PCIDE) || !(cr4 & X86_CR4_PGE));
@@ -116,7 +121,8 @@ void switch_cr3_cr4(unsigned long cr3, unsigned long cr4)
     /* This non-reentrant function is sometimes called in interrupt context. */
     local_irq_save(flags);
 
-    t = pre_flush();
+    if ( tlb_clk_enabled )
+        t = pre_flush();
     hvm_flush_guest_tlbs();
 
     old_cr4 = read_cr4();
@@ -169,7 +175,8 @@ void switch_cr3_cr4(unsigned long cr3, unsigned long cr4)
     if ( cr4 & X86_CR4_PCIDE )
         invpcid_flush_all_nonglobals();
 
-    post_flush(t);
+    if ( tlb_clk_enabled )
+        post_flush(t);
 
     local_irq_restore(flags);
 }
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 50df468c16..d5ca4bad57 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -21,10 +21,21 @@ extern u32 tlbflush_clock;
 /* Time at which each CPU's TLB was last flushed. */
 DECLARE_PER_CPU(u32, tlbflush_time);
 
-#define tlbflush_current_time() tlbflush_clock
+/* TLB clock is in use. */
+extern bool tlb_clk_enabled;
+
+static inline uint32_t tlbflush_current_time(void)
+{
+    /* Returning 0 from tlbflush_current_time will always force a flush. */
+    return tlb_clk_enabled ? tlbflush_clock : 0;
+}
 
 static inline void page_set_tlbflush_timestamp(struct page_info *page)
 {
+    /* Avoid the write if the TLB clock is disabled. */
+    if ( !tlb_clk_enabled )
+        return;
+
     /*
      * Prevent storing a stale time stamp, which could happen if an update
      * to tlbflush_clock plus a subsequent flush IPI happen between the
@@ -67,6 +78,10 @@ static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp)
 {
     unsigned int cpu;
 
+    /* Short-circuit: there's no need to iterate if the clock is disabled. */
+    if ( !tlb_clk_enabled )
+        return;
+
     for_each_cpu ( cpu, mask )
         if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) )
             __cpumask_clear_cpu(cpu, mask);
-- 
2.26.0
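
To illustrate the "forces NEED_FLUSH to always return true" point above: the
snippet below is a simplified, self-contained sketch, not the exact Xen code.
The stand-in need_flush() only paraphrases the existing wrap handling in
flushtlb.h, where a current time of 0 already means the TLB cannot be proven
clean, and main() is purely illustrative of what a future user of the boolean
would do.

/*
 * Simplified sketch (not the exact Xen code) of why a disabled TLB clock
 * always yields "flush needed".
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the globals touched by this patch. */
static bool tlb_clk_enabled = true;
static uint32_t tlbflush_clock = 1u;

/* Mirrors the patched helper: a disabled clock reads as time 0. */
static uint32_t tlbflush_current_time(void)
{
    return tlb_clk_enabled ? tlbflush_clock : 0;
}

/*
 * Paraphrase of the NEED_FLUSH idea: a current time of 0 (clock wrap, or
 * the clock being disabled) means the TLB cannot be proven clean, so the
 * caller must flush.
 */
static bool need_flush(uint32_t cpu_stamp, uint32_t lastuse_stamp)
{
    uint32_t curr_time = tlbflush_current_time();

    return curr_time == 0 ||
           (cpu_stamp <= lastuse_stamp && lastuse_stamp <= curr_time);
}

int main(void)
{
    tlbflush_clock = 10;
    /* CPU flushed (stamp 7) after the page's last use (stamp 3): no flush. */
    printf("clock on : %d\n", need_flush(7, 3));

    /* What a future user of the boolean would set. */
    tlb_clk_enabled = false;
    /* Same stamps, but now a flush is always reported. */
    printf("clock off: %d\n", need_flush(7, 3));
    return 0;
}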