[Xen-devel] [PATCH] xen/x86: Remove USER_MAPPINGS_ARE_GLOBAL definition
It has been unconditionally enabled for 64bit Xen builds since 2006 c/s
6f562e72 "[XEN][X86_64] USe GLOBAL bit to build user mappings."

Adjust the order of definitions in x86_64/page.h to put the bit definitions
for pte flags together.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/flushtlb.c           |   31 +++++++------------------------
 xen/arch/x86/mm.c                 |   18 ++----------------
 xen/arch/x86/x86_64/traps.c       |    4 ----
 xen/include/asm-x86/x86_64/page.h |   11 +++--------
 4 files changed, 12 insertions(+), 52 deletions(-)

diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 1b75652..5d5d79c 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -72,7 +72,7 @@ static void post_flush(u32 t)
 
 void write_cr3(unsigned long cr3)
 {
-    unsigned long flags;
+    unsigned long flags, cr4 = read_cr4();
     u32 t;
 
     /* This non-reentrant function is sometimes called in interrupt context. */
@@ -82,16 +82,9 @@ void write_cr3(unsigned long cr3)
 
     hvm_flush_guest_tlbs();
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
-    {
-        unsigned long cr4 = read_cr4();
-        write_cr4(cr4 & ~X86_CR4_PGE);
-        asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
-        write_cr4(cr4);
-    }
-#else
+    write_cr4(cr4 & ~X86_CR4_PGE);
     asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
-#endif
+    write_cr4(cr4);
 
     post_flush(t);
 
@@ -123,23 +116,13 @@ void flush_area_local(const void *va, unsigned int flags)
     else
     {
         u32 t = pre_flush();
+        unsigned long cr4 = read_cr4();
 
         hvm_flush_guest_tlbs();
 
-#ifndef USER_MAPPINGS_ARE_GLOBAL
-        if ( !(flags & FLUSH_TLB_GLOBAL) || !(read_cr4() & X86_CR4_PGE) )
-        {
-            asm volatile ( "mov %0, %%cr3"
-                           : : "r" (read_cr3()) : "memory" );
-        }
-        else
-#endif
-        {
-            unsigned long cr4 = read_cr4();
-            write_cr4(cr4 & ~X86_CR4_PGE);
-            barrier();
-            write_cr4(cr4);
-        }
+        write_cr4(cr4 & ~X86_CR4_PGE);
+        barrier();
+        write_cr4(cr4);
 
         post_flush(t);
     }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2543916..d23cb3f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -162,7 +162,8 @@ boolean_param("allowsuperpage", opt_allow_superpage);
 static void put_superpage(unsigned long mfn);
 
 static uint32_t base_disallow_mask;
-#define L1_DISALLOW_MASK (base_disallow_mask | _PAGE_GNTTAB)
+/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
+#define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
 #define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
 
 #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?      \
@@ -171,12 +172,6 @@ static uint32_t base_disallow_mask;
 
 #define L4_DISALLOW_MASK (base_disallow_mask)
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
-/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
-#undef L1_DISALLOW_MASK
-#define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
-#endif
-
 #define l1_disallow_mask(d)                                     \
     ((d != dom_io) &&                                           \
      (rangeset_is_empty((d)->iomem_caps) &&                     \
@@ -994,7 +989,6 @@ get_page_from_l4e(
     return rc;
 }
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
 #define adjust_guest_l1e(pl1e, d)                                            \
     do {                                                                     \
         if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&                \
@@ -1011,14 +1005,6 @@ get_page_from_l4e(
                 l1e_add_flags((pl1e), (_PAGE_GLOBAL|_PAGE_USER));            \
         }                                                                    \
     } while ( 0 )
-#else
-#define adjust_guest_l1e(pl1e, d)                               \
-    do {                                                        \
-        if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&   \
-             likely(!is_pv_32on64_domain(d)) )                  \
-            l1e_add_flags((pl1e), _PAGE_USER);                  \
-    } while ( 0 )
-#endif
 
 #define adjust_guest_l2e(pl2e, d)                               \
     do {                                                        \
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 650c33d..0040bef 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -266,12 +266,8 @@ void toggle_guest_mode(struct vcpu *v)
     v->arch.flags ^= TF_kernel_mode;
     asm volatile ( "swapgs" );
     update_cr3(v);
-#ifdef USER_MAPPINGS_ARE_GLOBAL
     /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
     asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
-#else
-    write_ptbase(v);
-#endif
 
     if ( !(v->arch.flags & TF_kernel_mode) )
         return;
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index c193c88..3eee5b5 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -161,20 +161,15 @@ typedef l4_pgentry_t root_pgentry_t;
 
 /* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte.*/
 #define _PAGE_GNTTAB (1U<<22)
 
-#define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
-#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
-
-#define USER_MAPPINGS_ARE_GLOBAL
-#ifdef USER_MAPPINGS_ARE_GLOBAL
 /*
  * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
  * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
  * is asserted for both.
  */
 #define _PAGE_GUEST_KERNEL (1U<<12)
-#else
-#define _PAGE_GUEST_KERNEL 0
-#endif
+
+#define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
 
 #endif /* __X86_64_PAGE_H__ */
-- 
1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
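
A note on the mechanism the now-unconditional path relies on: TLB entries
created from PTEs with _PAGE_GLOBAL set survive an ordinary CR3 reload, so
flushing them wholesale means clearing and restoring CR4.PGE around the
page-table switch. The fragment below is a minimal user-space sketch of that
ordering only; read_cr4(), write_cr4() and load_cr3() are illustrative stubs,
not Xen's register accessors or "mov %0, %%cr3" inline assembly, and the CR3
value passed in main() is hypothetical.

/*
 * Minimal standalone sketch (illustrative, not Xen code): the CR4.PGE
 * toggle that write_cr3() now performs unconditionally.
 */

#define X86_CR4_PGE (1UL << 7)               /* CR4.PGE: Page Global Enable */

static unsigned long fake_cr4 = X86_CR4_PGE; /* stand-in for the CR4 register */

static unsigned long read_cr4(void) { return fake_cr4; }
static void write_cr4(unsigned long val) { fake_cr4 = val; }
static void load_cr3(unsigned long cr3) { (void)cr3; } /* no-op stub */

/*
 * Switch page tables and flush the TLB including global entries:
 * clearing CR4.PGE invalidates global translations, the CR3 load
 * flushes the non-global ones, and restoring CR4 re-enables PGE.
 */
static void write_cr3_flush_global(unsigned long cr3)
{
    unsigned long cr4 = read_cr4();

    write_cr4(cr4 & ~X86_CR4_PGE);
    load_cr3(cr3);
    write_cr4(cr4);
}

int main(void)
{
    write_cr3_flush_global(0x1000UL);        /* hypothetical page-table address */
    return 0;
}

Since USER_MAPPINGS_ARE_GLOBAL has been defined unconditionally on 64bit
builds since 2006, the #else branches deleted by the patch were already
compiled out, so no functional change is intended.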