x86/mmuext: tighten TLB flush address checks

Addresses passed by PV guests should be subjected to __addr_ok(),
avoiding undue TLB flushes.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3268,8 +3268,9 @@ long do_mmuext_op(
         case MMUEXT_INVLPG_LOCAL:
             if ( unlikely(d != pg_owner) )
                 rc = -EPERM;
-            else if ( !paging_mode_enabled(d) ||
-                      paging_invlpg(curr, op.arg1.linear_addr) != 0 )
+            else if ( !paging_mode_enabled(d)
+                      ? __addr_ok(op.arg1.linear_addr)
+                      : paging_invlpg(curr, op.arg1.linear_addr) )
                 flush_tlb_one_local(op.arg1.linear_addr);
             break;
 
@@ -3290,7 +3291,7 @@ long do_mmuext_op(
 
             if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
                 flush_tlb_mask(&pmask);
-            else
+            else if ( __addr_ok(op.arg1.linear_addr) )
                 flush_tlb_one_mask(&pmask, op.arg1.linear_addr);
             break;
         }
@@ -3303,10 +3304,10 @@ long do_mmuext_op(
             break;
 
         case MMUEXT_INVLPG_ALL:
-            if ( likely(d == pg_owner) )
-                flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
-            else
+            if ( unlikely(d != pg_owner) )
                 rc = -EPERM;
+            else if ( __addr_ok(op.arg1.linear_addr) )
+                flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
             break;
 
         case MMUEXT_FLUSH_CACHE:

--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -245,7 +245,9 @@ paging_fault(unsigned long va, struct cp
  * or 0 if it's safe not to do so. */
 static inline int paging_invlpg(struct vcpu *v, unsigned long va)
 {
-    return is_canonical_address(va) && paging_get_hostmode(v)->invlpg(v, va);
+    return (paging_mode_external(v->domain) ? is_canonical_address(va)
+                                             : __addr_ok(va)) &&
+           paging_get_hostmode(v)->invlpg(v, va);
 }
 
 /* Translate a guest virtual address to the frame number that the
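
For reference, below is a minimal standalone sketch (not Xen source; the
reserved-range constant and the helper names are assumptions for illustration
only) of how the two checks chosen between above differ: a pure canonical-ness
test only rejects addresses in the non-canonical hole, while an
__addr_ok()-style test additionally rejects canonical addresses that fall into
the hypervisor-reserved part of the address space, which is what makes it the
tighter filter for linear addresses supplied by PV guests.

/*
 * Standalone illustration, not Xen code.  XEN_RESERVED_END is an assumed
 * value for this sketch; the real layout constants live in the Xen headers.
 */
#include <stdio.h>

#define XEN_RESERVED_END 0xffff880000000000UL  /* assumed end of Xen's area */

/* Canonical x86-64 address: bits 63..47 must all be equal. */
static int canonical(unsigned long va)
{
    return ((long)va >> 47) == ((long)va >> 63);
}

/* Sketch of a PV-style check: below the canonical hole, or above Xen's range. */
static int addr_ok(unsigned long va)
{
    return va < (1UL << 47) || va >= XEN_RESERVED_END;
}

int main(void)
{
    unsigned long samples[] = {
        0x00007f0000001000UL,  /* guest user address: both checks pass */
        0xffff808000000000UL,  /* canonical but inside the reserved range:
                                  only the canonical check passes */
        0x0000800000000000UL,  /* non-canonical: both checks reject */
    };

    for ( unsigned int i = 0; i < sizeof(samples) / sizeof(*samples); i++ )
        printf("%#018lx canonical=%d addr_ok=%d\n",
               samples[i], canonical(samples[i]), addr_ok(samples[i]));

    return 0;
}

With a split along these lines, INVLPG-type requests naming addresses the
guest cannot legitimately map are simply skipped rather than causing a flush,
matching the "avoiding undue TLB flushes" intent stated in the description.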