[Xen-changelog] [xen master] x86/hvm: Avoid __hvm_copy() raising #PF behind the emulators back
commit 28ef9ccaa91cf945c052cdd9364be0c40f7bce4a
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Nov 1 20:02:35 2016 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Dec 2 17:23:02 2016 +0000

    x86/hvm: Avoid __hvm_copy() raising #PF behind the emulators back

    Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require
    callers to inject the pagefault themselves.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c        |  2 ++
 xen/arch/x86/hvm/hvm.c            | 14 ++++++++++++--
 xen/arch/x86/hvm/vmx/vvmx.c       | 20 +++++++++++++++-----
 xen/arch/x86/mm/shadow/common.c   |  1 +
 xen/include/asm-x86/hvm/support.h |  4 +---
 5 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 035b654..ccf3aa2 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -799,6 +799,7 @@ static int __hvmemul_read(
     case HVMCOPY_okay:
         break;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
@@ -905,6 +906,7 @@ static int hvmemul_write(
     case HVMCOPY_okay:
         break;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
                                          hvmemul_ctxt, 0);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 007ce4c..27896ec 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2927,6 +2927,8 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_linear(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         goto out;
 
@@ -2965,11 +2967,15 @@ void hvm_task_switch(
                                   offsetof(typeof(tss), trace) -
                                   offsetof(typeof(tss), eip),
                                   PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         goto out;
 
     rc = hvm_copy_from_guest_linear(
         &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     /*
      * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
      * functions knew we want RO access.
@@ -3012,7 +3018,10 @@ void hvm_task_switch(
                                       &tss.back_link, sizeof(tss.back_link),
                                       0, &pfinfo);
         if ( rc == HVMCOPY_bad_gva_to_gfn )
+        {
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
             exn_raised = 1;
+        }
         else if ( rc != HVMCOPY_okay )
             goto out;
     }
@@ -3050,7 +3059,10 @@ void hvm_task_switch(
         rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
                                       &pfinfo);
         if ( rc == HVMCOPY_bad_gva_to_gfn )
+        {
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
             exn_raised = 1;
+        }
         else if ( rc != HVMCOPY_okay )
             goto out;
     }
@@ -3114,8 +3126,6 @@ static enum hvm_copy_result __hvm_copy(
             {
                 pfinfo->linear = addr;
                 pfinfo->ec = pfec;
-
-                hvm_inject_page_fault(pfec, addr);
             }
             return HVMCOPY_bad_gva_to_gfn;
         }
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index fd7ea0a..e6e9ebd 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -396,7 +396,6 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     struct vcpu *v = current;
     union vmx_inst_info info;
     struct segment_register seg;
-    pagefault_info_t pfinfo;
     unsigned long base, index, seg_base, disp, offset;
     int scale, size;
 
@@ -451,10 +450,17 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
                   offset + size - 1 > seg.limit) )
             goto gp_fault;
 
-        if ( poperandS != NULL &&
-             hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
-                  != HVMCOPY_okay )
-            return X86EMUL_EXCEPTION;
+        if ( poperandS != NULL )
+        {
+            pagefault_info_t pfinfo;
+            int rc = hvm_copy_from_guest_linear(poperandS, base, size,
+                                                0, &pfinfo);
+
+            if ( rc == HVMCOPY_bad_gva_to_gfn )
+                hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
+            if ( rc != HVMCOPY_okay )
+                return X86EMUL_EXCEPTION;
+        }
         decode->mem = base;
         decode->len = size;
     }
@@ -1623,6 +1629,8 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
     gpa = nvcpu->nv_vvmcxaddr;
 
     rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         return X86EMUL_EXCEPTION;
 
@@ -1694,6 +1702,8 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
     switch ( decode.type ) {
     case VMX_INST_MEMREG_TYPE_MEMORY:
         rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
+        if ( rc == HVMCOPY_bad_gva_to_gfn )
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
         if ( rc != HVMCOPY_okay )
             return X86EMUL_EXCEPTION;
         break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 032a041..c59f022 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -198,6 +198,7 @@ hvm_read(enum x86_segment seg,
     case HVMCOPY_okay:
         return X86EMUL_OKAY;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
     case HVMCOPY_unhandleable:
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 78349f8..3d767d7 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -85,9 +85,7 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
  *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
  *                          mapping to a guest physical address.  The
  *                          pagefault_info_t structure will be filled in if
- *                          provided, and a page fault exception is
- *                          automatically queued for injection into the
- *                          current HVM VCPU.
+ *                          provided.
  */
 typedef struct pagefault_info
 {
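
For reference, the caller-side convention this commit establishes looks roughly like the sketch below. It is a minimal illustration, not code from the patch: read_guest_word() is a hypothetical helper, while hvm_copy_from_guest_linear(), hvm_inject_page_fault(), pagefault_info_t and PFEC_page_present are the interfaces visible in the diff above. Paths running outside the instruction emulator inject the fault directly; emulator callbacks record it in the emulation context via x86_emul_pagefault() instead.

/*
 * Minimal sketch (not part of the patch): a hypothetical caller reading
 * one word from a guest linear address under the new convention, where
 * __hvm_copy() only fills in pfinfo and the caller raises the #PF.
 */
static int read_guest_word(unsigned long addr, unsigned long *val)
{
    pagefault_info_t pfinfo;
    enum hvm_copy_result rc;

    rc = hvm_copy_from_guest_linear(val, addr, sizeof(*val),
                                    PFEC_page_present, &pfinfo);

    switch ( rc )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;

    case HVMCOPY_bad_gva_to_gfn:
        /*
         * The copy routine no longer queues the fault itself; inject it
         * here from the details it recorded.  An emulator callback would
         * call x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &ctxt) instead.
         */
        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
        return X86EMUL_EXCEPTION;

    default:
        return X86EMUL_UNHANDLEABLE;
    }
}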