[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 1/5] x86/hvm: Rework HVM_HCALL_invalidate handling
Sending an invalidation to the device model is an internal detail of completing the hypercall; callers should not need to be responsible for it. Drop HVM_HCALL_invalidate entirely and call send_invalidate_req() when appropriate. This makes the function boolean in nature, although the existing HVM_HCALL_{completed,preempted} defines are retained to aid code clarity. While updating the return type, drop _do from the name, as it is redundant. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx> CC: Kevin Tian <kevin.tian@xxxxxxxxx> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx> --- xen/arch/x86/hvm/hvm.c | 7 +++---- xen/arch/x86/hvm/svm/svm.c | 8 ++------ xen/arch/x86/hvm/vmx/vmx.c | 13 ++++--------- xen/include/asm-x86/hvm/support.h | 3 +-- 4 files changed, 10 insertions(+), 21 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 5f72758..e164f57 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -3874,7 +3874,7 @@ static const hypercall_table_t hvm_hypercall_table[] = { #undef HYPERCALL #undef COMPAT_CALL -int hvm_do_hypercall(struct cpu_user_regs *regs) +bool hvm_hypercall(struct cpu_user_regs *regs) { struct vcpu *curr = current; struct domain *currd = curr->domain; @@ -4011,9 +4011,8 @@ int hvm_do_hypercall(struct cpu_user_regs *regs) return HVM_HCALL_preempted; if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) && - test_and_clear_bool(currd->arch.hvm_domain. 
- qemu_mapcache_invalidate) ) - return HVM_HCALL_invalidate; + test_and_clear_bool(currd->arch.hvm_domain.qemu_mapcache_invalidate) ) + send_invalidate_req(); return HVM_HCALL_completed; } diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 01c7b58..ca2785c 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -2542,13 +2542,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) break; BUG_ON(vcpu_guestmode); HVMTRACE_1D(VMMCALL, regs->_eax); - rc = hvm_do_hypercall(regs); - if ( rc != HVM_HCALL_preempted ) - { + + if ( hvm_hypercall(regs) == HVM_HCALL_completed ) __update_guest_eip(regs, inst_len); - if ( rc == HVM_HCALL_invalidate ) - send_invalidate_req(); - } break; case VMEXIT_DR0_READ ... VMEXIT_DR7_READ: diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index d3d98da..42f4fbd 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -3629,19 +3629,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) update_guest_eip(); /* Safe: RDTSC, RDTSCP */ hvm_rdtsc_intercept(regs); break; + case EXIT_REASON_VMCALL: - { - int rc; HVMTRACE_1D(VMMCALL, regs->_eax); - rc = hvm_do_hypercall(regs); - if ( rc != HVM_HCALL_preempted ) - { + + if ( hvm_hypercall(regs) == HVM_HCALL_completed ) update_guest_eip(); /* Safe: VMCALL */ - if ( rc == HVM_HCALL_invalidate ) - send_invalidate_req(); - } break; - } + case EXIT_REASON_CR_ACCESS: { __vmread(EXIT_QUALIFICATION, &exit_qualification); diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h index 16550c5..dc257c5 100644 --- a/xen/include/asm-x86/hvm/support.h +++ b/xen/include/asm-x86/hvm/support.h @@ -105,8 +105,7 @@ enum hvm_copy_result hvm_fetch_from_guest_linear( #define HVM_HCALL_completed 0 /* hypercall completed - no further action */ #define HVM_HCALL_preempted 1 /* hypercall preempted - re-execute VMCALL */ -#define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache */ -int 
hvm_do_hypercall(struct cpu_user_regs *pregs); +bool hvm_hypercall(struct cpu_user_regs *regs); void hvm_hlt(unsigned int eflags); void hvm_triple_fault(void); -- 2.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |