[Xen-devel] [PATCH v2 2/2] SVM: introduce a VM entry helper
The copying of register values doesn't need to be done in assembly. The
VMLOAD invocation can also be further deferred (and centralized). Therefore
replace the svm_asid_handle_vmrun() invocation with a call to the new
helper.
Similarly move the VM exit side register value copying into
svm_vmexit_handler().
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.
---
TBD: Now that we always make it out to guest context after VMLOAD, perhaps
svm_sync_vmcb() should no longer override vmcb_needs_vmsave, and
svm_vmexit_handler() would then no longer need to set the field at all.
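For illustration only, a rough sketch of the direction this TBD points at;
the body of svm_vmenter_helper() below otherwise mirrors the hunk further
down, while the explicit vmcb_sync_state assignment is a hypothetical
follow-up, not part of this patch:

void svm_vmenter_helper(const struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;

    svm_asid_handle_vmrun();

    /* The deferred VMLOAD, now issued immediately ahead of VMRUN. */
    if ( curr->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
        svm_sync_vmcb(curr, vmcb_needs_vmsave);

    /*
     * Hypothetical: with guest entry guaranteed from this point on, the
     * "needs VMSAVE" marking could be done here (assuming svm_sync_vmcb()
     * stopped overriding the state itself), at which point
     * svm_vmexit_handler() would no longer need to set the field.
     */
    curr->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;

    vmcb->rax = regs->rax;
    vmcb->rip = regs->rip;
    vmcb->rsp = regs->rsp;
    vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
}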
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -61,24 +61,14 @@ UNLIKELY_START(ne, nsvm_hap)
jmp .Lsvm_do_resume
__UNLIKELY_END(nsvm_hap)
- call svm_asid_handle_vmrun
+ mov %rsp, %rdi
+ call svm_vmenter_helper
cmpb $0,tb_init_done(%rip)
UNLIKELY_START(nz, svm_trace)
call svm_trace_vmentry
UNLIKELY_END(svm_trace)
- mov VCPU_svm_vmcb(%rbx),%rcx
- mov UREGS_rax(%rsp),%rax
- mov %rax,VMCB_rax(%rcx)
- mov UREGS_rip(%rsp),%rax
- mov %rax,VMCB_rip(%rcx)
- mov UREGS_rsp(%rsp),%rax
- mov %rax,VMCB_rsp(%rcx)
- mov UREGS_eflags(%rsp),%rax
- or $X86_EFLAGS_MBS,%rax
- mov %rax,VMCB_rflags(%rcx)
-
mov VCPU_arch_msr(%rbx), %rax
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
@@ -111,16 +101,6 @@ UNLIKELY_END(svm_trace)
SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
- mov VCPU_svm_vmcb(%rbx),%rcx
- mov VMCB_rax(%rcx),%rax
- mov %rax,UREGS_rax(%rsp)
- mov VMCB_rip(%rcx),%rax
- mov %rax,UREGS_rip(%rsp)
- mov VMCB_rsp(%rcx),%rax
- mov %rax,UREGS_rsp(%rsp)
- mov VMCB_rflags(%rcx),%rax
- mov %rax,UREGS_eflags(%rsp)
-
STGI
GLOBAL(svm_stgi_label)
mov %rsp,%rdi
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1169,12 +1169,25 @@ static void noreturn svm_do_resume(struc
hvm_do_resume(v);
- if ( v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
- svm_sync_vmcb(v, vmcb_needs_vmsave);
-
reset_stack_and_jump(svm_asm_do_resume);
}
+void svm_vmenter_helper(const struct cpu_user_regs *regs)
+{
+ struct vcpu *curr = current;
+ struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+
+ svm_asid_handle_vmrun();
+
+ if ( curr->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
+ svm_sync_vmcb(curr, vmcb_needs_vmsave);
+
+ vmcb->rax = regs->rax;
+ vmcb->rip = regs->rip;
+ vmcb->rsp = regs->rsp;
+ vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
+}
+
static void svm_guest_osvw_init(struct vcpu *vcpu)
{
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
@@ -2621,6 +2634,12 @@ void svm_vmexit_handler(struct cpu_user_
struct vlapic *vlapic = vcpu_vlapic(v);
v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
+
+ regs->rax = vmcb->rax;
+ regs->rip = vmcb->rip;
+ regs->rsp = vmcb->rsp;
+ regs->rflags = vmcb->rflags;
+
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
@@ -3107,9 +3126,6 @@ void svm_vmexit_handler(struct cpu_user_
}
out:
- if ( v->arch.hvm_svm.vmcb_sync_state == vmcb_needs_vmload )
- svm_sync_vmcb(v, vmcb_needs_vmsave);
-
if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
return;
@@ -3118,7 +3134,6 @@ void svm_vmexit_handler(struct cpu_user_
intr.fields.tpr =
(vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
vmcb_set_vintr(vmcb, intr);
- ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
}
void svm_trace_vmentry(void)
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -119,12 +119,6 @@ void __dummy__(void)
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
BLANK();
- OFFSET(VMCB_rax, struct vmcb_struct, rax);
- OFFSET(VMCB_rip, struct vmcb_struct, rip);
- OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
- OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
- BLANK();
-
OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
BLANK();
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -23,6 +23,7 @@
#include <asm/processor.h>
void svm_asid_init(const struct cpuinfo_x86 *c);
+void svm_asid_handle_vmrun(void);
static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
{