[PATCH 1/2] x86/svm: Remove regs param from asm-called functions
An optimisation is going to want to conditionally have extra data on the stack
around VMExit.
We could alternate between `mov %rsp, %rdi` and `lea 8(%rsp), %rdi`, but it
is easier just to make the functions void and let the compiler do the (not
very) hard work.
Passing regs is a bit weird for HVM guests anyway, because the resulting
pointer is invariant (this isn't native exception handling, where the regs
pointers *are* important), and all of these functions calculate `current`
for themselves, which is another invariant.
Finally, the compiler can merge the get_cpu_info() calculation, which is common
to both `current` and guest_cpu_user_regs(), meaning the delta in C really is
just one `lea`, and no more expensive than the `mov`s in ASM anyway.
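For reference, those accessors boil down to roughly the following (a simplified
sketch of asm/current.h; field layout abridged, and struct vcpu,
struct cpu_user_regs and STACK_SIZE come from the surrounding headers, so this
is illustrative rather than verbatim):

    /* Simplified sketch of the asm/current.h accessors; not verbatim. */
    struct cpu_info {
        struct cpu_user_regs guest_cpu_user_regs;
        unsigned int processor_id;
        struct vcpu *current_vcpu;
        /* ... */
    };

    /*
     * cpu_info lives at the top of the per-vCPU stack: round %rsp up to
     * the next STACK_SIZE boundary, then step back one struct cpu_info.
     */
    static inline struct cpu_info *get_cpu_info(void)
    {
        register unsigned long sp asm("rsp");

        return (struct cpu_info *)((sp | (STACK_SIZE - 1)) + 1) - 1;
    }

    #define current               (get_cpu_info()->current_vcpu)
    #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)

i.e. once the cpu_info pointer has been formed for `current`,
guest_cpu_user_regs() is the same base plus a constant offset.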
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/hvm/svm/entry.S     | 3 ---
 xen/arch/x86/hvm/svm/nestedsvm.c | 3 ++-
 xen/arch/x86/hvm/svm/svm.c       | 6 ++++--
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index a60d759f7108..be4ce52bd81d 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -26,7 +26,6 @@ ENTRY(svm_asm_do_resume)
GET_CURRENT(bx)
.Lsvm_do_resume:
call svm_intr_assist
- mov %rsp,%rdi
call nsvm_vcpu_switch
ASSERT_NOT_IN_ATOMIC
@@ -52,7 +51,6 @@ UNLIKELY_START(ne, nsvm_hap)
jmp .Lsvm_do_resume
__UNLIKELY_END(nsvm_hap)
- mov %rsp, %rdi
call svm_vmenter_helper
clgi
@@ -132,7 +130,6 @@ __UNLIKELY_END(nsvm_hap)
*/
stgi
GLOBAL(svm_stgi_label)
- mov %rsp,%rdi
call svm_vmexit_handler
jmp .Lsvm_do_resume
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 9f5f35f16aff..77f754736023 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1460,8 +1460,9 @@ nestedsvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
}
/* VCPU switch */
-void nsvm_vcpu_switch(struct cpu_user_regs *regs)
+void nsvm_vcpu_switch(void)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
struct vcpu *v = current;
struct nestedvcpu *nv;
struct nestedsvm *svm;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 0849a9dc5f41..81f0cf55676b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1040,8 +1040,9 @@ static void noreturn cf_check svm_do_resume(void)
reset_stack_and_jump(svm_asm_do_resume);
}
-void svm_vmenter_helper(const struct cpu_user_regs *regs)
+void svm_vmenter_helper(void)
{
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
@@ -2570,8 +2571,9 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
},
};
-void svm_vmexit_handler(struct cpu_user_regs *regs)
+void svm_vmexit_handler(void)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
uint64_t exit_reason;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
--
2.11.0