[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 1/2] x86/entry: Introduce POP_GPRS
The macro named RESTORE_ALL has several problems. It adjusts the stack pointer despite this not being clear to the caller. It also goes against recommendations in the optimisation guides because of trying to do too many things at once. (i.e. there's a reason why compilers don't emit code looking like this.) Introduce a new POP_GPRS macro which only POPs GPRs. Use it for the HVM paths which are already using POPs. Also use it for restore_all_{xen,guest}(). This saves most of a cacheline worth of code from two fastpaths: add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-99 (-99) Function old new delta restore_all_guest 378 330 -48 restore_all_xen 165 114 -51 but it also avoids having an explicit modification to the stack pointer between %rsp-relative accesses, which avoids stalls in the stack-engine optimisations in some microarchitectures. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx> CC: Wei Liu <wl@xxxxxxx> --- xen/arch/x86/hvm/svm/entry.S | 17 ++--------------- xen/arch/x86/hvm/vmx/entry.S | 16 +--------------- xen/arch/x86/include/asm/asm_defns.h | 23 +++++++++++++++++++++++ xen/arch/x86/x86_64/asm-offsets.c | 1 + xen/arch/x86/x86_64/entry.S | 12 ++++++------ 5 files changed, 33 insertions(+), 36 deletions(-) diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S index 60b0b00ed0af..305f9466096a 100644 --- a/xen/arch/x86/hvm/svm/entry.S +++ b/xen/arch/x86/hvm/svm/entry.S @@ -74,22 +74,9 @@ __UNLIKELY_END(nsvm_hap) ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM ALTERNATIVE "", DO_SPEC_CTRL_DIV, X86_FEATURE_SC_DIV - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rbp mov VCPU_svm_vmcb_pa(%rbx),%rax - pop %rbx - pop %r11 - pop %r10 - pop %r9 - pop %r8 - pop %rcx /* Skip %rax: restored by VMRUN. */ - pop %rcx - pop %rdx - pop %rsi - pop %rdi + + POP_GPRS rax=%rcx /* Skip %rax. It's restored by VMRUN. 
*/ sti vmrun diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S index 1bead826caa3..92d8b919198c 100644 --- a/xen/arch/x86/hvm/vmx/entry.S +++ b/xen/arch/x86/hvm/vmx/entry.S @@ -115,21 +115,7 @@ UNLIKELY_END(realmode) and $SCF_verw, %eax or %eax, %ecx - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rbp - pop %rbx - pop %r11 - pop %r10 - pop %r9 - pop %r8 - pop %rax - pop %rcx - pop %rdx - pop %rsi - pop %rdi + POP_GPRS jpe .L_skip_verw /* VERW clobbers ZF, but preserves all others, including SF. */ diff --git a/xen/arch/x86/include/asm/asm_defns.h b/xen/arch/x86/include/asm/asm_defns.h index a69fae78b123..ec10a8e1dfc6 100644 --- a/xen/arch/x86/include/asm/asm_defns.h +++ b/xen/arch/x86/include/asm/asm_defns.h @@ -314,6 +314,29 @@ static always_inline void stac(void) subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp .endm +/* + * POP GPRs from a UREGS_* frame on the stack. Does not modify flags. + * + * @rax: Alternative destination for the %rax value on the stack. + */ +.macro POP_GPRS rax=%rax + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop \rax + pop %rcx + pop %rdx + pop %rsi + pop %rdi +.endm + #ifdef CONFIG_PV32 #define CR4_PV32_RESTORE \ ALTERNATIVE_2 "", \ diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index d8903a3ce9c7..944f49a82171 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -59,6 +59,7 @@ void __dummy__(void) DEFINE(sym, offsetof(struct cpu_user_regs, mem) - \ offsetof(struct cpu_user_regs, error_code) __VA_ARGS__) + OFFSET_EF(EFRAME_error_code, error_code); OFFSET_EF(EFRAME_entry_vector, entry_vector); OFFSET_EF(EFRAME_rip, rip); OFFSET_EF(EFRAME_cs, cs); diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S index 7d686b762807..9280216a5436 100644 --- a/xen/arch/x86/x86_64/entry.S +++ b/xen/arch/x86/x86_64/entry.S @@ -186,7 +186,8 @@ FUNC_LOCAL(restore_all_guest) /* 
WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */ - RESTORE_ALL + POP_GPRS + BUILD_BUG_ON(TRAP_syscall & 0xff) testb $TRAP_syscall >> 8, EFRAME_entry_vector + 1(%rsp) jz iret_exit_to_guest @@ -696,20 +697,19 @@ UNLIKELY_END(exit_cr3) /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */ SPEC_CTRL_EXIT_TO_XEN /* Req: %r12=ist_exit %r14=end %rsp=regs, Clob: abcd */ - RESTORE_ALL adj=8 + POP_GPRS /* * When the CPU pushed this exception frame, it zero-extended eflags. * For an IST exit, SPEC_CTRL_EXIT_TO_XEN stashed shadow copies of * spec_ctrl_flags and ver_sel above eflags, as we can't use any GPRs, * and we're at a random place on the stack, not in a CPUFINFO block. - * - * Account for ev/ec having already been popped off the stack. */ SPEC_CTRL_COND_VERW \ - scf=STK_REL(EFRAME_shadow_scf, EFRAME_rip), \ - sel=STK_REL(EFRAME_shadow_sel, EFRAME_rip) + scf=STK_REL(EFRAME_shadow_scf, EFRAME_error_code), \ + sel=STK_REL(EFRAME_shadow_sel, EFRAME_error_code) + add $8, %rsp iretq END(restore_all_xen) -- 2.30.2
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.