[RFC PATCH 4/4] x86: Use linkage.h helpers to add tags to symbols
Clean up x86_64/kexec_reloc.S and x86_64/entry.S. This fixes the
livepatching contents of entry.S.

RFC: I'm unsure where the page_fault symbol should end, i.e. whether,
unlike in the current code, handle_exception_saved should fall within
page_fault (as handle_exception does) or not.

Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jane Malalane <jane.malalane@xxxxxxxxxx>
---
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/x86_64/entry.S       | 105 +++++++++++++++++++++++++-------------
 xen/arch/x86/x86_64/kexec_reloc.S |  43 ++++++----------
 2 files changed, 86 insertions(+), 62 deletions(-)

diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 4ad25d9c90..7dc280aafa 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
+#include <xen/linkage.h>

 /* %rsp: struct cpu_user_regs */
 .macro ASSERT_CONTEXT_IS_XEN
@@ -152,7 +153,7 @@ process_trap:

         .section .text.entry, "ax", @progbits

 /* %rbx: struct vcpu, interrupts disabled */
-restore_all_guest:
+SYM_CODE_START_LOCAL(restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED

         /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
@@ -239,6 +240,7 @@ iret_exit_to_guest:
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+SYM_CODE_END(restore_all_guest)

 /*
  * When entering SYSCALL from kernel mode:
@@ -255,7 +257,7 @@
  * - Guest %rsp stored in %rax
  * - Xen stack loaded, pointing at the %ss slot
  */
-ENTRY(lstar_enter)
+SYM_CODE_START(lstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -290,9 +292,10 @@ ENTRY(lstar_enter)
         mov   %rsp, %rdi
         call  pv_hypercall
         jmp   test_all_events
+SYM_CODE_END(lstar_enter)

 /* See lstar_enter for entry register state. */
-ENTRY(cstar_enter)
+SYM_CODE_START(cstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -329,8 +332,9 @@ ENTRY(cstar_enter)
         jne   compat_syscall
 #endif
         jmp   switch_to_kernel
+SYM_CODE_END(cstar_enter)

-ENTRY(sysenter_entry)
+SYM_CODE_START(sysenter_entry)
         ENDBR64
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
@@ -339,7 +343,7 @@ ENTRY(sysenter_entry)
         pushq $FLAT_USER_SS
         pushq $0
         pushfq
-GLOBAL(sysenter_eflags_saved)
+SYM_INNER_LABEL_GLOBAL(sysenter_eflags_saved)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $3 /* ring 3 null cs */
         pushq $0 /* null rip */
@@ -393,8 +397,9 @@ UNLIKELY_END(sysenter_gpf)
         jne   compat_sysenter
 #endif
         jmp   .Lbounce_exception
+SYM_CODE_END(sysenter_entry)

-ENTRY(int80_direct_trap)
+SYM_CODE_START(int80_direct_trap)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -465,8 +470,9 @@ UNLIKELY_END(msi_check)

         call  create_bounce_frame
         jmp   test_all_events
+SYM_CODE_END(int80_direct_trap)

-int80_slow_path:
+SYM_CODE_START_LOCAL(int80_slow_path)
         /*
          * Setup entry vector and error code as if this was a GPF caused by an
          * IDT entry with DPL==0.
@@ -482,6 +488,7 @@ int80_slow_path:
          */
         GET_STACK_END(14)
         jmp   handle_exception_saved
+SYM_CODE_END(int80_slow_path)

 /* create_bounce_frame & helpers don't need to be in .text.entry */
         .text
@@ -657,9 +664,8 @@ ret_from_intr:

         .section .text.entry, "ax", @progbits

-        ALIGN
 /* No special register assumptions. */
-restore_all_xen:
+SYM_CODE_START_LOCAL(restore_all_xen)
         /*
          * Check whether we need to switch to the per-CPU page tables, in
          * case we return to late PV exit code (from an NMI or #MC).
          */
@@ -676,8 +682,9 @@ UNLIKELY_END(exit_cr3)

         RESTORE_ALL adj=8
         iretq
+SYM_CODE_END(restore_all_xen)

-ENTRY(common_interrupt)
+SYM_CODE_START(common_interrupt)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL

@@ -706,12 +713,14 @@ ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp   ret_from_intr
+SYM_CODE_END(common_interrupt)

-ENTRY(page_fault)
+SYM_CODE_START(page_fault)
         ENDBR64
         movl  $TRAP_page_fault,4(%rsp)
+
 /* No special register assumptions. */
-GLOBAL(handle_exception)
+SYM_INNER_LABEL_GLOBAL(handle_exception)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL

@@ -734,7 +743,7 @@ GLOBAL(handle_exception)
         cmovnz %r12d, %r13d
 .Lxcpt_cr3_okay:

-handle_exception_saved:
+SYM_INNER_LABEL_LOCAL(handle_exception_saved)
         GET_CURRENT(bx)
         testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
         jz    exception_with_ints_disabled
@@ -842,9 +851,10 @@ handle_exception_saved:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
 #endif
+SYM_CODE_END(page_fault)

 /* No special register assumptions. */
-exception_with_ints_disabled:
+SYM_CODE_START_LOCAL(exception_with_ints_disabled)
         testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
         jnz   FATAL_exception_with_ints_disabled
         movq  %rsp,%rdi
@@ -874,99 +884,116 @@ exception_with_ints_disabled:
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %r13b, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp   restore_all_xen             # return to fixup code
+SYM_CODE_END(exception_with_ints_disabled)

 /* No special register assumptions. */
-FATAL_exception_with_ints_disabled:
+SYM_CODE_START_LOCAL(FATAL_exception_with_ints_disabled)
         xorl  %esi,%esi
         movq  %rsp,%rdi
         call  fatal_trap
         BUG   /* fatal_trap() shouldn't return. */
+SYM_CODE_END(FATAL_exception_with_ints_disabled)

-ENTRY(divide_error)
+SYM_CODE_START(divide_error)
         ENDBR64
         pushq $0
         movl  $TRAP_divide_error,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(divide_error)

-ENTRY(coprocessor_error)
+SYM_CODE_START(coprocessor_error)
         ENDBR64
         pushq $0
         movl  $TRAP_copro_error,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(coprocessor_error)

-ENTRY(simd_coprocessor_error)
+SYM_CODE_START(simd_coprocessor_error)
         ENDBR64
         pushq $0
         movl  $TRAP_simd_error,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(simd_coprocessor_error)

-ENTRY(device_not_available)
+SYM_CODE_START(device_not_available)
         ENDBR64
         pushq $0
         movl  $TRAP_no_device,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(device_not_available)

-ENTRY(debug)
+SYM_CODE_START(debug)
         ENDBR64
         pushq $0
         movl  $TRAP_debug,4(%rsp)
         jmp   handle_ist_exception
+SYM_CODE_END(debug)

-ENTRY(int3)
+SYM_CODE_START(int3)
         ENDBR64
         pushq $0
         movl  $TRAP_int3,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(int3)

-ENTRY(overflow)
+SYM_CODE_START(overflow)
         ENDBR64
         pushq $0
         movl  $TRAP_overflow,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(overflow)

-ENTRY(bounds)
+SYM_CODE_START(bounds)
         ENDBR64
         pushq $0
         movl  $TRAP_bounds,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(bounds)

-ENTRY(invalid_op)
+SYM_CODE_START(invalid_op)
         ENDBR64
         pushq $0
         movl  $TRAP_invalid_op,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(invalid_op)

-ENTRY(invalid_TSS)
+SYM_CODE_START(invalid_TSS)
         ENDBR64
         movl  $TRAP_invalid_tss,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(invalid_TSS)

-ENTRY(segment_not_present)
+SYM_CODE_START(segment_not_present)
         ENDBR64
         movl  $TRAP_no_segment,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(segment_not_present)

-ENTRY(stack_segment)
+SYM_CODE_START(stack_segment)
         ENDBR64
         movl  $TRAP_stack_error,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(stack_segment)

-ENTRY(general_protection)
+SYM_CODE_START(general_protection)
         ENDBR64
         movl  $TRAP_gp_fault,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(general_protection)

-ENTRY(alignment_check)
+SYM_CODE_START(alignment_check)
         ENDBR64
         movl  $TRAP_alignment_check,4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(alignment_check)

-ENTRY(entry_CP)
+SYM_CODE_START(entry_CP)
         ENDBR64
         movl  $X86_EXC_CP, 4(%rsp)
         jmp   handle_exception
+SYM_CODE_END(entry_CP)

-ENTRY(double_fault)
+SYM_CODE_START(double_fault)
         ENDBR64
         movl  $TRAP_double_fault,4(%rsp)
         /* Set AC to reduce chance of further SMAP faults */
@@ -990,12 +1017,13 @@ ENTRY(double_fault)
         movq  %rsp,%rdi
         call  do_double_fault
         BUG   /* do_double_fault() shouldn't return. */
+SYM_CODE_END(double_fault)

-ENTRY(nmi)
+SYM_CODE_START(nmi)
         ENDBR64
         pushq $0
         movl  $TRAP_nmi,4(%rsp)
-handle_ist_exception:
+SYM_INNER_LABEL_LOCAL(handle_ist_exception)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL

@@ -1119,17 +1147,20 @@ handle_ist_exception:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
 #endif
+SYM_CODE_END(nmi)

-ENTRY(machine_check)
+SYM_CODE_START(machine_check)
         ENDBR64
         pushq $0
         movl  $TRAP_machine_check,4(%rsp)
         jmp   handle_ist_exception
+SYM_CODE_END(machine_check)

 /* No op trap handler.  Required for kexec crash path. */
-GLOBAL(trap_nop)
+SYM_CODE_START_NOALIGN(trap_nop)
         ENDBR64
         iretq
+SYM_CODE_END(trap_nop)

 /* Table of automatically generated entry points.  One per vector. */
         .pushsection .init.rodata, "a", @progbits
@@ -1142,7 +1173,8 @@ GLOBAL(autogen_entrypoints)
         .endm
         .popsection

-autogen_stubs: /* Automatically generated stubs. */
+/* Automatically generated stubs. */
+SYM_CODE_START_LOCAL(autogen_stubs)

         vec = 0
         .rept X86_NR_VECTORS
@@ -1186,6 +1218,7 @@ autogen_stubs: /* Automatically generated stubs. */
         vec = vec + 1
         .endr
+SYM_CODE_END(autogen_stubs)

         .section .init.rodata, "a", @progbits
         .size autogen_entrypoints, . - autogen_entrypoints

diff --git a/xen/arch/x86/x86_64/kexec_reloc.S b/xen/arch/x86/x86_64/kexec_reloc.S
index f4842025eb..5f96c74085 100644
--- a/xen/arch/x86/x86_64/kexec_reloc.S
+++ b/xen/arch/x86/x86_64/kexec_reloc.S
@@ -14,6 +14,7 @@
         .file __FILE__

 #include <xen/kimage.h>
+#include <xen/linkage.h>

 #include <asm/asm_defns.h>
 #include <asm/msr-index.h>
@@ -24,7 +25,7 @@
         .align PAGE_SIZE
         .code64

-ENTRY(kexec_reloc)
+SYM_FUNC_START(kexec_reloc)
         /* %rdi - code page maddr */
         /* %rsi - page table maddr */
         /* %rdx - indirection page maddr */
@@ -92,8 +93,9 @@ ENTRY(kexec_reloc)

         /* Enter compatibility mode. */
         ljmp  *compatibility_mode_far(%rip)
+SYM_FUNC_END(kexec_reloc)

-relocate_pages:
+SYM_FUNC_START_LOCAL(relocate_pages)
         /* %rdi - indirection page maddr */
         pushq %rbx

@@ -141,8 +143,9 @@ relocate_pages:
         ret

         .code32
+SYM_FUNC_END(relocate_pages)

-compatibility_mode:
+SYM_FUNC_START_LOCAL(compatibility_mode)
         /* Setup some sane segments. */
         movl  $0x0008, %eax
         movl  %eax, %ds
@@ -169,46 +172,34 @@ compatibility_mode:
         /* Call the image entry point.  This should never return. */
         call  *%ebp
         ud2
+SYM_FUNC_END(compatibility_mode)

-        .align 4
-compatibility_mode_far:
+SYM_DATA_START_LOCAL(compatibility_mode_far)
         .long 0x00000000             /* set in call_32_bit above */
         .word 0x0010
+SYM_DATA_END(compatibility_mode_far)

-        .type compatibility_mode_far, @object
-        .size compatibility_mode_far, . - compatibility_mode_far
-
-compat_mode_gdt_desc:
+SYM_DATA_START_LOCAL(compat_mode_gdt_desc)
         .word .Lcompat_mode_gdt_end - compat_mode_gdt -1
         .quad 0x0000000000000000     /* set in call_32_bit above */
+SYM_DATA_END(compat_mode_gdt_desc)

-        .type compat_mode_gdt_desc, @object
-        .size compat_mode_gdt_desc, . - compat_mode_gdt_desc
-
-        .align 8
-compat_mode_gdt:
+SYM_DATA_START_LOCAL(compat_mode_gdt)
         .quad 0x0000000000000000     /* null */
         .quad 0x00cf93000000ffff     /* 0x0008 ring 0 data */
         .quad 0x00cf9b000000ffff     /* 0x0010 ring 0 code, compatibility */
 .Lcompat_mode_gdt_end:
+SYM_DATA_END(compat_mode_gdt)

-        .type compat_mode_gdt, @object
-        .size compat_mode_gdt, . - compat_mode_gdt
-
-compat_mode_idt:
+SYM_DATA_START_LOCAL(compat_mode_idt)
         .word 0                      /* limit */
         .long 0                      /* base */
-
-        .type compat_mode_idt, @object
-        .size compat_mode_idt, . - compat_mode_idt
+SYM_DATA_END(compat_mode_idt)

 /*
  * 16 words of stack are more than enough.
  */
-        .align 8
-reloc_stack:
+SYM_DATA_START_LOCAL(reloc_stack)
         .fill 16,8,0
 .Lreloc_stack_base:
-
-        .type reloc_stack, @object
-        .size reloc_stack, . - reloc_stack
+SYM_DATA_END(reloc_stack)
--
2.11.0
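
A note for readers, not part of the patch: the SYM_*() helpers used above
come from xen/linkage.h, which this series introduces elsewhere and which
is not shown here. As a rough sketch only -- modeled on Linux's
include/linux/linkage.h, from which the naming is borrowed, and not
necessarily matching the real Xen definitions -- the helpers can be
assumed to expand along these lines:

/*
 * Hypothetical expansions, for orientation only. START emits optional
 * alignment, optional .globl, and the label; END emits a symbol type
 * and, crucially, a .size covering everything since the matching START.
 */
#define SYM_CODE_START(name)         .globl name; .align 16; name:
#define SYM_CODE_START_LOCAL(name)                .align 16; name:
#define SYM_CODE_START_NOALIGN(name) .globl name;            name:
#define SYM_CODE_END(name)           .size name, . - name

#define SYM_FUNC_START(name)         .globl name; .align 16; name:
#define SYM_FUNC_START_LOCAL(name)                .align 16; name:
#define SYM_FUNC_END(name)           .type name, @function; \
                                     .size name, . - name

#define SYM_DATA_START_LOCAL(name)   name:
#define SYM_DATA_END(name)           .type name, @object; \
                                     .size name, . - name

/*
 * Inner labels mark a point inside an enclosing symbol without
 * terminating that symbol's START/END extent.
 */
#define SYM_INNER_LABEL_GLOBAL(name) .globl name; name:
#define SYM_INNER_LABEL_LOCAL(name)  name:

The practical effect is that every annotated symbol carries an explicit
type and extent in the symbol table, which is what livepatching needs in
order to delimit the contents of entry.S, and why the placement of
SYM_CODE_END(page_fault) relative to handle_exception_saved is raised as
an open question in the commit message.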