[Xen-devel] [PATCH v3 04/29] x86: assembly, use ENDPROC for functions
In several places, END is used to mark the end of a function. END is not
intended for functions: it does not mark the symbol as a function in the
object file. Use ENDPROC in those cases; it does the right thing.

Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: <x86@xxxxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Juergen Gross <jgross@xxxxxxxx> [xen parts]
Cc: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
---
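For reference, the distinction lives in the linkage macros. A simplified
sketch, paraphrased from include/linux/linkage.h of this era and not a
verbatim copy (ASM_NL is the per-architecture assembler statement
separator):

  #ifndef END
  #define END(name) \
          .size name, .-name
  #endif

  #ifndef ENDPROC
  #define ENDPROC(name) \
          .type name, @function ASM_NL \
          END(name)
  #endif

END only records the symbol's size, while ENDPROC additionally types the
symbol as STT_FUNC in the ELF symbol table, which is what consumers of the
symbol table (debuggers, kallsyms, objtool-style tooling) rely on to tell
functions apart from data.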
 arch/x86/entry/entry_32.S        | 58 ++++++++++++++++++++--------------------
 arch/x86/entry/entry_64.S        | 40 +++++++++++++--------------
 arch/x86/entry/entry_64_compat.S |  4 +--
 arch/x86/kernel/ftrace_32.S      |  8 +++---
 arch/x86/kernel/ftrace_64.S      | 10 +++----
 arch/x86/xen/xen-pvh.S           |  2 +-
 6 files changed, 61 insertions(+), 61 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 50bc26949e9e..a546b84aec01 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -249,7 +249,7 @@ ENTRY(__switch_to_asm)
 	popl	%ebp
 	jmp	__switch_to
-END(__switch_to_asm)
+ENDPROC(__switch_to_asm)
 /*
  * A newly forked process directly context switches into this address.
@@ -289,7 +289,7 @@ ENTRY(ret_from_fork)
  */
 	movl	$0, PT_EAX(%esp)
 	jmp	2b
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
 /*
  * Return to user mode is not as complex as all this looks,
@@ -323,7 +323,7 @@ ENTRY(resume_userspace)
 	movl	%esp, %eax
 	call	prepare_exit_to_usermode
 	jmp	restore_all
-END(ret_from_exception)
+ENDPROC(ret_from_exception)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
@@ -335,7 +335,7 @@ ENTRY(resume_kernel)
 	jz	restore_all
 	call	preempt_schedule_irq
 	jmp	.Lneed_resched
-END(resume_kernel)
+ENDPROC(resume_kernel)
 #endif
 GLOBAL(__begin_SYSENTER_singlestep_region)
@@ -635,7 +635,7 @@ ENTRY(irq_entries_start)
 	jmp	common_interrupt
 	.align	8
 	.endr
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
 /*
  * the CPU automatically disables interrupts when executing an IRQ vector,
@@ -684,7 +684,7 @@ ENTRY(coprocessor_error)
 	pushl	$0
 	pushl	$do_coprocessor_error
 	jmp	common_exception
-END(coprocessor_error)
+ENDPROC(coprocessor_error)
 ENTRY(simd_coprocessor_error)
 	ASM_CLAC
@@ -698,20 +698,20 @@ ENTRY(simd_coprocessor_error)
 	pushl	$do_simd_coprocessor_error
 #endif
 	jmp	common_exception
-END(simd_coprocessor_error)
+ENDPROC(simd_coprocessor_error)
 ENTRY(device_not_available)
 	ASM_CLAC
 	pushl	$-1				# mark this as an int
 	pushl	$do_device_not_available
 	jmp	common_exception
-END(device_not_available)
+ENDPROC(device_not_available)
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
 	iret
 	_ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+ENDPROC(native_iret)
 #endif
 ENTRY(overflow)
@@ -719,59 +719,59 @@ ENTRY(overflow)
 	pushl	$0
 	pushl	$do_overflow
 	jmp	common_exception
-END(overflow)
+ENDPROC(overflow)
 ENTRY(bounds)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_bounds
 	jmp	common_exception
-END(bounds)
+ENDPROC(bounds)
 ENTRY(invalid_op)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_invalid_op
 	jmp	common_exception
-END(invalid_op)
+ENDPROC(invalid_op)
 ENTRY(coprocessor_segment_overrun)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_coprocessor_segment_overrun
 	jmp	common_exception
-END(coprocessor_segment_overrun)
+ENDPROC(coprocessor_segment_overrun)
 ENTRY(invalid_TSS)
 	ASM_CLAC
 	pushl	$do_invalid_TSS
 	jmp	common_exception
-END(invalid_TSS)
+ENDPROC(invalid_TSS)
 ENTRY(segment_not_present)
 	ASM_CLAC
 	pushl	$do_segment_not_present
 	jmp	common_exception
-END(segment_not_present)
+ENDPROC(segment_not_present)
 ENTRY(stack_segment)
 	ASM_CLAC
 	pushl	$do_stack_segment
 	jmp	common_exception
-END(stack_segment)
+ENDPROC(stack_segment)
 ENTRY(alignment_check)
 	ASM_CLAC
 	pushl	$do_alignment_check
 	jmp	common_exception
-END(alignment_check)
+ENDPROC(alignment_check)
 ENTRY(divide_error)
 	ASM_CLAC
 	pushl	$0				# no error code
 	pushl	$do_divide_error
 	jmp	common_exception
-END(divide_error)
+ENDPROC(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -779,7 +779,7 @@ ENTRY(machine_check)
 	pushl	$0
 	pushl	machine_check_vector
 	jmp	common_exception
-END(machine_check)
+ENDPROC(machine_check)
 #endif
 ENTRY(spurious_interrupt_bug)
@@ -787,7 +787,7 @@ ENTRY(spurious_interrupt_bug)
 	pushl	$0
 	pushl	$do_spurious_interrupt_bug
 	jmp	common_exception
-END(spurious_interrupt_bug)
+ENDPROC(spurious_interrupt_bug)
 #ifdef CONFIG_XEN
 ENTRY(xen_hypervisor_callback)
@@ -888,7 +888,7 @@ ENTRY(trace_page_fault)
 	ASM_CLAC
 	pushl	$trace_do_page_fault
 	jmp	common_exception
-END(trace_page_fault)
+ENDPROC(trace_page_fault)
 #endif
 ENTRY(page_fault)
@@ -896,7 +896,7 @@ ENTRY(page_fault)
 	pushl	$do_page_fault
 	ALIGN
 	jmp	common_exception
-END(page_fault)
+ENDPROC(page_fault)
 common_exception:
 	/* the function address is in %gs's slot on the stack */
@@ -928,7 +928,7 @@ common_exception:
 	movl	%esp, %eax			# pt_regs pointer
 	call	*%edi
 	jmp	ret_from_exception
-END(common_exception)
+ENDPROC(common_exception)
 ENTRY(debug)
 	/*
@@ -965,7 +965,7 @@ ENTRY(debug)
 	call	do_debug
 	movl	%ebx, %esp
 	jmp	ret_from_exception
-END(debug)
+ENDPROC(debug)
 /*
  * NMI is doubly nasty. It can happen on the first instruction of
@@ -1033,7 +1033,7 @@ ENTRY(nmi)
 	lss	12+4(%esp), %esp		# back to espfix stack
 	jmp	.Lirq_return
 #endif
-END(nmi)
+ENDPROC(nmi)
 ENTRY(int3)
 	ASM_CLAC
@@ -1045,19 +1045,19 @@ ENTRY(int3)
 	movl	%esp, %eax			# pt_regs pointer
 	call	do_int3
 	jmp	ret_from_exception
-END(int3)
+ENDPROC(int3)
 ENTRY(general_protection)
 	pushl	$do_general_protection
 	jmp	common_exception
-END(general_protection)
+ENDPROC(general_protection)
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
 	ASM_CLAC
 	pushl	$do_async_page_fault
 	jmp	common_exception
-END(async_page_fault)
+ENDPROC(async_page_fault)
 #endif
 ENTRY(rewind_stack_do_exit)
@@ -1069,4 +1069,4 @@ ENTRY(rewind_stack_do_exit)
 	call	do_exit
 1:	jmp 1b
-END(rewind_stack_do_exit)
+ENDPROC(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 607d72c4a485..1fe8758102cb 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -321,7 +321,7 @@ syscall_return_via_sysret:
 opportunistic_sysret_failed:
 	SWAPGS
 	jmp	restore_c_regs_and_iret
-END(entry_SYSCALL_64)
+ENDPROC(entry_SYSCALL_64)
 ENTRY(stub_ptregs_64)
 	/*
@@ -347,13 +347,13 @@ ENTRY(stub_ptregs_64)
 1:
 	jmp	*%rax				/* Called from C */
-END(stub_ptregs_64)
+ENDPROC(stub_ptregs_64)
 .macro ptregs_stub func
 ENTRY(ptregs_\func)
 	leaq	\func(%rip), %rax
 	jmp	stub_ptregs_64
-END(ptregs_\func)
+ENDPROC(ptregs_\func)
 .endm
 /* Instantiate ptregs_stub for each ptregs-using syscall */
@@ -396,7 +396,7 @@ ENTRY(__switch_to_asm)
 	popq	%rbp
 	jmp	__switch_to
-END(__switch_to_asm)
+ENDPROC(__switch_to_asm)
 /*
  * A newly forked process directly context switches into this address.
@@ -432,7 +432,7 @@ ENTRY(ret_from_fork)
  */
 	movq	$0, RAX(%rsp)
 	jmp	2b
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
 /*
  * Build the entry stubs with some assembler magic.
@@ -447,7 +447,7 @@ ENTRY(irq_entries_start)
 	jmp	common_interrupt
 	.align	8
 	.endr
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
 /*
  * Interrupt entry/exit.
@@ -649,7 +649,7 @@ native_irq_return_ldt:
  */
 	jmp	native_irq_return_iret
 #endif
-END(common_interrupt)
+ENDPROC(common_interrupt)
 /*
  * APIC interrupts.
@@ -661,7 +661,7 @@ ENTRY(\sym)
 .Lcommon_\sym:
 	interrupt \do_sym
 	jmp	ret_from_intr
-END(\sym)
+ENDPROC(\sym)
 .endm
 #ifdef CONFIG_TRACING
@@ -827,7 +827,7 @@ ENTRY(\sym)
 	jmp	error_exit			/* %ebx: no swapgs flag */
 	.endif
-END(\sym)
+ENDPROC(\sym)
 .endm
 #ifdef CONFIG_TRACING
@@ -870,7 +870,7 @@ ENTRY(native_load_gs_index)
 	SWAPGS
 	popfq
 	ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)
 	_ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -900,7 +900,7 @@ ENTRY(do_softirq_own_stack)
 	leaveq
 	decl	PER_CPU_VAR(irq_count)
 	ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
 #ifdef CONFIG_XEN
 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -936,7 +936,7 @@ ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	error_exit
-END(xen_do_hypervisor_callback)
+ENDPROC(xen_do_hypervisor_callback)
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -982,7 +982,7 @@ ENTRY(xen_failsafe_callback)
 	SAVE_EXTRA_REGS
 	ENCODE_FRAME_POINTER
 	jmp	error_exit
-END(xen_failsafe_callback)
+ENDPROC(xen_failsafe_callback)
 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 	xen_hvm_callback_vector xen_evtchn_do_upcall
@@ -1033,7 +1033,7 @@ ENTRY(paranoid_entry)
 	SWAPGS
 	xorl	%ebx, %ebx
 1:	ret
-END(paranoid_entry)
+ENDPROC(paranoid_entry)
 /*
  * "Paranoid" exit path from exception stack.  This is invoked
@@ -1062,7 +1062,7 @@ paranoid_exit_restore:
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
 	INTERRUPT_RETURN
-END(paranoid_exit)
+ENDPROC(paranoid_exit)
 /*
  * Save all registers in pt_regs, and switch gs if needed.
@@ -1144,7 +1144,7 @@ ENTRY(error_entry)
 	mov	%rax, %rsp
 	decl	%ebx
 	jmp	.Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+ENDPROC(error_entry)
 /*
@@ -1158,7 +1158,7 @@ ENTRY(error_exit)
 	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
-END(error_exit)
+ENDPROC(error_exit)
 /* Runs on exception stack */
 ENTRY(nmi)
@@ -1506,12 +1506,12 @@ nmi_restore:
  * mode, so this cannot result in a fault.
  */
 	INTERRUPT_RETURN
-END(nmi)
+ENDPROC(nmi)
 ENTRY(ignore_sysret)
 	mov	$-ENOSYS, %eax
 	sysret
-END(ignore_sysret)
+ENDPROC(ignore_sysret)
 ENTRY(rewind_stack_do_exit)
 	/* Prevent any naive code from trying to unwind to our caller. */
@@ -1522,4 +1522,4 @@ ENTRY(rewind_stack_do_exit)
 	call	do_exit
 1:	jmp 1b
-END(rewind_stack_do_exit)
+ENDPROC(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e1721dafbcb1..966c09ffd62d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -262,7 +262,7 @@ sysret32_from_system_call:
 	movq	RSP-ORIG_RAX(%rsp), %rsp
 	swapgs
 	sysretl
-END(entry_SYSCALL_compat)
+ENDPROC(entry_SYSCALL_compat)
 /*
  * 32-bit legacy system call entry.
@@ -340,7 +340,7 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	SWAPGS
 	jmp	restore_regs_and_iret
-END(entry_INT80_compat)
+ENDPROC(entry_INT80_compat)
 	ALIGN
 GLOBAL(stub32_clone)
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 07f40359c9ea..30bc4af8b0de 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
 ENTRY(function_hook)
 	ret
-END(function_hook)
+ENDPROC(function_hook)
 ENTRY(ftrace_caller)
@@ -98,7 +98,7 @@ ftrace_graph_call:
 /* This is weak to keep gas from relaxing the jumps */
 WEAK(ftrace_stub)
 	ret
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 ENTRY(ftrace_regs_caller)
 	/*
@@ -202,7 +202,7 @@ ftrace_stub:
 	popl	%ecx
 	popl	%eax
 	jmp	ftrace_stub
-END(function_hook)
+ENDPROC(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -225,7 +225,7 @@ ENTRY(ftrace_graph_caller)
 	popl	%ecx
 	popl	%eax
 	ret
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 .globl return_to_handler
 return_to_handler:
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 1dfac634bbf7..823e31577333 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -147,7 +147,7 @@ EXPORT_SYMBOL(mcount)
 ENTRY(function_hook)
 	retq
-END(function_hook)
+ENDPROC(function_hook)
 ENTRY(ftrace_caller)
 	/* save_mcount_regs fills in first two parameters */
@@ -183,7 +183,7 @@ GLOBAL(ftrace_graph_call)
 /* This is weak to keep gas from relaxing the jumps */
 WEAK(ftrace_stub)
 	retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 ENTRY(ftrace_regs_caller)
 	/* Save the current flags before any operations that can change them */
@@ -254,7 +254,7 @@ GLOBAL(ftrace_regs_caller_end)
 	jmp ftrace_epilogue
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -290,7 +290,7 @@ trace:
 	restore_mcount_regs
 	jmp fgraph_trace
-END(function_hook)
+ENDPROC(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -312,7 +312,7 @@ ENTRY(ftrace_graph_caller)
 	restore_mcount_regs
 	retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 GLOBAL(return_to_handler)
 	subq  $24, %rsp
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index 5e246716d58f..512fda03c93f 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -133,7 +133,7 @@ ENTRY(pvh_start_xen)
 	ljmp $__BOOT_CS, $_pa(startup_32)
 #endif
-END(pvh_start_xen)
+ENDPROC(pvh_start_xen)
 	.section ".init.data","aw"
 	.balign 8
--
2.12.2

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel