[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH v5 2/8] x86: annotate entry points with type and size
On Mon, Jan 15, 2024 at 03:34:56PM +0100, Jan Beulich wrote: > @@ -625,7 +627,7 @@ ENTRY(dom_crash_sync_extable) > > /* No special register assumptions. */ > #ifdef CONFIG_PV > -ENTRY(continue_pv_domain) > +FUNC(continue_pv_domain) > ENDBR64 > call check_wakeup_from_wait > ret_from_intr: > @@ -640,26 +642,28 @@ ret_from_intr: > #else > jmp test_all_events > #endif > +END(continue_pv_domain) > #else > -ret_from_intr: > +FUNC_LOCAL(ret_from_intr, 0) Why does this need to have an alignment of 0? There's no fallthrough of previous code AFAICT. > ASSERT_CONTEXT_IS_XEN > jmp restore_all_xen > +END(ret_from_intr) > #endif > > .section .init.text, "ax", @progbits > -ENTRY(early_page_fault) > +FUNC(early_page_fault) > ENDBR64 > movl $X86_EXC_PF, 4(%rsp) > SAVE_ALL > movq %rsp, %rdi > call do_early_page_fault > jmp restore_all_xen > +END(early_page_fault) > > .section .text.entry, "ax", @progbits > > - ALIGN > /* %r12=ist_exit */ > -restore_all_xen: > +FUNC_LOCAL(restore_all_xen) > > #ifdef CONFIG_DEBUG > mov %rsp, %rdi > @@ -683,8 +687,9 @@ UNLIKELY_END(exit_cr3) > > RESTORE_ALL adj=8 > iretq > +END(restore_all_xen) > > -ENTRY(common_interrupt) > +FUNC(common_interrupt) > ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP > SAVE_ALL > > @@ -713,12 +718,14 @@ ENTRY(common_interrupt) > mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14) > mov %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14) > jmp ret_from_intr > +END(common_interrupt) > > -ENTRY(entry_PF) > +FUNC(entry_PF) > ENDBR64 > movl $X86_EXC_PF, 4(%rsp) > +END(entry_PF) > /* No special register assumptions. */ > -GLOBAL(handle_exception) > +FUNC(handle_exception, 0) Given patch 8/8 that enables support for placing FUNC() into separate sections, the fallthrough arrangement here with entry_PF is no longer guaranteed, as the linker could re-order the sections and thus entry_PF could fallthrough into another text section? 
IOW: entry_PF needs a "jmp handle_exception", and then handle_exception itself can be padded as required by the default alignment? > ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP > SAVE_ALL > > @@ -886,92 +893,108 @@ FATAL_exception_with_ints_disabled: > xorl %esi,%esi > movq %rsp,%rdi > tailcall fatal_trap > +END(handle_exception) > > -ENTRY(entry_DE) > +FUNC(entry_DE) > ENDBR64 > pushq $0 > movl $X86_EXC_DE, 4(%rsp) > jmp handle_exception > +END(entry_DE) > > -ENTRY(entry_MF) > +FUNC(entry_MF) > ENDBR64 > pushq $0 > movl $X86_EXC_MF, 4(%rsp) > jmp handle_exception > +END(entry_MF) > > -ENTRY(entry_XM) > +FUNC(entry_XM) > ENDBR64 > pushq $0 > movl $X86_EXC_XM, 4(%rsp) > jmp handle_exception > +END(entry_XM) > > -ENTRY(entry_NM) > +FUNC(entry_NM) > ENDBR64 > pushq $0 > movl $X86_EXC_NM, 4(%rsp) > jmp handle_exception > +END(entry_NM) > > -ENTRY(entry_DB) > +FUNC(entry_DB) > ENDBR64 > pushq $0 > movl $X86_EXC_DB, 4(%rsp) > jmp handle_ist_exception > +END(entry_DB) > > -ENTRY(entry_BP) > +FUNC(entry_BP) > ENDBR64 > pushq $0 > movl $X86_EXC_BP, 4(%rsp) > jmp handle_exception > +END(entry_BP) > > -ENTRY(entry_OF) > +FUNC(entry_OF) > ENDBR64 > pushq $0 > movl $X86_EXC_OF, 4(%rsp) > jmp handle_exception > +END(entry_OF) > > -ENTRY(entry_BR) > +FUNC(entry_BR) > ENDBR64 > pushq $0 > movl $X86_EXC_BR, 4(%rsp) > jmp handle_exception > +END(entry_BR) > > -ENTRY(entry_UD) > +FUNC(entry_UD) > ENDBR64 > pushq $0 > movl $X86_EXC_UD, 4(%rsp) > jmp handle_exception > +END(entry_UD) > > -ENTRY(entry_TS) > +FUNC(entry_TS) > ENDBR64 > movl $X86_EXC_TS, 4(%rsp) > jmp handle_exception > +END(entry_TS) > > -ENTRY(entry_NP) > +FUNC(entry_NP) > ENDBR64 > movl $X86_EXC_NP, 4(%rsp) > jmp handle_exception > +END(entry_NP) > > -ENTRY(entry_SS) > +FUNC(entry_SS) > ENDBR64 > movl $X86_EXC_SS, 4(%rsp) > jmp handle_exception > +END(entry_SS) > > -ENTRY(entry_GP) > +FUNC(entry_GP) > ENDBR64 > movl $X86_EXC_GP, 4(%rsp) > jmp handle_exception > +END(entry_GP) > > -ENTRY(entry_AC) > +FUNC(entry_AC) > 
ENDBR64 > movl $X86_EXC_AC, 4(%rsp) > jmp handle_exception > +END(entry_AC) > > -ENTRY(entry_CP) > +FUNC(entry_CP) > ENDBR64 > movl $X86_EXC_CP, 4(%rsp) > jmp handle_exception > +END(entry_CP) > > -ENTRY(entry_DF) > +FUNC(entry_DF) > ENDBR64 > movl $X86_EXC_DF, 4(%rsp) > /* Set AC to reduce chance of further SMAP faults */ > @@ -994,8 +1017,9 @@ ENTRY(entry_DF) > > movq %rsp,%rdi > tailcall do_double_fault > +END(entry_DF) > > -ENTRY(entry_NMI) > +FUNC(entry_NMI) > ENDBR64 > pushq $0 > movl $X86_EXC_NMI, 4(%rsp) > @@ -1126,21 +1150,24 @@ handle_ist_exception: > ASSERT_CONTEXT_IS_XEN > jmp restore_all_xen > #endif > +END(entry_NMI) > > -ENTRY(entry_MC) > +FUNC(entry_MC) > ENDBR64 > pushq $0 > movl $X86_EXC_MC, 4(%rsp) > jmp handle_ist_exception > +END(entry_MC) > > /* No op trap handler. Required for kexec crash path. */ > -GLOBAL(trap_nop) > +FUNC(trap_nop, 0) Could this use the default alignment? > ENDBR64 > iretq > +END(trap_nop) > > /* Table of automatically generated entry points. One per vector. */ > .pushsection .init.rodata, "a", @progbits > -GLOBAL(autogen_entrypoints) > +DATA(autogen_entrypoints, 8) > /* pop into the .init.rodata section and record an entry point. */ > .macro entrypoint ent > .pushsection .init.rodata, "a", @progbits > @@ -1149,7 +1176,7 @@ GLOBAL(autogen_entrypoints) > .endm > > .popsection > -autogen_stubs: /* Automatically generated stubs. */ > +FUNC_LOCAL(autogen_stubs, 0) /* Automatically generated stubs. */ Wouldn't it be better to align the stubs, since that could make them faster? Thanks, Roger.
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.