
[PATCH -next v7 2/7] arm64: entry: Refactor the entry and exit for exceptions from EL1



The generic entry code uses irqentry_state_t to track lockdep and RCU
state across exception entry and return. For historical reasons, arm64
embeds similar fields within its pt_regs structure.

In preparation for moving arm64 over to the generic entry code, pull
these fields out of arm64's pt_regs and into a separate structure,
matching the style of the generic entry code.
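
As a rough illustration of the shape of the change, consider the following
standalone userspace sketch (not kernel code; the model_* helpers and the
from_idle_task flag are stand-ins for the real entry hooks): the enter helper
now returns the tracking state by value and the matching exit helper takes it
back as an argument, instead of stashing flags in pt_regs.

  #include <stdbool.h>
  #include <stdio.h>

  /* Mirrors the union introduced below: each path uses only one member. */
  typedef struct irqentry_state {
          union {
                  bool exit_rcu;   /* kernel-mode entry/exit paths */
                  bool lockdep;    /* NMI and debug paths */
          };
  } arm64_irqentry_state_t;

  static bool from_idle_task;      /* stand-in for is_idle_task(current) */

  static arm64_irqentry_state_t model_enter_from_kernel_mode(void)
  {
          arm64_irqentry_state_t state = { .exit_rcu = false };

          if (from_idle_task) {
                  /* the real code would call ct_irq_enter() etc. here */
                  state.exit_rcu = true;
          }
          return state;
  }

  static void model_exit_to_kernel_mode(arm64_irqentry_state_t state)
  {
          if (state.exit_rcu)
                  printf("would call ct_irq_exit() here\n");
  }

  int main(void)
  {
          /* the exception handler body runs between enter and exit */
          arm64_irqentry_state_t state = model_enter_from_kernel_mode();

          model_exit_to_kernel_mode(state);
          return 0;
  }

The union mirrors the generic irqentry_state_t: the kernel-mode entry/exit
paths only use exit_rcu, while the NMI and debug paths only use lockdep, so
the two flags can share storage.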

No functional changes.

Suggested-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Jinjie Ruan <ruanjinjie@xxxxxxxxxx>
---
 arch/arm64/include/asm/ptrace.h  |   4 -
 arch/arm64/kernel/entry-common.c | 151 +++++++++++++++++++------------
 2 files changed, 94 insertions(+), 61 deletions(-)

diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 8b915d4a9d4b..65b053a24d82 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -169,10 +169,6 @@ struct pt_regs {
 
        u64 sdei_ttbr1;
        struct frame_record_meta stackframe;
-
-       /* Only valid for some EL1 exceptions. */
-       u64 lockdep_hardirqs;
-       u64 exit_rcu;
 };
 
 /* For correct stack alignment, pt_regs has to be a multiple of 16 bytes. */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 8e798f46ad28..97e0741abde1 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -29,6 +29,13 @@
 #include <asm/sysreg.h>
 #include <asm/system_misc.h>
 
+typedef struct irqentry_state {
+       union {
+               bool    exit_rcu;
+               bool    lockdep;
+       };
+} arm64_irqentry_state_t;
+
 /*
  * Handle IRQ/context state management when entering from kernel mode.
  * Before this function is called it is not safe to call regular kernel code,
@@ -37,29 +44,36 @@
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
  */
-static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
+static __always_inline arm64_irqentry_state_t __enter_from_kernel_mode(struct pt_regs *regs)
 {
-       regs->exit_rcu = false;
+       arm64_irqentry_state_t state = {
+               .exit_rcu = false,
+       };
 
        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                lockdep_hardirqs_off(CALLER_ADDR0);
                ct_irq_enter();
                trace_hardirqs_off_finish();
 
-               regs->exit_rcu = true;
-               return;
+               state.exit_rcu = true;
+               return state;
        }
 
        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
+
+       return state;
 }
 
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static noinstr arm64_irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
 {
-       __enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = __enter_from_kernel_mode(regs);
+
        mte_check_tfsr_entry();
        mte_disable_tco_entry(current);
+
+       return state;
 }
 
 /*
@@ -70,12 +84,13 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
  */
-static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
+static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
+                                                 arm64_irqentry_state_t state)
 {
        lockdep_assert_irqs_disabled();
 
        if (!regs_irqs_disabled(regs)) {
-               if (regs->exit_rcu) {
+               if (state.exit_rcu) {
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare();
                        ct_irq_exit();
@@ -85,15 +100,16 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
 
                trace_hardirqs_on();
        } else {
-               if (regs->exit_rcu)
+               if (state.exit_rcu)
                        ct_irq_exit();
        }
 }
 
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
+                                       arm64_irqentry_state_t state)
 {
        mte_check_tfsr_exit();
-       __exit_to_kernel_mode(regs);
+       __exit_to_kernel_mode(regs, state);
 }
 
 /*
@@ -194,9 +210,11 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
  * mode. Before this function is called it is not safe to call regular kernel
  * code, instrumentable code, or any code which may trigger an exception.
  */
-static void noinstr arm64_enter_nmi(struct pt_regs *regs)
+static noinstr arm64_irqentry_state_t arm64_enter_nmi(struct pt_regs *regs)
 {
-       regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+       arm64_irqentry_state_t state;
+
+       state.lockdep = lockdep_hardirqs_enabled();
 
        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
@@ -205,6 +223,8 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 
        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
+
+       return state;
 }
 
 /*
@@ -212,19 +232,18 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
  * mode. After this function returns it is not safe to call regular kernel
  * code, instrumentable code, or any code which may trigger an exception.
  */
-static void noinstr arm64_exit_nmi(struct pt_regs *regs)
+static void noinstr arm64_exit_nmi(struct pt_regs *regs,
+                                  arm64_irqentry_state_t state)
 {
-       bool restore = regs->lockdep_hardirqs;
-
        ftrace_nmi_exit();
-       if (restore) {
+       if (state.lockdep) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }
 
        ct_nmi_exit();
        lockdep_hardirq_exit();
-       if (restore)
+       if (state.lockdep)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
 }
@@ -234,14 +253,18 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
  * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
  */
-static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+static noinstr arm64_irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
 {
-       regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+       arm64_irqentry_state_t state;
+
+       state.lockdep = lockdep_hardirqs_enabled();
 
        lockdep_hardirqs_off(CALLER_ADDR0);
        ct_nmi_enter();
 
        trace_hardirqs_off_finish();
+
+       return state;
 }
 
 /*
@@ -249,17 +272,16 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
  * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
  */
-static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
+                                      arm64_irqentry_state_t state)
 {
-       bool restore = regs->lockdep_hardirqs;
-
-       if (restore) {
+       if (state.lockdep) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }
 
        ct_nmi_exit();
-       if (restore)
+       if (state.lockdep)
                lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
@@ -475,73 +497,81 @@ UNHANDLED(el1t, 64, error)
 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
+       arm64_irqentry_state_t state;
 
-       enter_from_kernel_mode(regs);
+       state = enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_mem_abort(far, esr, regs);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
+       arm64_irqentry_state_t state;
 
-       enter_from_kernel_mode(regs);
+       state = enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_sp_pc_abort(far, esr, regs);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
 {
-       enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = enter_from_kernel_mode(regs);
+
        local_daif_inherit(regs);
        do_el1_undef(regs, esr);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
 {
-       enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = enter_from_kernel_mode(regs);
+
        local_daif_inherit(regs);
        do_el1_bti(regs, esr);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
 {
-       enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = enter_from_kernel_mode(regs);
+
        local_daif_inherit(regs);
        do_el1_gcs(regs, esr);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
 {
-       enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = enter_from_kernel_mode(regs);
+
        local_daif_inherit(regs);
        do_el1_mops(regs, esr);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
 {
-       arm64_enter_el1_dbg(regs);
+       arm64_irqentry_state_t state = arm64_enter_el1_dbg(regs);
+
        debug_exception_enter(regs);
        do_breakpoint(esr, regs);
        debug_exception_exit(regs);
-       arm64_exit_el1_dbg(regs);
+       arm64_exit_el1_dbg(regs, state);
 }
 
 static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
 {
-       arm64_enter_el1_dbg(regs);
+       arm64_irqentry_state_t state = arm64_enter_el1_dbg(regs);
+
        if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
                debug_exception_enter(regs);
                /*
@@ -554,37 +584,40 @@ static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
                        do_el1_softstep(esr, regs);
                debug_exception_exit(regs);
        }
-       arm64_exit_el1_dbg(regs);
+       arm64_exit_el1_dbg(regs, state);
 }
 
 static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
 {
        /* Watchpoints are the only debug exception to write FAR_EL1 */
        unsigned long far = read_sysreg(far_el1);
+       arm64_irqentry_state_t state;
 
-       arm64_enter_el1_dbg(regs);
+       state = arm64_enter_el1_dbg(regs);
        debug_exception_enter(regs);
        do_watchpoint(far, esr, regs);
        debug_exception_exit(regs);
-       arm64_exit_el1_dbg(regs);
+       arm64_exit_el1_dbg(regs, state);
 }
 
 static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
 {
-       arm64_enter_el1_dbg(regs);
+       arm64_irqentry_state_t state = arm64_enter_el1_dbg(regs);
+
        debug_exception_enter(regs);
        do_el1_brk64(esr, regs);
        debug_exception_exit(regs);
-       arm64_exit_el1_dbg(regs);
+       arm64_exit_el1_dbg(regs, state);
 }
 
 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
 {
-       enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = enter_from_kernel_mode(regs);
+
        local_daif_inherit(regs);
        do_el1_fpac(regs, esr);
        local_daif_mask();
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 
 asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
@@ -639,15 +672,16 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
 static __always_inline void __el1_pnmi(struct pt_regs *regs,
                                       void (*handler)(struct pt_regs *))
 {
-       arm64_enter_nmi(regs);
+       arm64_irqentry_state_t state = arm64_enter_nmi(regs);
+
        do_interrupt_handler(regs, handler);
-       arm64_exit_nmi(regs);
+       arm64_exit_nmi(regs, state);
 }
 
 static __always_inline void __el1_irq(struct pt_regs *regs,
                                      void (*handler)(struct pt_regs *))
 {
-       enter_from_kernel_mode(regs);
+       arm64_irqentry_state_t state = enter_from_kernel_mode(regs);
 
        irq_enter_rcu();
        do_interrupt_handler(regs, handler);
@@ -655,7 +689,7 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
 
        arm64_preempt_schedule_irq();
 
-       exit_to_kernel_mode(regs);
+       exit_to_kernel_mode(regs, state);
 }
 static void noinstr el1_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
@@ -681,11 +715,12 @@ asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
 asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
+       arm64_irqentry_state_t state;
 
        local_daif_restore(DAIF_ERRCTX);
-       arm64_enter_nmi(regs);
+       state = arm64_enter_nmi(regs);
        do_serror(regs, esr);
-       arm64_exit_nmi(regs);
+       arm64_exit_nmi(regs, state);
 }
 
 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
@@ -997,12 +1032,13 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
 static void noinstr __el0_error_handler_common(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
+       arm64_irqentry_state_t state;
 
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_ERRCTX);
-       arm64_enter_nmi(regs);
+       state = arm64_enter_nmi(regs);
        do_serror(regs, esr);
-       arm64_exit_nmi(regs);
+       arm64_exit_nmi(regs, state);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
 }
@@ -1122,6 +1158,7 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 asmlinkage noinstr unsigned long
 __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
 {
+       arm64_irqentry_state_t state;
        unsigned long ret;
 
        /*
@@ -1146,9 +1183,9 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
        else if (cpu_has_pan())
                set_pstate_pan(0);
 
-       arm64_enter_nmi(regs);
+       state = arm64_enter_nmi(regs);
        ret = do_sdei_event(regs, arg);
-       arm64_exit_nmi(regs);
+       arm64_exit_nmi(regs, state);
 
        return ret;
 }
-- 
2.34.1