
[PATCH -next v6 6/8] arm64: entry: Refactor preempt_schedule_irq() check code

ARM64 requires an additional check to decide whether to reschedule on
return from an interrupt. So add arch_irqentry_exit_need_resched() as
the default NOP implementation (it simply returns true) and hook it
into the need_resched() condition in raw_irqentry_exit_cond_resched().
This allows ARM64 to implement an architecture-specific version when
switching over to the generic entry code.
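
For illustration only (not part of this patch): an architecture
overrides the generic default by defining the function plus a matching
#define before kernel/entry/common.c supplies the fallback. A minimal
sketch of what an arm64 override might look like, assuming it lives in
an asm header and borrows the DAIF check used by
arm64_preempt_schedule_irq() below (placement and exact condition are
assumptions, not this series' final code):

  /* Hypothetical placement, e.g. arch/arm64/include/asm/entry-common.h;
   * needs asm/cpufeature.h and asm/sysreg.h. */
  #define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched
  static inline bool arch_irqentry_exit_need_resched(void)
  {
  	/*
  	 * If GIC priority masking is in use and DAIF is still set,
  	 * IRQs were not really unmasked, so skip preemption here
  	 * (same reasoning as arm64_preempt_schedule_irq()).
  	 */
  	if (system_uses_irq_prio_masking() && read_sysreg(daif))
  		return false;

  	return true;
  }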

To align the structure of the code with irqentry_exit_cond_resched()
from the generic entry code, hoist the need_irq_preemption() and
IS_ENABLED() checks earlier, and define separate preemption check
functions depending on whether dynamic preemption is enabled.
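
For context (again, not part of this patch): under
CONFIG_PREEMPT_DYNAMIC the sk_dynamic_irqentry_exit_cond_resched
static key is flipped by the scheduler when the preemption model
changes, e.g. via the preempt= boot parameter. A simplified sketch of
that mechanism, loosely modelled on sched_dynamic_update() in
kernel/sched/core.c (the real code updates more hooks):

  /* Simplified: full preemption enables the IRQ-exit resched path;
   * none/voluntary disable it. */
  switch (mode) {
  case preempt_dynamic_full:
  	static_key_enable(&sk_dynamic_irqentry_exit_cond_resched.key);
  	break;
  case preempt_dynamic_none:
  case preempt_dynamic_voluntary:
  	static_key_disable(&sk_dynamic_irqentry_exit_cond_resched.key);
  	break;
  }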

Suggested-by: Mark Rutland <mark.rutland@xxxxxxx>
Suggested-by: Kevin Brodsky <kevin.brodsky@xxxxxxx>
Suggested-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Jinjie Ruan <ruanjinjie@xxxxxxxxxx>
---
v6:
- Update the commit message.
- Hoist the IS_ENABLED() and need_irq_preemption() check earlier.
- Merge the 4 patches.
---
 arch/arm64/include/asm/preempt.h |  4 ++++
 arch/arm64/kernel/entry-common.c | 35 ++++++++++++++++++--------------
 kernel/entry/common.c            | 16 ++++++++++++++-
 3 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..0f0ba250efe8 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -85,6 +85,7 @@ static inline bool should_resched(int preempt_offset)
 void preempt_schedule(void);
 void preempt_schedule_notrace(void);
 
+void raw_irqentry_exit_cond_resched(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
 DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
@@ -92,11 +93,14 @@ void dynamic_preempt_schedule(void);
 #define __preempt_schedule()           dynamic_preempt_schedule()
 void dynamic_preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace()   dynamic_preempt_schedule_notrace()
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched()   dynamic_irqentry_exit_cond_resched()
 
 #else /* CONFIG_PREEMPT_DYNAMIC */
 
 #define __preempt_schedule()           preempt_schedule()
 #define __preempt_schedule_notrace()   preempt_schedule_notrace()
+#define irqentry_exit_cond_resched()   raw_irqentry_exit_cond_resched()
 
 #endif /* CONFIG_PREEMPT_DYNAMIC */
 #endif /* CONFIG_PREEMPTION */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index dceef4cb140b..1b4936d4cf6e 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -281,19 +281,8 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
                lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-#define need_irq_preemption() \
-       (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
-#else
-#define need_irq_preemption()  (IS_ENABLED(CONFIG_PREEMPTION))
-#endif
-
 static inline bool arm64_preempt_schedule_irq(void)
 {
-       if (!need_irq_preemption())
-               return false;
-
        /*
         * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
         * priority masking is used the GIC irqchip driver will clear DAIF.IF
@@ -576,6 +565,24 @@ static __always_inline void __el1_pnmi(struct pt_regs *regs,
        arm64_exit_nmi(regs, state);
 }
 
+void raw_irqentry_exit_cond_resched(void)
+{
+       if (!preempt_count()) {
+               if (need_resched() && arm64_preempt_schedule_irq())
+                       preempt_schedule_irq();
+       }
+}
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void)
+{
+       if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+               return;
+       raw_irqentry_exit_cond_resched();
+}
+#endif
+
 static __always_inline void __el1_irq(struct pt_regs *regs,
                                      void (*handler)(struct pt_regs *))
 {
@@ -585,10 +592,8 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
        do_interrupt_handler(regs, handler);
        irq_exit_rcu();
 
-       if (!preempt_count() && need_resched()) {
-               if (arm64_preempt_schedule_irq())
-                       preempt_schedule_irq();
-       }
+       if (IS_ENABLED(CONFIG_PREEMPTION))
+               irqentry_exit_cond_resched();
 
        exit_to_kernel_mode(regs, state);
 }
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index b82032777310..4aa9656fa1b4 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -142,6 +142,20 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
        return ret;
 }
 
+/**
+ * arch_irqentry_exit_need_resched - Architecture specific need resched function
+ *
+ * Invoked from raw_irqentry_exit_cond_resched() to check if resched is needed.
+ * Defaults to returning true.
+ *
+ * The main purpose is to permit an arch to skip preempting a task from an IRQ.
+ */
+static inline bool arch_irqentry_exit_need_resched(void);
+
+#ifndef arch_irqentry_exit_need_resched
+static inline bool arch_irqentry_exit_need_resched(void) { return true; }
+#endif
+
 void raw_irqentry_exit_cond_resched(void)
 {
        if (!preempt_count()) {
@@ -149,7 +163,7 @@ void raw_irqentry_exit_cond_resched(void)
                rcu_irq_exit_check_preempt();
                if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                        WARN_ON_ONCE(!on_thread_stack());
-               if (need_resched())
+               if (need_resched() && arch_irqentry_exit_need_resched())
                        preempt_schedule_irq();
        }
 }
-- 
2.34.1