[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] irq ratelimit
This patch adds the feature of irq ratelimit. It temporarily masks the interrupt (guest) if too many irqs are observed in a short period (irq storm), to ensure responsiveness of Xen and other guests. For now, the threshold can be adjusted at boot time using the command-line option irq_ratelimit=. Signed-off-by: Qing He <qing.he@xxxxxxxxx> --- arch/x86/irq.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ include/xen/irq.h | 6 +++++ 2 files changed, 70 insertions(+) diff -r 8f81bdd57afe xen/arch/x86/irq.c --- a/xen/arch/x86/irq.c Thu Sep 03 09:51:37 2009 +0100 +++ b/xen/arch/x86/irq.c Tue Sep 15 18:16:38 2009 +0800 @@ -54,6 +54,14 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) }; DEFINE_PER_CPU(struct cpu_user_regs *, __irq_regs); + +static LIST_HEAD(irq_ratelimit_list); +static DEFINE_SPINLOCK(irq_ratelimit_lock); +static struct timer irq_ratelimit_timer; + +/* irq_ratelimit: the max irq rate allowed in every 10ms, set 0 to disable */ +unsigned int __read_mostly irq_ratelimit_threshold = 10000; +integer_param("irq_ratelimit", irq_ratelimit_threshold); /* Must be called when irq disabled */ void lock_vector_lock(void) @@ -241,6 +249,10 @@ static void init_one_irq_desc(struct irq desc->msi_desc = NULL; spin_lock_init(&desc->lock); cpus_setall(desc->affinity); + + desc->rl_quantum_start = NOW(); + desc->rl_cnt = 0; + INIT_LIST_HEAD(&desc->rl_link); } static void init_one_irq_status(int irq) @@ -469,6 +481,29 @@ asmlinkage void do_IRQ(struct cpu_user_r if ( likely(desc->status & IRQ_GUEST) ) { + s_time_t now = NOW(); + if ( now > (desc->rl_quantum_start + MILLISECS(10)) ) { + desc->rl_cnt = 0; + desc->rl_quantum_start = now; + } + if ( unlikely(desc->rl_cnt++ >= irq_ratelimit_threshold) ) { + desc->handler->disable(irq); + /* + * If handler->disable doesn't actually mask the interrupt, + * a disabled irq still can fire. This check also avoids + * possible deadlocks if ratelimit_timer_fn runs at the + * same time. 
+ */ + if ( likely(list_empty(&desc->rl_link)) ) { + spin_lock(&irq_ratelimit_lock); + if ( list_empty(&irq_ratelimit_list) ) + set_timer(&irq_ratelimit_timer, now + MILLISECS(10)); + list_add(&desc->rl_link, &irq_ratelimit_list); + spin_unlock(&irq_ratelimit_lock); + } + goto out; + } + irq_enter(); tsc_in = tb_init_done ? get_cycles() : 0; __do_IRQ_guest(irq); @@ -511,6 +546,35 @@ asmlinkage void do_IRQ(struct cpu_user_r spin_unlock(&desc->lock); set_irq_regs(old_regs); } + +static void irq_ratelimit_timer_fn(void *data) +{ + struct irq_desc *desc, *tmp; + unsigned long flags; + + spin_lock_irqsave(&irq_ratelimit_lock, flags); + + list_for_each_entry_safe(desc, tmp, &irq_ratelimit_list, rl_link) { + spin_lock(&desc->lock); + desc->handler->enable(desc->irq); + list_del(&desc->rl_link); + INIT_LIST_HEAD(&desc->rl_link); + spin_unlock(&desc->lock); + } + + spin_unlock_irqrestore(&irq_ratelimit_lock, flags); +} + +static int __init irq_ratelimit_init(void) +{ + init_timer(&irq_ratelimit_timer, irq_ratelimit_timer_fn, NULL, 0); + + if (irq_ratelimit_threshold == 0) + irq_ratelimit_threshold = ~0U; + + return 0; +} +__initcall(irq_ratelimit_init); int request_irq(unsigned int irq, void (*handler)(int, void *, struct cpu_user_regs *), diff -r 8f81bdd57afe xen/include/xen/irq.h --- a/xen/include/xen/irq.h Thu Sep 03 09:51:37 2009 +0100 +++ b/xen/include/xen/irq.h Tue Sep 15 18:16:38 2009 +0800 @@ -4,6 +4,7 @@ #include <xen/config.h> #include <xen/cpumask.h> #include <xen/spinlock.h> +#include <xen/time.h> #include <asm/regs.h> #include <asm/hardirq.h> @@ -74,6 +75,11 @@ typedef struct irq_desc { int irq; spinlock_t lock; cpumask_t affinity; + + /* irq ratelimit */ + s_time_t rl_quantum_start; + unsigned int rl_cnt; + struct list_head rl_link; } __cacheline_aligned irq_desc_t; #if defined(__ia64__) _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |