
Re: [Xen-devel] [PATCH] x86/HPET: deal with event having expired while interrupt was masked


  • To: Jan Beulich <JBeulich@xxxxxxxx>, xen-devel <xen-devel@xxxxxxxxxxxxx>
  • From: Keir Fraser <keir@xxxxxxx>
  • Date: Wed, 20 Mar 2013 15:18:56 +0000
  • Cc: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
  • Delivery-date: Wed, 20 Mar 2013 15:22:02 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: Ac4lfj0CZDtiAaIomUupmveIzVgtOw==
  • Thread-topic: [PATCH] x86/HPET: deal with event having expired while interrupt was masked

On 20/03/2013 14:45, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:

> Commit 2d8a282 ("x86/HPET: fix FSB interrupt masking") may cause the
> HPET event to occur while its interrupt is masked. In that case we need
> to "manually" deliver the event.
> 
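[Aside: a minimal sketch of the "manual delivery" idea described above,
reusing the names from the patch below; the helper itself is
hypothetical and only illustrates the race, where the comparator fires
while the MSI is masked, so no interrupt is ever raised for the
already-expired deadline.]

    /* Hypothetical illustration, not part of the patch. */
    static void unmask_and_catch_up(struct hpet_event_channel *ch,
                                    struct irq_desc *desc)
    {
        hpet_msi_unmask(desc);

        /*
         * The comparator may have fired while the MSI was masked; no
         * interrupt was raised for it, and none will be.  If the
         * deadline is already in the past, run the handler directly.
         */
        if ( ch->event_handler && ch->next_event < NOW() )
            ch->event_handler(ch);
    }
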
> Unfortunately this requires the locking to be changed: For one, it was
> always bogus for handle_hpet_broadcast() to use spin_unlock_irq() - the
> function is being called from an interrupt handler, and hence shouldn't
> blindly re-enable interrupts (this should be left up to the generic
> interrupt handling code). And with the event handler wanting to acquire
> the lock for two of its code regions, we must not enter it with the
> lock already held. Hence move the locking into
> hpet_{attach,detach}_channel(), permitting the lock to be dropped by
> set_channel_irq_affinity() (which is a tail call of those functions).
> 
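[For reference, the general rule behind the first point, as generic
code rather than anything from this patch: a handler that can be
entered with interrupts already disabled must restore the prior
interrupt state on unlock instead of unconditionally re-enabling,
which is what the spin_lock_irqsave()/spin_unlock_irqrestore() pair
does. The type and handler here are made up for illustration.]

    /* Generic illustration only; some_channel is a made-up type. */
    static void some_irq_handler(struct some_channel *ch)
    {
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);      /* saves IRQ state */
        /* ... critical section ... */
        spin_unlock_irqrestore(&ch->lock, flags); /* restores it,
                                                     doesn't blindly
                                                     re-enable */
    }
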
> Reported-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> Tested-by: Sander Eikelenboom <linux@xxxxxxxxxxxxxx>

Acked-by: Keir Fraser <keir@xxxxxxx>

> --- a/xen/arch/x86/hpet.c
> +++ b/xen/arch/x86/hpet.c
> @@ -171,13 +171,14 @@ static void handle_hpet_broadcast(struct
>      cpumask_t mask;
>      s_time_t now, next_event;
>      unsigned int cpu;
> +    unsigned long flags;
>  
> -    spin_lock_irq(&ch->lock);
> +    spin_lock_irqsave(&ch->lock, flags);
>  
>  again:
>      ch->next_event = STIME_MAX;
>  
> -    spin_unlock_irq(&ch->lock);
> +    spin_unlock_irqrestore(&ch->lock, flags);
>  
>      next_event = STIME_MAX;
>      cpumask_clear(&mask);
> @@ -205,13 +206,13 @@ again:
>  
>      if ( next_event != STIME_MAX )
>      {
> -        spin_lock_irq(&ch->lock);
> +        spin_lock_irqsave(&ch->lock, flags);
>  
>          if ( next_event < ch->next_event &&
>               reprogram_hpet_evt_channel(ch, next_event, now, 0) )
>              goto again;
>  
> -        spin_unlock_irq(&ch->lock);
> +        spin_unlock_irqrestore(&ch->lock, flags);
>      }
>  }
>  
> @@ -460,7 +461,7 @@ static struct hpet_event_channel *hpet_g
>      return ch;
>  }
>  
> -static void set_channel_irq_affinity(const struct hpet_event_channel *ch)
> +static void set_channel_irq_affinity(struct hpet_event_channel *ch)
>  {
>      struct irq_desc *desc = irq_to_desc(ch->msi.irq);
>  
> @@ -470,12 +471,19 @@ static void set_channel_irq_affinity(con
>      hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
>      hpet_msi_unmask(desc);
>      spin_unlock(&desc->lock);
> +
> +    spin_unlock(&ch->lock);
> +
> +    /* We may have missed an interrupt due to the temporary masking. */
> +    if ( ch->event_handler && ch->next_event < NOW() )
> +        ch->event_handler(ch);
>  }
>  
>  static void hpet_attach_channel(unsigned int cpu,
>                                  struct hpet_event_channel *ch)
>  {
> -    ASSERT(spin_is_locked(&ch->lock));
> +    ASSERT(!local_irq_is_enabled());
> +    spin_lock(&ch->lock);
>  
>      per_cpu(cpu_bc_channel, cpu) = ch;
>  
> @@ -484,31 +492,34 @@ static void hpet_attach_channel(unsigned
>          ch->cpu = cpu;
>  
>      if ( ch->cpu != cpu )
> -        return;
> -
> -    set_channel_irq_affinity(ch);
> +        spin_unlock(&ch->lock);
> +    else
> +        set_channel_irq_affinity(ch);
>  }
>  
>  static void hpet_detach_channel(unsigned int cpu,
>                                  struct hpet_event_channel *ch)
>  {
> -    ASSERT(spin_is_locked(&ch->lock));
> +    spin_lock_irq(&ch->lock);
> +
>      ASSERT(ch == per_cpu(cpu_bc_channel, cpu));
>  
>      per_cpu(cpu_bc_channel, cpu) = NULL;
>  
>      if ( cpu != ch->cpu )
> -        return;
> -
> -    if ( cpumask_empty(ch->cpumask) )
> +        spin_unlock_irq(&ch->lock);
> +    else if ( cpumask_empty(ch->cpumask) )
>      {
>          ch->cpu = -1;
>          clear_bit(HPET_EVT_USED_BIT, &ch->flags);
> -        return;
> +        spin_unlock_irq(&ch->lock);
> +    }
> +    else
> +    {
> +        ch->cpu = cpumask_first(ch->cpumask);
> +        set_channel_irq_affinity(ch);
> +        local_irq_enable();
>      }
> -
> -    ch->cpu = cpumask_first(ch->cpumask);
> -    set_channel_irq_affinity(ch);
>  }
>  
>  #include <asm/mc146818rtc.h>
> @@ -686,11 +697,7 @@ void hpet_broadcast_enter(void)
>      ASSERT(!local_irq_is_enabled());
>  
>      if ( !(ch->flags & HPET_EVT_LEGACY) )
> -    {
> -        spin_lock(&ch->lock);
>          hpet_attach_channel(cpu, ch);
> -        spin_unlock(&ch->lock);
> -    }
>  
>      /* Disable LAPIC timer interrupts. */
>      disable_APIC_timer();
> @@ -722,11 +729,7 @@ void hpet_broadcast_exit(void)
>      cpumask_clear_cpu(cpu, ch->cpumask);
>  
>      if ( !(ch->flags & HPET_EVT_LEGACY) )
> -    {
> -        spin_lock_irq(&ch->lock);
>          hpet_detach_channel(cpu, ch);
> -        spin_unlock_irq(&ch->lock);
> -    }
>  }
>  
>  int hpet_broadcast_is_available(void)
> 
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel