
Re: [Xen-devel] [PATCHv2 2 of 2] Move IOMMU faults handling into softirq for AMD-Vi.



On 05/01/2012 15:27, "Dario Faggioli" <raistlin@xxxxxxxx> wrote:

> Dealing with interrupts from AMD-Vi IOMMU(s) is deferred to a softirq
> tasklet, scheduled by the actual IRQ handler. To avoid more interrupts being
> generated (because of further faults), they must be masked in the IOMMU
> within the low-level IRQ handler and re-enabled in the tasklet body. Note
> that this may cause the log to overflow, but none of the existing entries
> will be overwritten.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

This patch needs fixing to apply to xen-unstable tip. Please do that and
resubmit.

 -- Keir

> diff -r 3cb587bb34d0 xen/drivers/passthrough/amd/iommu_init.c
> --- a/xen/drivers/passthrough/amd/iommu_init.c Thu Jan 05 15:12:35 2012 +0100
> +++ b/xen/drivers/passthrough/amd/iommu_init.c Thu Jan 05 15:14:03 2012 +0100
> @@ -32,6 +32,8 @@
>  
>  static int __initdata nr_amd_iommus;
>  
> +static struct tasklet amd_iommu_fault_tasklet;
> +
>  unsigned short ivrs_bdf_entries;
>  static struct radix_tree_root ivrs_maps;
>  struct list_head amd_iommu_head;
> @@ -522,12 +524,10 @@ static void parse_event_log_entry(struct
>      }
>  }
>  
> -static void amd_iommu_page_fault(int irq, void *dev_id,
> -                             struct cpu_user_regs *regs)
> +static void __do_amd_iommu_page_fault(struct amd_iommu *iommu)
>  {
>      u32 entry;
>      unsigned long flags;
> -    struct amd_iommu *iommu = dev_id;
>  
>      spin_lock_irqsave(&iommu->lock, flags);
>      amd_iommu_read_event_log(iommu);
> @@ -546,6 +546,45 @@ static void amd_iommu_page_fault(int irq
>      spin_unlock_irqrestore(&iommu->lock, flags);
>  }
>  
> +static void do_amd_iommu_page_fault(unsigned long data)
> +{
> +    struct amd_iommu *iommu;
> +
> +    if ( !iommu_found() )
> +    {
> +        AMD_IOMMU_DEBUG("no device found, something must be very wrong!\n");
> +        return;
> +    }
> +
> +    /*
> +     * No matter which IOMMU the interrupt came from, check all the
> +     * IOMMUs present in the system. This allows for having just one
> +     * tasklet (instead of one per IOMMU) and should be more than
> +     * fine, considering how rare faults should be.
> +     */
> +    for_each_amd_iommu ( iommu )
> +        __do_amd_iommu_page_fault(iommu);
> +}
> +
> +static void amd_iommu_page_fault(int irq, void *dev_id,
> +                             struct cpu_user_regs *regs)
> +{
> +    u32 entry;
> +    unsigned long flags;
> +    struct amd_iommu *iommu = dev_id;
> +
> +    /* Silence the interrupt; the tasklet will re-enable it. */
> +    spin_lock_irqsave(&iommu->lock, flags);
> +    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
> +    iommu_clear_bit(&entry, IOMMU_STATUS_EVENT_LOG_INT_SHIFT);
> +    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
> +    spin_unlock_irqrestore(&iommu->lock, flags);
> +
> +    /*
> +     * Flag the tasklet as runnable so that it can execute, clear
> +     * the log and re-enable the interrupt.
> +     */
> +    tasklet_schedule(&amd_iommu_fault_tasklet);
> +}
> +
>  static int __init set_iommu_interrupt_handler(struct amd_iommu *iommu)
>  {
>      int irq, ret;
> @@ -884,6 +923,8 @@ int __init amd_iommu_init(void)
>          if ( amd_iommu_init_one(iommu) != 0 )
>              goto error_out;
>  
> +    softirq_tasklet_init(&amd_iommu_fault_tasklet, do_amd_iommu_page_fault, 0);
> +
>      return 0;
>  
>  error_out:
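
For reference, the mask-then-defer pattern the changelog describes boils down
to the wiring of Xen's softirq tasklet API sketched below. This is only an
illustrative sketch, not part of the patch: the example_* names are
hypothetical, and the include list assumes the usual Xen build environment
(the signatures of softirq_tasklet_init() and tasklet_schedule() are the ones
the patch itself uses).

#include <xen/tasklet.h>

static struct tasklet example_tasklet;

/* Runs later, in softirq context, outside the hard-IRQ path. */
static void example_tasklet_fn(unsigned long data)
{
    /* Drain the device's log and re-enable its interrupt source here. */
}

/* Hard-IRQ handler: mask/ack the source, then defer the real work. */
static void example_irq_handler(int irq, void *dev_id,
                                struct cpu_user_regs *regs)
{
    /* ... silence the interrupt source in the device ... */
    tasklet_schedule(&example_tasklet);
}

static int __init example_init(void)
{
    /* Bind the tasklet to its handler; 0 is the opaque data argument. */
    softirq_tasklet_init(&example_tasklet, example_tasklet_fn, 0);
    return 0;
}

With this wiring the hard-IRQ handler stays short, and the potentially long
log-draining work runs outside interrupt context, which is what the patch
aims for.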


