VT-d: adjust IOMMU interrupt affinities when all CPUs are online Since these interrupts get set up before APs get brought online, their affinities naturally could only ever point to CPU 0 alone so far. Adjust this to include potentially multiple CPUs in the target mask (when running in one of the cluster modes), and take into account NUMA information (to handle the interrupts on a CPU on the node where the respective IOMMU is). Signed-off-by: Jan Beulich --- v2: Call adjust_vtd_irq_affinities() explicitly from enter_state() rather than through acpi_dmar_zap(). --- a/xen/arch/x86/acpi/power.c +++ b/xen/arch/x86/acpi/power.c @@ -219,6 +219,7 @@ static int enter_state(u32 state) mtrr_aps_sync_begin(); enable_nonboot_cpus(); mtrr_aps_sync_end(); + adjust_vtd_irq_affinities(); acpi_dmar_zap(); thaw_domains(); system_state = SYS_STATE_active; --- a/xen/drivers/passthrough/vtd/iommu.c +++ b/xen/drivers/passthrough/vtd/iommu.c @@ -1971,6 +1971,33 @@ void clear_fault_bits(struct iommu *iomm spin_unlock_irqrestore(&iommu->register_lock, flags); } +static void adjust_irq_affinity(struct acpi_drhd_unit *drhd) +{ + const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd); + unsigned int node = rhsa ? 
pxm_to_node(rhsa->proximity_domain) + : NUMA_NO_NODE; + const cpumask_t *cpumask = &cpu_online_map; + + if ( node < MAX_NUMNODES && node_online(node) && + cpumask_intersects(&node_to_cpumask(node), cpumask) ) + cpumask = &node_to_cpumask(node); + dma_msi_set_affinity(irq_to_desc(drhd->iommu->msi.irq), cpumask); +} + +int adjust_vtd_irq_affinities(void) +{ + struct acpi_drhd_unit *drhd; + + if ( !iommu_enabled ) + return 0; + + for_each_drhd_unit ( drhd ) + adjust_irq_affinity(drhd); + + return 0; +} +__initcall(adjust_vtd_irq_affinities); + static int init_vtd_hw(void) { struct acpi_drhd_unit *drhd; @@ -1984,13 +2011,10 @@ static int init_vtd_hw(void) */ for_each_drhd_unit ( drhd ) { - struct irq_desc *desc; + adjust_irq_affinity(drhd); iommu = drhd->iommu; - desc = irq_to_desc(iommu->msi.irq); - dma_msi_set_affinity(desc, desc->arch.cpu_mask); - clear_fault_bits(iommu); spin_lock_irqsave(&iommu->register_lock, flags); --- a/xen/include/xen/iommu.h +++ b/xen/include/xen/iommu.h @@ -137,6 +137,9 @@ int iommu_do_domctl(struct xen_domctl *, void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count); void iommu_iotlb_flush_all(struct domain *d); +/* While VT-d specific, this must get declared in a generic header. */ +int adjust_vtd_irq_affinities(void); + /* * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to * avoid unecessary iotlb_flush in the low level IOMMU code.