[Xen-devel] [PATCH v3 05/10] x86/MSI: track host and guest masking separately
In particular we want to avoid losing track of our own intention to have
an entry masked. Physical unmasking now happens only when both host and
guest requested so.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -241,7 +241,7 @@ static void hpet_msi_unmask(struct irq_d
     cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
     cfg |= HPET_TN_ENABLE;
     hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
-    ch->msi.msi_attrib.masked = 0;
+    ch->msi.msi_attrib.host_masked = 0;
 }
 
 static void hpet_msi_mask(struct irq_desc *desc)
@@ -252,7 +252,7 @@ static void hpet_msi_mask(struct irq_des
     cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
     cfg &= ~HPET_TN_ENABLE;
     hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
-    ch->msi.msi_attrib.masked = 1;
+    ch->msi.msi_attrib.host_masked = 1;
 }
 
 static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -219,7 +219,6 @@ static int msixtbl_read(
 {
     unsigned long offset;
     struct msixtbl_entry *entry;
-    void *virt;
     unsigned int nr_entry, index;
     int r = X86EMUL_UNHANDLEABLE;
 
@@ -244,10 +243,16 @@ static int msixtbl_read(
     }
     else
     {
-        virt = msixtbl_addr_to_virt(entry, address);
+        const struct msi_desc *msi_desc;
+        void *virt = msixtbl_addr_to_virt(entry, address);
+
         if ( !virt )
             goto out;
-        *pval = readl(virt);
+        msi_desc = virt_to_msi_desc(entry->pdev, virt);
+        if ( !msi_desc )
+            goto out;
+        *pval = MASK_INSR(msi_desc->msi_attrib.guest_masked,
+                          PCI_MSIX_VECTOR_BITMASK);
     }
 
     r = X86EMUL_OKAY;
@@ -265,7 +270,7 @@ static int msixtbl_write(struct vcpu *v,
     void *virt;
     unsigned int nr_entry, index;
     int r = X86EMUL_UNHANDLEABLE;
-    unsigned long flags, orig;
+    unsigned long flags;
     struct irq_desc *desc;
 
     if ( len != 4 || (address & 3) )
@@ -318,37 +323,7 @@ static int msixtbl_write(struct vcpu *v,
 
     ASSERT(msi_desc == desc->msi_desc);
 
-    orig = readl(virt);
-
-    /*
-     * Do not allow guest to modify MSI-X control bit if it is masked
-     * by Xen. We'll only handle the case where Xen thinks that
-     * bit is unmasked, but hardware has silently masked the bit
-     * (in case of SR-IOV VF reset, etc). On the other hand, if Xen
-     * thinks that the bit is masked, but it's really not,
-     * we log a warning.
-     */
-    if ( msi_desc->msi_attrib.masked )
-    {
-        if ( !(orig & PCI_MSIX_VECTOR_BITMASK) )
-            printk(XENLOG_WARNING "MSI-X control bit is unmasked when"
-                   " it is expected to be masked [%04x:%02x:%02x.%u]\n",
-                   entry->pdev->seg, entry->pdev->bus,
-                   PCI_SLOT(entry->pdev->devfn),
-                   PCI_FUNC(entry->pdev->devfn));
-
-        goto unlock;
-    }
-
-    /*
-     * The mask bit is the only defined bit in the word. But we
-     * ought to preserve the reserved bits. Clearing the reserved
-     * bits can result in undefined behaviour (see PCI Local Bus
-     * Specification revision 2.3).
-     */
-    val &= PCI_MSIX_VECTOR_BITMASK;
-    val |= (orig & ~PCI_MSIX_VECTOR_BITMASK);
-    writel(val, virt);
+    guest_mask_msi_irq(desc, !!(val & PCI_MSIX_VECTOR_BITMASK));
 
 unlock:
     spin_unlock_irqrestore(&desc->lock, flags);
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -390,12 +390,13 @@ int msi_maskable_irq(const struct msi_de
            || entry->msi_attrib.maskbit;
 }
 
-static bool_t msi_set_mask_bit(struct irq_desc *desc, int flag)
+static bool_t msi_set_mask_bit(struct irq_desc *desc, bool_t host, bool_t guest)
 {
     struct msi_desc *entry = desc->msi_desc;
     struct pci_dev *pdev;
     u16 seg, control;
     u8 bus, slot, func;
+    bool_t flag = host || guest;
 
     ASSERT(spin_is_locked(&desc->lock));
     BUG_ON(!entry || !entry->dev);
@@ -453,7 +454,8 @@ static bool_t msi_set_mask_bit(struct ir
     default:
         return 0;
     }
-    entry->msi_attrib.masked = !!flag;
+    entry->msi_attrib.host_masked = host;
+    entry->msi_attrib.guest_masked = guest;
 
     return 1;
 }
@@ -484,22 +486,39 @@ static int msi_get_mask_bit(const struct
 
 void mask_msi_irq(struct irq_desc *desc)
 {
-    if ( unlikely(!msi_set_mask_bit(desc, 1)) )
+    if ( unlikely(!msi_set_mask_bit(desc, 1,
+                                    desc->msi_desc->msi_attrib.guest_masked)) )
         BUG_ON(!(desc->status & IRQ_DISABLED));
 }
 
 void unmask_msi_irq(struct irq_desc *desc)
 {
-    if ( unlikely(!msi_set_mask_bit(desc, 0)) )
+    if ( unlikely(!msi_set_mask_bit(desc, 0,
+                                    desc->msi_desc->msi_attrib.guest_masked)) )
         WARN();
 }
 
+void guest_mask_msi_irq(struct irq_desc *desc, bool_t mask)
+{
+    msi_set_mask_bit(desc, desc->msi_desc->msi_attrib.host_masked, mask);
+}
+
 static unsigned int startup_msi_irq(struct irq_desc *desc)
 {
-    unmask_msi_irq(desc);
+    bool_t guest_masked = (desc->status & IRQ_GUEST) &&
+                          is_hvm_domain(desc->msi_desc->dev->domain);
+
+    if ( unlikely(!msi_set_mask_bit(desc, 0, guest_masked)) )
+        WARN();
     return 0;
 }
 
+static void shutdown_msi_irq(struct irq_desc *desc)
+{
+    if ( unlikely(!msi_set_mask_bit(desc, 1, 1)) )
+        BUG_ON(!(desc->status & IRQ_DISABLED));
+}
+
 void ack_nonmaskable_msi_irq(struct irq_desc *desc)
 {
     irq_complete_move(desc);
@@ -524,7 +543,7 @@ void end_nonmaskable_msi_irq(struct irq_
 static hw_irq_controller pci_msi_maskable = {
     .typename = "PCI-MSI/-X",
     .startup = startup_msi_irq,
-    .shutdown = mask_msi_irq,
+    .shutdown = shutdown_msi_irq,
     .enable = unmask_msi_irq,
     .disable = mask_msi_irq,
     .ack = ack_maskable_msi_irq,
@@ -694,7 +713,8 @@ static int msi_capability_init(struct pc
         entry[i].msi_attrib.is_64 = is_64bit_address(control);
         entry[i].msi_attrib.entry_nr = i;
         entry[i].msi_attrib.maskbit = is_mask_bit_support(control);
-        entry[i].msi_attrib.masked = 1;
+        entry[i].msi_attrib.host_masked = 1;
+        entry[i].msi_attrib.guest_masked = 0;
         entry[i].msi_attrib.pos = pos;
         if ( entry[i].msi_attrib.maskbit )
             entry[i].msi.mpos = mpos;
@@ -943,7 +963,8 @@ static int msix_capability_init(struct p
         entry->msi_attrib.is_64 = 1;
         entry->msi_attrib.entry_nr = msi->entry_nr;
         entry->msi_attrib.maskbit = 1;
-        entry->msi_attrib.masked = 1;
+        entry->msi_attrib.host_masked = 1;
+        entry->msi_attrib.guest_masked = 1;
         entry->msi_attrib.pos = pos;
         entry->irq = msi->irq;
         entry->dev = dev;
@@ -1315,7 +1336,8 @@ int pci_restore_msi_state(struct pci_dev
     for ( i = 0; ; )
     {
         if ( unlikely(!msi_set_mask_bit(desc,
-                                        entry[i].msi_attrib.masked)) )
+                                        entry[i].msi_attrib.host_masked,
+                                        entry[i].msi_attrib.guest_masked)) )
             BUG();
 
         if ( !--nr )
@@ -1469,7 +1491,7 @@ static void dump_msi(unsigned char key)
         else
             mask = '?';
         printk(" %-6s%4u vec=%02x%7s%6s%3sassert%5s%7s"
-               " dest=%08x mask=%d/%d/%c\n",
+               " dest=%08x mask=%d/%c%c/%c\n",
               type, irq,
               (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT,
               data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
@@ -1477,7 +1499,10 @@ static void dump_msi(unsigned char key)
              data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
              addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
              addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",
-             dest32, attr.maskbit, attr.masked, mask);
+             dest32, attr.maskbit,
+             attr.host_masked ? 'H' : ' ',
+             attr.guest_masked ? 'G' : ' ',
+             mask);
     }
 }
 
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -451,7 +451,7 @@ static void iommu_msi_unmask(struct irq_
     spin_lock_irqsave(&iommu->lock, flags);
     amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
     spin_unlock_irqrestore(&iommu->lock, flags);
-    iommu->msi.msi_attrib.masked = 0;
+    iommu->msi.msi_attrib.host_masked = 0;
 }
 
 static void iommu_msi_mask(struct irq_desc *desc)
@@ -464,7 +464,7 @@ static void iommu_msi_mask(struct irq_de
     spin_lock_irqsave(&iommu->lock, flags);
     amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
     spin_unlock_irqrestore(&iommu->lock, flags);
-    iommu->msi.msi_attrib.masked = 1;
+    iommu->msi.msi_attrib.host_masked = 1;
 }
 
 static unsigned int iommu_msi_startup(struct irq_desc *desc)
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -997,7 +997,7 @@ static void dma_msi_unmask(struct irq_de
     spin_lock_irqsave(&iommu->register_lock, flags);
     dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
-    iommu->msi.msi_attrib.masked = 0;
+    iommu->msi.msi_attrib.host_masked = 0;
 }
 
 static void dma_msi_mask(struct irq_desc *desc)
@@ -1009,7 +1009,7 @@ static void dma_msi_mask(struct irq_desc
     spin_lock_irqsave(&iommu->register_lock, flags);
     dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
-    iommu->msi.msi_attrib.masked = 1;
+    iommu->msi.msi_attrib.host_masked = 1;
 }
 
 static unsigned int dma_msi_startup(struct irq_desc *desc)
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -90,12 +90,13 @@ extern unsigned int pci_msix_get_table_l
 
 struct msi_desc {
     struct msi_attrib {
-        __u8    type    : 5;    /* {0: unused, 5h:MSI, 11h:MSI-X} */
-        __u8    maskbit : 1;    /* mask-pending bit supported ?   */
-        __u8    masked  : 1;
+        __u8    type;           /* {0: unused, 5h:MSI, 11h:MSI-X} */
+        __u8    pos;            /* Location of the MSI capability */
+        __u8    maskbit : 1;    /* mask/pending bit supported ?   */
         __u8    is_64   : 1;    /* Address size: 0=32bit 1=64bit  */
-        __u8    pos;            /* Location of the msi capability */
-        __u16   entry_nr;       /* specific enabled entry         */
+        __u8    host_masked : 1;
+        __u8    guest_masked : 1;
+        __u16   entry_nr;       /* specific enabled entry         */
     } msi_attrib;
 
     struct list_head list;
@@ -236,6 +237,7 @@ void msi_compose_msg(unsigned vector, co
 void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable);
 void mask_msi_irq(struct irq_desc *);
 void unmask_msi_irq(struct irq_desc *);
+void guest_mask_msi_irq(struct irq_desc *, bool_t mask);
 void ack_nonmaskable_msi_irq(struct irq_desc *);
 void end_nonmaskable_msi_irq(struct irq_desc *, u8 vector);
 void set_msi_affinity(struct irq_desc *, const cpumask_t *);
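For readers following the idea rather than the diff: the rule the patch establishes is that host and guest masking intentions are recorded separately and the physical mask bit is driven by the OR of the two, so an entry is only really unmasked once both sides want it unmasked. Below is a minimal, self-contained sketch of that rule. It is not Xen code; the struct and function names are made up for illustration only.

/*
 * Illustrative model of the combined masking rule: the (fake) hardware
 * mask bit is host_masked || guest_masked, and each side's intention is
 * remembered so that one side's unmask cannot clobber the other's mask.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_msi_attrib {
    bool host_masked;
    bool guest_masked;
    bool hw_masked;      /* what would be written to the device */
};

/* Record both intentions, apply the OR of them to the hardware bit. */
static void set_mask_bit(struct fake_msi_attrib *a, bool host, bool guest)
{
    a->host_masked = host;
    a->guest_masked = guest;
    a->hw_masked = host || guest;    /* unmasked only if both agree */
}

/* Guest writes its view of the mask bit: preserve the host intention. */
static void guest_mask(struct fake_msi_attrib *a, bool mask)
{
    set_mask_bit(a, a->host_masked, mask);
}

/* Host masks/unmasks: preserve the guest intention. */
static void host_mask(struct fake_msi_attrib *a, bool mask)
{
    set_mask_bit(a, mask, a->guest_masked);
}

int main(void)
{
    struct fake_msi_attrib a = { true, true, true };

    host_mask(&a, true);                     /* host wants the entry masked */
    guest_mask(&a, false);                   /* guest unmasks its view ...  */
    printf("hw masked: %d\n", a.hw_masked);  /* ... hardware stays masked: 1 */

    host_mask(&a, false);                    /* once the host also unmasks  */
    printf("hw masked: %d\n", a.hw_masked);  /* the entry is unmasked: 0    */
    return 0;
}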
Attachment: x86-MSI-X-guest-mask.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel