[Xen-changelog] [xen staging] x86/vtd: Drop struct ir_ctrl
commit 1cc8d6fbf48ac7226f34e16ca07d06e25acb222b
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Nov 27 15:02:18 2018 +0000
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Sep 5 11:26:26 2019 +0100
x86/vtd: Drop struct ir_ctrl
It is unclear why this abstraction exists, but iommu_ir_ctrl() can return
NULL, and every caller dereferences the result unconditionally. In practice,
I can't spot a path where iommu is NULL, so the NULL handling appears to be
dead code.
Move the fields into struct vtd_iommu, and delete iommu_ir_ctrl().
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/drivers/passthrough/vtd/intremap.c | 85 ++++++++++++++++------------------
xen/drivers/passthrough/vtd/iommu.c | 3 +-
xen/drivers/passthrough/vtd/iommu.h | 18 +++----
xen/drivers/passthrough/vtd/utils.c | 13 +++---
4 files changed, 53 insertions(+), 66 deletions(-)
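For readers skimming the patch, a minimal before/after sketch of the pattern being removed, condensed from the hunks below. The lock_intremap_old()/lock_intremap_new() wrappers are hypothetical and exist only for illustration; the accessor and the lock calls are taken from the diff itself.

/* Before: the accessor can return NULL, yet callers never check. */
static inline struct ir_ctrl *iommu_ir_ctrl(struct vtd_iommu *iommu)
{
    return iommu ? &iommu->intel->ir_ctrl : NULL;
}

static void lock_intremap_old(struct vtd_iommu *iommu)
{
    unsigned long flags;

    /* Unconditional dereference of a possibly-NULL pointer. */
    spin_lock_irqsave(&iommu_ir_ctrl(iommu)->iremap_lock, flags);
    spin_unlock_irqrestore(&iommu_ir_ctrl(iommu)->iremap_lock, flags);
}

/* After: the fields live directly in struct vtd_iommu. */
static void lock_intremap_new(struct vtd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->intremap.lock, flags);
    spin_unlock_irqrestore(&iommu->intremap.lock, flags);
}

Embedding the fields in struct vtd_iommu removes the only NULL-returning accessor for this state, which is why the never-exercised NULL path can simply disappear.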
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index e75344f696..bf846195c4 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -179,7 +179,7 @@ bool __init intel_iommu_supports_eim(void)
static void update_irte(struct vtd_iommu *iommu, struct iremap_entry *entry,
const struct iremap_entry *new_ire, bool atomic)
{
- ASSERT(spin_is_locked(&iommu_ir_ctrl(iommu)->iremap_lock));
+ ASSERT(spin_is_locked(&iommu->intremap.lock));
if ( cpu_has_cx16 )
{
@@ -220,14 +220,13 @@ static void update_irte(struct vtd_iommu *iommu, struct iremap_entry *entry,
static void free_remap_entry(struct vtd_iommu *iommu, int index)
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries, new_ire = { };
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
return;
- ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );
+ ASSERT(spin_is_locked(&iommu->intremap.lock));
- GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
+ GET_IREMAP_ENTRY(iommu->intremap.maddr, index,
iremap_entries, iremap_entry);
update_irte(iommu, iremap_entry, &new_ire, false);
@@ -235,7 +234,7 @@ static void free_remap_entry(struct vtd_iommu *iommu, int index)
iommu_flush_iec_index(iommu, 0, index);
unmap_vtd_domain_page(iremap_entries);
- ir_ctrl->iremap_num--;
+ iommu->intremap.num--;
}
/*
@@ -245,10 +244,9 @@ static void free_remap_entry(struct vtd_iommu *iommu, int index)
static unsigned int alloc_remap_entry(struct vtd_iommu *iommu, unsigned int nr)
{
struct iremap_entry *iremap_entries = NULL;
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
unsigned int i, found;
- ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );
+ ASSERT(spin_is_locked(&iommu->intremap.lock));
for ( found = i = 0; i < IREMAP_ENTRY_NR; i++ )
{
@@ -259,7 +257,7 @@ static unsigned int alloc_remap_entry(struct vtd_iommu *iommu, unsigned int nr)
if ( iremap_entries )
unmap_vtd_domain_page(iremap_entries);
- GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
+ GET_IREMAP_ENTRY(iommu->intremap.maddr, i,
iremap_entries, p);
}
else
@@ -274,8 +272,9 @@ static unsigned int alloc_remap_entry(struct vtd_iommu *iommu, unsigned int nr)
if ( iremap_entries )
unmap_vtd_domain_page(iremap_entries);
- if ( i < IREMAP_ENTRY_NR )
- ir_ctrl->iremap_num += nr;
+ if ( i < IREMAP_ENTRY_NR )
+ iommu->intremap.num += nr;
+
return i;
}
@@ -284,7 +283,6 @@ static int remap_entry_to_ioapic_rte(
{
struct iremap_entry *iremap_entry = NULL, *iremap_entries;
unsigned long flags;
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
{
@@ -294,9 +292,9 @@ static int remap_entry_to_ioapic_rte(
return -EFAULT;
}
- spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
+ spin_lock_irqsave(&iommu->intremap.lock, flags);
- GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
+ GET_IREMAP_ENTRY(iommu->intremap.maddr, index,
iremap_entries, iremap_entry);
if ( iremap_entry->val == 0 )
@@ -305,7 +303,7 @@ static int remap_entry_to_ioapic_rte(
"IO-APIC index (%d) has an empty entry\n",
index);
unmap_vtd_domain_page(iremap_entries);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return -EFAULT;
}
@@ -323,7 +321,8 @@ static int remap_entry_to_ioapic_rte(
}
unmap_vtd_domain_page(iremap_entries);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
+
return 0;
}
@@ -337,11 +336,10 @@ static int ioapic_rte_to_remap_entry(struct vtd_iommu *iommu,
struct IO_xAPIC_route_entry new_rte;
int index;
unsigned long flags;
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
bool init = false;
remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
- spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
+ spin_lock_irqsave(&iommu->intremap.lock, flags);
index = apic_pin_2_ir_idx[apic][ioapic_pin];
if ( index < 0 )
@@ -357,11 +355,11 @@ static int ioapic_rte_to_remap_entry(struct vtd_iommu *iommu,
dprintk(XENLOG_ERR VTDPREFIX,
"IO-APIC intremap index (%d) larger than maximum index (%d)\n",
index, IREMAP_ENTRY_NR - 1);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return -EFAULT;
}
- GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
+ GET_IREMAP_ENTRY(iommu->intremap.maddr, index,
iremap_entries, iremap_entry);
new_ire = *iremap_entry;
@@ -412,7 +410,7 @@ static int ioapic_rte_to_remap_entry(struct vtd_iommu *iommu,
iommu_flush_iec_index(iommu, 0, index);
unmap_vtd_domain_page(iremap_entries);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return 0;
}
@@ -424,9 +422,8 @@ unsigned int io_apic_read_remap_rte(
struct IO_xAPIC_route_entry old_rte = { 0 };
int rte_upper = (reg & 1) ? 1 : 0;
struct vtd_iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
- if ( !ir_ctrl->iremap_num ||
+ if ( !iommu->intremap.num ||
( (index = apic_pin_2_ir_idx[apic][ioapic_pin]) < 0 ) )
return __io_apic_read(apic, reg);
@@ -544,7 +541,6 @@ static int remap_entry_to_msi_msg(
struct iremap_entry *iremap_entry = NULL, *iremap_entries;
struct msi_msg_remap_entry *remap_rte;
unsigned long flags;
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
remap_rte = (struct msi_msg_remap_entry *) msg;
index += (remap_rte->address_lo.index_15 << 15) |
@@ -558,9 +554,9 @@ static int remap_entry_to_msi_msg(
return -EFAULT;
}
- spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
+ spin_lock_irqsave(&iommu->intremap.lock, flags);
- GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
+ GET_IREMAP_ENTRY(iommu->intremap.maddr, index,
iremap_entries, iremap_entry);
if ( iremap_entry->val == 0 )
@@ -569,7 +565,7 @@ static int remap_entry_to_msi_msg(
"MSI index (%d) has an empty entry\n",
index);
unmap_vtd_domain_page(iremap_entries);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return -EFAULT;
}
@@ -597,7 +593,7 @@ static int remap_entry_to_msi_msg(
iremap_entry->remap.vector;
unmap_vtd_domain_page(iremap_entries);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return 0;
}
@@ -609,13 +605,12 @@ static int msi_msg_to_remap_entry(
struct msi_msg_remap_entry *remap_rte;
unsigned int index, i, nr = 1;
unsigned long flags;
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
const struct pi_desc *pi_desc = msi_desc->pi_desc;
if ( msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
nr = msi_desc->msi.nvec;
- spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
+ spin_lock_irqsave(&iommu->intremap.lock, flags);
if ( msg == NULL )
{
@@ -625,7 +620,7 @@ static int msi_msg_to_remap_entry(
free_remap_entry(iommu, msi_desc->remap_index + i);
msi_desc[i].irte_initialized = false;
}
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return 0;
}
@@ -645,11 +640,12 @@ static int msi_msg_to_remap_entry(
index, IREMAP_ENTRY_NR - 1);
for ( i = 0; i < nr; ++i )
msi_desc[i].remap_index = -1;
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
+
return -EFAULT;
}
- GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
+ GET_IREMAP_ENTRY(iommu->intremap.maddr, index,
iremap_entries, iremap_entry);
if ( !pi_desc )
@@ -703,7 +699,8 @@ static int msi_msg_to_remap_entry(
iommu_flush_iec_index(iommu, 0, index);
unmap_vtd_domain_page(iremap_entries);
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
+
return 0;
}
@@ -736,14 +733,13 @@ int msi_msg_write_remap_rte(
int __init intel_setup_hpet_msi(struct msi_desc *msi_desc)
{
struct vtd_iommu *iommu = hpet_to_iommu(msi_desc->hpet_id);
- struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
unsigned long flags;
int rc = 0;
- if ( !ir_ctrl || !ir_ctrl->iremap_maddr )
+ if ( !iommu->intremap.maddr )
return 0;
- spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
+ spin_lock_irqsave(&iommu->intremap.lock, flags);
msi_desc->remap_index = alloc_remap_entry(iommu, 1);
if ( msi_desc->remap_index >= IREMAP_ENTRY_NR )
{
@@ -753,14 +749,13 @@ int __init intel_setup_hpet_msi(struct msi_desc *msi_desc)
msi_desc->remap_index = -1;
rc = -ENXIO;
}
- spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
+ spin_unlock_irqrestore(&iommu->intremap.lock, flags);
return rc;
}
int enable_intremap(struct vtd_iommu *iommu, int eim)
{
- struct ir_ctrl *ir_ctrl;
u32 sts, gcmd;
unsigned long flags;
@@ -773,11 +768,10 @@ int enable_intremap(struct vtd_iommu *iommu, int eim)
return -EINVAL;
}
- ir_ctrl = iommu_ir_ctrl(iommu);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
/* Return if already enabled by Xen */
- if ( (sts & DMA_GSTS_IRES) && ir_ctrl->iremap_maddr )
+ if ( (sts & DMA_GSTS_IRES) && iommu->intremap.maddr )
return 0;
if ( !(sts & DMA_GSTS_QIES) )
@@ -793,17 +787,18 @@ int enable_intremap(struct vtd_iommu *iommu, int eim)
" Compatibility Format Interrupts permitted on IOMMU #%u:"
" Device pass-through will be insecure\n", iommu->index);
- if ( ir_ctrl->iremap_maddr == 0 )
+ if ( iommu->intremap.maddr == 0 )
{
- ir_ctrl->iremap_maddr = alloc_pgtable_maddr(IREMAP_ARCH_PAGE_NR,
+ iommu->intremap.maddr = alloc_pgtable_maddr(IREMAP_ARCH_PAGE_NR,
iommu->node);
- if ( ir_ctrl->iremap_maddr == 0 )
+ if ( iommu->intremap.maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
"Cannot allocate memory for ir_ctrl->iremap_maddr\n");
return -ENOMEM;
}
- ir_ctrl->iremap_num = 0;
+
+ iommu->intremap.num = 0;
}
spin_lock_irqsave(&iommu->register_lock, flags);
@@ -813,7 +808,7 @@ int enable_intremap(struct vtd_iommu *iommu, int eim)
* Interrupt Mode.
*/
dmar_writeq(iommu->reg, DMAR_IRTA_REG,
- ir_ctrl->iremap_maddr | IRTA_REG_TABLE_SIZE |
+ iommu->intremap.maddr | IRTA_REG_TABLE_SIZE |
(eim ? IRTA_EIME : 0));
/* set SIRTP */
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index d7e04fc724..547867686d 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -147,8 +147,6 @@ static struct intel_iommu *__init alloc_intel_iommu(void)
if ( intel == NULL )
return NULL;
- spin_lock_init(&intel->ir_ctrl.iremap_lock);
-
return intel;
}
@@ -1166,6 +1164,7 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd)
iommu->msi.irq = -1; /* No irq assigned yet. */
iommu->node = NUMA_NO_NODE;
INIT_LIST_HEAD(&iommu->ats_devices);
+ spin_lock_init(&iommu->intremap.lock);
iommu->intel = alloc_intel_iommu();
if ( iommu->intel == NULL )
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 1f6a932e15..b78cbf7a6c 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -505,12 +505,6 @@ extern struct list_head acpi_drhd_units;
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;
-struct ir_ctrl {
- u64 iremap_maddr; /* interrupt remap table machine address */
- int iremap_num; /* total num of used interrupt remap entry */
- spinlock_t iremap_lock; /* lock for irq remapping table */
-};
-
struct iommu_flush {
int __must_check (*context)(void *iommu, u16 did, u16 source_id,
u8 function_mask, u64 type,
@@ -522,7 +516,6 @@ struct iommu_flush {
};
struct intel_iommu {
- struct ir_ctrl ir_ctrl;
struct iommu_flush flush;
struct acpi_drhd_unit *drhd;
};
@@ -543,16 +536,17 @@ struct vtd_iommu {
uint64_t qinval_maddr; /* queue invalidation page machine address */
+ struct {
+ uint64_t maddr; /* interrupt remap table machine address */
+ unsigned int num; /* total num of used interrupt remap entry */
+ spinlock_t lock; /* lock for irq remapping table */
+ } intremap;
+
struct list_head ats_devices;
unsigned long *domid_bitmap; /* domain id bitmap */
u16 *domid_map; /* domain id mapping array */
};
-static inline struct ir_ctrl *iommu_ir_ctrl(struct vtd_iommu *iommu)
-{
- return iommu ? &iommu->intel->ir_ctrl : NULL;
-}
-
static inline struct iommu_flush *iommu_get_flush(struct vtd_iommu *iommu)
{
return iommu ? &iommu->intel->flush : NULL;
diff --git a/xen/drivers/passthrough/vtd/utils.c b/xen/drivers/passthrough/vtd/utils.c
index 705e51b77b..9fdbd52ec1 100644
--- a/xen/drivers/passthrough/vtd/utils.c
+++ b/xen/drivers/passthrough/vtd/utils.c
@@ -208,7 +208,7 @@ void vtd_dump_iommu_info(unsigned char key)
uint64_t iremap_maddr = irta & PAGE_MASK;
unsigned int nr_entry = 1 << ((irta & 0xF) + 1);
struct iremap_entry *iremap_entries = NULL;
- int print_cnt = 0;
+ unsigned int print_cnt = 0;
printk(" Interrupt remapping table (nr_entry=%#x. "
"Only dump P=1 entries here):\n", nr_entry);
@@ -251,9 +251,9 @@ void vtd_dump_iommu_info(unsigned char key)
}
if ( iremap_entries )
unmap_vtd_domain_page(iremap_entries);
- if ( iommu_ir_ctrl(iommu)->iremap_num != print_cnt )
- printk("Warning: Print %d IRTE (actually have %d)!\n",
- print_cnt, iommu_ir_ctrl(iommu)->iremap_num);
+ if ( iommu->intremap.num != print_cnt )
+ printk("Warning: Print %u IRTE (actually have %u)!\n",
+ print_cnt, iommu->intremap.num);
}
}
@@ -264,13 +264,12 @@ void vtd_dump_iommu_info(unsigned char key)
int apic;
union IO_APIC_reg_01 reg_01;
struct IO_APIC_route_remap_entry *remap;
- struct ir_ctrl *ir_ctrl;
for ( apic = 0; apic < nr_ioapics; apic++ )
{
iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
- ir_ctrl = iommu_ir_ctrl(iommu);
- if ( !ir_ctrl || !ir_ctrl->iremap_maddr || !ir_ctrl->iremap_num )
+
+ if ( !iommu->intremap.maddr || !iommu->intremap.num )
continue;
printk( "\nRedirection table of IOAPIC %x:\n", apic);
--
generated by git-patchbot for /home/xen/git/xen.git#staging
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog