[Xen-changelog] [xen staging] x86/vtd: Drop struct qi_ctrl
commit 8cfc407fba2b8b924a37c01ebba96d66c48628ab
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Nov 27 14:57:14 2018 +0000
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Sep 5 11:26:26 2019 +0100
x86/vtd: Drop struct qi_ctrl
It is unclear why this abstraction exists, but iommu_qi_ctrl() can return
NULL, and every caller unconditionally dereferences the result. In
practice, I can't spot a path where iommu is NULL, so the NULL check
appears to be dead code.
Move the sole member into struct vtd_iommu, and delete iommu_qi_ctrl().
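For illustration, a minimal self-contained sketch of the pattern being
removed (type layouts reduced to the relevant members; example_caller()
is a hypothetical stand-in for the call sites in qinval.c):

    #include <stdint.h>
    #include <stddef.h>

    struct qi_ctrl {
        uint64_t qinval_maddr;  /* queue invalidation page machine address */
    };

    struct intel_iommu {
        struct qi_ctrl qi_ctrl;
    };

    struct vtd_iommu {
        struct intel_iommu *intel;
    };

    /* Claims to tolerate a NULL iommu ... */
    static inline struct qi_ctrl *iommu_qi_ctrl(struct vtd_iommu *iommu)
    {
        return iommu ? &iommu->intel->qi_ctrl : NULL;
    }

    /* ... but every caller dereferences the result unconditionally, so a
     * NULL iommu would fault here regardless.  The check buys nothing. */
    static uint64_t example_caller(struct vtd_iommu *iommu)
    {
        return iommu_qi_ctrl(iommu)->qinval_maddr;
    }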
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/drivers/passthrough/vtd/iommu.h | 13 +++----------
xen/drivers/passthrough/vtd/qinval.c | 37 +++++++++++++++---------------------
2 files changed, 18 insertions(+), 32 deletions(-)
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index ef8dac02eb..1f6a932e15 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -505,10 +505,6 @@ extern struct list_head acpi_drhd_units;
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;
-struct qi_ctrl {
- u64 qinval_maddr; /* queue invalidation page machine address */
-};
-
struct ir_ctrl {
u64 iremap_maddr; /* interrupt remap table machine address */
int iremap_num; /* total num of used interrupt remap entry */
@@ -526,7 +522,6 @@ struct iommu_flush {
};
struct intel_iommu {
- struct qi_ctrl qi_ctrl;
struct ir_ctrl ir_ctrl;
struct iommu_flush flush;
struct acpi_drhd_unit *drhd;
@@ -545,16 +540,14 @@ struct vtd_iommu {
nodeid_t node;
struct msi_desc msi;
struct intel_iommu *intel;
+
+ uint64_t qinval_maddr; /* queue invalidation page machine address */
+
struct list_head ats_devices;
unsigned long *domid_bitmap; /* domain id bitmap */
u16 *domid_map; /* domain id mapping array */
};
-static inline struct qi_ctrl *iommu_qi_ctrl(struct vtd_iommu *iommu)
-{
- return iommu ? &iommu->intel->qi_ctrl : NULL;
-}
-
static inline struct ir_ctrl *iommu_ir_ctrl(struct vtd_iommu *iommu)
{
return iommu ? &iommu->intel->ir_ctrl : NULL;
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 95df2a58b1..6718d33c58 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -84,7 +84,7 @@ static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ entry_base = iommu->qinval_maddr +
((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
@@ -118,7 +118,7 @@ static int __must_check queue_invalidate_iotlb_sync(struct vtd_iommu *iommu,
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ entry_base = iommu->qinval_maddr +
((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
@@ -157,7 +157,7 @@ static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
spin_lock_irqsave(&iommu->register_lock, flags);
ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
index = qinval_next_index(iommu);
- entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ entry_base = iommu->qinval_maddr +
((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
@@ -202,9 +202,7 @@ static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
static int __must_check invalidate_sync(struct vtd_iommu *iommu)
{
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
-
- ASSERT(qi_ctrl->qinval_maddr);
+ ASSERT(iommu->qinval_maddr);
return queue_invalidate_wait(iommu, 0, 1, 1, 0);
}
@@ -212,10 +210,9 @@ static int __must_check invalidate_sync(struct vtd_iommu *iommu)
static int __must_check dev_invalidate_sync(struct vtd_iommu *iommu,
struct pci_dev *pdev, u16 did)
{
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
int rc;
- ASSERT(qi_ctrl->qinval_maddr);
+ ASSERT(iommu->qinval_maddr);
rc = queue_invalidate_wait(iommu, 0, 1, 1, 1);
if ( rc == -ETIMEDOUT )
{
@@ -249,7 +246,7 @@ int qinval_device_iotlb_sync(struct vtd_iommu *iommu, struct pci_dev *pdev,
ASSERT(pdev);
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ entry_base = iommu->qinval_maddr +
((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
@@ -283,7 +280,7 @@ static int __must_check queue_invalidate_iec_sync(struct vtd_iommu *iommu,
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
- entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ entry_base = iommu->qinval_maddr +
((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
qinval_entries = map_vtd_domain_page(entry_base);
qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
@@ -326,9 +323,8 @@ static int __must_check flush_context_qi(void *_iommu, u16 did,
bool_t flush_non_present_entry)
{
{
struct vtd_iommu *iommu = _iommu;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- ASSERT(qi_ctrl->qinval_maddr);
+ ASSERT(iommu->qinval_maddr);
/*
* In the non-present entry flush case, if hardware doesn't cache
@@ -356,9 +352,8 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
u8 dr = 0, dw = 0;
int ret = 0, rc;
struct vtd_iommu *iommu = _iommu;
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
- ASSERT(qi_ctrl->qinval_maddr);
+ ASSERT(iommu->qinval_maddr);
/*
* In the non-present entry flush case, if hardware doesn't cache
@@ -397,7 +392,6 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
int enable_qinval(struct vtd_iommu *iommu)
{
- struct qi_ctrl *qi_ctrl;
struct iommu_flush *flush;
u32 sts;
unsigned long flags;
@@ -405,19 +399,18 @@ int enable_qinval(struct vtd_iommu *iommu)
if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
return -ENOENT;
- qi_ctrl = iommu_qi_ctrl(iommu);
flush = iommu_get_flush(iommu);
/* Return if already enabled by Xen */
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
- if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr )
+ if ( (sts & DMA_GSTS_QIES) && iommu->qinval_maddr )
return 0;
- if ( qi_ctrl->qinval_maddr == 0 )
+ if ( iommu->qinval_maddr == 0 )
{
- qi_ctrl->qinval_maddr = alloc_pgtable_maddr(QINVAL_ARCH_PAGE_NR,
- iommu->node);
- if ( qi_ctrl->qinval_maddr == 0 )
+ iommu->qinval_maddr = alloc_pgtable_maddr(QINVAL_ARCH_PAGE_NR,
+ iommu->node);
+ if ( iommu->qinval_maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
"Cannot allocate memory for qi_ctrl->qinval_maddr\n");
@@ -438,7 +431,7 @@ int enable_qinval(struct vtd_iommu *iommu)
* to IQA register.
*/
dmar_writeq(iommu->reg, DMAR_IQA_REG,
- qi_ctrl->qinval_maddr | QINVAL_PAGE_ORDER);
+ iommu->qinval_maddr | QINVAL_PAGE_ORDER);
dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
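As an aside, the entry_base/index arithmetic repeated in the hunks above
locates a descriptor within the multi-page invalidation queue. A sketch of
the computation, assuming 4K pages and 16-byte descriptors (so
QINVAL_ENTRY_ORDER == PAGE_SHIFT - 4, i.e. 256 entries per page -- an
assumption about the surrounding definitions, which this patch does not
show):

    #include <stdint.h>

    #define PAGE_SHIFT         12                /* assumed: 4K pages */
    #define QINVAL_ENTRY_ORDER (PAGE_SHIFT - 4)  /* assumed: 16-byte entries */

    /* Machine address of the queue page containing entry 'index'. */
    static uint64_t qinval_entry_page(uint64_t qinval_maddr, unsigned int index)
    {
        return qinval_maddr +
               ((uint64_t)(index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
    }

    /* Slot of entry 'index' within that page. */
    static unsigned int qinval_entry_slot(unsigned int index)
    {
        return index % (1u << QINVAL_ENTRY_ORDER);
    }

The selected page is then mapped with map_vtd_domain_page() and the entry
indexed within it, as the unchanged context lines above show.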
--
generated by git-patchbot for /home/xen/git/xen.git#staging
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog