[Xen-changelog] [xen staging] x86/vtd: Drop struct iommu_flush
commit 488eccdd568bb781b989d9c897045e3124339d6b
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Nov 27 15:06:15 2018 +0000
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Sep 5 11:44:59 2019 +0100
x86/vtd: Drop struct iommu_flush

It is unclear why this abstraction exists, but iommu_get_flush() can return
NULL and every caller unconditionally dereferences the result. In practice,
I can't spot a path where iommu is NULL, so the NULL handling appears to be
dead code.

Move the two function pointers into struct vtd_iommu (grouped in a nested
flush structure) and delete iommu_get_flush(). Furthermore, there is no need
to pass the IOMMU pointer to the callbacks via a void pointer, so change the
parameter to be correctly typed as struct vtd_iommu *. Clean up bool_t to
bool in the surrounding context.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/drivers/passthrough/vtd/iommu.c | 62 ++++++++++++++++--------------------
xen/drivers/passthrough/vtd/iommu.h | 26 ++++++---------
xen/drivers/passthrough/vtd/qinval.c | 21 +++++-------
3 files changed, 46 insertions(+), 63 deletions(-)
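
For orientation, below is a minimal sketch of the interface change described
above, condensed from the iommu.h hunks that follow. The structure and callback
names are taken from the patch; the typedefs are stand-ins for Xen's own
fixed-width types, and the __must_check annotations plus unrelated fields are
omitted so the sketch compiles on its own.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for Xen's fixed-width typedefs, only so this sketch compiles. */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint64_t u64;
typedef bool     bool_t;

struct vtd_iommu;

/* Before: the callbacks live in a separate struct iommu_flush, are reached
 * through iommu_get_flush(), and receive the IOMMU as an untyped void *. */
struct iommu_flush {
    int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask,
                   u64 type, bool_t non_present_entry_flush);
    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
                 u64 type, bool_t flush_non_present_entry,
                 bool_t flush_dev_iotlb);
};
/* Old call pattern: iommu_get_flush(iommu)->context(iommu, ...); */

/* After: the callbacks are embedded directly in struct vtd_iommu as a nested
 * "flush" member and take a correctly typed struct vtd_iommu pointer. */
struct vtd_iommu {
    /* ... other fields elided ... */
    struct {
        int (*context)(struct vtd_iommu *iommu, u16 did, u16 source_id,
                       u8 function_mask, u64 type,
                       bool non_present_entry_flush);
        int (*iotlb)(struct vtd_iommu *iommu, u16 did, u64 addr,
                     unsigned int size_order, u64 type,
                     bool flush_non_present_entry, bool flush_dev_iotlb);
    } flush;
};
/* New call pattern: iommu->flush.context(iommu, ...); */

The payoff is visible throughout the diff: callers no longer go through an
accessor that can in principle return NULL, and the compiler checks the IOMMU
argument's type instead of accepting any void pointer.
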
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 547867686d..2fa0e743e2 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -315,11 +315,11 @@ static void iommu_flush_write_buffer(struct vtd_iommu *iommu)
}
/* return value determine if we need a write buffer flush */
-static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id,
- u8 function_mask, u64 type,
- bool_t flush_non_present_entry)
+static int __must_check flush_context_reg(struct vtd_iommu *iommu, u16 did,
+ u16 source_id, u8 function_mask,
+ u64 type,
+ bool flush_non_present_entry)
{
- struct vtd_iommu *iommu = _iommu;
u64 val = 0;
unsigned long flags;
@@ -368,31 +368,28 @@ static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id,
}
static int __must_check iommu_flush_context_global(struct vtd_iommu *iommu,
- bool_t flush_non_present_entry)
+ bool flush_non_present_entry)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- flush_non_present_entry);
+ return iommu->flush.context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+ flush_non_present_entry);
}
static int __must_check iommu_flush_context_device(struct vtd_iommu *iommu,
u16 did, u16 source_id,
u8 function_mask,
- bool_t flush_non_present_entry)
+ bool flush_non_present_entry)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, did, source_id, function_mask,
- DMA_CCMD_DEVICE_INVL,
- flush_non_present_entry);
+ return iommu->flush.context(iommu, did, source_id, function_mask,
+ DMA_CCMD_DEVICE_INVL, flush_non_present_entry);
}
/* return value determine if we need a write buffer flush */
-static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr,
+static int __must_check flush_iotlb_reg(struct vtd_iommu *iommu, u16 did,
+ u64 addr,
unsigned int size_order, u64 type,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb)
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
- struct vtd_iommu *iommu = _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0;
unsigned long flags;
@@ -455,17 +452,16 @@ static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr,
}
static int __must_check iommu_flush_iotlb_global(struct vtd_iommu *iommu,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb)
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
- status = flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ status = iommu->flush.iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
@@ -477,14 +473,13 @@ static int __must_check iommu_flush_iotlb_dsi(struct vtd_iommu *iommu, u16 did,
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
- status = flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ status = iommu->flush.iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
@@ -497,18 +492,19 @@ static int __must_check iommu_flush_iotlb_psi(struct vtd_iommu *iommu, u16 did,
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
ASSERT(!(addr & (~PAGE_MASK_4K)));
/* Fallback to domain selective flush if no PSI support */
if ( !cap_pgsel_inv(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+ flush_dev_iotlb);
/* Fallback to domain selective flush if size is too big */
if ( order > cap_max_amask_val(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+ flush_dev_iotlb);
addr >>= PAGE_SHIFT_4K + order;
addr <<= PAGE_SHIFT_4K + order;
@@ -516,8 +512,8 @@ static int __must_check iommu_flush_iotlb_psi(struct vtd_iommu *iommu, u16 did,
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
- status = flush->iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ status = iommu->flush.iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
@@ -2154,7 +2150,6 @@ static int __must_check init_vtd_hw(void)
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
- struct iommu_flush *flush = NULL;
int ret;
unsigned long flags;
u32 sts;
@@ -2189,9 +2184,8 @@ static int __must_check init_vtd_hw(void)
*/
if ( enable_qinval(iommu) != 0 )
{
- flush = iommu_get_flush(iommu);
- flush->context = flush_context_reg;
- flush->iotlb = flush_iotlb_reg;
+ iommu->flush.context = flush_context_reg;
+ iommu->flush.iotlb = flush_iotlb_reg;
}
}
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index b78cbf7a6c..ae2369ca55 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -505,18 +505,7 @@ extern struct list_head acpi_drhd_units;
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;
-struct iommu_flush {
- int __must_check (*context)(void *iommu, u16 did, u16 source_id,
- u8 function_mask, u64 type,
- bool_t non_present_entry_flush);
- int __must_check (*iotlb)(void *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb);
-};
-
struct intel_iommu {
- struct iommu_flush flush;
struct acpi_drhd_unit *drhd;
};
@@ -542,16 +531,21 @@ struct vtd_iommu {
spinlock_t lock; /* lock for irq remapping table */
} intremap;
+ struct {
+ int __must_check (*context)(struct vtd_iommu *iommu, u16 did,
+ u16 source_id, u8 function_mask, u64 type,
+ bool non_present_entry_flush);
+ int __must_check (*iotlb)(struct vtd_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb);
+ } flush;
+
struct list_head ats_devices;
unsigned long *domid_bitmap; /* domain id bitmap */
u16 *domid_map; /* domain id mapping array */
};
-static inline struct iommu_flush *iommu_get_flush(struct vtd_iommu *iommu)
-{
- return iommu ? &iommu->intel->flush : NULL;
-}
-
#define INTEL_IOMMU_DEBUG(fmt, args...) \
do \
{ \
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 6718d33c58..39b7abd009 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -318,12 +318,10 @@ int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx)
return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
}
-static int __must_check flush_context_qi(void *_iommu, u16 did,
+static int __must_check flush_context_qi(struct vtd_iommu *iommu, u16 did,
u16 sid, u8 fm, u64 type,
- bool_t flush_non_present_entry)
+ bool flush_non_present_entry)
{
- struct vtd_iommu *iommu = _iommu;
-
ASSERT(iommu->qinval_maddr);
/*
@@ -344,14 +342,14 @@ static int __must_check flush_context_qi(void *_iommu, u16 did,
type >> DMA_CCMD_INVL_GRANU_OFFSET);
}
-static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
+static int __must_check flush_iotlb_qi(struct vtd_iommu *iommu, u16 did,
+ u64 addr,
unsigned int size_order, u64 type,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb)
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
u8 dr = 0, dw = 0;
int ret = 0, rc;
- struct vtd_iommu *iommu = _iommu;
ASSERT(iommu->qinval_maddr);
@@ -392,15 +390,12 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
int enable_qinval(struct vtd_iommu *iommu)
{
- struct iommu_flush *flush;
u32 sts;
unsigned long flags;
if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
return -ENOENT;
- flush = iommu_get_flush(iommu);
-
/* Return if already enabled by Xen */
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
if ( (sts & DMA_GSTS_QIES) && iommu->qinval_maddr )
@@ -418,8 +413,8 @@ int enable_qinval(struct vtd_iommu *iommu)
}
}
- flush->context = flush_context_qi;
- flush->iotlb = flush_iotlb_qi;
+ iommu->flush.context = flush_context_qi;
+ iommu->flush.iotlb = flush_iotlb_qi;
spin_lock_irqsave(&iommu->register_lock, flags);
--
generated by git-patchbot for /home/xen/git/xen.git#staging
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog