[Xen-devel] [PATCH 2/7] amd-iommu: re-name u8/16/32/64 to uint8/16/32/64_t
This patch is largely cosmetic. The only non-cosmetic changes are to
re-define the local pde variable as a uint32_t pointer (rather than a
uint64_t pointer) in iommu_merge_pages() and iommu_pde_from_dfn() to allow
the removal of rather excessive amounts of casting.
NOTE: This patch also adds missing emacs boilerplate.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>
---
xen/drivers/passthrough/amd/iommu_map.c | 134 +++++++++++++++++---------------
1 file changed, 71 insertions(+), 63 deletions(-)
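
As an aside on the pde re-definition: the IOMMU helpers program each 64-bit
PDE as two 32-bit register halves (set_iommu_pde_present() takes a uint32_t
pointer), so a uint64_t * local forces a cast at every call site. A minimal
standalone sketch of the pattern follows; pde_write() and the demo_*()
helpers are invented stand-ins for illustration, not Xen code:

    #include <stdint.h>

    /* Stand-in for set_iommu_pde_present(): programs a 64-bit PDE as two
     * 32-bit halves, which is why it takes a uint32_t pointer. */
    static void pde_write(uint32_t *pde, uint32_t lo, uint32_t hi)
    {
        pde[0] = lo;
        pde[1] = hi;
    }

    /* Before: a uint64_t * local needs a cast at every call site. */
    static void demo_before(uint64_t *table, unsigned int idx)
    {
        uint64_t *pde = table + idx;

        pde_write((uint32_t *)pde, 0x3, 0x0);
    }

    /* After: one cast where the pointer is taken, none at the calls. */
    static void demo_after(uint64_t *table, unsigned int idx)
    {
        uint32_t *pde = (uint32_t *)(table + idx);

        pde_write(pde, 0x3, 0x0);
    }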
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 8a10412a07..e4f22c9fc6 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -37,7 +37,7 @@ static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long dfn)
{
- u64 *table, *pte;
+ uint64_t *table, *pte;
table = map_domain_page(_mfn(l1_mfn));
pte = table + pfn_to_pde_idx(dfn, IOMMU_PAGING_MODE_LEVEL_1);
@@ -45,15 +45,15 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long dfn)
unmap_domain_page(table);
}
-static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
+static bool_t set_iommu_pde_present(uint32_t *pde, unsigned long next_mfn,
unsigned int next_level,
bool_t iw, bool_t ir)
{
- u64 addr_lo, addr_hi, maddr_old, maddr_next;
- u32 entry;
+ uint64_t addr_lo, addr_hi, maddr_old, maddr_next;
+ uint32_t entry;
bool_t need_flush = 0;
- maddr_next = (u64)next_mfn << PAGE_SHIFT;
+ maddr_next = (uint64_t)next_mfn << PAGE_SHIFT;
addr_hi = get_field_from_reg_u32(pde[1],
IOMMU_PTE_ADDR_HIGH_MASK,
@@ -71,7 +71,7 @@ static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
addr_hi = maddr_next >> 32;
/* enable read/write permissions,which will be enforced at the PTE */
- set_field_in_reg_u32((u32)addr_hi, 0,
+ set_field_in_reg_u32((uint32_t)addr_hi, 0,
IOMMU_PDE_ADDR_HIGH_MASK,
IOMMU_PDE_ADDR_HIGH_SHIFT, &entry);
set_field_in_reg_u32(iw, entry,
@@ -90,7 +90,7 @@ static bool_t set_iommu_pde_present(u32 *pde, unsigned long next_mfn,
pde[1] = entry;
/* mark next level as 'present' */
- set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+ set_field_in_reg_u32((uint32_t)addr_lo >> PAGE_SHIFT, 0,
IOMMU_PDE_ADDR_LOW_MASK,
IOMMU_PDE_ADDR_LOW_SHIFT, &entry);
set_field_in_reg_u32(next_level, entry,
@@ -108,13 +108,13 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long dfn,
unsigned long next_mfn, int pde_level,
bool_t iw, bool_t ir)
{
- u64 *table;
- u32 *pde;
+ uint64_t *table;
+ uint32_t *pde;
bool_t need_flush = 0;
table = map_domain_page(_mfn(pt_mfn));
- pde = (u32*)(table + pfn_to_pde_idx(dfn, pde_level));
+ pde = (uint32_t *)(table + pfn_to_pde_idx(dfn, pde_level));
need_flush = set_iommu_pde_present(pde, next_mfn,
IOMMU_PAGING_MODE_LEVEL_0, iw, ir);
@@ -123,10 +123,10 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long dfn,
}
void amd_iommu_set_root_page_table(
- u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid)
+ uint32_t *dte, uint64_t root_ptr, uint16_t domain_id, uint8_t paging_mode, uint8_t valid)
{
- u64 addr_hi, addr_lo;
- u32 entry;
+ uint64_t addr_hi, addr_lo;
+ uint32_t entry;
set_field_in_reg_u32(domain_id, 0,
IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
@@ -135,7 +135,7 @@ void amd_iommu_set_root_page_table(
addr_lo = root_ptr & DMA_32BIT_MASK;
addr_hi = root_ptr >> 32;
- set_field_in_reg_u32((u32)addr_hi, 0,
+ set_field_in_reg_u32((uint32_t)addr_hi, 0,
IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
@@ -146,7 +146,7 @@ void amd_iommu_set_root_page_table(
IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
dte[1] = entry;
- set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
+ set_field_in_reg_u32((uint32_t)addr_lo >> PAGE_SHIFT, 0,
IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
set_field_in_reg_u32(paging_mode, entry,
@@ -162,9 +162,9 @@ void amd_iommu_set_root_page_table(
dte[0] = entry;
}
-void iommu_dte_set_iotlb(u32 *dte, u8 i)
+void iommu_dte_set_iotlb(uint32_t *dte, uint8_t i)
{
- u32 entry;
+ uint32_t entry;
entry = dte[3];
set_field_in_reg_u32(!!i, entry,
@@ -174,16 +174,16 @@ void iommu_dte_set_iotlb(u32 *dte, u8 i)
}
void __init amd_iommu_set_intremap_table(
- u32 *dte, u64 intremap_ptr, u8 int_valid)
+ uint32_t *dte, uint64_t intremap_ptr, uint8_t int_valid)
{
- u64 addr_hi, addr_lo;
- u32 entry;
+ uint64_t addr_hi, addr_lo;
+ uint32_t entry;
addr_lo = intremap_ptr & DMA_32BIT_MASK;
addr_hi = intremap_ptr >> 32;
entry = dte[5];
- set_field_in_reg_u32((u32)addr_hi, entry,
+ set_field_in_reg_u32((uint32_t)addr_hi, entry,
IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK,
IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry);
/* Fixed and arbitrated interrupts remapepd */
@@ -192,7 +192,7 @@ void __init amd_iommu_set_intremap_table(
IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry);
dte[5] = entry;
- set_field_in_reg_u32((u32)addr_lo >> 6, 0,
+ set_field_in_reg_u32((uint32_t)addr_lo >> 6, 0,
IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK,
IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT, &entry);
/* 2048 entries */
@@ -211,11 +211,11 @@ void __init amd_iommu_set_intremap_table(
dte[4] = entry;
}
-void __init iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings *ivrs_dev)
+void __init iommu_dte_add_device_entry(uint32_t *dte, struct ivrs_mappings *ivrs_dev)
{
- u32 entry;
- u8 sys_mgt, dev_ex, flags;
- u8 mask = ~(0x7 << 3);
+ uint32_t entry;
+ uint8_t sys_mgt, dev_ex, flags;
+ uint8_t mask = ~(0x7 << 3);
dte[7] = dte[6] = dte[4] = dte[2] = dte[1] = dte[0] = 0;
@@ -238,10 +238,10 @@ void __init iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings *ivrs_dev)
dte[3] = entry;
}
-void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
+void iommu_dte_set_guest_cr3(uint32_t *dte, uint16_t dom_id, uint64_t gcr3,
int gv, unsigned int glx)
{
- u32 entry, gcr3_1, gcr3_2, gcr3_3;
+ uint32_t entry, gcr3_1, gcr3_2, gcr3_3;
gcr3_3 = gcr3 >> 31;
gcr3_2 = (gcr3 >> 15) & 0xFFFF;
@@ -285,9 +285,9 @@ void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
dte[1] = entry;
}
-u64 amd_iommu_get_next_table_from_pte(u32 *entry)
+uint64_t amd_iommu_get_next_table_from_pte(uint32_t *entry)
{
- u64 addr_lo, addr_hi, ptr;
+ uint64_t addr_lo, addr_hi, ptr;
addr_lo = get_field_from_reg_u32(
entry[0],
@@ -306,22 +306,22 @@ u64 amd_iommu_get_next_table_from_pte(u32 *entry)
/* For each pde, We use ignored bits (bit 1 - bit 8 and bit 63)
* to save pde count, pde count = 511 is a candidate of page coalescing.
*/
-static unsigned int get_pde_count(u64 pde)
+static unsigned int get_pde_count(uint64_t pde)
{
unsigned int count;
- u64 upper_mask = 1ULL << 63 ;
- u64 lower_mask = 0xFF << 1;
+ uint64_t upper_mask = 1ULL << 63 ;
+ uint64_t lower_mask = 0xFF << 1;
count = ((pde & upper_mask) >> 55) | ((pde & lower_mask) >> 1);
return count;
}
/* Convert pde count into iommu pte ignored bits */
-static void set_pde_count(u64 *pde, unsigned int count)
+static void set_pde_count(uint64_t *pde, unsigned int count)
{
- u64 upper_mask = 1ULL << 8 ;
- u64 lower_mask = 0xFF;
- u64 pte_mask = (~(1ULL << 63)) & (~(0xFF << 1));
+ uint64_t upper_mask = 1ULL << 8 ;
+ uint64_t lower_mask = 0xFF;
+ uint64_t pte_mask = (~(1ULL << 63)) & (~(0xFF << 1));
*pde &= pte_mask;
*pde |= ((count & upper_mask ) << 55) | ((count & lower_mask ) << 1);
@@ -336,8 +336,8 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
{
unsigned int pde_count, next_level;
unsigned long first_mfn;
- u64 *table, *pde, *ntable;
- u64 ntable_maddr, mask;
+ uint64_t *table, *pde, *ntable;
+ uint64_t ntable_maddr, mask;
struct domain_iommu *hd = dom_iommu(d);
bool_t ok = 0;
@@ -350,11 +350,11 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
pde = table + pfn_to_pde_idx(dfn, merge_level);
/* get page table of next level */
- ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
+ ntable_maddr = amd_iommu_get_next_table_from_pte((uint32_t *)pde);
ntable = map_domain_page(_mfn(paddr_to_pfn(ntable_maddr)));
/* get the first mfn of next level */
- first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
+ first_mfn = amd_iommu_get_next_table_from_pte((uint32_t *)ntable) >> PAGE_SHIFT;
if ( first_mfn == 0 )
goto out;
@@ -390,18 +390,19 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
unsigned long dfn, unsigned int flags,
unsigned int merge_level)
{
- u64 *table, *pde, *ntable;
- u64 ntable_mfn;
+ uint64_t *table, *ntable;
+ uint32_t *pde;
+ uint64_t ntable_mfn;
unsigned long first_mfn;
struct domain_iommu *hd = dom_iommu(d);
ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
table = map_domain_page(_mfn(pt_mfn));
- pde = table + pfn_to_pde_idx(dfn, merge_level);
+ pde = (uint32_t *)(table + pfn_to_pde_idx(dfn, merge_level));
/* get first mfn */
- ntable_mfn = amd_iommu_get_next_table_from_pte((u32*)pde) >> PAGE_SHIFT;
+ ntable_mfn = amd_iommu_get_next_table_from_pte(pde) >> PAGE_SHIFT;
if ( ntable_mfn == 0 )
{
@@ -410,7 +411,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
}
ntable = map_domain_page(_mfn(ntable_mfn));
- first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
+ first_mfn = amd_iommu_get_next_table_from_pte((uint32_t *)ntable) >> PAGE_SHIFT;
if ( first_mfn == 0 )
{
@@ -420,10 +421,8 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
}
/* setup super page mapping, next level = 0 */
- set_iommu_pde_present((u32*)pde, first_mfn,
- IOMMU_PAGING_MODE_LEVEL_0,
- !!(flags & IOMMUF_writable),
- !!(flags & IOMMUF_readable));
+ set_iommu_pde_present(pde, first_mfn, IOMMU_PAGING_MODE_LEVEL_0,
+ !!(flags & IOMMUF_writable), !!(flags & IOMMUF_readable));
amd_iommu_flush_all_pages(d);
@@ -439,7 +438,8 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
unsigned long pt_mfn[])
{
- u64 *pde, *next_table_vaddr;
+ uint64_t *next_table_vaddr;
+ uint32_t *pde;
unsigned long next_table_mfn;
unsigned int level;
struct page_info *table;
@@ -465,16 +465,15 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
pt_mfn[level] = next_table_mfn;
next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
- pde = next_table_vaddr + pfn_to_pde_idx(dfn, level);
+ pde = (uint32_t *)(next_table_vaddr + pfn_to_pde_idx(dfn, level));
/* Here might be a super page frame */
- next_table_mfn = amd_iommu_get_next_table_from_pte((uint32_t*)pde)
- >> PAGE_SHIFT;
+ next_table_mfn =
+ amd_iommu_get_next_table_from_pte(pde) >> PAGE_SHIFT;
/* Split super page frame into smaller pieces.*/
- if ( iommu_is_pte_present((u32*)pde) &&
- (iommu_next_level((u32*)pde) == 0) &&
- next_table_mfn != 0 )
+ if ( iommu_is_pte_present(pde) && !iommu_next_level(pde) &&
+ next_table_mfn )
{
int i;
unsigned long mfn, pfn;
@@ -494,7 +493,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
}
next_table_mfn = mfn_x(page_to_mfn(table));
- set_iommu_pde_present((u32*)pde, next_table_mfn, next_level,
+ set_iommu_pde_present(pde, next_table_mfn, next_level,
!!IOMMUF_writable, !!IOMMUF_readable);
for ( i = 0; i < PTE_PER_TABLE_SIZE; i++ )
@@ -509,7 +508,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
}
/* Install lower level page table for non-present entries */
- else if ( !iommu_is_pte_present((u32*)pde) )
+ else if ( !iommu_is_pte_present(pde) )
{
if ( next_table_mfn == 0 )
{
@@ -521,7 +520,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
return 1;
}
next_table_mfn = mfn_x(page_to_mfn(table));
- set_iommu_pde_present((u32*)pde, next_table_mfn, next_level,
+ set_iommu_pde_present(pde, next_table_mfn, next_level,
!!IOMMUF_writable, !!IOMMUF_readable);
}
else /* should never reach here */
@@ -542,7 +541,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
static int update_paging_mode(struct domain *d, unsigned long dfn)
{
- u16 bdf;
+ uint16_t bdf;
void *device_entry;
unsigned int req_id, level, offset;
unsigned long flags;
@@ -613,7 +612,7 @@ static int update_paging_mode(struct domain *d, unsigned long dfn)
(req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
/* valid = 0 only works for dom0 passthrough mode */
- amd_iommu_set_root_page_table((u32 *)device_entry,
+ amd_iommu_set_root_page_table(device_entry,
page_to_maddr(hd->arch.root_table),
d->domain_id,
hd->arch.paging_mode, 1);
@@ -771,7 +770,7 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
}
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
- u64 phys_addr,
+ uint64_t phys_addr,
unsigned long size, int iw, int ir)
{
unsigned long npages, i;
@@ -816,3 +815,12 @@ void amd_iommu_share_p2m(struct domain *d)
mfn_x(pgd_mfn));
}
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
2.11.0
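
For reference, here is a minimal standalone sketch of the pde-count encoding
that get_pde_count()/set_pde_count() implement, assuming the layout described
in the code comment above (ignored bits 1-8 carry count[7:0], ignored bit 63
carries count[8]); the pde_count_get()/pde_count_set() names are invented for
the illustration:

    #include <assert.h>
    #include <stdint.h>

    /* A 9-bit use count lives in the PDE's ignored bits: bits 1-8 hold
     * count[7:0] and bit 63 holds count[8]. A count of 511 marks the
     * PDE as a candidate for merging into a superpage. */
    static unsigned int pde_count_get(uint64_t pde)
    {
        return ((pde >> 55) & 0x100) | ((pde >> 1) & 0xff);
    }

    static void pde_count_set(uint64_t *pde, unsigned int count)
    {
        /* Clear the ignored bits, then re-encode the count. */
        *pde &= ~((1ULL << 63) | (0xffULL << 1));
        *pde |= ((uint64_t)(count & 0x100) << 55) |
                ((uint64_t)(count & 0xff) << 1);
    }

    int main(void)
    {
        uint64_t pde = 0x0000000000000f03ULL; /* arbitrary PDE contents */

        pde_count_set(&pde, 511);
        assert(pde_count_get(&pde) == 511);
        pde_count_set(&pde, 0);
        assert(pde_count_get(&pde) == 0);
        return 0;
    }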