[Xen-devel] [PATCH v5 01/10] AMD/IOMMU: miscellaneous DTE handling adjustments
First and foremost switch boolean fields to bool. Adjust a few related
function parameters as well. Then
- in amd_iommu_set_intremap_table() don't use literal numbers,
- in iommu_dte_add_device_entry() use a compound literal instead of many
  assignments,
- in amd_iommu_setup_domain_device()
  - eliminate a pointless local variable,
  - use || instead of && when deciding whether to clear an entry,
  - clear the I field without any checking of ATS / IOTLB state,
- leave reserved fields unnamed.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v5: IOMMU_INTREMAP_LENGTH -> IOMMU_INTREMAP_ORDER. Adjust comment.
v4: New.

--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -69,8 +69,7 @@ union irte_cptr {
     const union irte128 *ptr128;
 } __transparent__;
 
-#define INTREMAP_LENGTH 0xB
-#define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)
+#define INTREMAP_ENTRIES (1 << IOMMU_INTREMAP_ORDER)
 
 struct ioapic_sbdf ioapic_sbdf[MAX_IO_APICS];
 struct hpet_sbdf hpet_sbdf;
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -101,51 +101,52 @@ static unsigned int set_iommu_pte_presen
 
 void amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
                                    uint64_t root_ptr, uint16_t domain_id,
-                                   uint8_t paging_mode, uint8_t valid)
+                                   uint8_t paging_mode, bool valid)
 {
     dte->domain_id = domain_id;
     dte->pt_root = paddr_to_pfn(root_ptr);
-    dte->iw = 1;
-    dte->ir = 1;
+    dte->iw = true;
+    dte->ir = true;
     dte->paging_mode = paging_mode;
-    dte->tv = 1;
+    dte->tv = true;
     dte->v = valid;
 }
 
 void __init amd_iommu_set_intremap_table(
-    struct amd_iommu_dte *dte, uint64_t intremap_ptr, uint8_t int_valid)
+    struct amd_iommu_dte *dte, uint64_t intremap_ptr, bool valid)
 {
     dte->it_root = intremap_ptr >> 6;
-    dte->int_tab_len = 0xb; /* 2048 entries */
-    dte->int_ctl = 2; /* fixed and arbitrated interrupts remapped */
-    dte->ig = 0; /* unmapped interrupt results io page faults */
-    dte->iv = int_valid;
+    dte->int_tab_len = IOMMU_INTREMAP_ORDER;
+    dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED;
+    dte->ig = false; /* unmapped interrupts result in i/o page faults */
+    dte->iv = valid;
 }
 
 void __init iommu_dte_add_device_entry(struct amd_iommu_dte *dte,
-                                       struct ivrs_mappings *ivrs_dev)
+                                       const struct ivrs_mappings *ivrs_dev)
 {
     uint8_t flags = ivrs_dev->device_flags;
 
-    memset(dte, 0, sizeof(*dte));
-
-    dte->init_pass = MASK_EXTR(flags, ACPI_IVHD_INIT_PASS);
-    dte->ext_int_pass = MASK_EXTR(flags, ACPI_IVHD_EINT_PASS);
-    dte->nmi_pass = MASK_EXTR(flags, ACPI_IVHD_NMI_PASS);
-    dte->lint0_pass = MASK_EXTR(flags, ACPI_IVHD_LINT0_PASS);
-    dte->lint1_pass = MASK_EXTR(flags, ACPI_IVHD_LINT1_PASS);
-    dte->sys_mgt = MASK_EXTR(flags, ACPI_IVHD_SYSTEM_MGMT);
-    dte->ex = ivrs_dev->dte_allow_exclusion;
+    *dte = (struct amd_iommu_dte){
+        .init_pass = flags & ACPI_IVHD_INIT_PASS,
+        .ext_int_pass = flags & ACPI_IVHD_EINT_PASS,
+        .nmi_pass = flags & ACPI_IVHD_NMI_PASS,
+        .lint0_pass = flags & ACPI_IVHD_LINT0_PASS,
+        .lint1_pass = flags & ACPI_IVHD_LINT1_PASS,
+        .ioctl = IOMMU_DEV_TABLE_IO_CONTROL_ABORTED,
+        .sys_mgt = MASK_EXTR(flags, ACPI_IVHD_SYSTEM_MGMT),
+        .ex = ivrs_dev->dte_allow_exclusion,
+    };
 }
 
 void iommu_dte_set_guest_cr3(struct amd_iommu_dte *dte, uint16_t dom_id,
-                             uint64_t gcr3_mfn, uint8_t gv, uint8_t glx)
+                             uint64_t gcr3_mfn, bool gv, uint8_t glx)
 {
 #define GCR3_MASK(hi, lo) (((1ul << ((hi) + 1)) - 1) & ~((1ul << (lo)) - 1))
 #define GCR3_SHIFT(lo) ((lo) - PAGE_SHIFT)
 
     /* I bit must be set when gcr3 is enabled */
-    dte->i = 1;
+    dte->i = true;
 
     dte->gcr3_trp_14_12 = (gcr3_mfn & GCR3_MASK(14, 12)) >> GCR3_SHIFT(12);
     dte->gcr3_trp_30_15 = (gcr3_mfn & GCR3_MASK(30, 15)) >> GCR3_SHIFT(15);
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -93,7 +93,6 @@ static void amd_iommu_setup_domain_devic
     struct amd_iommu_dte *table, *dte;
     unsigned long flags;
     int req_id, valid = 1;
-    int dte_i = 0;
    u8 bus = pdev->bus;
     const struct domain_iommu *hd = dom_iommu(domain);
 
@@ -103,9 +102,6 @@ static void amd_iommu_setup_domain_devic
     if ( iommu_hwdom_passthrough && is_hardware_domain(domain) )
         valid = 0;
 
-    if ( ats_enabled )
-        dte_i = 1;
-
     /* get device-table entry */
     req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
     table = iommu->dev_table.buffer;
@@ -122,7 +118,7 @@ static void amd_iommu_setup_domain_devic
 
         if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
-            dte->i = dte_i;
+            dte->i = ats_enabled;
 
        amd_iommu_flush_device(iommu, req_id);
 
@@ -288,14 +284,11 @@ void amd_iommu_disable_domain_device(str
    dte = &table[req_id];
 
    spin_lock_irqsave(&iommu->lock, flags);
-    if ( dte->tv && dte->v )
+    if ( dte->tv || dte->v )
    {
-        dte->tv = 0;
-        dte->v = 0;
-
-        if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
-             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
-            dte->i = 0;
+        dte->tv = false;
+        dte->v = false;
+        dte->i = false;
 
        amd_iommu_flush_device(iommu, req_id);
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
@@ -107,57 +107,60 @@
 #define IOMMU_DEV_TABLE_INT_CONTROL_FORWARDED 0x1
 #define IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED 0x2
 
+/* For now, we always allocate the maximum: 2048 entries. */
+#define IOMMU_INTREMAP_ORDER 0xB
+
 struct amd_iommu_dte {
     /* 0 - 63 */
-    uint64_t v:1;
-    uint64_t tv:1;
-    uint64_t reserved0:5;
-    uint64_t had:2;
-    uint64_t paging_mode:3;
+    bool v:1;
+    bool tv:1;
+    unsigned int :5;
+    unsigned int had:2;
+    unsigned int paging_mode:3;
     uint64_t pt_root:40;
-    uint64_t ppr:1;
-    uint64_t gprp:1;
-    uint64_t giov:1;
-    uint64_t gv:1;
-    uint64_t glx:2;
-    uint64_t gcr3_trp_14_12:3;
-    uint64_t ir:1;
-    uint64_t iw:1;
-    uint64_t reserved1:1;
+    bool ppr:1;
+    bool gprp:1;
+    bool giov:1;
+    bool gv:1;
+    unsigned int glx:2;
+    unsigned int gcr3_trp_14_12:3;
+    bool ir:1;
+    bool iw:1;
+    unsigned int :1;
 
     /* 64 - 127 */
-    uint64_t domain_id:16;
-    uint64_t gcr3_trp_30_15:16;
-    uint64_t i:1;
-    uint64_t se:1;
-    uint64_t sa:1;
-    uint64_t ioctl:2;
-    uint64_t cache:1;
-    uint64_t sd:1;
-    uint64_t ex:1;
-    uint64_t sys_mgt:2;
-    uint64_t reserved2:1;
-    uint64_t gcr3_trp_51_31:21;
+    unsigned int domain_id:16;
+    unsigned int gcr3_trp_30_15:16;
+    bool i:1;
+    bool se:1;
+    bool sa:1;
+    unsigned int ioctl:2;
+    bool cache:1;
+    bool sd:1;
+    bool ex:1;
+    unsigned int sys_mgt:2;
+    unsigned int :1;
+    unsigned int gcr3_trp_51_31:21;
 
     /* 128 - 191 */
-    uint64_t iv:1;
-    uint64_t int_tab_len:4;
-    uint64_t ig:1;
+    bool iv:1;
+    unsigned int int_tab_len:4;
+    bool ig:1;
     uint64_t it_root:46;
-    uint64_t reserved3:4;
-    uint64_t init_pass:1;
-    uint64_t ext_int_pass:1;
-    uint64_t nmi_pass:1;
-    uint64_t reserved4:1;
-    uint64_t int_ctl:2;
-    uint64_t lint0_pass:1;
-    uint64_t lint1_pass:1;
+    unsigned int :4;
+    bool init_pass:1;
+    bool ext_int_pass:1;
+    bool nmi_pass:1;
+    unsigned int :1;
+    unsigned int int_ctl:2;
+    bool lint0_pass:1;
+    bool lint1_pass:1;
 
     /* 192 - 255 */
-    uint64_t reserved5:54;
-    uint64_t attr_v:1;
-    uint64_t mode0_fc:1;
-    uint64_t snoop_attr:8;
+    uint64_t :54;
+    bool attr_v:1;
+    bool mode0_fc:1;
+    unsigned int snoop_attr:8;
 };
 
 /* Command Buffer */
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -73,14 +73,14 @@ int __must_check amd_iommu_flush_iotlb_a
 int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
 void amd_iommu_set_intremap_table(struct amd_iommu_dte *dte,
                                   uint64_t intremap_ptr,
-                                  uint8_t int_valid);
+                                  bool valid);
 void amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
                                    uint64_t root_ptr, uint16_t domain_id,
-                                   uint8_t paging_mode, uint8_t valid);
+                                   uint8_t paging_mode, bool valid);
 void iommu_dte_add_device_entry(struct amd_iommu_dte *dte,
-                                struct ivrs_mappings *ivrs_dev);
+                                const struct ivrs_mappings *ivrs_dev);
 void iommu_dte_set_guest_cr3(struct amd_iommu_dte *dte, uint16_t dom_id,
-                             uint64_t gcr3_mfn, uint8_t gv, uint8_t glx);
+                             uint64_t gcr3_mfn, bool gv, uint8_t glx);
 
 /* send cmd to iommu */
 void amd_iommu_flush_all_pages(struct domain *d);
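
A note on why the MASK_EXTR() invocations can be dropped for the
single-bit fields in iommu_dte_add_device_entry(): assigning to a bool
bit-field converts any non-zero value to true, whereas assigning to a
plain 1-bit bit-field truncates to the low bit. A minimal standalone
sketch (all struct, macro, and value names below are invented for
illustration, not taken from the Xen headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for single-bit device flags like the ACPI_IVHD_* masks. */
#define DEMO_FLAG_A 0x01
#define DEMO_FLAG_B 0x02

struct demo {
    uint64_t u:1; /* plain 1-bit field: assignment truncates to bit 0 */
    bool     b:1; /* bool bit-field: any non-zero value converts to 1 */
};

int main(void)
{
    uint8_t flags = DEMO_FLAG_B;
    struct demo d;

    d.u = flags & DEMO_FLAG_B; /* 0x2 truncated to its low bit: 0 (wrong) */
    d.b = flags & DEMO_FLAG_B; /* 0x2 is non-zero, hence true: 1 (right) */
    printf("u=%u b=%u\n", (unsigned int)d.u, (unsigned int)d.b);

    return 0;
}

This also shows why sys_mgt, being a 2-bit field, has to keep its
MASK_EXTR() in the compound literal.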
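
The memset() removal in the same function relies on C's initializer
semantics: members a compound literal doesn't name are zero-initialized,
so the single structure assignment subsumes the memset() plus the
individual stores. A cut-down sketch, again with invented names:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct entry {
    uint32_t a, b, c;
};

/* Old shape: zero everything, then store the interesting members. */
void fill_old(struct entry *e, uint32_t a)
{
    memset(e, 0, sizeof(*e));
    e->a = a;
}

/* New shape: one assignment; members not mentioned (b, c) become 0. */
void fill_new(struct entry *e, uint32_t a)
{
    *e = (struct entry){ .a = a };
}

int main(void)
{
    struct entry x, y;

    fill_old(&x, 1);
    fill_new(&y, 1);
    assert(memcmp(&x, &y, sizeof(x)) == 0); /* same resulting contents */

    return 0;
}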
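
Finally, leaving the reserved DTE fields unnamed keeps the layout intact
while making stray accesses impossible: an unnamed bit-field still
occupies its bits, but there is no identifier through which code could
read or write it, so a typo now fails to compile instead of silently
clobbering bits the hardware interprets. One caveat worth knowing: C11
leaves unnamed members with indeterminate values even after
initialization, so clearing them via the compound literal above relies
on compiler behavior (GCC zero-fills the whole object). Sketch with
invented fields:

#include <stdbool.h>

struct regfmt {
    bool enable:1;
    unsigned int :5;  /* reserved: occupies 5 bits, but cannot be
                         referenced - "r.reserved = 1" no longer compiles */
    unsigned int mode:2;
};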