Re: [PATCH v2 1/4] IOMMU/x86: switch to alternatives-call patching in further instances
Hi Jan,

> On 27 Jan 2022, at 2:47 pm, Jan Beulich <jbeulich@xxxxxxxx> wrote:
>
> This is, once again, to limit the number of indirect calls as much as
> possible. The only hook invocation which isn't sensible to convert is
> setup(). And of course Arm-only use sites are left alone as well.
>
> Note regarding the introduction / use of local variables in pci.c:
> struct pci_dev's involved fields are const. This const propagates, via
> typeof(), to the local helper variables in the altcall macros. These
> helper variables are, however, used as outputs (and hence can't be
> const). In iommu_get_device_group() make use of the new local variables
> to also simplify some adjacent code.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Rahul Singh <rahul.singh@xxxxxxx>
Tested-by: Rahul Singh <rahul.singh@xxxxxxx>

Regards,
Rahul
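To make the const-propagation note above concrete: GNU C's typeof()
preserves qualifiers, so a helper variable declared from a const struct
field is itself const and cannot be used as an asm output operand. A
minimal standalone sketch follows; the CALL_WITH_ARG() macro and the
names in it are hypothetical, merely shaped like the altcall argument
handling, not Xen's actual implementation:

struct pci_dev_like {
    const unsigned int devfn;          /* const field, as in struct pci_dev */
};

#define CALL_WITH_ARG(fn, arg) ({                                      \
    typeof(arg) a_ = (arg);            /* inherits const from 'arg' */ \
    asm volatile ( "" : "+r" (a_) );   /* output operand: must be writable */ \
    (fn)(a_);                                                          \
})

static int consume(unsigned int v) { return (int)v; }

int demo(const struct pci_dev_like *pdev)
{
    unsigned int devfn = pdev->devfn;  /* non-const local copy */

    /* CALL_WITH_ARG(consume, pdev->devfn) would not compile: a_ would be
     * 'const unsigned int', which cannot be an asm output. The local
     * copy gives typeof() a mutable type: */
    return CALL_WITH_ARG(consume, devfn);
}

This is why the patch introduces locals such as "unsigned int devfn =
pdev->devfn;" ahead of the converted call sites.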
>
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -198,7 +198,7 @@ int iommu_domain_init(struct domain *d,
>          return ret;
>
>      hd->platform_ops = iommu_get_ops();
> -    ret = hd->platform_ops->init(d);
> +    ret = iommu_call(hd->platform_ops, init, d);
>      if ( ret || is_system_domain(d) )
>          return ret;
>
> @@ -233,7 +233,7 @@ void __hwdom_init iommu_hwdom_init(struc
>
>      register_keyhandler('o', &iommu_dump_page_tables, "dump iommu page tables", 0);
>
> -    hd->platform_ops->hwdom_init(d);
> +    iommu_vcall(hd->platform_ops, hwdom_init, d);
>  }
>
>  static void iommu_teardown(struct domain *d)
> @@ -576,7 +576,7 @@ int iommu_get_reserved_device_memory(iom
>      if ( !ops->get_reserved_device_memory )
>          return 0;
>
> -    return ops->get_reserved_device_memory(func, ctxt);
> +    return iommu_call(ops, get_reserved_device_memory, func, ctxt);
>  }
>
>  bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
> @@ -603,7 +603,7 @@ static void iommu_dump_page_tables(unsig
>              continue;
>          }
>
> -        dom_iommu(d)->platform_ops->dump_page_tables(d);
> +        iommu_vcall(dom_iommu(d)->platform_ops, dump_page_tables, d);
>      }
>
>      rcu_read_unlock(&domlist_read_lock);
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -861,15 +861,15 @@ static int deassign_device(struct domain
>          devfn += pdev->phantom_stride;
>          if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
>              break;
> -        ret = hd->platform_ops->reassign_device(d, target, devfn,
> -                                                pci_to_dev(pdev));
> +        ret = iommu_call(hd->platform_ops, reassign_device, d, target, devfn,
> +                         pci_to_dev(pdev));
>          if ( ret )
>              goto out;
>      }
>
>      devfn = pdev->devfn;
> -    ret = hd->platform_ops->reassign_device(d, target, devfn,
> -                                            pci_to_dev(pdev));
> +    ret = iommu_call(hd->platform_ops, reassign_device, d, target, devfn,
> +                     pci_to_dev(pdev));
>      if ( ret )
>          goto out;
>
> @@ -1300,7 +1300,7 @@ static int iommu_add_device(struct pci_d
>  {
>      const struct domain_iommu *hd;
>      int rc;
> -    u8 devfn;
> +    unsigned int devfn = pdev->devfn;
>
>      if ( !pdev->domain )
>          return -EINVAL;
> @@ -1311,16 +1311,16 @@ static int iommu_add_device(struct pci_d
>      if ( !is_iommu_enabled(pdev->domain) )
>          return 0;
>
> -    rc = hd->platform_ops->add_device(pdev->devfn, pci_to_dev(pdev));
> +    rc = iommu_call(hd->platform_ops, add_device, devfn, pci_to_dev(pdev));
>      if ( rc || !pdev->phantom_stride )
>          return rc;
>
> -    for ( devfn = pdev->devfn ; ; )
> +    for ( ; ; )
>      {
>          devfn += pdev->phantom_stride;
>          if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
>              return 0;
> -        rc = hd->platform_ops->add_device(devfn, pci_to_dev(pdev));
> +        rc = iommu_call(hd->platform_ops, add_device, devfn, pci_to_dev(pdev));
>          if ( rc )
>              printk(XENLOG_WARNING "IOMMU: add %pp failed (%d)\n",
>                     &pdev->sbdf, rc);
> @@ -1341,7 +1341,7 @@ static int iommu_enable_device(struct pc
>           !hd->platform_ops->enable_device )
>          return 0;
>
> -    return hd->platform_ops->enable_device(pci_to_dev(pdev));
> +    return iommu_call(hd->platform_ops, enable_device, pci_to_dev(pdev));
>  }
>
>  static int iommu_remove_device(struct pci_dev *pdev)
> @@ -1363,7 +1363,8 @@ static int iommu_remove_device(struct pc
>          devfn += pdev->phantom_stride;
>          if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
>              break;
> -        rc = hd->platform_ops->remove_device(devfn, pci_to_dev(pdev));
> +        rc = iommu_call(hd->platform_ops, remove_device, devfn,
> +                        pci_to_dev(pdev));
>          if ( !rc )
>              continue;
>
> @@ -1371,7 +1372,9 @@ static int iommu_remove_device(struct pc
>          return rc;
>      }
>
> -    return hd->platform_ops->remove_device(pdev->devfn, pci_to_dev(pdev));
> +    devfn = pdev->devfn;
> +
> +    return iommu_call(hd->platform_ops, remove_device, devfn, pci_to_dev(pdev));
>  }
>
>  static int device_assigned(u16 seg, u8 bus, u8 devfn)
> @@ -1421,7 +1424,8 @@ static int assign_device(struct domain *
>
>      pdev->fault.count = 0;
>
> -    if ( (rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag)) )
> +    if ( (rc = iommu_call(hd->platform_ops, assign_device, d, devfn,
> +                          pci_to_dev(pdev), flag)) )
>          goto done;
>
>      for ( ; pdev->phantom_stride; rc = 0 )
> @@ -1429,7 +1433,8 @@ static int assign_device(struct domain *
>          devfn += pdev->phantom_stride;
>          if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
>              break;
> -        rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag);
> +        rc = iommu_call(hd->platform_ops, assign_device, d, devfn,
> +                        pci_to_dev(pdev), flag);
>      }
>
>  done:
> @@ -1457,24 +1462,24 @@ static int iommu_get_device_group(
>      if ( !is_iommu_enabled(d) || !ops->get_device_group_id )
>          return 0;
>
> -    group_id = ops->get_device_group_id(seg, bus, devfn);
> +    group_id = iommu_call(ops, get_device_group_id, seg, bus, devfn);
>
>      pcidevs_lock();
>      for_each_pdev( d, pdev )
>      {
> -        if ( (pdev->seg != seg) ||
> -             ((pdev->bus == bus) && (pdev->devfn == devfn)) )
> +        unsigned int b = pdev->bus;
> +        unsigned int df = pdev->devfn;
> +
> +        if ( (pdev->seg != seg) || ((b == bus) && (df == devfn)) )
>              continue;
>
> -        if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 8) | pdev->devfn) )
> +        if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (b << 8) | df) )
>              continue;
>
> -        sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
> +        sdev_id = iommu_call(ops, get_device_group_id, seg, b, df);
>          if ( (sdev_id == group_id) && (i < max_sdevs) )
>          {
> -            bdf = 0;
> -            bdf |= (pdev->bus & 0xff) << 16;
> -            bdf |= (pdev->devfn & 0xff) << 8;
> +            bdf = (b << 16) | (df << 8);
>
>              if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
>              {
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -145,7 +145,7 @@ unsigned int iommu_read_apic_from_ire(un
>  int __init iommu_setup_hpet_msi(struct msi_desc *msi)
>  {
>      const struct iommu_ops *ops = iommu_get_ops();
> -    return ops->setup_hpet_msi ? ops->setup_hpet_msi(msi) : -ENODEV;
> +    return ops->setup_hpet_msi ? iommu_call(ops, setup_hpet_msi, msi) : -ENODEV;
>  }
>
>  void __hwdom_init arch_iommu_check_autotranslated_hwdom(struct domain *d)
> @@ -406,7 +406,7 @@ int iommu_free_pgtables(struct domain *d
>       * Pages will be moved to the free list below. So we want to
>       * clear the root page-table to avoid any potential use after-free.
>       */
> -    hd->platform_ops->clear_root_pgtable(d);
> +    iommu_vcall(hd->platform_ops, clear_root_pgtable, d);
>
>      while ( (pg = page_list_remove_head(&hd->arch.pgtables.list)) )
>      {
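For context on what the conversion buys: on x86, iommu_call() and
iommu_vcall() are thin wrappers around the alternative_call() /
alternative_vcall() patching machinery. Each converted call site starts
out as an indirect call through the ops table but is patched at boot,
once the single active iommu_ops instance is known, into a direct call,
so the hook no longer pays the indirect-branch (retpoline) cost on every
invocation. Roughly, and simplified from memory -- see the x86
asm/iommu.h header for the exact definitions:

#define iommu_call(ops, fn, args...) ({                             \
    (void)(ops);          /* x86 has a single iommu_ops instance */ \
    alternative_call(iommu_ops.fn, ## args);                        \
})

#define iommu_vcall(ops, fn, args...) ({                            \
    (void)(ops);                                                    \
    alternative_vcall(iommu_ops.fn, ## args);                       \
})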