[PATCH v7 6/8] AMD/IOMMU: provide function backing XENMEM_reserved_device_memory_map
Just like for VT-d, exclusion / unity map ranges would better be
reflected in e.g. the guest's E820 map. The reporting infrastructure,
as put in place, is still pretty much tailored to VT-d's needs; extend
get_reserved_device_memory() to allow vendor-specific code to probe
whether a particular (seg,bus,dev,func) tuple would actually get its
data recorded.

I admit the de-duplication of entries is quite limited for now, but
considering our trouble finding a system surfacing _any_ IVMD, this is
likely not a critical issue for this initial implementation.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Paul Durrant <paul@xxxxxxx>
---
v7: Re-base.
v5: New.
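As a side note, the probe convention which the xen/common/memory.c
change below introduces can be sketched standalone: calling the
iommu_grdm_t-style callback with nr == 0 acts as a pure probe,
returning 1 when data for the given sbdf would be recorded. A minimal
illustration follows; grdm_ctxt and grdm_cb are simplified stand-ins
made up for the example, not the real Xen types:

/* Standalone sketch of the nr == 0 probe convention (simplified,
 * hypothetical types; not the real Xen context structure). */
#include <stdint.h>
#include <stdio.h>

struct grdm_ctxt {
    uint32_t id;   /* sbdf the caller asked about */
    int all;       /* non-zero: XENMEM_RDM_ALL-style request */
};

/* Shaped like an iommu_grdm_t callback: with nr == 0 it only answers
 * whether a range for this sbdf would be recorded. */
static int grdm_cb(unsigned long start, unsigned long nr,
                   uint32_t sbdf, void *ctxt)
{
    const struct grdm_ctxt *g = ctxt;

    if ( !g->all && sbdf != g->id )
        return 0;   /* mismatch: nothing would be recorded */

    if ( !nr )
        return 1;   /* probe: data would be recorded */

    printf("record [%#lx, %#lx) for %04x\n",
           start, start + nr, (unsigned int)sbdf);
    return 1;
}

int main(void)
{
    struct grdm_ctxt g = { .id = 0x0010, .all = 0 };

    /* Vendor code probes first, then reports ranges only on a match. */
    if ( grdm_cb(0, 0, 0x0010, &g) )
        grdm_cb(0x1000, 0x10, 0x0010, &g);

    return 0;
}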
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1042,6 +1042,9 @@ static int get_reserved_device_memory(xe
     if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
         return 0;
 
+    if ( !nr )
+        return 1;
+
     if ( grdm->used_entries < grdm->map.nr_entries )
     {
         struct xen_reserved_device_memory rdm = {
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -110,6 +110,7 @@ struct amd_iommu {
 struct ivrs_unity_map {
     bool read:1;
     bool write:1;
+    bool global:1;
     paddr_t addr;
     unsigned long length;
     struct ivrs_unity_map *next;
@@ -236,6 +237,7 @@ int amd_iommu_reserve_domain_unity_map(s
                                        unsigned int flag);
 int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
                                          const struct ivrs_unity_map *map);
+int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
 int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
                                              unsigned long page_count,
                                              unsigned int flush_flags);
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -145,7 +145,7 @@ static int __init reserve_iommu_exclusio
 
 static int __init reserve_unity_map_for_device(
     uint16_t seg, uint16_t bdf, unsigned long base,
-    unsigned long length, bool iw, bool ir)
+    unsigned long length, bool iw, bool ir, bool global)
 {
     struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
     struct ivrs_unity_map *unity_map = ivrs_mappings[bdf].unity_map;
@@ -164,7 +164,11 @@ static int __init reserve_unity_map_for_
          */
         if ( base == unity_map->addr && length == unity_map->length &&
              ir == unity_map->read && iw == unity_map->write )
+        {
+            if ( global )
+                unity_map->global = true;
             return 0;
+        }
 
         if ( unity_map->addr + unity_map->length > base &&
              base + length > unity_map->addr )
@@ -183,6 +187,7 @@ static int __init reserve_unity_map_for_
 
     unity_map->read = ir;
     unity_map->write = iw;
+    unity_map->global = global;
     unity_map->addr = base;
     unity_map->length = length;
     unity_map->next = ivrs_mappings[bdf].unity_map;
@@ -222,7 +227,8 @@ static int __init register_range_for_all
 
         /* reserve r/w unity-mapped page entries for devices */
         for ( bdf = rc = 0; !rc && bdf < ivrs_bdf_entries; bdf++ )
-            rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
+            rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+                                              true);
     }
 
     return rc;
@@ -255,8 +261,10 @@ static int __init register_range_for_dev
         paddr_t length = limit + PAGE_SIZE - base;
 
         /* reserve unity-mapped page entries for device */
-        rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir) ?:
-             reserve_unity_map_for_device(seg, req, base, length, iw, ir);
+        rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+                                          false) ?:
+             reserve_unity_map_for_device(seg, req, base, length, iw, ir,
+                                          false);
     }
     else
     {
@@ -292,9 +300,9 @@ static int __init register_range_for_iom
 
         req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
         rc = reserve_unity_map_for_device(iommu->seg, bdf, base, length,
-                                          iw, ir) ?:
+                                          iw, ir, false) ?:
              reserve_unity_map_for_device(iommu->seg, req, base, length,
-                                          iw, ir);
+                                          iw, ir, false);
     }
 
     return rc;
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -467,6 +467,81 @@ int amd_iommu_reserve_domain_unity_unmap
     return rc;
 }
 
+int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    unsigned int seg = 0 /* XXX */, bdf;
+    const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
+    /* At least for global entries, avoid reporting them multiple times. */
+    enum { pending, processing, done } global = pending;
+
+    for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+    {
+        pci_sbdf_t sbdf = PCI_SBDF2(seg, bdf);
+        const struct ivrs_unity_map *um = ivrs_mappings[bdf].unity_map;
+        unsigned int req = ivrs_mappings[bdf].dte_requestor_id;
+        const struct amd_iommu *iommu = ivrs_mappings[bdf].iommu;
+        int rc;
+
+        if ( !iommu )
+        {
+            /* May need to trigger the workaround in find_iommu_for_device(). */
+            const struct pci_dev *pdev;
+
+            pcidevs_lock();
+            pdev = pci_get_pdev(seg, sbdf.bus, sbdf.devfn);
+            pcidevs_unlock();
+
+            if ( pdev )
+                iommu = find_iommu_for_device(seg, bdf);
+            if ( !iommu )
+                continue;
+        }
+
+        if ( func(0, 0, sbdf.sbdf, ctxt) )
+        {
+            /*
+             * When the caller processes a XENMEM_RDM_ALL request, don't
+             * report multiple times the same range(s) for perhaps many
+             * devices with the same alias ID.
+             */
+            if ( bdf != req && ivrs_mappings[req].iommu &&
+                 func(0, 0, PCI_SBDF2(seg, req).sbdf, ctxt) )
+                continue;
+
+            if ( global == pending )
+                global = processing;
+        }
+
+        if ( iommu->exclusion_enable &&
+             (iommu->exclusion_allow_all ?
+              global == processing :
+              ivrs_mappings[bdf].dte_allow_exclusion) )
+        {
+            rc = func(PFN_DOWN(iommu->exclusion_base),
+                      PFN_UP(iommu->exclusion_limit | 1) -
+                      PFN_DOWN(iommu->exclusion_base), sbdf.sbdf, ctxt);
+            if ( unlikely(rc < 0) )
+                return rc;
+        }
+
+        for ( ; um; um = um->next )
+        {
+            if ( um->global && global != processing )
+                continue;
+
+            rc = func(PFN_DOWN(um->addr), PFN_DOWN(um->length),
+                      sbdf.sbdf, ctxt);
+            if ( unlikely(rc < 0) )
+                return rc;
+        }
+
+        if ( global == processing )
+            global = done;
+    }
+
+    return 0;
+}
+
 int __init amd_iommu_quarantine_init(struct domain *d)
 {
     struct domain_iommu *hd = dom_iommu(d);
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -667,6 +667,7 @@ static const struct iommu_ops __initcons
     .suspend = amd_iommu_suspend,
    .resume = amd_iommu_resume,
     .crash_shutdown = amd_iommu_crash_shutdown,
+    .get_reserved_device_memory = amd_iommu_get_reserved_device_memory,
     .dump_page_tables = amd_dump_page_tables,
 };
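For readers puzzling over the limited de-duplication in
amd_iommu_get_reserved_device_memory(): the pending / processing /
done progression is easier to see in isolation. Below is a toy
standalone model (the probe results are assumed and none of these
names are Xen's) showing that a global range gets emitted exactly
once, under the first device whose probe succeeds:

/* Toy model of the pending/processing/done tracking: a global range
 * is emitted only while global == processing, i.e. exactly once, for
 * the first device whose probe succeeds. */
#include <stdbool.h>
#include <stdio.h>

enum state { pending, processing, done };

int main(void)
{
    enum state global = pending;
    /* Assumed probe results for four hypothetical device IDs. */
    bool probe_ok[] = { false, true, true, true };

    for ( unsigned int bdf = 0; bdf < 4; ++bdf )
    {
        if ( probe_ok[bdf] && global == pending )
            global = processing;

        if ( global == processing )
            printf("emit global range under bdf %u\n", bdf);

        /* Per-device (non-global) ranges would be emitted here,
         * regardless of the state. */

        if ( global == processing )
            global = done;
    }

    return 0;
}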