[PATCH v3 14/23] AMD/IOMMU: allow use of superpage mappings
No separate feature flags exist which would control availability of
these; the only restriction is HATS (establishing the maximum number of
page table levels in general), and even that has a lower bound of 4.
Thus we can unconditionally announce 2M, 1G, and 512G mappings. (Via
non-default page sizes the implementation in principle permits
arbitrary size mappings, but these require multiple identical leaf PTEs
to be written, which isn't all that different from having to write
multiple consecutive PTEs with increasing frame numbers. IMO that's
therefore beneficial only on hardware where suitable TLBs exist; I'm
unaware of such hardware.)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
I'm not fully sure about allowing 512G mappings: The scheduling-for-
freeing of intermediate page tables would take quite a while when
replacing a tree of 4k mappings by a single 512G one. Yet then again
there's no present code path via which 512G chunks of memory could be
allocated (and hence mapped) anyway, so this would only benefit huge
systems where 512 1G mappings could be re-coalesced (once suitable code
is in place) into a single L4 entry. And re-coalescing wouldn't result
in scheduling-for-freeing of full trees of lower level pagetables.
---
v3: Rename queue_free_pt()'s last parameter. Replace "level > 1" checks
    where possible.

--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -32,12 +32,13 @@ static unsigned int pfn_to_pde_idx(unsig
 }
 
 static union amd_iommu_pte clear_iommu_pte_present(unsigned long l1_mfn,
-                                                   unsigned long dfn)
+                                                   unsigned long dfn,
+                                                   unsigned int level)
 {
     union amd_iommu_pte *table, *pte, old;
 
     table = map_domain_page(_mfn(l1_mfn));
-    pte = &table[pfn_to_pde_idx(dfn, 1)];
+    pte = &table[pfn_to_pde_idx(dfn, level)];
     old = *pte;
 
     write_atomic(&pte->raw, 0);
@@ -291,10 +292,31 @@ static int iommu_pde_from_dfn(struct dom
     return 0;
 }
 
+static void queue_free_pt(struct domain *d, mfn_t mfn, unsigned int level)
+{
+    if ( level > 1 )
+    {
+        union amd_iommu_pte *pt = map_domain_page(mfn);
+        unsigned int i;
+
+        for ( i = 0; i < PTE_PER_TABLE_SIZE; ++i )
+            if ( pt[i].pr && pt[i].next_level )
+            {
+                ASSERT(pt[i].next_level < level);
+                queue_free_pt(d, _mfn(pt[i].mfn), pt[i].next_level);
+            }
+
+        unmap_domain_page(pt);
+    }
+
+    iommu_queue_free_pgtable(d, mfn_to_page(mfn));
+}
+
 int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
                        unsigned int flags, unsigned int *flush_flags)
 {
     struct domain_iommu *hd = dom_iommu(d);
+    unsigned int level = (IOMMUF_order(flags) / PTE_PER_TABLE_SHIFT) + 1;
     int rc;
     unsigned long pt_mfn = 0;
     union amd_iommu_pte old;
@@ -323,7 +345,7 @@ int amd_iommu_map_page(struct domain *d,
         return rc;
     }
 
-    if ( iommu_pde_from_dfn(d, dfn_x(dfn), 1, &pt_mfn, flush_flags, true) ||
+    if ( iommu_pde_from_dfn(d, dfn_x(dfn), level, &pt_mfn, flush_flags, true) ||
          !pt_mfn )
     {
         spin_unlock(&hd->arch.mapping_lock);
@@ -333,8 +355,8 @@ int amd_iommu_map_page(struct domain *d,
         return -EFAULT;
     }
 
-    /* Install 4k mapping */
-    old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), 1,
+    /* Install mapping */
+    old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), level,
                                 (flags & IOMMUF_writable),
                                 (flags & IOMMUF_readable));
 
@@ -342,8 +364,13 @@ int amd_iommu_map_page(struct domain *d,
     *flush_flags |= IOMMU_FLUSHF_added;
 
     if ( old.pr )
+    {
         *flush_flags |= IOMMU_FLUSHF_modified;
 
+        if ( IOMMUF_order(flags) && old.next_level )
+            queue_free_pt(d, _mfn(old.mfn), old.next_level);
+    }
+
     return 0;
 }
 
@@ -352,6 +379,7 @@ int amd_iommu_unmap_page(struct domain *
 {
     unsigned long pt_mfn = 0;
     struct domain_iommu *hd = dom_iommu(d);
+    unsigned int level = (order / PTE_PER_TABLE_SHIFT) + 1;
     union amd_iommu_pte old = {};
 
     spin_lock(&hd->arch.mapping_lock);
@@ -362,7 +390,7 @@ int amd_iommu_unmap_page(struct domain *
         return 0;
     }
 
-    if ( iommu_pde_from_dfn(d, dfn_x(dfn), 1, &pt_mfn, flush_flags, false) )
+    if ( iommu_pde_from_dfn(d, dfn_x(dfn), level, &pt_mfn, flush_flags, false) )
     {
         spin_unlock(&hd->arch.mapping_lock);
         AMD_IOMMU_ERROR("invalid IO pagetable entry dfn = %"PRI_dfn"\n",
@@ -374,14 +402,19 @@ int amd_iommu_unmap_page(struct domain *
     if ( pt_mfn )
     {
         /* Mark PTE as 'page not present'. */
-        old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn));
+        old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
 
     if ( old.pr )
+    {
         *flush_flags |= IOMMU_FLUSHF_modified;
 
+        if ( order && old.next_level )
+            queue_free_pt(d, _mfn(old.mfn), old.next_level);
+    }
+
     return 0;
 }
 
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -630,7 +630,7 @@ static void amd_dump_page_tables(struct
 }
 
 static const struct iommu_ops __initconstrel _iommu_ops = {
-    .page_sizes = PAGE_SIZE_4K,
+    .page_sizes = PAGE_SIZE_4K | PAGE_SIZE_2M | PAGE_SIZE_1G | PAGE_SIZE_512G,
     .init = amd_iommu_domain_init,
     .hwdom_init = amd_iommu_hwdom_init,
     .quarantine_init = amd_iommu_quarantine_init,
--- a/xen/include/xen/page-defs.h
+++ b/xen/include/xen/page-defs.h
@@ -21,4 +21,19 @@
 #define PAGE_MASK_64K               PAGE_MASK_GRAN(64K)
 #define PAGE_ALIGN_64K(addr)        PAGE_ALIGN_GRAN(64K, addr)
 
+#define PAGE_SHIFT_2M               21
+#define PAGE_SIZE_2M                PAGE_SIZE_GRAN(2M)
+#define PAGE_MASK_2M                PAGE_MASK_GRAN(2M)
+#define PAGE_ALIGN_2M(addr)         PAGE_ALIGN_GRAN(2M, addr)
+
+#define PAGE_SHIFT_1G               30
+#define PAGE_SIZE_1G                PAGE_SIZE_GRAN(1G)
+#define PAGE_MASK_1G                PAGE_MASK_GRAN(1G)
+#define PAGE_ALIGN_1G(addr)         PAGE_ALIGN_GRAN(1G, addr)
+
+#define PAGE_SHIFT_512G             39
+#define PAGE_SIZE_512G              PAGE_SIZE_GRAN(512G)
+#define PAGE_MASK_512G              PAGE_MASK_GRAN(512G)
+#define PAGE_ALIGN_512G(addr)       PAGE_ALIGN_GRAN(512G, addr)
+
 #endif /* __XEN_PAGE_DEFS_H__ */
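
As an illustration of the order-to-level computation used above in
amd_iommu_map_page() and amd_iommu_unmap_page(), the following
standalone sketch (not part of the patch; it assumes PTE_PER_TABLE_SHIFT
is 9, i.e. 512 PTEs per table, and a 4k base page) shows how a mapping
order selects the page-table level and the mapping size:

/* Standalone sketch, not Xen code: order -> level as computed in the
 * hunks above, assuming 512 PTEs per table and 4k base pages. */
#include <stdio.h>

#define PTE_PER_TABLE_SHIFT 9   /* assumed: 2^9 = 512 entries per table */
#define PAGE_SHIFT          12  /* 4k base page */

int main(void)
{
    static const unsigned int orders[] = { 0, 9, 18, 27 };
    unsigned int i;

    for ( i = 0; i < sizeof(orders) / sizeof(orders[0]); ++i )
    {
        /* Same computation as in amd_iommu_{map,unmap}_page() above. */
        unsigned int level = orders[i] / PTE_PER_TABLE_SHIFT + 1;

        printf("order %2u -> level %u (mapping size 2^%u bytes)\n",
               orders[i], level, PAGE_SHIFT + orders[i]);
    }

    return 0;
}

Under these assumptions order 9 yields a level-2 (2M) leaf, order 18 a
level-3 (1G) leaf, and order 27 a level-4 (512G) leaf, matching the
PAGE_SHIFT_2M/1G/512G values added to page-defs.h.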
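
To put a rough number on the post-commit-message remark about 512G
mappings: assuming 512 entries per table, replacing a fully populated
tree of 4k mappings by a single 512G (level-4) entry means queue_free_pt()
schedules roughly a quarter million intermediate page tables for freeing,
as the following standalone back-of-the-envelope sketch (not part of the
patch) shows:

/* Standalone sketch, not Xen code: number of intermediate page tables
 * below a single level-4 (512G) entry when the region is fully mapped
 * with 4k pages, assuming 512 entries per table. */
#include <stdio.h>

int main(void)
{
    const unsigned long entries = 512;  /* assumed PTEs per table */
    unsigned long l3 = 1;               /* one L3 table below the L4 entry */
    unsigned long l2 = l3 * entries;    /* 512 L2 tables */
    unsigned long l1 = l2 * entries;    /* 262144 L1 tables */

    printf("page tables queued for freeing: %lu\n", l3 + l2 + l1);
    /* prints 262657 */
    return 0;
}

This is the cost the remark weighs against the observation that no
present code path allocates (and hence maps) 512G chunks anyway.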