
Re: [PATCH 1/2] xen/swiotlb: relax alignment requirements



On Tue, 11 Feb 2025, Juergen Gross wrote:
> When mapping a buffer for DMA via the .map_page or .map_sg DMA
> operations, there is no need to check that the machine frames are
> aligned according to the mapped area's size. All that is needed in
> these cases is that the buffer is contiguous at machine level.
> 
> So carve out the alignment check from range_straddles_page_boundary()
> and move it to a helper called by xen_swiotlb_alloc_coherent() and
> xen_swiotlb_free_coherent() directly.
> 
> Fixes: 9f40ec84a797 ("xen/swiotlb: add alignment check for dma buffers")
> Reported-by: Jan Vejvalka <jan.vejvalka@xxxxxxxxxxxxxxx>
> Tested-by: Jan Vejvalka <jan.vejvalka@xxxxxxxxxxxxxxx>
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>

Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>


> ---
>  drivers/xen/swiotlb-xen.c | 20 ++++++++++++--------
>  1 file changed, 12 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index a337edcf8faf..26c62e0d34e9 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
>       return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
>  }
>  
> +static inline bool range_requires_alignment(phys_addr_t p, size_t size)
> +{
> +     phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
> +     phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
> +
> +     return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
> +}
> +
>  static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
>  {
>       unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
>       unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
> -     phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
>  
>       next_bfn = pfn_to_bfn(xen_pfn);
>  
> -     /* If buffer is physically aligned, ensure DMA alignment. */
> -     if (IS_ALIGNED(p, algn) &&
> -         !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
> -             return 1;
> -
>       for (i = 1; i < nr_pages; i++)
>               if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
>                       return 1;
> @@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
>  
>       *dma_handle = xen_phys_to_dma(dev, phys);
>       if (*dma_handle + size - 1 > dma_mask ||
> -         range_straddles_page_boundary(phys, size)) {
> +         range_straddles_page_boundary(phys, size) ||
> +         range_requires_alignment(phys, size)) {
>               if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
>                               dma_handle) != 0)
>                       goto out_free_pages;
> @@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
>       size = ALIGN(size, XEN_PAGE_SIZE);
>  
>       if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
> -         WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
> +         WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
> +                      range_requires_alignment(phys, size)))
>               return;
>  
>       if (TestClearPageXenRemapped(virt_to_page(vaddr)))
> -- 
> 2.43.0
> 
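
For readers following the logic above: a minimal stand-alone sketch of
the carved-out check, using a toy pfn-to-bfn mapping in place of Xen's
real p2m. All toy_* names and the fake frame offset are assumptions for
illustration, not kernel APIs. It constructs a buffer that is
size-aligned in guest-physical space and machine-contiguous (so fine
for .map_page/.map_sg), yet misaligned at machine level, which is
exactly the case where the coherent-allocation path must still call
xen_create_contiguous_region().

/*
 * Toy model of the alignment check carved out in the patch above.
 * The toy_* helpers stand in for the kernel's get_order(),
 * IS_ALIGNED() and pfn_to_bfn(); they are illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12
#define TOY_PAGE_SIZE  (1ULL << TOY_PAGE_SHIFT)

/* Same idea as the kernel's IS_ALIGNED(): `a` must be a power of 2. */
#define TOY_IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Smallest order such that (1 << order) pages cover `size`. */
static unsigned int toy_get_order(size_t size)
{
	size_t pages = (size + TOY_PAGE_SIZE - 1) / TOY_PAGE_SIZE;
	unsigned int order = 0;

	while ((1ULL << order) < pages)
		order++;
	return order;
}

/*
 * Fake p2m: guest frames map to machine frames at a constant odd
 * offset, so adjacent pfns stay machine-contiguous (fine for
 * .map_page/.map_sg) while an even pfn lands on an odd bfn.
 */
static uint64_t toy_pfn_to_bfn(uint64_t pfn)
{
	return pfn + 0x91;
}

/* Mirrors range_requires_alignment() from the patch. */
static bool toy_range_requires_alignment(uint64_t p, size_t size)
{
	uint64_t algn = 1ULL << (toy_get_order(size) + TOY_PAGE_SHIFT);
	uint64_t bus_addr = toy_pfn_to_bfn(p >> TOY_PAGE_SHIFT) << TOY_PAGE_SHIFT;

	return TOY_IS_ALIGNED(p, algn) && !TOY_IS_ALIGNED(bus_addr, algn);
}

int main(void)
{
	uint64_t p = 0x10ULL << TOY_PAGE_SHIFT;	/* 8 KiB-aligned physically */
	size_t size = 2 * TOY_PAGE_SIZE;	/* order-1 buffer */

	/*
	 * pfn 0x10 maps to bfn 0xa1: the two-page buffer is contiguous
	 * at machine level, but bus address 0xa1000 is not 8 KiB-aligned,
	 * so a coherent allocation would still need re-alignment.
	 */
	printf("requires machine re-alignment: %s\n",
	       toy_range_requires_alignment(p, size) ? "yes" : "no");
	return 0;
}

Built with any C99 compiler, this prints "yes": physical alignment does
not imply machine alignment under Xen PV, which is why the check
belongs in the coherent alloc/free paths rather than in
range_straddles_page_boundary().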
