[Xen-devel] [PATCH] linux-2.6.18: make gnttab_dma_map_page() compound page aware
While this was found to be a problem in practice only on recent kernels,
it was nevertheless a mistake from the beginning not to take compound
pages into consideration.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/arch/i386/kernel/pci-dma-xen.c
+++ b/arch/i386/kernel/pci-dma-xen.c
@@ -120,7 +120,7 @@ dma_map_sg(struct device *hwdev, struct
 		for (i = 0; i < nents; i++ ) {
 			BUG_ON(!sg[i].page);
 			sg[i].dma_address =
-				gnttab_dma_map_page(sg[i].page) + sg[i].offset;
+				gnttab_dma_map_page(sg[i].page, sg[i].offset);
 			sg[i].dma_length  = sg[i].length;
 			IOMMU_BUG_ON(address_needs_mapping(
 				hwdev, sg[i].dma_address));
@@ -165,7 +165,7 @@ dma_map_page(struct device *dev, struct
 		dma_addr = swiotlb_map_page(
 			dev, page, offset, size, direction);
 	} else {
-		dma_addr = gnttab_dma_map_page(page) + offset;
+		dma_addr = gnttab_dma_map_page(page, offset);
 		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
 	}
 
@@ -363,8 +363,8 @@ dma_map_single(struct device *dev, void
 	if (swiotlb) {
 		dma = swiotlb_map_single(dev, ptr, size, direction);
 	} else {
-		dma = gnttab_dma_map_page(virt_to_page(ptr)) +
-		      offset_in_page(ptr);
+		dma = gnttab_dma_map_page(virt_to_page(ptr),
+					  offset_in_page(ptr));
 		IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
 		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
 	}
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1522,7 +1522,7 @@ dma_map_sg(struct device *hwdev, struct
 	for (sg = sglist ; filled < nents ; filled++, sg++){
 		sg->dma_length = sg->length;
 #ifdef CONFIG_XEN
-		sg->dma_address = gnttab_dma_map_page(sg->page) + sg->offset;
+		sg->dma_address = gnttab_dma_map_page(sg->page, sg->offset);
 #else
 		sg->dma_address = virt_to_bus(sba_sg_address(sg));
 #endif
--- a/arch/ia64/xen/xen_dma.c
+++ b/arch/ia64/xen/xen_dma.c
@@ -92,7 +92,7 @@ dma_map_sg(struct device *hwdev, struct
 	int i;
 
 	for (i = 0 ; i < nents ; i++) {
-		sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
+		sg[i].dma_address = gnttab_dma_map_page(sg[i].page, sg[i].offset);
 		sg[i].dma_length = sg[i].length;
 
 		IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
--- a/drivers/xen/core/gnttab.c
+++ b/drivers/xen/core/gnttab.c
@@ -705,6 +705,8 @@ void __gnttab_dma_map_page(struct page *
 	if (!is_running_on_xen() || !PageForeign(page))
 		return;
 
+	BUG_ON(PageCompound(page));
+
 	do {
 		seq = read_seqbegin(&gnttab_dma_lock);
 
--- a/include/asm-i386/mach-xen/asm/gnttab_dma.h
+++ b/include/asm-i386/mach-xen/asm/gnttab_dma.h
@@ -21,16 +21,24 @@
 #ifndef _ASM_I386_GNTTAB_DMA_H
 #define _ASM_I386_GNTTAB_DMA_H
 
+#include <asm/bug.h>
+
 static inline int gnttab_dma_local_pfn(struct page *page)
 {
 	/* Has it become a local MFN? */
 	return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
 }
 
-static inline maddr_t gnttab_dma_map_page(struct page *page)
+static inline maddr_t gnttab_dma_map_page(struct page *page,
+					   unsigned long offset)
 {
+	unsigned int pgnr = offset >> PAGE_SHIFT;
+	unsigned int order = PageCompound(page) ? (long)page[1].lru.prev : 0;
+
+	BUG_ON(pgnr >> order);
 	__gnttab_dma_map_page(page);
-	return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT);
+	return ((maddr_t)pfn_to_mfn(page_to_pfn(page) + pgnr) << PAGE_SHIFT)
+	       + (offset & ~PAGE_MASK);
 }
 
 static inline void gnttab_dma_unmap_page(maddr_t maddr)
--- a/include/asm-ia64/gnttab_dma.h
+++ b/include/asm-ia64/gnttab_dma.h
@@ -21,6 +21,8 @@
 #ifndef _ASM_IA64_GNTTAB_DMA_H
 #define _ASM_IA64_GNTTAB_DMA_H
 
+#include <asm/bug.h>
+
 static inline int gnttab_dma_local_pfn(struct page *page)
 {
 	return 0;
@@ -32,10 +34,15 @@
 	__gnttab_dma_map_page(page);
 }
 
-static inline dma_addr_t gnttab_dma_map_page(struct page *page)
+static inline dma_addr_t gnttab_dma_map_page(struct page *page,
+					      unsigned long offset)
 {
+	unsigned int pgnr = offset >> PAGE_SHIFT;
+	unsigned int order = PageCompound(page) ? (long)page[1].lru.prev : 0;
+
+	BUG_ON(pgnr >> order);
 	gnttab_dma_use_page(page);
-	return page_to_bus(page);
+	return page_to_bus(page + pgnr) + (offset & ~PAGE_MASK);
 }
 
 static inline dma_addr_t gnttab_dma_map_virt(void *ptr)
--- a/lib/swiotlb-xen.c
+++ b/lib/swiotlb-xen.c
@@ -501,8 +501,8 @@ swiotlb_full(struct device *dev, size_t
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
-			      offset_in_page(ptr);
+	dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr),
+						  offset_in_page(ptr));
 	void *map;
 	struct phys_addr buffer;
 
@@ -613,7 +613,7 @@ swiotlb_map_sg(struct device *hwdev, str
 	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
-		dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
+		dev_addr = gnttab_dma_map_page(sg->page, sg->offset);
 
 		if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
 						  + sg->offset, sg->length)
@@ -705,7 +705,7 @@ swiotlb_map_page(struct device *hwdev, s
 	dma_addr_t dev_addr;
 	char *map;
 
-	dev_addr = gnttab_dma_map_page(page) + offset;
+	dev_addr = gnttab_dma_map_page(page, offset);
 	if (address_needs_mapping(hwdev, dev_addr)) {
 		gnttab_dma_unmap_page(dev_addr);
 		buffer.page = page;
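The interesting part is the new offset handling in the two gnttab_dma_map_page()
implementations above: the byte offset is split into a constituent-page index
(pgnr = offset >> PAGE_SHIFT) and an in-page remainder, the index is checked
against the compound page's order (BUG_ON(pgnr >> order)), and the pfn-to-mfn
translation is applied to that constituent page rather than to the head page.
Previously the helper always returned the head page's machine address and the
caller added the full offset, so an offset beyond PAGE_SIZE ended up in whatever
machine frame happens to follow the head frame instead of the frame actually
backing that part of the compound page. A minimal stand-alone sketch of the
arithmetic is below; PAGE_SHIFT and the identity pfn_to_mfn() are illustrative
stand-ins, not the kernel interfaces:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Stand-in for pfn_to_mfn(): identity mapping, purely for illustration. */
static uint64_t pfn_to_mfn(uint64_t pfn)
{
	return pfn;
}

/*
 * Machine address of 'offset' bytes into a compound page whose head has
 * frame number 'head_pfn' and order 'order'.
 */
static uint64_t map_compound_offset(uint64_t head_pfn, unsigned int order,
				    unsigned long offset)
{
	unsigned int pgnr = offset >> PAGE_SHIFT;	/* constituent page index */

	assert(!(pgnr >> order));			/* offset stays inside the page */
	return (pfn_to_mfn(head_pfn + pgnr) << PAGE_SHIFT)
	       + (offset & ~PAGE_MASK);
}

int main(void)
{
	/* 16KiB (order-2) compound page at pfn 0x1000: offset 0x2345
	 * resolves to the third constituent frame plus 0x345. */
	printf("%#llx\n",
	       (unsigned long long)map_compound_offset(0x1000, 2, 0x2345));
	return 0;
}

With the identity translation this prints 0x1002345; in a real Xen guest the
mfn of the third constituent page is used, which is exactly the difference the
patch makes for sub-page offsets larger than PAGE_SIZE.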
Attachment: xen-gnttab-map-compound-page.patch