diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 87f6673..6bb6f68 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -47,6 +47,7 @@
 #include
 #include
 #include
+#include <linux/slab.h>
 
 #include
 
@@ -2073,6 +2074,7 @@ void __init xen_init_mmu_ops(void)
 /* Protected by xen_reservation_lock. */
 #define MAX_CONTIG_ORDER        9 /* 2MB */
 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
 
 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
@@ ... @@
+int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
+                               unsigned int address_bits)
+{
+        unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
+        unsigned long _limit_map, *limit_map;
+        unsigned long flags;
+        struct page *page;
+        unsigned long i, n = 0;
+        int success;
+
+        if (unlikely(order > MAX_CONTIG_ORDER))
+                return -ENOMEM;
+
+        if (address_bits && (address_bits < PAGE_SHIFT))
+                return -EINVAL;
+
+        if ((1U << order) > BITS_PER_LONG) {
+                limit_map = kzalloc(BITS_TO_LONGS(1U << order) *
+                                    sizeof(*limit_map), GFP_KERNEL);
+                if (unlikely(!limit_map))
+                        return -ENOMEM;
+        } else
+                limit_map = &_limit_map;
+
+        /* 0. Construct our per page bitmap lookup. */
+        if (order)
+                bitmap_zero(limit_map, 1U << order);
+        else
+                __set_bit(0, limit_map);
+
+        /* 1. Clear the pages */
+        for (i = 0; i < 1ULL << order; i++) {
+                void *vaddr;
+
+                page = &pages[i];
+                vaddr = page_address(page);
+                if (address_bits) {
+                        /* Frames already below the limit are left alone. */
+                        if (!(pfn_to_mfn(page_to_pfn(page)) >>
+                              (address_bits - PAGE_SHIFT)))
+                                continue;
+                        __set_bit(i, limit_map);
+                }
+                if (!PageHighMem(page))
+                        memset(vaddr, 0, PAGE_SIZE);
+                else {
+                        memset(kmap(page), 0, PAGE_SIZE);
+                        kunmap(page);
+                        ++n;
+                }
+        }
+        /* Check to see if we actually have to do any work. */
+        if (bitmap_empty(limit_map, 1U << order)) {
+                if (limit_map != &_limit_map)
+                        kfree(limit_map);
+                return 0;
+        }
+        if (n)
+                kmap_flush_unused();
+
+        spin_lock_irqsave(&xen_reservation_lock, flags);
+
+        /* 2. Zap current PTEs. */
+        n = xen_zap_page_range(pages, order, in_frames, NULL /* out_frames */,
+                               limit_map);
+
+        /* 3. Do the exchange for non-contiguous MFNs. */
+        success = xen_exchange_memory(n, 0, in_frames,
+                                      n, 0, out_frames, address_bits);
+
+        /* 4. Map new pages in place of old pages. */
+        if (success)
+                xen_remap_exchanged_pages(pages, order, out_frames, 0,
+                                          limit_map);
+        else
+                xen_remap_exchanged_pages(pages, order, NULL, *in_frames,
+                                          limit_map);
+
+        spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+        if (limit_map != &_limit_map)
+                kfree(limit_map);
+
+        return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
+
 #ifdef CONFIG_XEN_PVHVM
 static void xen_hvm_exit_mmap(struct mm_struct *mm)
 {
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 03c85d7..ae5b1ef 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -28,4 +28,6 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                                unsigned long mfn, int nr,
                                pgprot_t prot, unsigned domid);
 
+int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
+                               unsigned int address_bits);
 #endif /* INCLUDE_XEN_OPS_H */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 27be2f0..4fa2066 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,6 +31,8 @@
 #include
 #include
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
 
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -1550,7 +1552,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
         struct page **pages;
         unsigned int nr_pages, array_size, i;
         gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-
+        gfp_t dma_mask = gfp_mask & (__GFP_DMA | __GFP_DMA32);
+        if (xen_pv_domain()) {
+                if (dma_mask == (__GFP_DMA | __GFP_DMA32))
+                        gfp_mask &= ~(__GFP_DMA | __GFP_DMA32);
+        }
         nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
         array_size = (nr_pages * sizeof(struct page *));
 
@@ -1586,6 +1592,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                         goto fail;
                 }
                 area->pages[i] = page;
+                if (xen_pv_domain()) {
+                        if (dma_mask) {
+                                if (xen_limit_pages_to_max_mfn(page, 0, 32)) {
+                                        area->nr_pages = i + 1;
+                                        goto fail;
+                                }
+                                if (gfp_mask & __GFP_ZERO)
+                                        clear_highpage(page);
+                        }
+                }
         }
 
         if (map_vm_area(area, prot, &pages))
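
For reference, a minimal sketch of how a driver-side caller could use the new export, mirroring the mm/vmalloc.c hunk above. The helper name alloc_pages_below_4g() and its surrounding code are hypothetical and not part of this patch; xen_pv_domain(), alloc_pages() and __free_pages() are existing kernel interfaces, while xen_limit_pages_to_max_mfn() is the helper added above:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>

/* Hypothetical example: allocate 1 << order pages whose machine frames
 * end up below 4GB when running as a Xen PV guest. */
static struct page *alloc_pages_below_4g(unsigned int order)
{
        struct page *pages = alloc_pages(GFP_KERNEL, order);

        if (!pages)
                return NULL;

        /* On PV, zone flags constrain pseudo-physical addresses only, so
         * ask Xen to exchange any frames above the 32-bit boundary. */
        if (xen_pv_domain() &&
            xen_limit_pages_to_max_mfn(pages, order, 32)) {
                __free_pages(pages, order);
                return NULL;
        }

        return pages;
}

As in the vmalloc hunk, a caller that needs zeroed memory should clear the pages again after a successful exchange, since the underlying machine frames have been replaced; note also that the new helper rejects order > MAX_CONTIG_ORDER (9).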