[Xen-devel] [PATCH] linux-2.6.18/mm: properly frame Xen additions with CONFIG_XEN conditionals
There's no need and no good reason to affect native kernels built from
the same sources. Also eliminate a compiler warning triggered by various
compiler versions, and adjust some white space.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -209,14 +209,15 @@ struct vm_operations_struct {
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+#ifdef CONFIG_XEN
 	/* Area-specific function for clearing the PTE at @ptep. Returns the
 	 * original value of @ptep. */
-	pte_t (*zap_pte)(struct vm_area_struct *vma, 
+	pte_t (*zap_pte)(struct vm_area_struct *vma,
 			 unsigned long addr, pte_t *ptep, int is_fullmm);
 
-	/* called before close() to indicate no more pages should be mapped */
-	void (*unmap)(struct vm_area_struct *area);
-
+	/* called before close() to indicate no more pages should be mapped */
+	void (*unmap)(struct vm_area_struct *area);
+#endif
 #ifdef CONFIG_NUMA
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -409,7 +409,9 @@ struct page *vm_normal_page(struct vm_ar
 	 * and that the resulting page looks ok.
 	 */
 	if (unlikely(!pfn_valid(pfn))) {
+#ifdef CONFIG_XEN
 		if (!(vma->vm_flags & VM_RESERVED))
+#endif
 			print_bad_pte(vma, pte, addr);
 		return NULL;
 	}
@@ -665,10 +667,12 @@ static unsigned long zap_pte_range(struc
 				    page->index > details->last_index))
 					continue;
 			}
+#ifdef CONFIG_XEN
 			if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
 				ptent = vma->vm_ops->zap_pte(vma, addr, pte,
 							     tlb->fullmm);
 			else
+#endif
 				ptent = ptep_get_and_clear_full(mm, addr, pte,
 								tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
@@ -1425,7 +1429,7 @@ static inline int apply_to_pte_range(str
 	spinlock_t *ptl;
 
 	pte = (mm == &init_mm) ?
-		pte_alloc_kernel(pmd, addr) :
+		ptl = NULL, pte_alloc_kernel(pmd, addr) :
 		pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
 		return -ENOMEM;
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1689,8 +1689,10 @@ static void unmap_region(struct mm_struc
 
 static inline void unmap_vma(struct vm_area_struct *vma)
 {
+#ifdef CONFIG_XEN
 	if (unlikely(vma->vm_ops && vma->vm_ops->unmap))
 		vma->vm_ops->unmap(vma);
+#endif
 }
 
 /*
@@ -1966,7 +1968,7 @@ EXPORT_SYMBOL(do_brk);
 void exit_mmap(struct mm_struct *mm)
 {
 	struct mmu_gather *tlb;
-	struct vm_area_struct *vma_tmp, *vma = mm->mmap;
+	struct vm_area_struct *vma = mm->mmap;
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
@@ -1974,8 +1976,11 @@ void exit_mmap(struct mm_struct *mm)
 	arch_exit_mmap(mm);
 #endif
 
-	for (vma_tmp = mm->mmap; vma_tmp; vma_tmp = vma_tmp->vm_next)
-		unmap_vma(vma_tmp);
+#ifdef CONFIG_XEN
+	for (; vma; vma = vma->vm_next)
+		unmap_vma(vma);
+	vma = mm->mmap;
+#endif
 
 	lru_add_drain();
 	flush_cache_mm(mm);
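For context, the two hooks that the first hunk frames with CONFIG_XEN are only
ever provided by Xen-specific drivers. Below is a minimal sketch of such a
provider: the member names, signatures and CONFIG_XEN framing follow the mm.h
change above, while everything prefixed foo_ and the callback bodies are
hypothetical placeholders (a real backend would release the grant mappings
backing the area in these paths), not code from any existing driver.

/* Hypothetical sketch, not part of the patch: a driver supplying the
 * CONFIG_XEN-only .zap_pte/.unmap hooks. */
#include <linux/mm.h>
#include <asm/pgtable.h>

#ifdef CONFIG_XEN
/* Clear the PTE at @ptep and return its original value; a real backend
 * would also tear down the grant mapping behind this entry. */
static pte_t foo_vma_zap_pte(struct vm_area_struct *vma,
			     unsigned long addr, pte_t *ptep, int is_fullmm)
{
	return ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
}

/* Called before close() once no more pages may be mapped into @area. */
static void foo_vma_unmap(struct vm_area_struct *area)
{
	/* release per-VMA backend state here */
}
#endif

static struct vm_operations_struct foo_vm_ops = {
	/* .open, .close, .nopage, ... as usual */
#ifdef CONFIG_XEN
	.zap_pte = foo_vma_zap_pte,
	.unmap	 = foo_vma_unmap,
#endif
};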
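The apply_to_pte_range() hunk is the compiler-warning fix mentioned above:
ptl is assigned only on the pte_alloc_map_lock() path, so some gcc versions
apparently cannot prove it is initialized by the time the lock is dropped for
the non-init_mm case and warn that it may be used uninitialized. Setting it
to NULL via a comma expression in the init_mm branch silences that without
changing behaviour. The self-contained example below mimics the structure;
all names in it (demo, alloc_nolock, alloc_locked, unlock, struct lock) are
invented for illustration, and the parentheses around the comma expression
are optional - the patch spells it without them, which is equally valid
because the middle operand of ?: is a full expression.

/* Standalone illustration of the warning fix in apply_to_pte_range(). */
#include <stddef.h>

struct lock;					/* stand-in for spinlock_t   */

extern int *alloc_nolock(void);			/* like pte_alloc_kernel()   */
extern int *alloc_locked(struct lock **ptlp);	/* like pte_alloc_map_lock() */
extern void unlock(struct lock *ptl);		/* like pte_unmap_unlock()   */

int demo(int kernel_mapping)
{
	struct lock *ptl;	/* assigned only on the locked path ...     */
	int *pte;

	/* ... so without the "ptl = NULL" below, gcc cannot always prove
	 * that the unlock() at the end never sees an uninitialized value
	 * and may warn that ptl is used uninitialized. */
	pte = kernel_mapping ?
		(ptl = NULL, alloc_nolock()) :
		alloc_locked(&ptl);
	if (!pte)
		return -1;

	/* ... operate on pte ... */

	if (!kernel_mapping)
		unlock(ptl);
	return 0;
}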
Attachment: xen-mm-conditionals.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel