|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [linux-2.6.18-xen] mm: properly frame Xen additions with CONFIG_XEN conditionals
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1344319692 -7200
# Node ID 2d3239c1550132c77c2c306f294b2cd01d57ec81
# Parent f628814a279a118f4b1b7c121ccc1b8be4d44f9d
mm: properly frame Xen additions with CONFIG_XEN conditionals
There's no need and no good reason to affect native kernels built from
the same sources.
Also eliminate a compiler warning triggered by various versions, and
adjust some white space.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
diff -r f628814a279a -r 2d3239c15501 include/linux/mm.h
--- a/include/linux/mm.h Tue Aug 07 08:07:12 2012 +0200
+++ b/include/linux/mm.h Tue Aug 07 08:08:12 2012 +0200
@@ -209,14 +209,15 @@ struct vm_operations_struct {
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+#ifdef CONFIG_XEN
/* Area-specific function for clearing the PTE at @ptep. Returns the
* original value of @ptep. */
- pte_t (*zap_pte)(struct vm_area_struct *vma,
+ pte_t (*zap_pte)(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, int is_fullmm);
- /* called before close() to indicate no more pages should be mapped */
- void (*unmap)(struct vm_area_struct *area);
-
+ /* called before close() to indicate no more pages should be mapped */
+ void (*unmap)(struct vm_area_struct *area);
+#endif
#ifdef CONFIG_NUMA
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
diff -r f628814a279a -r 2d3239c15501 mm/memory.c
--- a/mm/memory.c Tue Aug 07 08:07:12 2012 +0200
+++ b/mm/memory.c Tue Aug 07 08:08:12 2012 +0200
@@ -409,7 +409,9 @@ struct page *vm_normal_page(struct vm_ar
* and that the resulting page looks ok.
*/
if (unlikely(!pfn_valid(pfn))) {
+#ifdef CONFIG_XEN
if (!(vma->vm_flags & VM_RESERVED))
+#endif
print_bad_pte(vma, pte, addr);
return NULL;
}
@@ -665,10 +667,12 @@ static unsigned long zap_pte_range(struc
page->index > details->last_index))
continue;
}
+#ifdef CONFIG_XEN
if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
ptent = vma->vm_ops->zap_pte(vma, addr, pte,
tlb->fullmm);
else
+#endif
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
@@ -1425,7 +1429,7 @@ static inline int apply_to_pte_range(str
spinlock_t *ptl;
pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
+ ptl = NULL, pte_alloc_kernel(pmd, addr) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
diff -r f628814a279a -r 2d3239c15501 mm/mmap.c
--- a/mm/mmap.c Tue Aug 07 08:07:12 2012 +0200
+++ b/mm/mmap.c Tue Aug 07 08:08:12 2012 +0200
@@ -1689,8 +1689,10 @@ static void unmap_region(struct mm_struc
static inline void unmap_vma(struct vm_area_struct *vma)
{
+#ifdef CONFIG_XEN
if (unlikely(vma->vm_ops && vma->vm_ops->unmap))
vma->vm_ops->unmap(vma);
+#endif
}
/*
@@ -1966,7 +1968,7 @@ EXPORT_SYMBOL(do_brk);
void exit_mmap(struct mm_struct *mm)
{
struct mmu_gather *tlb;
- struct vm_area_struct *vma_tmp, *vma = mm->mmap;
+ struct vm_area_struct *vma = mm->mmap;
unsigned long nr_accounted = 0;
unsigned long end;
@@ -1974,8 +1976,11 @@ void exit_mmap(struct mm_struct *mm)
arch_exit_mmap(mm);
#endif
- for (vma_tmp = mm->mmap; vma_tmp; vma_tmp = vma_tmp->vm_next)
- unmap_vma(vma_tmp);
+#ifdef CONFIG_XEN
+ for (; vma; vma = vma->vm_next)
+ unmap_vma(vma);
+ vma = mm->mmap;
+#endif
lru_add_drain();
flush_cache_mm(mm);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.