[PATCH mm-unstable v9 22/31] m68k: Convert various functions to use ptdescs

As part of the conversions to replace pgtable constructor/destructors with
ptdesc equivalents, convert various page table functions to use ptdescs.

Some of these functions use *get*page*() helpers. Convert them to use
pagetable_alloc() and ptdesc_address() instead, to help standardize page
tables further.

Acked-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Acked-by: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
---
 arch/m68k/include/asm/mcf_pgalloc.h  | 47 ++++++++++++++--------------
 arch/m68k/include/asm/sun3_pgalloc.h |  8 ++---
 arch/m68k/mm/motorola.c              |  4 +--
 3 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 5c2c0a864524..302c5bf67179 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -5,22 +5,22 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-       free_page((unsigned long) pte);
+       pagetable_free(virt_to_ptdesc(pte));
 }
 
 extern const char bad_pmd_string[];
 
-extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-       unsigned long page = __get_free_page(GFP_DMA);
+       struct ptdesc *ptdesc = pagetable_alloc((GFP_DMA | __GFP_ZERO) &
+                       ~__GFP_HIGHMEM, 0);
 
-       if (!page)
+       if (!ptdesc)
                return NULL;
 
-       memset((void *)page, 0, PAGE_SIZE);
-       return (pte_t *) (page);
+       return ptdesc_address(ptdesc);
 }
 
 extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
@@ -35,36 +35,34 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
                                  unsigned long address)
 {
-       struct page *page = virt_to_page(pgtable);
+       struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
 
-       pgtable_pte_page_dtor(page);
-       __free_page(page);
+       pagetable_pte_dtor(ptdesc);
+       pagetable_free(ptdesc);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-       struct page *page = alloc_pages(GFP_DMA, 0);
+       struct ptdesc *ptdesc = pagetable_alloc(GFP_DMA | __GFP_ZERO, 0);
        pte_t *pte;
 
-       if (!page)
+       if (!ptdesc)
                return NULL;
-       if (!pgtable_pte_page_ctor(page)) {
-               __free_page(page);
+       if (!pagetable_pte_ctor(ptdesc)) {
+               pagetable_free(ptdesc);
                return NULL;
        }
 
-       pte = page_address(page);
-       clear_page(pte);
-
+       pte = ptdesc_address(ptdesc);
        return pte;
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-       struct page *page = virt_to_page(pgtable);
+       struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
 
-       pgtable_pte_page_dtor(page);
-       __free_page(page);
+       pagetable_pte_dtor(ptdesc);
+       pagetable_free(ptdesc);
 }
 
 /*
@@ -75,16 +73,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       free_page((unsigned long) pgd);
+       pagetable_free(virt_to_ptdesc(pgd));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *new_pgd;
+       struct ptdesc *ptdesc = pagetable_alloc((GFP_DMA | __GFP_NOWARN) &
+                       ~__GFP_HIGHMEM, 0);
 
-       new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
-       if (!new_pgd)
+       if (!ptdesc)
                return NULL;
+       new_pgd = ptdesc_address(ptdesc);
+
        memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
        memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
        return new_pgd;
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 198036aff519..ff48573db2c0 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -17,10 +17,10 @@
 
 extern const char bad_pmd_string[];
 
-#define __pte_free_tlb(tlb,pte,addr)                   \
-do {                                                   \
-       pgtable_pte_page_dtor(pte);                     \
-       tlb_remove_page((tlb), pte);                    \
+#define __pte_free_tlb(tlb, pte, addr)                         \
+do {                                                           \
+       pagetable_pte_dtor(page_ptdesc(pte));                   \
+       tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));        \
 } while (0)
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 8bca46e51e94..c1761d309fc6 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -161,7 +161,7 @@ void *get_pointer_table(int type)
                         * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
                         * SMP.
                         */
-                       pgtable_pte_page_ctor(virt_to_page(page));
+                       pagetable_pte_ctor(virt_to_ptdesc(page));
                }
 
                mmu_page_ctor(page);
@@ -201,7 +201,7 @@ int free_pointer_table(void *table, int type)
                list_del(dp);
                mmu_page_dtor((void *)page);
                if (type == TABLE_PTE)
-                       pgtable_pte_page_dtor(virt_to_page((void *)page));
+                       pagetable_pte_dtor(virt_to_ptdesc((void *)page));
                free_page (page);
                return 1;
        } else if (ptable_list[type].next != dp) {
-- 
2.40.1