[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH V6 12/18] x86: Use new cache mode type in mm/ioremap.c



Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type.

Based-on-patch-by: Stefan Bader <stefan.bader@xxxxxxxxxxxxx>
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/io.h  |  2 +-
 arch/x86/include/asm/pat.h |  2 +-
 arch/x86/mm/ioremap.c      | 65 +++++++++++++++++++++++++---------------------
 arch/x86/mm/pat.c          | 12 +++++----
 4 files changed, 44 insertions(+), 37 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index b8237d8..71b9e65 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -314,7 +314,7 @@ extern void *xlate_dev_mem_ptr(unsigned long phys);
 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-                               unsigned long prot_val);
+                               enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 extern bool is_early_ioremap_ptep(pte_t *ptep);
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index a8438bc..d35ee2d 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -17,7 +17,7 @@ extern int reserve_memtype(u64 start, u64 end,
 extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
-               unsigned long flag);
+               enum page_cache_mode pcm);
 
 int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        enum page_cache_mode *pcm);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 3a81eb9..f31507f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -29,20 +29,20 @@
  * conflicts.
  */
 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-                              unsigned long prot_val)
+                       enum page_cache_mode pcm)
 {
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;
 
-       switch (prot_val) {
-       case _PAGE_CACHE_UC:
+       switch (pcm) {
+       case _PAGE_CACHE_MODE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
-       case _PAGE_CACHE_WC:
+       case _PAGE_CACHE_MODE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
-       case _PAGE_CACHE_WB:
+       case _PAGE_CACHE_MODE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }
 @@ -75,13 +75,14 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
-               unsigned long size, unsigned long prot_val, void *caller)
+               unsigned long size, enum page_cache_mode pcm, void *caller)
 {
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
+       enum page_cache_mode new_pcm;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
 @@ -134,39 +135,42 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-                                               prot_val, &new_prot_val);
+                                cachemode2protval(pcm), &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }
 
-       if (prot_val != new_prot_val) {
-               if (!is_new_memtype_allowed(phys_addr, size,
-                               pgprot2cachemode(__pgprot(prot_val)),
-                               pgprot2cachemode(__pgprot(new_prot_val)))) {
+       new_pcm = pgprot2cachemode(__pgprot(new_prot_val));
+
+       if (pcm != new_pcm) {
+               if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
                        printk(KERN_ERR
-               "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+               "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
-                               prot_val, new_prot_val);
+                               pcm, new_pcm);
                        goto err_free_memtype;
                }
-               prot_val = new_prot_val;
+               pcm = new_pcm;
        }
 
-       switch (prot_val) {
-       case _PAGE_CACHE_UC:
+       prot = PAGE_KERNEL_IO;
+       switch (pcm) {
+       case _PAGE_CACHE_MODE_UC:
        default:
-               prot = PAGE_KERNEL_IO_NOCACHE;
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_UC));
                break;
-       case _PAGE_CACHE_UC_MINUS:
-               prot = PAGE_KERNEL_IO_UC_MINUS;
+       case _PAGE_CACHE_MODE_UC_MINUS:
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
                break;
-       case _PAGE_CACHE_WC:
-               prot = PAGE_KERNEL_IO_WC;
+       case _PAGE_CACHE_MODE_WC:
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_WC));
                break;
-       case _PAGE_CACHE_WB:
-               prot = PAGE_KERNEL_IO;
+       case _PAGE_CACHE_MODE_WB:
                break;
        }
 
 @@ -179,7 +183,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
 
-       if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+       if (kernel_map_sync_memtype(phys_addr, size, pcm))
                goto err_free_area;
 
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
 @@ -228,14 +232,14 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
        /*
         * Ideally, this should be:
-        *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *      pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
-       unsigned long val = _PAGE_CACHE_UC_MINUS;
+       enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-       return __ioremap_caller(phys_addr, size, val,
+       return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
@@ -253,7 +257,7 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
        if (pat_enabled)
-               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
@@ -262,7 +266,7 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
@@ -270,7 +274,8 @@ EXPORT_SYMBOL(ioremap_cache);
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
 {
-       return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+       return __ioremap_caller(phys_addr, size,
+                               pgprot2cachemode(__pgprot(prot_val)),
                                __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2f3744f..8f68a83 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
 @@ -462,7 +462,7 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;
 
-       if (kernel_map_sync_memtype(start, size, new_prot) < 0)
+       if (kernel_map_sync_memtype(start, size, new_type) < 0)
                goto out_free;
 
        *type = new_type;
 @@ -560,7 +560,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
  * Change the memory type for the physial address range in kernel identity
  * mapping space if that range is a part of identity map.
  */
-int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+int kernel_map_sync_memtype(u64 base, unsigned long size,
+                           enum page_cache_mode pcm)
 {
        unsigned long id_sz;
 
 @@ -578,11 +579,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
                                __pa(high_memory) - base :
                                size;
 
-       if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+       if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                        "for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
-                       cattr_name(flags),
+                       cattr_name(cachemode2protval(pcm)),
                        base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
 @@ -656,7 +657,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                     flags);
        }
 
-       if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
+       if (kernel_map_sync_memtype(paddr, size,
+                                   pgprot2cachemode(__pgprot(flags))) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
-- 
1.8.4.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.