diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
index 69f817d1e68a..5f9357632164 100644
--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -159,13 +159,13 @@ static inline size_t read_dcache_line_bytes(void)
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
-static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+static inline void invalidate_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
     unsigned long idx = 0;
 
     if ( !size )
-        return 0;
+        return;
 
     /* Passing a region that wraps around is illegal */
     ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
@@ -188,17 +188,15 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
 
     dsb(sy);           /* So we know the flushes happen before continuing */
-
-    return 0;
 }
 
-static inline int clean_dcache_va_range(const void *p, unsigned long size)
+static inline void clean_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
     unsigned long idx = 0;
 
     if ( !size )
-        return 0;
+        return;
 
     /* Passing a region that wraps around is illegal */
     ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
@@ -211,18 +209,16 @@ static inline int clean_dcache_va_range(const void *p, unsigned long size)
           idx += dcache_line_bytes, size -= dcache_line_bytes )
         asm volatile (__clean_dcache_one(0) : : "r" (p + idx));
     dsb(sy);           /* So we know the flushes happen before continuing */
-    /* ARM callers assume that dcache_* functions cannot fail. */
-    return 0;
 }
 
-static inline int clean_and_invalidate_dcache_va_range
+static inline void clean_and_invalidate_dcache_va_range
     (const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
     unsigned long idx = 0;
 
     if ( !size )
-        return 0;
+        return;
 
     /* Passing a region that wraps around is illegal */
     ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
@@ -235,8 +231,6 @@ static inline int clean_and_invalidate_dcache_va_range
           idx += dcache_line_bytes, size -= dcache_line_bytes )
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
     dsb(sy);           /* So we know the flushes happen before continuing */
-    /* ARM callers assume that dcache_* functions cannot fail. */
-    return 0;
 }
 
 /* Macros for flushing a single small item.  The predicate is always
@@ -266,6 +260,20 @@ static inline int clean_and_invalidate_dcache_va_range
         : : "r" (_p), "m" (*_p));                       \
 } while (0)
 
+/*
+ * Dispatch a GNTTAB_CACHE_* grant-table cache-flush request to the matching
+ * dcache maintenance helper.  Fixed vs. the submitted patch: the helpers must
+ * be called with the parameter 'p', not the (undeclared) caller-local 'v'.
+ */
+static inline int arch_grant_cache_flush(unsigned int op, const void *p,
+                                         unsigned long size)
+{
+    if ( (op & GNTTAB_CACHE_INVAL) && (op & GNTTAB_CACHE_CLEAN) )
+        clean_and_invalidate_dcache_va_range(p, size);
+    else if ( op & GNTTAB_CACHE_INVAL )
+        invalidate_dcache_va_range(p, size);
+    else if ( op & GNTTAB_CACHE_CLEAN )
+        clean_dcache_va_range(p, size);
+
+    /* ARM callers assume that dcache_* functions cannot fail. */
+    return 0;
+}
+
 /*
  * Write a pagetable entry.
  *
diff --git a/xen/arch/x86/include/asm/flushtlb.h b/xen/arch/x86/include/asm/flushtlb.h
index bb0ad58db49b..7c71fe377757 100644
--- a/xen/arch/x86/include/asm/flushtlb.h
+++ b/xen/arch/x86/include/asm/flushtlb.h
@@ -182,21 +182,21 @@ void flush_area_mask(const cpumask_t *mask, const void *va,
 }
 
 static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
-static inline int invalidate_dcache_va_range(const void *p,
-                                             unsigned long size)
-{ return -EOPNOTSUPP; }
-static inline int clean_and_invalidate_dcache_va_range(const void *p,
-                                                       unsigned long size)
+
+/*
+ * x86 can only clean (write back) the cache; pure-invalidate requests are
+ * rejected as on the pre-patch code.  Fixed vs. the submitted patch: the
+ * declaration was missing its 'int' return type, and the body referenced
+ * 'cflush->op' although only the plain 'op' parameter is in scope here.
+ */
+static inline int arch_grant_cache_flush(unsigned int op, const void *p,
+                                         unsigned long size)
 {
-    unsigned int order = get_order_from_bytes(size);
+    unsigned int order;
+
+    if ( !(op & GNTTAB_CACHE_CLEAN) )
+        return -EOPNOTSUPP;
+
+    order = get_order_from_bytes(size);
     /* sub-page granularity support needs to be added if necessary */
     flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
+
     return 0;
 }
-static inline int clean_dcache_va_range(const void *p, unsigned long size)
-{
-    return clean_and_invalidate_dcache_va_range(p, size);
-}
 
 unsigned int guest_flush_tlb_flags(const struct domain *d);
 void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 5721eab22561..8615ea144bb3 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3572,14 +3572,7 @@ static int _cache_flush(const gnttab_cache_flush_t *cflush, grant_ref_t *cur_ref
     v = map_domain_page(mfn);
     v += cflush->offset;
 
-    if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
-        ret = clean_and_invalidate_dcache_va_range(v, cflush->length);
-    else if ( cflush->op & GNTTAB_CACHE_INVAL )
-        ret = invalidate_dcache_va_range(v, cflush->length);
-    else if ( cflush->op & GNTTAB_CACHE_CLEAN )
-        ret = clean_dcache_va_range(v, cflush->length);
-    else
-        ret = 0;
+    ret = arch_grant_cache_flush(cflush->op, v, cflush->length);
 
     if ( d != owner )
     {