[Xen-devel] [PATCH 4/6] gnttab: simplify page copying/clearing
... by making {copy,clear}_domain_page() available on architectures other
than x86 as well.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -230,24 +230,6 @@ void unmap_domain_page(const void *ptr)
     local_irq_restore(flags);
 }
 
-void clear_domain_page(unsigned long mfn)
-{
-    void *ptr = map_domain_page(mfn);
-
-    clear_page(ptr);
-    unmap_domain_page(ptr);
-}
-
-void copy_domain_page(unsigned long dmfn, unsigned long smfn)
-{
-    const void *src = map_domain_page(smfn);
-    void *dst = map_domain_page(dmfn);
-
-    copy_page(dst, src);
-    unmap_domain_page(dst);
-    unmap_domain_page(src);
-}
-
 int mapcache_domain_init(struct domain *d)
 {
     struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1643,7 +1643,6 @@ gnttab_transfer(
         if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
         {
             struct page_info *new_page;
-            void *sp, *dp;
 
             new_page = alloc_domheap_page(e, MEMF_no_owner |
                                              MEMF_bits(max_bitsize));
@@ -1653,11 +1652,7 @@
                 goto unlock_and_copyback;
             }
 
-            sp = map_domain_page(mfn);
-            dp = __map_domain_page(new_page);
-            memcpy(dp, sp, PAGE_SIZE);
-            unmap_domain_page(dp);
-            unmap_domain_page(sp);
+            copy_domain_page(page_to_mfn(new_page), mfn);
 
             page->count_info &= ~(PGC_count_mask|PGC_allocated);
             free_domheap_page(page);
@@ -2434,7 +2429,7 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARA
 
     /* Make sure there's no crud left over in the table from the old version. */
     for ( i = 0; i < nr_grant_frames(gt); i++ )
-        memset(gt->shared_raw[i], 0, PAGE_SIZE);
+        clear_page(gt->shared_raw[i]);
 
     /* Restore the first 8 entries (toolstack reserved grants) */
     if ( gt->gt_version != 0 && op.version == 1 )
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1170,6 +1170,26 @@ long do_memory_op(unsigned long cmd, XEN
     return rc;
 }
 
+#ifdef CONFIG_DOMAIN_PAGE
+void clear_domain_page(unsigned long mfn)
+{
+    void *ptr = map_domain_page(mfn);
+
+    clear_page(ptr);
+    unmap_domain_page(ptr);
+}
+
+void copy_domain_page(unsigned long dmfn, unsigned long smfn)
+{
+    const void *src = map_domain_page(smfn);
+    void *dst = map_domain_page(dmfn);
+
+    copy_page(dst, src);
+    unmap_domain_page(dst);
+    unmap_domain_page(src);
+}
+#endif
+
 void destroy_ring_for_helper(
     void **_va, struct page_info *page)
 {
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -264,6 +264,8 @@ static inline lpae_t mfn_to_xen_entry(un
 /* Actual cacheline size on the boot CPU. */
 extern size_t cacheline_bytes;
 
+#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)
+
 /* Functions for flushing medium-sized areas.
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
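For illustration only, and not part of the patch: the two helpers being moved
wrap the same map/operate/unmap pattern that gnttab_transfer() previously
open-coded. Below is a minimal, self-contained userspace sketch of that
pattern, assuming a toy mfn-indexed page array (map_page(), unmap_page(),
fake_mem are hypothetical names) in place of Xen's real map_domain_page()
machinery and mapcache.

/* Toy userspace analogue of the {copy,clear}_domain_page() pattern.
 * "Mapping" an mfn here just returns a pointer into a fake page array;
 * in Xen, map_domain_page()/unmap_domain_page() manage the mapcache and
 * must always be balanced, which is why call sites benefit from a single
 * helper instead of open-coded map/memcpy/unmap sequences. */
#include <assert.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NR_PAGES  4

static unsigned char fake_mem[NR_PAGES][PAGE_SIZE];

static void *map_page(unsigned long mfn)
{
    return fake_mem[mfn];
}

static void unmap_page(const void *ptr)
{
    (void)ptr; /* nothing to tear down in this toy version */
}

static void clear_page_helper(unsigned long mfn)
{
    void *ptr = map_page(mfn);

    memset(ptr, 0, PAGE_SIZE);
    unmap_page(ptr);
}

static void copy_page_helper(unsigned long dmfn, unsigned long smfn)
{
    const void *src = map_page(smfn);
    void *dst = map_page(dmfn);

    memcpy(dst, src, PAGE_SIZE);
    unmap_page(dst);
    unmap_page(src);
}

int main(void)
{
    memset(fake_mem[1], 0xab, PAGE_SIZE);

    copy_page_helper(0, 1);            /* page 0 now mirrors page 1 */
    assert(fake_mem[0][123] == 0xab);

    clear_page_helper(0);              /* ... and is zeroed again */
    assert(fake_mem[0][123] == 0);

    return 0;
}

As the diff shows, putting the real helpers in common code under
CONFIG_DOMAIN_PAGE means an architecture only needs to supply
map_domain_page()/unmap_domain_page() plus clear_page()/copy_page(), which is
why the ARM header gains a copy_page() macro in the last hunk.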
Attachment: gnttab-simplify-page-ops.patch