[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC/WIPv2 1/6] Introduce XENMEM_transfer operation
New operation reassigns pages from one domain to the other mapping them at exactly the same GFNs in the destination domain. Pages mapped more than once (e.g. granted pages) are being copied. Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx> --- xen/common/memory.c | 178 ++++++++++++++++++++++++++++++++++++++++++++ xen/include/public/memory.h | 32 +++++++- 2 files changed, 209 insertions(+), 1 deletion(-) diff --git a/xen/common/memory.c b/xen/common/memory.c index 2e3225d..653e117 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -578,6 +578,180 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) return rc; } +static long memory_transfer(XEN_GUEST_HANDLE_PARAM(xen_memory_transfer_t) arg) +{ + long rc = 0; + struct xen_memory_transfer trans; + struct domain *source_d, *dest_d; + unsigned long mfn, gmfn, last_gmfn; + p2m_type_t p2mt; + struct page_info *page, *new_page; + char *sp, *dp; + int copying; + + if ( copy_from_guest(&trans, arg, 1) ) + return -EFAULT; + + source_d = rcu_lock_domain_by_any_id(trans.source_domid); + if ( source_d == NULL ) + { + rc = -ESRCH; + goto fail_early; + } + + if ( source_d->is_dying ) + { + rc = -EINVAL; + rcu_unlock_domain(source_d); + goto fail_early; + } + + dest_d = rcu_lock_domain_by_any_id(trans.dest_domid); + if ( dest_d == NULL ) + { + rc = -ESRCH; + rcu_unlock_domain(source_d); + goto fail_early; + } + + if ( dest_d->is_dying ) + { + rc = -EINVAL; + goto fail; + } + + last_gmfn = trans.gmfn_start + trans.gmfn_count; + for ( gmfn = trans.gmfn_start; gmfn < last_gmfn; gmfn++ ) + { + page = get_page_from_gfn(source_d, gmfn, &p2mt, 0); + if ( !page ) + { + continue; + } + + mfn = page_to_mfn(page); + if ( !mfn_valid(mfn) ) + { + put_page(page); + continue; + } + + copying = 0; + + if ( is_xen_heap_mfn(mfn) ) + { + put_page(page); + continue; + } + + /* Page table always worth copying */ + if ( (page->u.inuse.type_info & PGT_l4_page_table) || + (page->u.inuse.type_info & 
PGT_l3_page_table) || + (page->u.inuse.type_info & PGT_l2_page_table) || + (page->u.inuse.type_info & PGT_l1_page_table) ) + copying = 1; + + /* + * A normal page is supposed to have count_info = 2 ( 1 from the domain + * and 1 from get_page_from_gfn() above ). If the condition is not met + * copy the page. These are granted pages, vcpu info pages, ... + */ + if ( (page->count_info & (PGC_count_mask|PGC_allocated)) != + (2 | PGC_allocated) ) + copying = 1; + + if ( copying ) + { + new_page = alloc_domheap_page(dest_d, 0); + if ( !new_page ) + { + gdprintk(XENLOG_INFO, "Failed to alloc free page instead of " + "%lx\n", mfn); + rc = -ENOMEM; + put_page(page); + goto fail; + } + if ( (page->u.inuse.type_info & PGT_l4_page_table) ) + new_page->u.inuse.type_info = PGT_l4_page_table; + + if ( (page->u.inuse.type_info & PGT_l3_page_table) ) + new_page->u.inuse.type_info = PGT_l3_page_table; + + if ( (page->u.inuse.type_info & PGT_l2_page_table) ) + new_page->u.inuse.type_info = PGT_l2_page_table; + + if ( (page->u.inuse.type_info & PGT_l1_page_table) ) + new_page->u.inuse.type_info = PGT_l1_page_table; + + if ( (page->u.inuse.type_info & PGT_pinned) ) + set_bit(_PGT_pinned, &new_page->u.inuse.type_info); + + sp = map_domain_page(mfn); + mfn = page_to_mfn(new_page); + dp = map_domain_page(mfn); + memcpy(dp, sp, PAGE_SIZE); + unmap_domain_page(dp); + unmap_domain_page(sp); + put_page(page); + } + else + { + new_page = page; + spin_lock(&source_d->page_alloc_lock); + page_set_owner(page, NULL); + page_list_del(page, &source_d->page_list); + /* + * Don't use domain_adjust_tot_pages() here as we're reassigning + * the page to avoid increasing outstanding_pages counter. 
+ */ + source_d->tot_pages -= 1; + if ( unlikely(!source_d->tot_pages) ) + put_domain(source_d); + guest_physmap_remove_page(source_d, gmfn, mfn, 0); + spin_unlock(&source_d->page_alloc_lock); + put_page(page); + if ( assign_pages(dest_d, page, 0, 0) ) + { + gdprintk(XENLOG_INFO, "Failed to assign page to destination domain" + " mfn: %lx\n", mfn); + rc = -EFAULT; + goto fail; + } + } + + if ( guest_physmap_add_page(dest_d, gmfn, mfn, 0) ) { + gdprintk(XENLOG_INFO, "Failed to add page to domain's physmap" + " mfn: %lx\n", mfn); + rc = -EFAULT; + goto fail; + } + + trans.nr_transferred++; + + if ( hypercall_preempt_check() && (gmfn + 1 < last_gmfn) ) + { + trans.gmfn_start = gmfn + 1; + rcu_unlock_domain(source_d); + rcu_unlock_domain(dest_d); + if ( __copy_field_to_guest(arg, &trans, gmfn_start) ) + return -EFAULT; + if ( __copy_field_to_guest(arg, &trans, nr_transferred) ) + return -EFAULT; + return hypercall_create_continuation( + __HYPERVISOR_memory_op, "lh", XENMEM_transfer, arg); + } + } + + fail: + rcu_unlock_domain(dest_d); + rcu_unlock_domain(source_d); + fail_early: + if ( __copy_field_to_guest(arg, &trans, nr_transferred) ) + rc = -EFAULT; + + return rc; +} + static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp, unsigned int start) @@ -781,6 +955,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t)); break; + case XENMEM_transfer: + rc = memory_transfer(guest_handle_cast(arg, xen_memory_transfer_t)); + break; + case XENMEM_maximum_ram_page: rc = max_page; break; diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h index db961ec..1414012 100644 --- a/xen/include/public/memory.h +++ b/xen/include/public/memory.h @@ -570,10 +570,40 @@ DEFINE_XEN_GUEST_HANDLE(vnuma_topology_info_t); * vNUMA topology from hypervisor. */ #define XENMEM_get_vnumainfo 26 +/* + * Transfer pages from one domain to another. 
Pages are unmapped from + * source domain and mapped at exactly the same GFNs to the destination + * domain. + * + * If a particular page is mapped more than once a new page is being allocated + * for the destination domain and its content is being copied instead of + * reassigning. The original page remains mapped to the source domain. + * + * The caller has to be privileged. + */ + +#define XENMEM_transfer 27 +struct xen_memory_transfer { + /* + * [IN] Transfer details. + */ + domid_t source_domid; /* steal pages from */ + domid_t dest_domid; /* assign pages to */ + + xen_pfn_t gmfn_start; /* start from gmfn */ + uint64_aligned_t gmfn_count; /* how many pages to steal */ + + /* + * [OUT] Number of transferred pages including copies. + */ + xen_ulong_t nr_transferred; +}; +typedef struct xen_memory_transfer xen_memory_transfer_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_transfer_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ -/* Next available subop number is 27 */ +/* Next available subop number is 28 */ #endif /* __XEN_PUBLIC_MEMORY_H__ */ -- 1.9.3 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |