Re: [Xen-devel] [PATCH 1/6] xen/arm: Save and restore support for hvm context hypercall
On 04/10/2014 12:26 PM, Andrew Cooper wrote:
> On 10/04/14 17:48, Wei Huang wrote:
>> From: Jaeyong Yoon <jaeyong.yoo@xxxxxxxxxxx>
>>
>> Implement save/restore of the HVM context hypercall. The HVM context
>> save/restore covers the GIC, timer and VFP registers.
>>
>> Signed-off-by: Evgeny Fedotov <e.fedotov@xxxxxxxxxxx>
>> Signed-off-by: Wei Huang <w1.huang@xxxxxxxxxxx>
>> ---
>>  xen/arch/arm/Makefile                  |   1 +
>>  xen/arch/arm/domctl.c                  |  92 +++++-
>>  xen/arch/arm/hvm.c                     | 505 ++++++++++++++++++++++++++++++++-
>>  xen/arch/arm/save.c                    |  66 +++++
>>  xen/common/Makefile                    |   2 +
>>  xen/include/asm-arm/hvm/support.h      |  29 ++
>>  xen/include/public/arch-arm/hvm/save.h | 136 +++++++++
>>  7 files changed, 826 insertions(+), 5 deletions(-)
>>  create mode 100644 xen/arch/arm/save.c
>>  create mode 100644 xen/include/asm-arm/hvm/support.h
>>
>> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
>> index 63e0460..d9a328c 100644
>> --- a/xen/arch/arm/Makefile
>> +++ b/xen/arch/arm/Makefile
>> @@ -33,6 +33,7 @@ obj-y += hvm.o
>>  obj-y += device.o
>>  obj-y += decode.o
>>  obj-y += processor.o
>> +obj-y += save.o
>>
>>  #obj-bin-y += ....o
>>
>> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
>> index 45974e7..914de29 100644
>> --- a/xen/arch/arm/domctl.c
>> +++ b/xen/arch/arm/domctl.c
>> @@ -9,31 +9,115 @@
>>  #include <xen/lib.h>
>>  #include <xen/errno.h>
>>  #include <xen/sched.h>
>> +#include <xen/hvm/save.h>
>> +#include <xen/guest_access.h>
>>  #include <xen/hypercall.h>
>>  #include <public/domctl.h>
>>
>>  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>>                      XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>>  {
>> +    long ret = 0;
>> +    bool_t copyback = 0;
>> +
>>      switch ( domctl->cmd )
>>      {
>> +    case XEN_DOMCTL_sethvmcontext:
>> +    {
>> +        struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
>> +
>> +        ret = -ENOMEM;
>> +        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
>> +            goto sethvmcontext_out;
>> +
>> +        ret = -EFAULT;
>> +        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0 )
>> +            goto sethvmcontext_out;
>> +
>
> You need to ensure that d != current->domain, or domain_pause() will
> ASSERT().
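
For illustration, a minimal sketch of the guard being asked for here:
domain_pause() ASSERT()s that its target is not the calling domain, so the
handler must reject self-pausing before reaching it. The helper name and
errno choice below are illustrative assumptions, not part of this patch:

    /* Sketch only: must run before domain_pause() in both handlers,
     * because domain_pause() ASSERT()s that d != current->domain. */
    static long reject_self_pause(const struct domain *d)
    {
        if ( d == current->domain )
            return -EINVAL;   /* a domain cannot pause itself here */
        return 0;
    }

Each call site would then do "if ( (ret = reject_self_pause(d)) ) goto
sethvmcontext_out;" (and likewise for gethvmcontext) ahead of the
domain_pause()/domain_unpause() pair.
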
>> +        domain_pause(d);
>> +        ret = hvm_load(d, &c);
>> +        domain_unpause(d);
>> +
>> +    sethvmcontext_out:
>> +        if ( c.data != NULL )
>> +            xfree(c.data);
>> +    }
>> +    break;
>> +
>> +    case XEN_DOMCTL_gethvmcontext:
>> +    {
>> +        struct hvm_domain_context c = { 0 };
>> +
>> +        ret = -EINVAL;
>> +
>> +        c.size = hvm_save_size(d);
>> +
>> +        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
>> +        {
>> +            /* Client is querying for the correct buffer size */
>> +            domctl->u.hvmcontext.size = c.size;
>> +            ret = 0;
>> +            goto gethvmcontext_out;
>> +        }
>> +
>> +        /* Check that the client has a big enough buffer */
>> +        ret = -ENOSPC;
>> +        if ( domctl->u.hvmcontext.size < c.size )
>> +        {
>> +            printk("(gethvmcontext) size error: %d and %d\n",
>> +                   domctl->u.hvmcontext.size, c.size );
>> +            goto gethvmcontext_out;
>> +        }
>> +
>> +        /* Allocate our own marshalling buffer */
>> +        ret = -ENOMEM;
>> +        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
>> +        {
>> +            printk("(gethvmcontext) xmalloc_bytes failed: %d\n", c.size );
>> +            goto gethvmcontext_out;
>> +        }
>> +
>
> Same here.
>
>> +        domain_pause(d);
>> +        ret = hvm_save(d, &c);
>> +        domain_unpause(d);
>> +
>> +        domctl->u.hvmcontext.size = c.cur;
>> +        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
>> +        {
>> +            printk("(gethvmcontext) copy to guest failed\n");
>> +            ret = -EFAULT;
>> +        }
>> +
>> +    gethvmcontext_out:
>> +        copyback = 1;
>> +
>> +        if ( c.data != NULL )
>> +            xfree(c.data);
>> +    }
>> +    break;
>> +
>>      case XEN_DOMCTL_cacheflush:
>>      {
>>          unsigned long s = domctl->u.cacheflush.start_pfn;
>>          unsigned long e = s + domctl->u.cacheflush.nr_pfns;
>>
>>          if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
>> -            return -EINVAL;
>> +            ret = -EINVAL;
>>
>>          if ( e < s )
>> -            return -EINVAL;
>> +            ret = -EINVAL;
>>
>> -        return p2m_cache_flush(d, s, e);
>> +        ret = p2m_cache_flush(d, s, e);
>>      }
>>
>>      default:
>> -        return subarch_do_domctl(domctl, d, u_domctl);
>> +        ret = subarch_do_domctl(domctl, d, u_domctl);
>>      }
>> +
>> +    if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
>> +        ret = -EFAULT;
>> +
>> +    return ret;
>>  }
>
> Both these hypercalls look suspiciously similar to the x86 variants, and
> look to be good candidates to live in common code.

Got it. I will try to merge them with x86, if possible.

> ~Andrew
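
As background on the null-buffer convention used by the gethvmcontext
handler above: callers are expected to issue the domctl twice, first with a
null buffer to learn the required size, then again with an allocated buffer.
A sketch of the caller side, assuming libxc's existing x86 wrapper
xc_domain_hvm_getcontext (which accepts a NULL buffer for the size query and
returns the context size, or -1 on error) is reused unchanged for ARM:

    #include <stdlib.h>
    #include <xenctrl.h>

    /* Sketch: fetch a domain's HVM context via the two-call convention. */
    static uint8_t *fetch_hvm_context(xc_interface *xch, uint32_t domid,
                                      uint32_t *size_out)
    {
        /* First call, NULL buffer: hypervisor reports the needed size. */
        int size = xc_domain_hvm_getcontext(xch, domid, NULL, 0);
        uint8_t *buf;
        int read;

        if ( size <= 0 )
            return NULL;

        if ( (buf = malloc(size)) == NULL )
            return NULL;

        /* Second call does the real copy into our buffer. */
        read = xc_domain_hvm_getcontext(xch, domid, buf, size);
        if ( read <= 0 )
        {
            free(buf);
            return NULL;
        }

        *size_out = read;
        return buf;
    }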