[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH RFC LINUX v1] xen: arm: enable migration on ARM.
Please CC linux-arm for the non-RFC patches On Wed, 9 Dec 2015, Ian Campbell wrote: > Replace various stub functions with real functionality, including > reestablishing the shared info page and the per-vcpu info pages on > restore. > > Reestablishing the vcpu info page is a little subtle. The > VCPUOP_register_vcpu_info hypercall can only be called on either the > current VCPU or on an offline different VCPU. Since migration occurs > with all VCPUS online they are all therefore online at the point of > resume. > > Therefore we must perform a cross VCPU call to each non-boot VCPU, > which cannot be done in the xen_arch_post_suspend() callback since > that is run from stop_machine() with interrupts disabled. > > Furthermore VCPUOP_register_vcpu_info can only be called once per-VCPU > in a given domain, so it must not be called after a cancelled suspend > (which resumes in the same domain). > > Therefore xen_arch_resume() gains a suspend_cancelled parameter and we > resume the secondary VCPUs there only if needed. It is a bit complex but it seems better than what we do on x86, which is: for_each_possible_cpu(cpu) { bool other_cpu = (cpu != smp_processor_id()); bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL); if (other_cpu && is_up && HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) BUG(); xen_setup_runstate_info(cpu); if (have_vcpu_info_placement) xen_vcpu_setup(cpu); if (other_cpu && is_up && HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) BUG(); } > The VCPU which is running the suspend is resumed earlier in the > xen_arch_post_suspend callback, again conditionally only for > non-cancelled suspends. 
> > Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx> > --- > arch/arm/xen/Makefile | 2 +- > arch/arm/xen/enlighten.c | 54 > +++++++++++++++++++++++++++++++----------------- > arch/arm/xen/suspend.c | 54 > ++++++++++++++++++++++++++++++++++++++++++++++++ > arch/arm/xen/xen-ops.h | 9 ++++++++ > arch/x86/xen/suspend.c | 2 +- > drivers/xen/manage.c | 2 +- > include/xen/xen-ops.h | 2 +- > 7 files changed, 102 insertions(+), 23 deletions(-) > create mode 100644 arch/arm/xen/suspend.c > create mode 100644 arch/arm/xen/xen-ops.h > > diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile > index 1296952..677022c 100644 > --- a/arch/arm/xen/Makefile > +++ b/arch/arm/xen/Makefile > @@ -1 +1 @@ > -obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o > +obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o suspend.o > diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c > index eeeab07..72f314e 100644 > --- a/arch/arm/xen/enlighten.c > +++ b/arch/arm/xen/enlighten.c > @@ -182,10 +182,41 @@ void __init xen_early_init(void) > add_preferred_console("hvc", 0, NULL); > } > > -static int __init xen_guest_init(void) > +static struct shared_info *shared_info_page; > + > +int xen_register_shared_info(void) > { > struct xen_add_to_physmap xatp; > - struct shared_info *shared_info_page = NULL; > + > + /* > + * This function is called on boot and on restore. On boot we > + * allocate this page immediately before calling this function > + * and bail on failure. On resume that allocation must have > + * succeeded or we couldn't be doing a save/restore. 
> + */ > + BUG_ON(!shared_info_page); > + > + xatp.domid = DOMID_SELF; > + xatp.idx = 0; > + xatp.space = XENMAPSPACE_shared_info; > + xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; > + if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) > + BUG(); > + > + HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; > + > + return 0; > +} > + > +void xen_vcpu_restore(void) > +{ > + xen_percpu_init(); > + > + /* XXX TODO: xen_setup_runstate_info(cpu); */ > +} > + > +static int __init xen_guest_init(void) > +{ > struct resource res; > phys_addr_t grant_frames; > > @@ -210,18 +241,12 @@ static int __init xen_guest_init(void) > pr_err("not enough memory\n"); > return -ENOMEM; > } > - xatp.domid = DOMID_SELF; > - xatp.idx = 0; > - xatp.space = XENMAPSPACE_shared_info; > - xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; > - if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) > - BUG(); > > - HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; > + xen_register_shared_info(); > > /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info > * page, we use it in the event channel upcall and in some pvclock > - * related functions. > + * related functions. > * The shared info contains exactly 1 CPU (the boot CPU). The guest > * is required to use VCPUOP_register_vcpu_info to place vcpu info > * for secondary CPUs as they are brought up. > @@ -275,15 +300,6 @@ static int __init xen_pm_init(void) > } > late_initcall(xen_pm_init); > > - > -/* empty stubs */ > -void xen_arch_pre_suspend(void) { } > -void xen_arch_post_suspend(int suspend_cancelled) { } > -void xen_timer_resume(void) { } > -void xen_arch_resume(void) { } > -void xen_arch_suspend(void) { } > - > - > /* In the hypervisor.S file. 
*/ > EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op); > EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op); > diff --git a/arch/arm/xen/suspend.c b/arch/arm/xen/suspend.c > new file mode 100644 > index 0000000..b420758 > --- /dev/null > +++ b/arch/arm/xen/suspend.c > @@ -0,0 +1,54 @@ > +#include <linux/types.h> > +#include <linux/tick.h> > + > +#include <xen/interface/xen.h> > + > +#include <asm/xen/hypercall.h> > + > +#include "xen-ops.h" > + > +void xen_arch_pre_suspend(void) { > + /* Nothing to do */ > +} > + > +void xen_arch_post_suspend(int suspend_cancelled) > +{ > + xen_register_shared_info(); > + if (!suspend_cancelled) > + xen_vcpu_restore(); could we wait and call xen_vcpu_restore for cpu0 from xen_vcpu_notify_resume? > +} > + > +static void xen_vcpu_notify_suspend(void *data) > +{ > + tick_suspend_local(); > +} > + > +static void xen_vcpu_notify_resume(void *data) > +{ > + int suspend_cancelled = *(int *)data; > + > + if (smp_processor_id() == 0) > + return; > + > + /* Boot processor done in post_suspend */ > + if (!suspend_cancelled) > + xen_vcpu_restore(); > + > + /* Boot processor notified via generic timekeeping_resume() */ > + tick_resume_local(); > +} > + > +void xen_arch_suspend(void) > +{ > + on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); > +} > + > +void xen_arch_resume(int suspend_cancelled) > +{ > + on_each_cpu(xen_vcpu_notify_resume, &suspend_cancelled, 1); > +} > + > +void xen_timer_resume(void) > +{ > + /* Nothing to do */ > +} > diff --git a/arch/arm/xen/xen-ops.h b/arch/arm/xen/xen-ops.h > new file mode 100644 > index 0000000..de23e91 > --- /dev/null > +++ b/arch/arm/xen/xen-ops.h > @@ -0,0 +1,9 @@ > +#ifndef XEN_OPS_H > +#define XEN_OPS_H > + > +#include <xen/xen-ops.h> > + > +void xen_register_shared_info(void); > +void xen_vcpu_restore(void); > + > +#endif /* XEN_OPS_H */ > diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c > index feddabd..ce2545a 100644 > --- a/arch/x86/xen/suspend.c > +++ b/arch/x86/xen/suspend.c > @@ -104,7 
+104,7 @@ static void xen_vcpu_notify_suspend(void *data) > tick_suspend_local(); > } > > -void xen_arch_resume(void) > +void xen_arch_resume(int suspend_cancelled) > { > on_each_cpu(xen_vcpu_notify_restore, NULL, 1); > } > diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c > index b6e4c40..a1a64bc 100644 > --- a/drivers/xen/manage.c > +++ b/drivers/xen/manage.c > @@ -156,7 +156,7 @@ static void do_suspend(void) > si.cancelled = 1; > } > > - xen_arch_resume(); > + xen_arch_resume(si.cancelled); > > out_resume: > if (!si.cancelled) > diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h > index e4e214a..d93da50 100644 > --- a/include/xen/xen-ops.h > +++ b/include/xen/xen-ops.h > @@ -12,7 +12,7 @@ void xen_arch_pre_suspend(void); > void xen_arch_post_suspend(int suspend_cancelled); > > void xen_timer_resume(void); > -void xen_arch_resume(void); > +void xen_arch_resume(int suspend_cancelled); > void xen_arch_suspend(void); > > void xen_resume_notifier_register(struct notifier_block *nb); > -- > 2.1.4 > _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.