Re: [Xen-devel] [PATCH RFC XEN v1 08/14] xen: arm: Save and restore arch timer state.
On Wed, 9 Dec 2015, Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
>  xen/arch/arm/vtimer.c                  | 72 ++++++++++++++++++++++++++++++++++
>  xen/include/public/arch-arm/hvm/save.h | 15 ++++++-
>  2 files changed, 86 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
> index 629feb4..9dfc699 100644
> --- a/xen/arch/arm/vtimer.c
> +++ b/xen/arch/arm/vtimer.c
> @@ -22,6 +22,7 @@
>  #include <xen/timer.h>
>  #include <xen/sched.h>
>  #include <xen/perfc.h>
> +#include <xen/hvm/save.h>
>  #include <asm/div64.h>
>  #include <asm/irq.h>
>  #include <asm/time.h>
> @@ -355,6 +356,77 @@ int vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr)
>      }
>  }
>
> +static int timer_save_one(hvm_domain_context_t *h,
> +                          struct vcpu *v, struct vtimer *t,
> +                          int type, uint64_t offset)
> +{
> +    struct hvm_hw_timer ctxt;
> +
> +    ctxt.cval = t->cval;
> +    ctxt.ctl = t->ctl;
> +    ctxt.vtb_offset = offset;
> +    ctxt.type = type;
> +    if ( hvm_save_entry(A15_TIMER, v->vcpu_id, h, &ctxt) != 0 )
> +        return 1;
> +    return 0;
> +}
> +
> +static int timer_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct vcpu *v;
> +
> +    /* Save the state of vtimer and ptimer */
> +    for_each_vcpu( d, v )
> +    {
> +        timer_save_one(h, v, &v->arch.phys_timer,
> +                       HVM_SAVE_TIMER_TYPE_PHYS, d->arch.phys_timer_base.offset);
> +        timer_save_one(h, v, &v->arch.virt_timer,
> +                       HVM_SAVE_TIMER_TYPE_VIRT, d->arch.virt_timer_base.offset);
> +    }

I don't think we should save phys_timer_base.offset and
virt_timer_base.offset: they represent host-specific offsets. I think
we need to save:

phys_timer:
    ctxt.offset = NOW() - d->arch.phys_timer_base.offset;

virt_timer:
    ctxt.offset = READ_SYSREG64(CNTPCT_EL0) - d->arch.virt_timer_base.offset;

> +    return 0;
> +}
> +
> +static int timer_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    int vcpuid;
> +    struct hvm_hw_timer ctxt;
> +    struct vcpu *v;
> +    struct vtimer *t = NULL;
> +
> +    /* Which vcpu is this? */
> +    vcpuid = hvm_load_instance(h);
> +
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
> +                d->domain_id, vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( hvm_load_entry(A15_TIMER, h, &ctxt) != 0 )
> +        return -EINVAL;
> +
> +    if ( ctxt.type == HVM_SAVE_TIMER_TYPE_VIRT )
> +    {
> +        t = &v->arch.virt_timer;
> +        d->arch.virt_timer_base.offset = ctxt.vtb_offset;
> +
> +    }
> +    else
> +    {
> +        t = &v->arch.phys_timer;
> +        d->arch.phys_timer_base.offset = ctxt.vtb_offset;
> +    }

The setting of phys_timer_base.offset and virt_timer_base.offset looks
wrong. I think they should be:

    virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0) - ctxt.offset;
    phys_timer_base.offset = NOW() - ctxt.offset;

You can test this by rebooting the physical machine between save and
restore.
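To make the suggestion concrete, here is a rough, untested sketch of what
the save and load paths could look like with guest-relative offsets. It
assumes the record field is renamed from vtb_offset to offset (as suggested
further down); timer_save_one() would then just do "ctxt.offset = offset;".

    /* Save side: convert the host-specific base offsets into values that
     * are meaningful to the guest before writing the record. */
    static int timer_save(struct domain *d, hvm_domain_context_t *h)
    {
        struct vcpu *v;

        /* Save the state of vtimer and ptimer */
        for_each_vcpu( d, v )
        {
            /* Physical timer: ns of system time since the guest's epoch. */
            timer_save_one(h, v, &v->arch.phys_timer, HVM_SAVE_TIMER_TYPE_PHYS,
                           NOW() - d->arch.phys_timer_base.offset);
            /* Virtual timer: counter ticks elapsed since the guest's epoch. */
            timer_save_one(h, v, &v->arch.virt_timer, HVM_SAVE_TIMER_TYPE_VIRT,
                           READ_SYSREG64(CNTPCT_EL0) - d->arch.virt_timer_base.offset);
        }

        return 0;
    }

    /* Load side: re-anchor the saved guest-relative offsets against the
     * current host clock/counter, so a host reboot in between is harmless. */
        if ( ctxt.type == HVM_SAVE_TIMER_TYPE_VIRT )
        {
            t = &v->arch.virt_timer;
            d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0) - ctxt.offset;
        }
        else
        {
            t = &v->arch.phys_timer;
            d->arch.phys_timer_base.offset = NOW() - ctxt.offset;
        }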
> +    t->cval = ctxt.cval;
> +    t->ctl = ctxt.ctl;
> +    t->v = v;
> +
> +    return 0;
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(A15_TIMER, timer_save, timer_load, 2, HVMSR_PER_VCPU);
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
> index 72474e5..7b92c9c 100644
> --- a/xen/include/public/arch-arm/hvm/save.h
> +++ b/xen/include/public/arch-arm/hvm/save.h
> @@ -80,10 +80,23 @@ struct hvm_hw_cpu
>
>  DECLARE_HVM_SAVE_TYPE(VCPU, 2, struct hvm_hw_cpu);
>
> +#define HVM_SAVE_TIMER_TYPE_VIRT 0
> +#define HVM_SAVE_TIMER_TYPE_PHYS 1
> +
> +struct hvm_hw_timer
> +{
> +    uint64_t vtb_offset; /* XXX Should be abs time since guest booted */

I would call this simply "offset" with a comment

> +    uint32_t ctl;
> +    uint64_t cval;
> +    uint32_t type;

Isn't a uint32_t a bit too much for one bit?

Don't we need to save and restore the PPIs too?

> +};
> +
> +DECLARE_HVM_SAVE_TYPE(A15_TIMER, 3, struct hvm_hw_timer);
> +
>  /*
>   * Largest type-code in use
>   */
> -#define HVM_SAVE_CODE_MAX 2
> +#define HVM_SAVE_CODE_MAX 3
>
>  #endif
>
> --
> 2.6.1
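For illustration only, pulling the naming and sizing comments together,
the public record might end up looking something like the sketch below.
The field names (offset, ppi_state) are hypothetical, and the PPI state
may well belong in the GIC save record rather than here.

    #define HVM_SAVE_TIMER_TYPE_VIRT 0
    #define HVM_SAVE_TIMER_TYPE_PHYS 1

    struct hvm_hw_timer
    {
        /* Time since the guest was started: ns of system time for the
         * physical timer, counter ticks for the virtual timer. */
        uint64_t offset;
        uint64_t cval;
        uint32_t ctl;
        uint8_t  type;       /* HVM_SAVE_TIMER_TYPE_* */
        uint8_t  ppi_state;  /* hypothetical: pending/active state of the timer PPI */
        uint8_t  pad[2];     /* keep the record 64-bit aligned */
    };

    DECLARE_HVM_SAVE_TYPE(A15_TIMER, 3, struct hvm_hw_timer);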