|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC XEN v1 07/14] xen: arm: Save and restore basic per-VCPU state.
XXX TBD No support for arm64 (or even 32-bit guest on arm64).
XXX In particular the handling of save/restore of VFP state doesn't
XXX even compile for arm32. I need to investigate the best way to
XXX reflect the differing possible VFP states in the save record.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
xen/arch/arm/hvm.c | 167 +++++++++++++++++++++++++++++++++
xen/include/public/arch-arm/hvm/save.h | 38 +++++++-
2 files changed, 204 insertions(+), 1 deletion(-)
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 5fd0753..3c59e63 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -7,6 +7,7 @@
#include <xsm/xsm.h>
+#include <xen/hvm/save.h>
#include <public/xen.h>
#include <public/hvm/params.h>
#include <public/hvm/hvm_op.h>
@@ -65,3 +66,169 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
+
+static int cpu_save(struct domain *d, hvm_domain_context_t *h)
+{
+ struct hvm_hw_cpu ctxt;
+ struct vcpu *v;
+
+ /* Save the state of CPU */
+ for_each_vcpu( d, v )
+ {
+ /*
+ * We don't need to save state for a vcpu that is down; the restore
+ * code will leave it down if there is nothing saved.
+ */
+ if ( test_bit(_VPF_down, &v->pause_flags) )
+ continue;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.sctlr = v->arch.sctlr;
+ ctxt.ttbr0 = v->arch.ttbr0;
+ ctxt.ttbr1 = v->arch.ttbr1;
+ ctxt.ttbcr = v->arch.ttbcr;
+
+ ctxt.dacr = v->arch.dacr;
+#ifdef CONFIG_ARM_32 /* XXX 32on64 */
+ ctxt.ifar = v->arch.ifar;
+ ctxt.ifsr = v->arch.ifsr;
+ ctxt.dfar = v->arch.dfar;
+ ctxt.dfsr = v->arch.dfsr;
+#else
+ /* XXX 64-bit */
+#endif
+
+#ifdef CONFIG_ARM_32
+ ctxt.mair0 = v->arch.mair0;
+ ctxt.mair1 = v->arch.mair1;
+#else
+ ctxt.mair0 = v->arch.mair;
+#endif
+ /* Control Registers */
+ ctxt.actlr = v->arch.actlr;
+ ctxt.sctlr = v->arch.sctlr;
+ ctxt.cpacr = v->arch.cpacr;
+
+ ctxt.contextidr = v->arch.contextidr;
+ ctxt.tpidr_el0 = v->arch.tpidr_el0;
+ ctxt.tpidr_el1 = v->arch.tpidr_el1;
+ ctxt.tpidrro_el0 = v->arch.tpidrro_el0;
+
+ /* CP 15 */
+ ctxt.csselr = v->arch.csselr;
+ ctxt.mpidr = v->arch.vmpidr;
+
+ ctxt.afsr0 = v->arch.afsr0;
+ ctxt.afsr1 = v->arch.afsr1;
+ ctxt.vbar = v->arch.vbar;
+ ctxt.par = v->arch.par;
+ ctxt.teecr = v->arch.teecr;
+ ctxt.teehbr = v->arch.teehbr;
+#ifdef CONFIG_ARM_32 /* XXX 32on64 */
+ ctxt.joscr = v->arch.joscr;
+ ctxt.jmcr = v->arch.jmcr;
+#endif
+
+ memset(&ctxt.core_regs, 0, sizeof(ctxt.core_regs));
+
+ /* get guest core registers */
+ vcpu_regs_hyp_to_user(v, &ctxt.core_regs);
+
+ /* check VFP state size before dumping */
+ BUILD_BUG_ON(sizeof(v->arch.vfp) > sizeof (ctxt.vfp));
+ memcpy((void*) &ctxt.vfp, (void*) &v->arch.vfp, sizeof(v->arch.vfp));
+
+ if ( hvm_save_entry(VCPU, v->vcpu_id, h, &ctxt) != 0 )
+ return 1;
+ }
+ return 0;
+}
+
+static int cpu_load(struct domain *d, hvm_domain_context_t *h)
+{
+ int vcpuid;
+ struct hvm_hw_cpu ctxt;
+ struct vcpu *v;
+
+ /* Which vcpu is this? */
+ vcpuid = hvm_load_instance(h);
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+ {
+ dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
+ d->domain_id, vcpuid);
+ return -EINVAL;
+ }
+
+ if ( hvm_load_entry(VCPU, h, &ctxt) != 0 )
+ return -EINVAL;
+
+ v->arch.sctlr = ctxt.sctlr;
+ v->arch.ttbr0 = ctxt.ttbr0;
+ v->arch.ttbr1 = ctxt.ttbr1;
+ v->arch.ttbcr = ctxt.ttbcr;
+
+ v->arch.dacr = ctxt.dacr;
+#ifdef CONFIG_ARM_32 /* XXX 32on64 */
+ v->arch.ifar = ctxt.ifar;
+ v->arch.ifsr = ctxt.ifsr;
+ v->arch.dfar = ctxt.dfar;
+ v->arch.dfsr = ctxt.dfsr;
+#else
+ /* XXX 64-bit */
+#endif
+
+#ifdef CONFIG_ARM_32
+ v->arch.mair0 = ctxt.mair0;
+ v->arch.mair1 = ctxt.mair1;
+#else
+ v->arch.mair = ctxt.mair0;
+#endif
+
+ /* Control Registers */
+ v->arch.actlr = ctxt.actlr;
+ v->arch.cpacr = ctxt.cpacr;
+ v->arch.contextidr = ctxt.contextidr;
+ v->arch.tpidr_el0 = ctxt.tpidr_el0;
+ v->arch.tpidr_el1 = ctxt.tpidr_el1;
+ v->arch.tpidrro_el0 = ctxt.tpidrro_el0;
+
+ /* CP 15 */
+ v->arch.csselr = ctxt.csselr;
+ v->arch.vmpidr = ctxt.mpidr;
+
+ v->arch.afsr0 = ctxt.afsr0;
+ v->arch.afsr1 = ctxt.afsr1;
+ v->arch.vbar = ctxt.vbar;
+ v->arch.par = ctxt.par;
+ v->arch.teecr = ctxt.teecr;
+ v->arch.teehbr = ctxt.teehbr;
+#ifdef CONFIG_ARM_32 /* XXX 32on64 */
+ v->arch.joscr = ctxt.joscr;
+ v->arch.jmcr = ctxt.jmcr;
+#endif
+
+ /* set guest core registers */
+ vcpu_regs_user_to_hyp(v, &ctxt.core_regs);
+
+ /* restore VFP */
+ BUILD_BUG_ON(sizeof(v->arch.vfp) > sizeof (ctxt.vfp));
+ memcpy(&v->arch.vfp, &ctxt.vfp, sizeof(v->arch.vfp));
+
+ v->is_initialised = 1;
+ clear_bit(_VPF_down, &v->pause_flags);
+
+ /* we don't need vcpu_wake(v) here */
+ return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VCPU, cpu_save, cpu_load, 1, HVMSR_PER_VCPU);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/arch-arm/hvm/save.h
b/xen/include/public/arch-arm/hvm/save.h
index 6f1be37..72474e5 100644
--- a/xen/include/public/arch-arm/hvm/save.h
+++ b/xen/include/public/arch-arm/hvm/save.h
@@ -44,10 +44,46 @@ struct hvm_save_header
DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
+struct hvm_hw_cpu
+{
+ uint64_t vfp[34]; /* Vector floating pointer */
+ /* VFP v3 state is 34x64 bit, VFP v4 is not yet supported */
+
+ /* Guest core registers */
+ struct vcpu_guest_core_regs core_regs;
+
+ uint32_t sctlr, ttbcr;
+ uint64_t ttbr0, ttbr1;
+
+ uint32_t ifar, dfar;
+ uint32_t ifsr, dfsr;
+ uint32_t dacr;
+ uint64_t par;
+
+ uint64_t mair0, mair1;
+ uint64_t tpidr_el0;
+ uint64_t tpidr_el1;
+ uint64_t tpidrro_el0;
+ uint64_t vbar;
+
+ /* Control Registers */
+ uint32_t actlr;
+ uint32_t cpacr;
+ uint32_t afsr0, afsr1;
+ uint32_t contextidr;
+ uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
+ uint32_t joscr, jmcr;
+ /* CP 15 */
+ uint32_t csselr;
+ uint64_t mpidr;
+};
+
+DECLARE_HVM_SAVE_TYPE(VCPU, 2, struct hvm_hw_cpu);
+
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 1
+#define HVM_SAVE_CODE_MAX 2
#endif
--
2.6.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |