
[Xen-devel] [RFC v3 2/6] xen/arm: Add save/restore support for ARM GIC V2



This patch implements save/restore support for the ARM guest GIC.
Two types of GICv2 state are saved separately: 1) VGICD_* contains
the GIC distributor state from the guest VM's view; 2) GICH_* is
the GIC virtual interface control state from the hypervisor's
perspective.
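
Both records are registered as HVMSR_PER_VCPU, so a saved image
carries one instance of each per VCPU; a two-VCPU guest, for
example, gains four new records in total.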

Signed-off-by: Evgeny Fedotov <e.fedotov@xxxxxxxxxxx>
Signed-off-by: Wei Huang <w1.huang@xxxxxxxxxxx>
---
 xen/arch/arm/vgic.c                    |  171 ++++++++++++++++++++++++++++++++
 xen/include/public/arch-arm/hvm/save.h |   34 ++++++-
 2 files changed, 204 insertions(+), 1 deletion(-)
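
Note for reviewers (illustrative only, not part of the patch): once
these records exist, a host-side consumer can locate them by walking
the context buffer returned by xc_domain_hvm_getcontext(). The sketch
below assumes the generic record framing from
xen/include/public/hvm/save.h (a typecode/instance/length descriptor
followed by the payload); the local struct merely mirrors that header
so the sketch builds standalone.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors struct hvm_save_descriptor from the public headers. */
    struct save_desc {
        uint16_t typecode;   /* 2 = VGICD_V2, 3 = GICH_V2 in this patch */
        uint16_t instance;   /* vcpu_id for HVMSR_PER_VCPU records */
        uint32_t length;     /* payload length in bytes */
    };

    /* Walk a context buffer and report the new per-vcpu GIC records. */
    static void dump_gic_records(const uint8_t *buf, uint32_t size)
    {
        uint32_t off = 0;

        while ( off + sizeof(struct save_desc) <= size )
        {
            struct save_desc desc;

            memcpy(&desc, buf + off, sizeof(desc));
            off += sizeof(desc);

            if ( desc.typecode == 2 || desc.typecode == 3 )
                printf("vcpu%u: %s record, %u bytes\n",
                       (unsigned)desc.instance,
                       desc.typecode == 2 ? "VGICD_V2" : "GICH_V2",
                       desc.length);

            off += desc.length;  /* skip payload to the next descriptor */
        }
    }

(The buffer itself would come from something like
xc_domain_hvm_getcontext(xch, domid, buf, size); calling it first with
a NULL buffer to learn the required size is the usual libxc pattern.)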

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 4cf6470..505e944 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -24,6 +24,7 @@
 #include <xen/softirq.h>
 #include <xen/irq.h>
 #include <xen/sched.h>
+#include <xen/hvm/save.h>
 
 #include <asm/current.h>
 
@@ -73,6 +74,110 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
         return NULL;
 }
 
+/* Save guest VM's distributor info into a context to support domain
+ * save/restore. Such info represents the guest VM's view of its GIC
+ * distributor (GICD_*).
+ */
+static int hvm_vgicd_save(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_vgicd_v2 ctxt;
+    struct vcpu *v;
+    struct vgic_irq_rank *rank;
+    int rc = 0;
+
+    /* Save the state for each VCPU */
+    for_each_vcpu( d, v )
+    {
+        rank = &v->arch.vgic.private_irqs;
+
+        /* IENABLE, IACTIVE, IPEND, PENDSGI */
+        ctxt.ienable = rank->ienable;
+        ctxt.iactive = rank->iactive;
+        ctxt.ipend = rank->ipend;
+        ctxt.pendsgi = rank->pendsgi;
+
+        /* ICFG */
+        ctxt.icfg[0] = rank->icfg[0];
+        ctxt.icfg[1] = rank->icfg[1];
+
+        /* IPRIORITY */
+        BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof(ctxt.ipriority));
+        memcpy(ctxt.ipriority, rank->ipriority, sizeof(rank->ipriority));
+
+        /* ITARGETS */
+        BUILD_BUG_ON(sizeof(rank->itargets) != sizeof(ctxt.itargets));
+        memcpy(ctxt.itargets, rank->itargets, sizeof(rank->itargets));
+
+        if ( (rc = hvm_save_entry(VGICD_V2, v->vcpu_id, h, &ctxt)) != 0 )
+            return rc;
+    }
+
+    return rc;
+}
+
+/* Load guest VM's distributor info from a context to support domain
+ * save/restore. The info is loaded into vgic_irq_rank.
+ */
+static int hvm_vgicd_load(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_vgicd_v2 ctxt;
+    struct vgic_irq_rank *rank;
+    struct vcpu *v;
+    int vcpuid;
+    unsigned long enable_bits;
+    struct pending_irq *p;
+    unsigned int irq = 0;
+    int rc = 0;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_ERR, "HVM restore: dom%u has no vcpu%u\n",
+                d->domain_id, vcpuid);
+        return -EINVAL;
+    }
+
+    if ( (rc = hvm_load_entry(VGICD_V2, h, &ctxt)) != 0 )
+        return rc;
+
+    /* Restore PPI states */
+    rank = &v->arch.vgic.private_irqs;
+
+    /* IENABLE, IACTIVE, IPEND, PENDSGI */
+    rank->ienable = ctxt.ienable;
+    rank->iactive = ctxt.iactive;
+    rank->ipend = ctxt.ipend;
+    rank->pendsgi = ctxt.pendsgi;
+
+    /* ICFG */
+    rank->icfg[0] = ctxt.icfg[0];
+    rank->icfg[1] = ctxt.icfg[1];
+
+    /* IPRIORITY */
+    BUILD_BUG_ON(sizeof(rank->ipriority) != sizeof(ctxt.ipriority));
+    memcpy(rank->ipriority, ctxt.ipriority, sizeof(rank->ipriority));
+
+    /* ITARGETS */
+    BUILD_BUG_ON(sizeof(rank->itargets) != sizeof(ctxt.itargets));
+    memcpy(rank->itargets, ctxt.itargets, sizeof(rank->itargets));
+
+    /* Mark IRQs as enabled by iterating over the just-restored ienable
+     * bits. This step is required; otherwise events won't be received
+     * by the VM after restore. */
+    enable_bits = ctxt.ienable;
+    while ( (irq = find_next_bit(&enable_bits, 32, irq)) < 32 )
+    {
+        p = irq_to_pending(v, irq);
+        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        irq++;
+    }
+
+    return 0;
+}
+HVM_REGISTER_SAVE_RESTORE(VGICD_V2, hvm_vgicd_save, hvm_vgicd_load,
+                          1, HVMSR_PER_VCPU);
+
 int domain_vgic_init(struct domain *d)
 {
     int i;
@@ -759,6 +864,72 @@ out:
         smp_send_event_check_mask(cpumask_of(v->processor));
 }
 
+/* Save GIC virtual control state into a context to support save/restore.
+ * The info represents most of the GICH_* registers. */
+static int hvm_gich_save(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_gich_v2 ctxt;
+    struct vcpu *v;
+    int rc = 0;
+
+    /* Save the virtual GIC control state for each VCPU */
+    for_each_vcpu( d, v )
+    {
+        ctxt.gic_hcr = v->arch.gic_hcr;
+        ctxt.gic_vmcr = v->arch.gic_vmcr;
+        ctxt.gic_apr = v->arch.gic_apr;
+
+        /* Save list registers and masks */
+        BUILD_BUG_ON(sizeof(v->arch.gic_lr) > sizeof(ctxt.gic_lr));
+        memcpy(ctxt.gic_lr, v->arch.gic_lr, sizeof(v->arch.gic_lr));
+
+        ctxt.lr_mask = v->arch.lr_mask;
+        ctxt.event_mask = v->arch.event_mask;
+
+        if ( (rc = hvm_save_entry(GICH_V2, v->vcpu_id, h, &ctxt)) != 0 )
+            return rc;
+    }
+
+    return rc;
+}
+
+/* Restore GIC virtual control state from a context to support save/restore */
+static int hvm_gich_load(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct hvm_arm_gich_v2 ctxt;
+    struct vcpu *v;
+    int rc = 0;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_ERR, "HVM restore: dom%u has no vcpu%u\n", d->domain_id,
+                vcpuid);
+        return -EINVAL;
+    }
+
+    if ( (rc = hvm_load_entry(GICH_V2, h, &ctxt)) != 0 )
+        return rc;
+
+    v->arch.gic_hcr = ctxt.gic_hcr;
+    v->arch.gic_vmcr = ctxt.gic_vmcr;
+    v->arch.gic_apr = ctxt.gic_apr;
+
+    /* Restore list registers and masks */
+    BUILD_BUG_ON(sizeof(v->arch.gic_lr) > sizeof(ctxt.gic_lr));
+    memcpy(v->arch.gic_lr, ctxt.gic_lr, sizeof(v->arch.gic_lr));
+
+    v->arch.lr_mask = ctxt.lr_mask;
+    v->arch.event_mask = ctxt.event_mask;
+
+    return rc;
+}
+
+HVM_REGISTER_SAVE_RESTORE(GICH_V2, hvm_gich_save, hvm_gich_load, 1,
+                          HVMSR_PER_VCPU);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
index 8312e7b..421a6f6 100644
--- a/xen/include/public/arch-arm/hvm/save.h
+++ b/xen/include/public/arch-arm/hvm/save.h
@@ -40,10 +40,42 @@ struct hvm_save_header
 };
 DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
 
+/* Guest's view of the GIC distributor (per-vcpu)
+ *   - Based on GICv2 (see "struct vgic_irq_rank")
+ *   - Stores the guest's view of the GIC distributor
+ *   - Only SGIs and PPIs are supported for DomU (DomU doesn't handle SPIs)
+ */
+struct hvm_arm_vgicd_v2
+{
+    uint32_t ienable;
+    uint32_t iactive;
+    uint32_t ipend;
+    uint32_t pendsgi;
+    uint32_t icfg[2];
+    uint32_t ipriority[8];
+    uint32_t itargets[8];
+};
+DECLARE_HVM_SAVE_TYPE(VGICD_V2, 2, struct hvm_arm_vgicd_v2);
+
+/* Info used by the hypervisor to manage the guest (per-vcpu)
+ *   - Based on GICv2
+ *   - Mainly stores the GICH_* registers
+ */
+struct hvm_arm_gich_v2
+{
+    uint32_t gic_hcr;
+    uint32_t gic_vmcr;
+    uint32_t gic_apr;
+    uint32_t gic_lr[64];
+    uint64_t event_mask;
+    uint64_t lr_mask;
+};
+DECLARE_HVM_SAVE_TYPE(GICH_V2, 3, struct hvm_arm_gich_v2);
+
 /*
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 1
+#define HVM_SAVE_CODE_MAX 3
 
 #endif
 
-- 
1.7.9.5

