
[Xen-devel] [PATCH v2 3/9] xen/arm: support for guest SGI



Trap guest writes to GICD_SGIR, parse the request and inject the SGI into the
right guest vcpus.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
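For context (not part of the patch itself): a minimal guest-side sketch of the
kind of GICD_SGIR write that the handler below decodes. The distributor base
address and the send_sgi() helper are made up for illustration (a real guest
reads the base from its device tree); the bitfield layout matches the
GICD_SGI_* masks added to gic.h by this patch.

    #include <stdint.h>

    /* Assumed GIC distributor base, for illustration only. */
    #define GICD_BASE         0x2c001000UL
    #define GICD_SGIR_OFFSET  0xf00        /* Software Generated Interrupt Register */

    /* TargetListFilter, bits [25:24] of GICD_SGIR */
    #define SGI_TARGET_LIST   (0u << 24)   /* CPUs named in CPUTargetList */
    #define SGI_TARGET_OTHERS (1u << 24)   /* every CPU except the writer */
    #define SGI_TARGET_SELF   (2u << 24)   /* the writing CPU only */

    static void send_sgi(uint32_t filter, uint8_t target_list, uint8_t sgi_id)
    {
        volatile uint32_t *sgir =
            (volatile uint32_t *)(GICD_BASE + GICD_SGIR_OFFSET);

        /* CPUTargetList sits in bits [23:16], the SGI number in bits [3:0];
         * this is exactly what vgic_distr_mmio_write() parses below. */
        *sgir = filter | ((uint32_t)target_list << 16) | (sgi_id & 0xfU);
    }

    /* e.g. send_sgi(SGI_TARGET_LIST, 0x5, 1) raises SGI 1 on vcpu0 and vcpu2. */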
 xen/arch/arm/vgic.c       |   53 ++++++++++++++++++++++++++++++++++++++++----
 xen/include/asm-arm/gic.h |    3 ++
 2 files changed, 51 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index b30da78..acdd732 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -370,6 +370,7 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
 
 static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
 {
+    struct domain *d = v->domain;
     struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     register_t *r = select_user_reg(regs, dabt.reg);
@@ -498,11 +499,53 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
         goto write_ignore;
 
     case GICD_SGIR:
-        if ( dabt.size != 2 ) goto bad_width;
-        printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n",
-               *r, gicd_reg - GICD_ICFGR);
-        return 0;
-
+        {
+            cpumask_t vcpu_mask;
+            int virtual_irq;
+            int filter;
+            int vcpuid;
+            struct vcpu *vt;
+            int i;
+
+            if ( dabt.size != 2 ) goto bad_width;
+
+            filter = (*r & GICD_SGI_TARGET_LIST_MASK);
+            virtual_irq = (*r & GICD_SGI_INTID_MASK);
+
+            cpumask_clear(&vcpu_mask);
+            switch ( filter )
+            {
+                case GICD_SGI_TARGET_LIST:
+                    cpumask_bits(&vcpu_mask)[0] = (*r & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT;
+                    break;
+                case GICD_SGI_TARGET_OTHERS:
+                    for ( i = 0; i < d->max_vcpus; i++ )
+                    {
+                        if ( i != current->vcpu_id && d->vcpu[i] != NULL )
+                            cpumask_set_cpu(i, &vcpu_mask);
+                    }
+                    break;
+                case GICD_SGI_TARGET_SELF:
+                    cpumask_set_cpu(current->vcpu_id, &vcpu_mask);
+                    break;
+                default:
+                    printk("vGICD: unhandled GICD_SGIR write %#"PRIregister" with wrong TargetListFilter field\n", *r);
+                    return 0;
+            }
+
+            for_each_cpu( vcpuid, &vcpu_mask )
+            {
+                if ( vcpuid >= d->max_vcpus || (vt = d->vcpu[vcpuid]) == NULL ||
+                        virtual_irq >= 16 )
+                {
+                    printk("vGICD: GICD_SGIR write %#"PRIregister", wrong CPUTargetList\n", *r);
+                    return 0;
+                }
+                vgic_vcpu_inject_irq(vt, virtual_irq, 1);
+            }
+            return 1;
+        }
+
     case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
         if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width;
         printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 24c0d5c..b23b747 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -51,12 +51,15 @@
 #define GICD_SPENDSGIRN (0xF2C/4)
 #define GICD_ICPIDR2    (0xFE8/4)
 
+#define GICD_SGI_TARGET_LIST_SHIFT   (24)
+#define GICD_SGI_TARGET_LIST_MASK    (0x3UL << GICD_SGI_TARGET_LIST_SHIFT)
 #define GICD_SGI_TARGET_LIST   (0UL<<24)
 #define GICD_SGI_TARGET_OTHERS (1UL<<24)
 #define GICD_SGI_TARGET_SELF   (2UL<<24)
 #define GICD_SGI_TARGET_SHIFT  (16)
 #define GICD_SGI_TARGET_MASK   (0xFFUL<<GICD_SGI_TARGET_SHIFT)
 #define GICD_SGI_GROUP1        (1UL<<15)
+#define GICD_SGI_INTID_MASK    (0xFUL)
 
 #define GICC_CTLR       (0x0000/4)
 #define GICC_PMR        (0x0004/4)
-- 
1.7.2.5

