
Re: [Xen-devel] [PATCH v3 3/6] xen/arm: gic: Use the correct CPU ID



I'm only resending this patch.

====================================================================================

commit 89aafa2d3d8a4f9ebce700673f89c8b4822a02e8
Author: Julien Grall <julien.grall@xxxxxxxxxx>
Date:   Thu Aug 29 18:38:06 2013 +0100

    xen/arm: gic: Use the correct CPU ID
    
    The GIC mapping of CPU interfaces does not necessarily match the logical
    CPU numbering.
    
    When Xen wants to send an SGI to a specific CPU, it needs to use the GIC
    CPU ID. This ID can be retrieved from ITARGETSR0: when that register is
    read, the GIC returns a value that corresponds only to the processor
    performing the read. So Xen can use interrupt 0 to initialize the mapping
    (a short illustrative sketch follows the changelog below).
    
    Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
    
    ---
        Changes in v4:
            - Logically AND the cpumask given as argument with
            cpu_possible_map
            - Make sure the per-cpu variable is initialized
            - Add a comment on the calling restriction for
            gic_set_irq_properties
    
        Changes in v3:
            - Correctly create the mask in gic_cpu_mask
    
        Changes in v2:
            - Use per-cpu variable instead of an array
            - Add comment for NR_GIC_CPU_IF
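
For illustration only (this is not part of the patch): a minimal sketch of the
ITARGETSR0 read described in the commit message, assuming a hypothetical
gicd_readl() MMIO accessor and the GICv2 distributor layout (ITARGETSR0 at
offset 0x800); the helper and accessor names are illustrative, not Xen's. The
per-CPU read added to gic_cpu_init() in the diff below does the same thing
through the GICD[] array.

    #include <stdint.h>

    /* GICv2: ITARGETSR0 holds the targets for interrupts 0-3 and is banked,
     * i.e. read-only and specific to the CPU performing the access. */
    #define GICD_ITARGETSR0  0x800

    /* Hypothetical MMIO read helper for the distributor (assumption). */
    extern uint32_t gicd_readl(uint32_t offset);

    static uint8_t read_gic_cpu_id(void)
    {
        /* Each byte of ITARGETSR0 reads back as a one-hot mask naming the
         * CPU interface of the reading processor, so byte 0 (interrupt 0)
         * is enough to learn our own GIC CPU ID. */
        return gicd_readl(GICD_ITARGETSR0) & 0xff;
    }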

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index b969d23..9f703a0 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -57,6 +57,32 @@ static DEFINE_PER_CPU(uint64_t, lr_mask);
 
 static unsigned nr_lrs;
 
+/* The GIC mapping of CPU interfaces does not necessarily match the
+ * logical CPU numbering. Let's use the mapping as returned by the GIC
+ * itself.
+ */
+static DEFINE_PER_CPU(u8, gic_cpu_id);
+
+/* Maximum number of CPU interfaces per GIC */
+#define NR_GIC_CPU_IF 8
+
+static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
+{
+    unsigned int cpu;
+    unsigned int mask = 0;
+    cpumask_t possible_mask;
+
+    cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
+    for_each_cpu(cpu, &possible_mask)
+    {
+        ASSERT(cpu < NR_GIC_CPU_IF);
+        ASSERT(__per_cpu_offset[cpu] != -(long)__per_cpu_start);
+        mask |= per_cpu(gic_cpu_id, cpu);
+    }
+
+    return mask;
+}
+
 unsigned int gic_number_lines(void)
 {
     return gic.lines;
@@ -182,16 +208,18 @@ static hw_irq_controller gic_guest_irq_type = {
     .set_affinity = gic_irq_set_affinity,
 };
 
-/* needs to be called with gic.lock held */
+/*
+ * - needs to be called with gic.lock held
+ * - needs to be called with a valid cpu_mask, i.e. each CPU in the mask has
+ *   already called gic_cpu_init
+ */
 static void gic_set_irq_properties(unsigned int irq, bool_t level,
                                    const cpumask_t *cpu_mask,
                                    unsigned int priority)
 {
     volatile unsigned char *bytereg;
     uint32_t cfg, edgebit;
-    unsigned int mask = cpumask_bits(cpu_mask)[0];
-
-    ASSERT(!(mask & ~0xff)); /* Target bitmap only support 8 CPUS */
+    unsigned int mask = gic_cpu_mask(cpu_mask);
 
     /* Set edge / level */
     cfg = GICD[GICD_ICFGR + irq / 16];
@@ -300,6 +328,8 @@ static void __cpuinit gic_cpu_init(void)
 {
     int i;
 
+    this_cpu(gic_cpu_id) = GICD[GICD_ITARGETSR] & 0xff;
+
     /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
      * even though they are controlled with GICD registers, they must
      * be set up here with the other per-cpu state. */
@@ -431,13 +461,13 @@ void __init gic_init(void)
 
 void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
 {
-    unsigned long mask = cpumask_bits(cpumask)[0];
+    unsigned int mask = 0;
+    cpumask_t online_mask;
 
     ASSERT(sgi < 16); /* There are only 16 SGIs */
 
-    mask &= cpumask_bits(&cpu_online_map)[0];
-
-    ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */
+    cpumask_and(&online_mask, cpumask, &cpu_online_map);
+    mask = gic_cpu_mask(&online_mask);
 
     dsb();

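For illustration only (not part of the patch): a rough usage sketch of how the
mask produced by gic_cpu_mask() is meant to be consumed when raising an SGI,
assuming a hypothetical gicd_writel() accessor and the GICv2 distributor
layout (GICD_SGIR at offset 0xF00, CPU target list in bits [23:16], SGI number
in bits [3:0]); raise_sgi() and the register constants here are illustrative,
not Xen's. The point of the patch is that this target list must carry the GIC
CPU interface bitmap, not the logical CPU bitmap.

    #include <stdint.h>

    #define GICD_SGIR               0xF00
    #define GICD_SGIR_TARGET_SHIFT  16

    /* Hypothetical MMIO write helper for the distributor (assumption). */
    extern void gicd_writel(uint32_t val, uint32_t offset);

    static void raise_sgi(uint8_t target_mask, unsigned int sgi)
    {
        /* target_mask is the GIC CPU interface bitmap built by
         * gic_cpu_mask(); sgi is the SGI number (0-15). */
        gicd_writel(((uint32_t)target_mask << GICD_SGIR_TARGET_SHIFT) | sgi,
                    GICD_SGIR);
    }
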
-- 
Julien Grall



 

