Re: [Xen-devel] [RFC PATCH v2 18/22] ARM: vGIC: move virtual IRQ target VCPU from rank to pending_irq



Hi Andre,

On 21/07/17 21:00, Andre Przywara wrote:
The VCPU a shared virtual IRQ is targeting is currently stored in the
irq_rank structure.
For LPIs we already store the target VCPU in struct pending_irq, so
move SPIs over as well.
The ITS code, which was using this field already, was so far using the
VCPU lock to protect the pending_irq, so move this over to the new lock.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/vgic-v2.c     | 56 +++++++++++++++--------------------
 xen/arch/arm/vgic-v3-its.c |  9 +++---
 xen/arch/arm/vgic-v3.c     | 69 ++++++++++++++++++++-----------------------
 xen/arch/arm/vgic.c        | 73 +++++++++++++++++++++-------------------------
 xen/include/asm-arm/vgic.h | 13 +++------
 5 files changed, 96 insertions(+), 124 deletions(-)

diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 0c8a598..c7ed3ce 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -66,19 +66,22 @@ void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize,
  *
  * Note the byte offset will be aligned to an ITARGETSR<n> boundary.
  */
-static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
-                                     unsigned int offset)
+static uint32_t vgic_fetch_itargetsr(struct vcpu *v, unsigned int offset)
 {
     uint32_t reg = 0;
     unsigned int i;
+    unsigned long flags;

-    ASSERT(spin_is_locked(&rank->lock));
-
-    offset &= INTERRUPT_RANK_MASK;
     offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);

     for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
-        reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);
+    {
+        struct pending_irq *p = irq_to_pending(v, offset);
+
+        vgic_irq_lock(p, flags);
+        reg |= (1 << p->vcpu_id) << (i * NR_BITS_PER_TARGET);
+        vgic_irq_unlock(p, flags);
+    }

     return reg;
 }
@@ -89,32 +92,29 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
  *
  * Note the byte offset will be aligned to an ITARGETSR<n> boundary.
  */
-static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
+static void vgic_store_itargetsr(struct domain *d,
                                  unsigned int offset, uint32_t itargetsr)
 {
     unsigned int i;
     unsigned int virq;

-    ASSERT(spin_is_locked(&rank->lock));
-
     /*
      * The ITARGETSR0-7, used for SGIs/PPIs, are implemented RO in the
      * emulation and should never call this function.
      *
-     * They all live in the first rank.
+     * They all live in the first four bytes of ITARGETSR.
      */
-    BUILD_BUG_ON(NR_INTERRUPT_PER_RANK != 32);
-    ASSERT(rank->index >= 1);
+    ASSERT(offset >= 4);

-    offset &= INTERRUPT_RANK_MASK;
+    virq = offset;
     offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);

-    virq = rank->index * NR_INTERRUPT_PER_RANK + offset;
-
     for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++, virq++ )
     {
         unsigned int new_target, old_target;
+        unsigned long flags;
         uint8_t new_mask;
+        struct pending_irq *p = spi_to_pending(d, virq);

         /*
          * Don't need to mask as we rely on new_mask to fit for only one
@@ -151,16 +151,14 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
         /* The vCPU ID always starts from 0 */
         new_target--;

-        old_target = read_atomic(&rank->vcpu[offset]);
+        vgic_irq_lock(p, flags);
+        old_target = p->vcpu_id;

         /* Only migrate the vIRQ if the target vCPU has changed */
         if ( new_target != old_target )
-        {
-            if ( vgic_migrate_irq(d->vcpu[old_target],
-                             d->vcpu[new_target],
-                             virq) )
-                write_atomic(&rank->vcpu[offset], new_target);
-        }
+            vgic_migrate_irq(p, &flags, d->vcpu[new_target]);

Why do you need to pass a pointer to the flags rather than the value directly?
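
For what it's worth, every path in this patch only reads *flags in order to
unlock, so taking the flags by value would seem to suffice. A sketch of the
alternative prototype (my assumption, untested):

    /* Sketch: flags taken by value; the callee only ever hands them on to
     * vgic_irq_unlock()/spin_unlock_irqrestore() and never writes them back. */
    bool vgic_migrate_irq(struct pending_irq *p, unsigned long flags,
                          struct vcpu *new);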

+        else
+            vgic_irq_unlock(p, flags);
     }
 }

@@ -264,11 +262,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
         uint32_t itargetsr;

         if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
-        rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
-        if ( rank == NULL) goto read_as_zero;
-        vgic_lock_rank(v, rank, flags);
-        itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
-        vgic_unlock_rank(v, rank, flags);
+        itargetsr = vgic_fetch_itargetsr(v, gicd_reg - GICD_ITARGETSR);

You need a check on the IRQ here to avoid calling vgic_fetch_itargetsr() with an IRQ that is not handled.
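
Something like the bound check the IROUTER handlers in this patch already use
should do (untested sketch; ITARGETSR has one byte per vIRQ, so the byte
offset is the vIRQ number):

    unsigned int irq = gicd_reg - GICD_ITARGETSR;

    if ( irq >= v->domain->arch.vgic.nr_spis + 32 ) goto read_as_zero;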

         *r = vreg_reg32_extract(itargetsr, info);

         return 1;
@@ -498,14 +492,10 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
         uint32_t itargetsr;

         if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
-        rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
-        if ( rank == NULL) goto write_ignore;
-        vgic_lock_rank(v, rank, flags);
-        itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
+        itargetsr = vgic_fetch_itargetsr(v, gicd_reg - GICD_ITARGETSR);

Ditto.

         vreg_reg32_update(&itargetsr, r, info);
-        vgic_store_itargetsr(v->domain, rank, gicd_reg - GICD_ITARGETSR,
+        vgic_store_itargetsr(v->domain, gicd_reg - GICD_ITARGETSR,
                              itargetsr);

ITARGETSR is updated using a read-modify-write sequence, which needs to be atomic. That sequence used to be protected by the rank lock, which you have now dropped. So what is the locking here?
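
To make the problem concrete, here is a hypothetical interleaving of two vCPUs
writing different bytes of the same ITARGETSR; with the rank lock gone, nothing
prevents it, and A's update is silently lost:

    vCPU A: itargetsr = vgic_fetch_itargetsr(v, offset);  /* reads old value */
    vCPU B: itargetsr = vgic_fetch_itargetsr(v, offset);  /* reads same value */
    vCPU A: vreg_reg32_update(&itargetsr, rA, info);      /* modifies byte 0 */
    vCPU A: vgic_store_itargetsr(d, offset, itargetsr);
    vCPU B: vreg_reg32_update(&itargetsr, rB, info);      /* modifies byte 1 */
    vCPU B: vgic_store_itargetsr(d, offset, itargetsr);   /* overwrites A's byte 0 */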

-        vgic_unlock_rank(v, rank, flags);
         return 1;
     }

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 682ce10..1020ebe 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -628,7 +628,7 @@ static int its_discard_event(struct virt_its *its,

     /* Cleanup the pending_irq and disconnect it from the LPI. */
     gic_remove_irq_from_queues(vcpu, p);
-    vgic_init_pending_irq(p, INVALID_LPI);
+    vgic_init_pending_irq(p, INVALID_LPI, INVALID_VCPU_ID);

     spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);

@@ -768,7 +768,7 @@ static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
     if ( !pirq )
         goto out_remove_mapping;

-    vgic_init_pending_irq(pirq, intid);
+    vgic_init_pending_irq(pirq, intid, vcpu->vcpu_id);

     /*
      * Now read the guest's property table to initialize our cached state.
@@ -781,7 +781,6 @@ static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
     if ( ret )
         goto out_remove_host_entry;

-    pirq->vcpu_id = vcpu->vcpu_id;
     /*
      * Mark this LPI as new, so any older (now unmapped) LPI in any LR
      * can be easily recognised as such.
@@ -852,9 +851,9 @@ static int its_handle_movi(struct virt_its *its, uint64_t *cmdptr)
      */
     spin_lock_irqsave(&ovcpu->arch.vgic.lock, flags);

+    vgic_irq_lock(p, flags);
     p->vcpu_id = nvcpu->vcpu_id;
-
-    spin_unlock_irqrestore(&ovcpu->arch.vgic.lock, flags);
+    vgic_irq_unlock(p, flags);

     /*
      * TODO: Investigate if and how to migrate an already pending LPI. This
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index e9e36eb..e9d46af 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -100,18 +100,21 @@ static struct vcpu *vgic_v3_irouter_to_vcpu(struct domain *d, uint64_t irouter)
  *
  * Note the byte offset will be aligned to an IROUTER<n> boundary.
  */
-static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
-                                   unsigned int offset)
+static uint64_t vgic_fetch_irouter(struct vcpu *v, unsigned int offset)
 {
-    ASSERT(spin_is_locked(&rank->lock));
+    struct pending_irq *p;
+    unsigned long flags;
+    uint64_t aff;

     /* There is exactly 1 vIRQ per IROUTER */
     offset /= NR_BYTES_PER_IROUTER;

-    /* Get the index in the rank */
-    offset &= INTERRUPT_RANK_MASK;
+    p = irq_to_pending(v, offset);
+    vgic_irq_lock(p, flags);
+    aff = vcpuid_to_vaffinity(p->vcpu_id);
+    vgic_irq_unlock(p, flags);

-    return vcpuid_to_vaffinity(read_atomic(&rank->vcpu[offset]));
+    return aff;
 }

 /*
@@ -120,10 +123,12 @@ static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
  *
  * Note the offset will be aligned to the appropriate boundary.
  */
-static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
+static void vgic_store_irouter(struct domain *d,
                                unsigned int offset, uint64_t irouter)
 {
-    struct vcpu *new_vcpu, *old_vcpu;
+    struct vcpu *new_vcpu;
+    struct pending_irq *p;
+    unsigned long flags;
     unsigned int virq;

     /* There is 1 vIRQ per IROUTER */
@@ -135,11 +140,10 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
      */
     ASSERT(virq >= 32);

-    /* Get the index in the rank */
-    offset &= virq & INTERRUPT_RANK_MASK;
+    p = spi_to_pending(d, virq);
+    vgic_irq_lock(p, flags);

     new_vcpu = vgic_v3_irouter_to_vcpu(d, irouter);
-    old_vcpu = d->vcpu[read_atomic(&rank->vcpu[offset])];

     /*
      * From the spec (see 8.9.13 in IHI 0069A), any write with an
@@ -149,16 +153,13 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
      * invalid vCPU. So for now, just ignore the write.
      *
      * TODO: Respect the spec
+     *
+     * Only migrate the IRQ if the target vCPU has changed
      */
-    if ( !new_vcpu )
-        return;
-
-    /* Only migrate the IRQ if the target vCPU has changed */
-    if ( new_vcpu != old_vcpu )
-    {
-        if ( vgic_migrate_irq(old_vcpu, new_vcpu, virq) )
-            write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
-    }
+    if ( new_vcpu && new_vcpu->vcpu_id != p->vcpu_id )
+        vgic_migrate_irq(p, &flags, new_vcpu);
+    else
+        vgic_irq_unlock(p, flags);
 }

 static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
@@ -1061,8 +1062,6 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
                                    register_t *r, void *priv)
 {
     struct hsr_dabt dabt = info->dabt;
-    struct vgic_irq_rank *rank;
-    unsigned long flags;
     int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);

     perfc_incr(vgicd_reads);
@@ -1190,15 +1189,12 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
     case VRANGE64(GICD_IROUTER32, GICD_IROUTER1019):
     {
         uint64_t irouter;
+        unsigned int irq;

         if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-        rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
-                                DABT_DOUBLE_WORD);
-        if ( rank == NULL ) goto read_as_zero;
-        vgic_lock_rank(v, rank, flags);
-        irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
-        vgic_unlock_rank(v, rank, flags);
-
+        irq = (gicd_reg - GICD_IROUTER) / 8;
+        if ( irq >= v->domain->arch.vgic.nr_spis + 32 ) goto read_as_zero;
+        irouter = vgic_fetch_irouter(v, gicd_reg - GICD_IROUTER);
         *r = vreg_reg64_extract(irouter, info);

         return 1;
@@ -1264,8 +1260,6 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
                                     register_t r, void *priv)
 {
     struct hsr_dabt dabt = info->dabt;
-    struct vgic_irq_rank *rank;
-    unsigned long flags;
     int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);

     perfc_incr(vgicd_writes);
@@ -1379,16 +1373,15 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
     case VRANGE64(GICD_IROUTER32, GICD_IROUTER1019):
     {
         uint64_t irouter;
+        unsigned int irq;

         if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-        rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
-                                DABT_DOUBLE_WORD);
-        if ( rank == NULL ) goto write_ignore;
-        vgic_lock_rank(v, rank, flags);
-        irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
+        irq = (gicd_reg - GICD_IROUTER) / 8;
+        if ( irq >= v->domain->arch.vgic.nr_spis + 32 ) goto write_ignore;
+
+        irouter = vgic_fetch_irouter(v, gicd_reg - GICD_IROUTER);
         vreg_reg64_update(&irouter, r, info);
-        vgic_store_irouter(v->domain, rank, gicd_reg - GICD_IROUTER, irouter);
-        vgic_unlock_rank(v, rank, flags);
+        vgic_store_irouter(v->domain, gicd_reg - GICD_IROUTER, irouter);

Same here for the locking issue.

         return 1;
     }

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 0e6dfe5..f6532ee 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -61,7 +61,8 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
     return vgic_get_rank(v, rank);
 }

-void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
+void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq,
+                           unsigned int vcpu_id)
 {
     /* The vcpu_id field must be big enough to hold a VCPU ID. */
     BUILD_BUG_ON(BIT(sizeof(p->vcpu_id) * 8) < MAX_VIRT_CPUS);
@@ -71,27 +72,15 @@ void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
     INIT_LIST_HEAD(&p->lr_queue);
     spin_lock_init(&p->lock);
     p->irq = virq;
-    p->vcpu_id = INVALID_VCPU_ID;
+    p->vcpu_id = vcpu_id;
 }

 static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
                            unsigned int vcpu)
 {
-    unsigned int i;
-
-    /*
-     * Make sure that the type chosen to store the target is able to
-     * store an VCPU ID between 0 and the maximum of virtual CPUs
-     * supported.
-     */
-    BUILD_BUG_ON((1 << (sizeof(rank->vcpu[0]) * 8)) < MAX_VIRT_CPUS);
-
     spin_lock_init(&rank->lock);

     rank->index = index;
-
-    for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
-        write_atomic(&rank->vcpu[i], vcpu);
 }

 int domain_vgic_register(struct domain *d, int *mmio_count)
@@ -142,9 +131,9 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
     if ( d->arch.vgic.pending_irqs == NULL )
         return -ENOMEM;

+    /* SPIs are routed to VCPU0 by default */
     for (i=0; i<d->arch.vgic.nr_spis; i++)
-        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);
-
+        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32, 0);
     /* SPIs are routed to VCPU0 by default */
     for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
         vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1, 0);
@@ -208,8 +197,9 @@ int vcpu_vgic_init(struct vcpu *v)
     v->domain->arch.vgic.handler->vcpu_init(v);

     memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
+    /* SGIs/PPIs are always routed to this VCPU */
     for (i = 0; i < 32; i++)
-        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i);
+        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i, v->vcpu_id);

     INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs);
     INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
@@ -268,10 +258,7 @@ struct vcpu *vgic_lock_vcpu_irq(struct vcpu *v, struct pending_irq *p,

 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, struct pending_irq *p)
 {
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, p->irq);
-    int target = read_atomic(&rank->vcpu[p->irq & INTERRUPT_RANK_MASK]);
-
-    return v->domain->vcpu[target];
+    return v->domain->vcpu[p->vcpu_id];

Do you need p to be locked for reading vcpu_id? If so, then an ASSERT should be added. If not, then you probably need an ACCESS_ONCE()/read_atomic().
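
In other words, either (sketch):

    ASSERT(spin_is_locked(&p->lock));
    return v->domain->vcpu[p->vcpu_id];

or, for a lockless read (sketch, relying on vcpu_id staying a single byte):

    return v->domain->vcpu[read_atomic(&p->vcpu_id)];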

 }

 #define MAX_IRQS_PER_IPRIORITYR 4
@@ -360,57 +347,65 @@ void vgic_store_irq_config(struct vcpu *v, unsigned int first_irq,
     local_irq_restore(flags);
 }

-bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
+bool vgic_migrate_irq(struct pending_irq *p, unsigned long *flags,
+                      struct vcpu *new)
 {
-    unsigned long flags;
-    struct pending_irq *p;
+    unsigned long vcpu_flags;
+    struct vcpu *old;
+    bool ret = false;

     /* This will never be called for an LPI, as we don't migrate them. */
-    ASSERT(!is_lpi(irq));
+    ASSERT(!is_lpi(p->irq));

-    spin_lock_irqsave(&old->arch.vgic.lock, flags);
-
-    p = irq_to_pending(old, irq);
+    ASSERT(spin_is_locked(&p->lock));

     /* nothing to do for virtual interrupts */
     if ( p->desc == NULL )
     {
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        return true;
+        ret = true;
+        goto out_unlock;
     }

     /* migration already in progress, no need to do anything */
     if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
     {
-        gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq);
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        return false;
+        gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", p->irq);
+        goto out_unlock;
     }

+    p->vcpu_id = new->vcpu_id;

Something is wrong here: you update p->vcpu_id quite early. This means that if the IRQ fires whilst you are still in vgic_migrate_irq(), vgic_vcpu_inject_irq() will use the new vCPU even though the pending_irq is potentially still on the old vCPU's list.
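
One possible fix (untested sketch): defer the vcpu_id update until the vIRQ
has been taken off the old vCPU's queues, e.g. in the lr_queue path below:

    gic_remove_irq_from_queues(old, p);
    p->vcpu_id = new->vcpu_id;    /* retarget only once off the old list */
    irq_set_affinity(p->desc, cpumask_of(new->processor));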

+
     perfc_incr(vgic_irq_migrates);

     if ( list_empty(&p->inflight) )

I was kind of expecting the old vCPU's lock to be taken here, given that you check p->inflight.

     {
         irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        return true;
+        goto out_unlock;
     }
+
     /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
     if ( !list_empty(&p->lr_queue) )
     {
+        old = vgic_lock_vcpu_irq(new, p, &vcpu_flags);

I may be missing something here, but shouldn't the vCPU returned be the new one, not the old one?

         gic_remove_irq_from_queues(old, p);
         irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        vgic_vcpu_inject_irq(new, irq);
+
+        vgic_irq_unlock(p, *flags);
+        spin_unlock_irqrestore(&old->arch.vgic.lock, vcpu_flags);
+
+        vgic_vcpu_inject_irq(new, p->irq);
         return true;
     }
+
     /* if the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
      * and wait for the EOI */
     if ( !list_empty(&p->inflight) )
         set_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);

-    spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-    return true;
+out_unlock:
+    vgic_irq_unlock(p, *flags);
+
+    return false;
 }

 void arch_move_irqs(struct vcpu *v)
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index ffd9a95..4b47a9b 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -112,13 +112,6 @@ struct vgic_irq_rank {

     uint32_t ienable;

-    /*
-     * It's more convenient to store a target VCPU per vIRQ
-     * than the register ITARGETSR/IROUTER itself.
-     * Use atomic operations to read/write the vcpu fields to avoid
-     * taking the rank lock.
-     */
-    uint8_t vcpu[32];
 };

 struct sgi_target {
@@ -217,7 +210,8 @@ extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, struct pending_irq *p);
 extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
 extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
 extern void vgic_clear_pending_irqs(struct vcpu *v);
-extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
+extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq,
+                                  unsigned int vcpu_id);
 extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
 extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
 extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s);
@@ -237,7 +231,8 @@ extern int vcpu_vgic_free(struct vcpu *v);
 extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
                         enum gic_sgi_mode irqmode, int virq,
                         const struct sgi_target *target);
-extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
+extern bool vgic_migrate_irq(struct pending_irq *p,
+                             unsigned long *flags, struct vcpu *new);

 /* Reserve a specific guest vIRQ */
 extern bool vgic_reserve_virq(struct domain *d, unsigned int virq);


Cheers,

--
Julien Grall
