
[Xen-devel] virq injection problem in Xen ARM 4.5



Hi,

 

I think I have found a problem with VIRQ injection in Xen ARM 4.5, together with a simple way to fix it.

Please check.

 

Problem

- In Xen ARM 4.5, an SPI's pending_irq structure can be accessed simultaneously from different pCPUs.
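
For reference, here is a rough sketch of the pending_irq structure as it looks in Xen ARM around 4.5 (reconstructed from memory, so treat the exact field list as approximate); these are the fields that end up being touched from two pCPUs at once:

/* Sketch of struct pending_irq (Xen ARM ~4.5, xen/include/asm-arm/vgic.h);
 * approximate, for illustration only. */
struct pending_irq
{
    unsigned long status;        /* GIC_IRQ_GUEST_* state bits */
#define GIC_IRQ_GUEST_QUEUED    0
#define GIC_IRQ_GUEST_ACTIVE    1
#define GIC_IRQ_GUEST_VISIBLE   2
#define GIC_IRQ_GUEST_ENABLED   3
#define GIC_IRQ_GUEST_MIGRATING 4
    struct irq_desc *desc;       /* non-NULL only for hardware (physical) IRQs */
    unsigned int irq;            /* virtual IRQ number */
    uint8_t lr;                  /* list register the IRQ currently occupies */
    uint8_t priority;
    struct list_head inflight;   /* linked into the vcpu's inflight_irqs list */
    struct list_head lr_queue;   /* linked into the vcpu's lr_pending list */
};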

 

Reason

- When Xen ARM 4.5 injects an SPI VIRQ into domain0, it sets the HW bit (bit 31) in the list register, so domain0's EOI deactivates the physical IRQ without any intervention from Xen. However, the SPI's pending_irq is not updated immediately, because Xen ARM 4.5 does not use the maintenance interrupt. Only later, when the domain0 vcpu that received the SPI VIRQ traps back into hyp mode, is the SPI's pending_irq updated in enter_hypervisor_head(). Meanwhile, after domain0's EOI, the same SPI IRQ can fire again immediately, and the SPI's pending_irq is then updated in vgic_vcpu_inject_irq().

If enter_hypervisor_head() runs on pcpu1 while the SPI's pending_irq is being updated on pcpu0, the SPI's pending_irq can be accessed simultaneously. To access the SPI's pending_irq, Xen has to acquire v->arch.vgic.lock, but since that lock is per-vcpu and the two paths may hold different vcpus' locks, it does not guarantee exclusive access to the SPI's pending_irq.
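
For comparison with the modified version further down, the relevant part of the stock gicv2_update_lr() looks roughly like the sketch below (reconstructed, so the exact macro names and masking may differ slightly); the HW bit plus the physical IRQ number are what allow domain0's EOI to deactivate the hardware interrupt behind Xen's back:

    /* Stock behaviour (sketch): for a hardware-backed virtual IRQ
     * (p->desc != NULL), set the HW bit (bit 31) and the physical IRQ
     * number in the list register.  The guest's EOI of the virtual IRQ
     * then also deactivates the physical IRQ, without trapping to Xen. */
    if ( p->desc != NULL )
        lr_reg |= GICH_V2_LR_HW |
                  ((p->desc->irq & GICH_V2_LR_PHYSICAL_MASK)
                                 << GICH_V2_LR_PHYSICAL_SHIFT);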

 

Example

* The machine has 2 pCPUs.

* Domain0 has 2 vCPUs: vcpu0 runs on pcpu0 and vcpu1 runs on pcpu1.

* All SPIs are routed to pcpu0, which runs Domain0's vcpu0.

* Assume that a second IRQ 100 occurs immediately after the first IRQ 100 is deactivated.

* Assume that the first IRQ 100 was injected into Domain0's vcpu1 and the second IRQ 100 is injected into Domain0's vcpu0, so the first IRQ 100 is handled on pcpu1 and the second IRQ 100 on pcpu0.

- After the first IRQ 100 is deactivated on pcpu1, IRQ 100's pending_irq struct is not updated immediately. Meanwhile, the second IRQ 100 occurs and IRQ 100's pending_irq is updated on pcpu0. As a result, enter_hypervisor_head() and vgic_vcpu_inject_irq() can run simultaneously, and IRQ 100's pending_irq struct can be modified concurrently, as sketched in the timeline below.
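
The race can be pictured as the following rough timeline (a sketch of the scenario above, not an actual trace):

/*
 * pcpu1 (domain0 vcpu1)                  pcpu0 (domain0 vcpu0)
 * ---------------------                  ---------------------
 * guest EOIs the first IRQ 100; the
 * HW bit in the LR deactivates the
 * physical IRQ without trapping to Xen
 *                                        second IRQ 100 fires (routed to pcpu0)
 *                                        vgic_vcpu_inject_irq() updates
 *                                        IRQ 100's pending_irq
 * vcpu1 later traps into hyp mode;
 * enter_hypervisor_head() updates the
 * same pending_irq (via gic_update_one_lr())
 *
 * => both updates can overlap, and each path may hold a different
 *    vcpu's v->arch.vgic.lock, so the lock does not serialise them.
 */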

 

Fix

- To prevent concurrent access to the SPI's pending_irq struct, the IRQ must be deactivated only after the SPI's pending_irq struct has been updated. This means Xen ARM 4.5 cannot use the HW bit (bit 31) in the list register, because that lets domain0 deactivate the SPI on its own. Instead, Xen ARM 4.5 has to deactivate the SPI manually after updating the SPI's pending_irq struct.

 

Code Changed

- I modified the functions below, and they work well with my SPI routing code on an Arndale board.

---------------------------------------------------------------------------------------------

static void gic_update_one_lr(struct vcpu *v, int i)

{

    struct pending_irq *p;

    int irq;

    struct gic_lr lr_val;

 

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    ASSERT(!local_irq_is_enabled());

 

    gic_hw_ops->read_lr(i, &lr_val);

    irq = lr_val.virq;

    p = irq_to_pending(v, irq);

    if ( lr_val.state & GICH_LR_ACTIVE )

    {

        set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);

        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&

             test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )

        {

            if ( p->desc == NULL )

            {

                 lr_val.state |= GICH_LR_PENDING;

                 gic_hw_ops->write_lr(i, &lr_val);

            }

            else

                gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into d%dv%d: already active in LR%d\n",

                         irq, v->domain->domain_id, v->vcpu_id, i);

        }

    }

    else if ( lr_val.state & GICH_LR_PENDING )

    {

        int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);

#ifdef GIC_DEBUG

        if ( q )

            gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into d%dv%d, when it is already pending in LR%d\n",

                    irq, v->domain->domain_id, v->vcpu_id, i);

#endif

    }

    else

    {

        gic_hw_ops->clear_lr(i);

        clear_bit(i, &this_cpu(lr_mask));

 

        clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);

        clear_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);

        p->lr = GIC_INVALID_LR;

        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&

             test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&

             !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )

            gic_raise_guest_irq(v, irq, p->priority);

        else {

            list_del_init(&p->inflight);

            if ( test_and_clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )

            {

                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);

                irq_set_affinity(p->desc, cpumask_of(v_target->processor));

            }

        }

        /* supsupi: the HW bit is no longer set in the LR, so the physical
         * IRQ is still active here; deactivate it manually now that the
         * pending_irq state has been updated. */
        if ( p->desc != NULL )
            gic_hw_ops->deactivate_irq(p->desc);

    }

 

}

---------------------------------------------------------------------------------------------

static void gicv2_update_lr(int lr, const struct pending_irq *p,

                            unsigned int state)

{

    uint32_t lr_reg;

 

    BUG_ON(lr >= gicv2_info.nr_lrs);

    BUG_ON(lr < 0);

 

    lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT)  |

              ((GIC_PRI_TO_GUEST(p->priority) & GICH_V2_LR_PRIORITY_MASK)

                                             << GICH_V2_LR_PRIORITY_SHIFT) |

              ((p->irq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT));

 

    /* supsupi: instead of the HW bit, set the EOI maintenance interrupt
     * bit, so Xen is notified when the guest EOIs this virtual IRQ and
     * can deactivate the physical IRQ itself (see gic_update_one_lr()). */
    if ( p->desc != NULL )
        lr_reg |= GICH_V2_LR_MAINTENANCE_IRQ;

 

    writel_gich(lr_reg, GICH_LR + lr * 4);

 

}

---------------------------------------------------------------------------------------------

Thank you

 
