[Xen-devel] [PATCH for-4.9 v2] hvm/vpt: fix inconsistent views of vIOAPIC in vmx_intr_assist()
When injecting a periodic timer interrupt in vmx_intr_assist(), the vIOAPIC
RTE is read several times during a single event delivery. For example, if a
periodic timer interrupt comes from the PIT, pt_update_irq() accesses the
corresponding RTE when setting the corresponding bit in the vIRR, and the RTE
is accessed again after pt_update_irq() returns in order to obtain the vector
that was just set in the vIRR. Between the two accesses the content of the RTE
may have been changed by another CPU, because no protection is in place. This
can trigger the assertion failure in vmx_intr_assist(). Also, after a periodic
timer interrupt has been injected successfully, pt_irq_posted() decreases the
pending count (pending_intr_nr); but if the corresponding RTE has been changed
in the meantime, pt_irq_posted() will not decrease the count, resulting in one
extra timer interrupt.

More discussion and history can be found in:
1. https://lists.xenproject.org/archives/html/xen-devel/2017-03/msg00906.html
2. https://lists.xenproject.org/archives/html/xen-devel/2017-01/msg01019.html

This patch introduces a new field 'latched_vector' to struct periodic_time.
The field is only valid between the call to pt_update_irq() and the call to
pt_irq_posted() in vmx_intr_assist(). It is set to the vector written to the
vIRR during the first access described above, is used by the two later
accesses via pt_irq_vector(), and is finally cleared in pt_irq_posted() or
updated on the next invocation of vmx_intr_assist(). The latching operation
must be protected by irq_lock so that other vCPUs cannot change the value we
are interested in; since hvm_isa_irq_assert() takes that lock itself, a
-locked variant of hvm_isa_irq_assert() is provided to avoid deadlock.

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
This patch fixes a user-triggerable assertion failure. I think it should be
fixed in 4.9.
---
v2:
 - only hold irq_lock within pt_update_irq(). Avoid taking irq_lock in
   vmx_intr_assist(), so as not to put too many functions inside the locked
   region.
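For readers not familiar with the vpt code, the idea boils down to the
stand-alone sketch below (not part of the patch and not the actual Xen code:
the names fake_rte_vector, timer_state, latch_vector, etc. are illustrative
only, and a pthread mutex stands in for the per-domain irq_lock). The shared
"RTE" is read exactly once, under the lock that writers also take, and every
later consumer uses the latched copy until it is invalidated after the
interrupt has been posted.

/* Minimal sketch of the latch-once pattern; illustrative names only. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rte_lock = PTHREAD_MUTEX_INITIALIZER;
static uint8_t fake_rte_vector = 0x30;   /* may be rewritten by other "vCPUs" */

struct timer_state {
    int16_t latched_vector;              /* -1 means nothing latched */
};

/* First access: read the RTE once, under the lock, and remember the value. */
static void latch_vector(struct timer_state *t)
{
    pthread_mutex_lock(&rte_lock);
    t->latched_vector = fake_rte_vector; /* the only RTE read per delivery */
    pthread_mutex_unlock(&rte_lock);
}

/* Later accesses: reuse the latched copy instead of re-reading the RTE. */
static int16_t get_latched_vector(const struct timer_state *t)
{
    return t->latched_vector;
}

/* After the interrupt has been posted, invalidate the latched value. */
static void post_done(struct timer_state *t)
{
    t->latched_vector = -1;
}

int main(void)
{
    struct timer_state t = { .latched_vector = -1 };

    latch_vector(&t);                    /* corresponds to pt_update_irq() */
    /* Even if fake_rte_vector changes now, both consumers see one value. */
    printf("vector used for injection: %d\n", get_latched_vector(&t));
    printf("vector used for eoi_exit_bitmap: %d\n", get_latched_vector(&t));
    post_done(&t);                       /* corresponds to pt_irq_posted() */
    return 0;
}

The patch below applies the same pattern to the real structures:
pt_set_latched_vector() performs the single locked read of the RTE,
pt_irq_vector() returns the cached value, and pt_intr_post() /
create_periodic_time() reset latched_vector to -1.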
---
 xen/arch/x86/hvm/irq.c        | 11 ++++++---
 xen/arch/x86/hvm/vpt.c        | 56 +++++++++++++++++++++++++++----------------
 xen/include/asm-x86/hvm/vpt.h |  6 +++++
 xen/include/xen/hvm/irq.h     |  2 ++
 4 files changed, 52 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 8625584..60deb6e 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -126,20 +126,25 @@ void hvm_pci_intx_deassert(
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
 
-void hvm_isa_irq_assert(
+void hvm_isa_irq_assert_locked(
     struct domain *d, unsigned int isa_irq)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
 
     ASSERT(isa_irq <= 15);
-
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
 
     if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (hvm_irq->gsi_assert_count[gsi]++ == 0) )
         assert_irq(d, gsi, isa_irq);
+}
 
+void hvm_isa_irq_assert(
+    struct domain *d, unsigned int isa_irq)
+{
+    spin_lock(&d->arch.hvm_domain.irq_lock);
+    hvm_isa_irq_assert_locked(d, isa_irq);
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
 
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index e3f2039..0edfc4a 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -75,33 +75,43 @@ void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
     }
 }
 
-static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
+static void pt_set_latched_vector(struct periodic_time *pt, enum hvm_intsrc src)
 {
     struct vcpu *v = pt->vcpu;
     struct hvm_vioapic *vioapic;
-    unsigned int gsi, isa_irq, pin;
+    unsigned int gsi, pin;
+
+    ASSERT(pt->source == PTSRC_isa);
+    ASSERT(src == hvm_intsrc_lapic);
+
+    gsi = hvm_isa_irq_to_gsi(pt->irq);
+    vioapic = gsi_vioapic(v->domain, gsi, &pin);
+    if ( !vioapic )
+    {
+        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
+                v->domain->domain_id, gsi);
+        domain_crash(v->domain);
+        return;
+    }
+
+    pt->latched_vector = vioapic->redirtbl[pin].fields.vector;
+}
+
+static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
+{
+    struct vcpu *v = pt->vcpu;
+    unsigned int isa_irq;
 
     if ( pt->source == PTSRC_lapic )
         return pt->irq;
 
     isa_irq = pt->irq;
-    gsi = hvm_isa_irq_to_gsi(isa_irq);
 
     if ( src == hvm_intsrc_pic )
         return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                 + (isa_irq & 7));
 
     ASSERT(src == hvm_intsrc_lapic);
-    vioapic = gsi_vioapic(v->domain, gsi, &pin);
-    if ( !vioapic )
-    {
-        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
-                v->domain->domain_id, gsi);
-        domain_crash(v->domain);
-        return -1;
-    }
-
-    return vioapic->redirtbl[pin].fields.vector;
+
+    return pt->latched_vector;
 }
 
 static int pt_irq_masked(struct periodic_time *pt)
@@ -253,6 +263,7 @@ int pt_update_irq(struct vcpu *v)
     struct periodic_time *pt, *temp, *earliest_pt;
     uint64_t max_lag;
     int irq, is_lapic;
+    bool handle_by_pic = false;
 
     spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
@@ -297,7 +308,15 @@ int pt_update_irq(struct vcpu *v)
     else
     {
         hvm_isa_irq_deassert(v->domain, irq);
-        hvm_isa_irq_assert(v->domain, irq);
+        spin_lock(&v->domain->arch.hvm_domain.irq_lock);
+        hvm_isa_irq_assert_locked(v->domain, irq);
+        if ( platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
+             (&v->domain->arch.hvm_domain)->vpic[irq >> 3].int_output )
+        {
+            handle_by_pic = true;
+            pt_set_latched_vector(earliest_pt, hvm_intsrc_lapic);
+        }
+        spin_unlock(&v->domain->arch.hvm_domain.irq_lock);
     }
 
     /*
@@ -305,12 +324,7 @@ int pt_update_irq(struct vcpu *v)
      * IRR is returned and used to set eoi_exit_bitmap for virtual
      * interrupt delivery case. Otherwise return -1 to do nothing.
      */
-    if ( !is_lapic &&
-         platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
-         (&v->domain->arch.hvm_domain)->vpic[irq >> 3].int_output )
-        return -1;
-    else
-        return pt_irq_vector(earliest_pt, hvm_intsrc_lapic);
+    return handle_by_pic ? -1 : pt_irq_vector(earliest_pt, hvm_intsrc_lapic);
 }
 
 static struct periodic_time *is_pt_irq(
@@ -347,6 +361,7 @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
         return;
     }
 
+    pt->latched_vector = -1;
     pt->irq_issued = 0;
 
     if ( pt->one_shot )
@@ -414,6 +429,7 @@ void create_periodic_time(
     pt->pending_intr_nr = 0;
     pt->do_not_freeze = 0;
    pt->irq_issued = 0;
+    pt->latched_vector = -1;
 
     /* Periodic timer must be at least 0.1ms. */
     if ( (period < 100000) && period )
diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
index 21166ed..2daf356 100644
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -46,6 +46,12 @@ struct periodic_time {
 #define PTSRC_lapic  2    /* LAPIC time source */
     u8 source;                  /* PTSRC_ */
     u8 irq;
+    /*
+     * Vector set in vIRR in one interrupt delivery. Using this value can
+     * avoid reading the IOAPIC RTE again. Valid only when its source is
+     * 'PTSRC_isa' and handled by vlapic.
+     */
+    int16_t latched_vector;
     struct vcpu *vcpu;          /* vcpu timer interrupt delivers to */
     u32 pending_intr_nr;        /* pending timer interrupts */
     u64 period;                 /* frequency in ns */
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index 671a6f2..2451e8d 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -120,6 +120,8 @@ void hvm_pci_intx_deassert(
 /* Modify state of an ISA device's IRQ wire. */
 void hvm_isa_irq_assert(
     struct domain *d, unsigned int isa_irq);
+void hvm_isa_irq_assert_locked(
+    struct domain *d, unsigned int isa_irq);
 void hvm_isa_irq_deassert(
     struct domain *d, unsigned int isa_irq);
-- 
1.8.3.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel