[Xen-devel] [PATCH for-4.13] x86/vmx: always sync PIR to IRR before vmentry

When using posted interrupts on Intel hardware it's possible for the
vCPU to resume execution with a stale local APIC IRR register, because,
depending on which interrupts are to be injected, vlapic_has_pending_irq
might not be called, and thus PIR won't be synced into IRR.

Fix this by making sure PIR is always synced to IRR in vmx_intr_assist
regardless of what interrupts are pending.

While there, also simplify the code in __vmx_deliver_posted_interrupt:
there's no need to raise a softirq if the destination vCPU is the one
currently running on the pCPU, as the sync of PIR into IRR will be
performed by vmx_intr_assist regardless of whether a softirq is
pending.

Reported-by: Joe Jin <joe.jin@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Tested-by: Joe Jin <joe.jin@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/intr.c       |  8 +++++
 xen/arch/x86/hvm/vmx/vmx.c        | 60 ++++++++++---------
 xen/include/asm-x86/hvm/vmx/vmx.h |  1 +
 3 files changed, 28 insertions(+), 41 deletions(-)
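
For context, "syncing PIR into IRR" means draining the posted-interrupt
descriptor that the hardware (or another pCPU) writes into, and folding
its pending vectors into the virtual APIC's IRR. Below is a compilable
toy sketch of that operation, not Xen code: the flat virr bitmap stands
in for the vlapic IRR registers, and GCC's __atomic builtins stand in
for Xen's atomic helpers.

/* Toy model of a PIR -> IRR sync; names and layout are illustrative only. */
#include <stdio.h>

#define NR_VECTORS    256
#define BITS_PER_LONG (sizeof(unsigned long) * 8)
#define PIR_WORDS     (NR_VECTORS / BITS_PER_LONG)

struct pi_desc {
    unsigned long pir[PIR_WORDS]; /* posted-interrupt requests, 1 bit/vector */
    unsigned long on;             /* outstanding notification flag */
};

static unsigned long virr[PIR_WORDS]; /* stand-in for the vlapic IRR page */

static void sync_pir_to_irr(struct pi_desc *pi)
{
    unsigned int group;

    /* No notification outstanding: PIR has already been drained. */
    if ( !__atomic_exchange_n(&pi->on, 0, __ATOMIC_ACQ_REL) )
        return;

    /* Atomically grab-and-clear each PIR word and OR it into the IRR. */
    for ( group = 0; group < PIR_WORDS; group++ )
        virr[group] |= __atomic_exchange_n(&pi->pir[group], 0,
                                           __ATOMIC_ACQ_REL);
}

int main(void)
{
    struct pi_desc pi = { .on = 1 };

    pi.pir[0] = 1UL << 42; /* post vector 0x2a */
    sync_pir_to_irr(&pi);
    printf("vector 0x2a pending in IRR: %d\n", !!(virr[0] & (1UL << 42)));
    return 0;
}

In this toy model the ON flag is consumed before the PIR words are
drained, so a vector posted after the exchange simply sets ON again and
is picked up by the next sync.
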
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 0d097cf1f2..ce70f4bc75 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -232,6 +232,14 @@ void vmx_intr_assist(void)
     enum hvm_intblk intblk;
     int pt_vector;
 
+    if ( cpu_has_vmx_posted_intr_processing )
+        /*
+         * Always force PIR to be synced to IRR before vmentry, this is also
+         * done by vlapic_has_pending_irq but it's possible other pending
+         * interrupts prevent the execution of that function.
+         */
+        vmx_sync_pir_to_irr(v);
+
     /* Block event injection when single step with MTF. */
     if ( unlikely(v->arch.hvm.single_step) )
     {
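
To see why the unconditional sync matters, here is a self-contained toy
model (hypothetical and heavily simplified; none of this is the real
vmx_intr_assist logic): if a higher-priority event takes an early exit
from the injection path, vlapic_has_pending_irq -- previously the only
place PIR was folded into IRR -- is never reached, and the guest
resumes with a stale IRR.

#include <stdbool.h>
#include <stdio.h>

/* Toy vCPU: three flags are enough to model the hazard. */
struct vcpu {
    bool pir_pending; /* interrupt posted in PIR, not yet in IRR */
    bool irr_pending; /* interrupt visible in the (virtual) IRR */
    bool nmi_pending; /* some higher-priority event */
};

static void sync_pir_to_irr(struct vcpu *v)
{
    if ( v->pir_pending )
    {
        v->pir_pending = false;
        v->irr_pending = true;
    }
}

static bool vlapic_has_pending_irq(struct vcpu *v)
{
    sync_pir_to_irr(v); /* before the patch, the only sync point */
    return v->irr_pending;
}

static void intr_assist(struct vcpu *v, bool patched)
{
    if ( patched )
        sync_pir_to_irr(v); /* the patch: always sync first */

    if ( v->nmi_pending )
        return; /* early exit: vlapic_has_pending_irq() never reached */

    if ( vlapic_has_pending_irq(v) )
    {
        /* here the external interrupt would be injected */
    }
}

int main(void)
{
    struct vcpu v = { .pir_pending = true, .nmi_pending = true };

    intr_assist(&v, false);
    printf("unpatched, IRR stale after vmentry: %s\n",
           v.pir_pending && !v.irr_pending ? "yes" : "no");

    v = (struct vcpu){ .pir_pending = true, .nmi_pending = true };
    intr_assist(&v, true);
    printf("patched,   IRR stale after vmentry: %s\n",
           v.pir_pending && !v.irr_pending ? "yes" : "no");
    return 0;
}
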
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 33e68eaddf..82a1b972c5 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1945,8 +1945,6 @@ static void vmx_process_isr(int isr, struct vcpu *v)
 
 static void __vmx_deliver_posted_interrupt(struct vcpu *v)
 {
-    bool_t running = v->is_running;
-
     vcpu_unblock(v);
     /*
      * Just like vcpu_kick(), nothing is needed for the following two cases:
@@ -1954,48 +1952,28 @@ static void __vmx_deliver_posted_interrupt(struct vcpu *v)
      * 2. The target vCPU is the current vCPU and we're in non-interrupt
      * context.
      */
-    if ( running && (in_irq() || (v != current)) )
-    {
+    if ( vcpu_runnable(v) && v != current )
         /*
-         * Note: Only two cases will reach here:
-         * 1. The target vCPU is running on other pCPU.
-         * 2. The target vCPU is the current vCPU.
+         * If the vCPU is running on another pCPU send an IPI to the pCPU. When
+         * the IPI arrives, the target vCPU may be running in non-root mode,
+         * running in root mode, runnable or blocked. If the target vCPU is
+         * running in non-root mode, the hardware will sync PIR to vIRR for
+         * 'posted_intr_vector' is a special vector handled directly by the
+         * hardware.
          *
-         * Note2: Don't worry the v->processor may change. The vCPU being
-         * moved to another processor is guaranteed to sync PIR to vIRR,
-         * due to the involved scheduling cycle.
+         * If the target vCPU is running in root-mode, the interrupt handler
+         * starts to run. Considering an IPI may arrive in the window between
+         * the call to vmx_intr_assist() and interrupts getting disabled, the
+         * interrupt handler should raise a softirq to ensure events will be
+         * delivered in time.
          */
-        unsigned int cpu = v->processor;
+        send_IPI_mask(cpumask_of(v->processor), posted_intr_vector);
 
-        /*
-         * For case 1, we send an IPI to the pCPU. When an IPI arrives, the
-         * target vCPU maybe is running in non-root mode, running in root
-         * mode, runnable or blocked. If the target vCPU is running in
-         * non-root mode, the hardware will sync PIR to vIRR for
-         * 'posted_intr_vector' is special to the pCPU. If the target vCPU is
-         * running in root-mode, the interrupt handler starts to run.
-         * Considering an IPI may arrive in the window between the call to
-         * vmx_intr_assist() and interrupts getting disabled, the interrupt
-         * handler should raise a softirq to ensure events will be delivered
-         * in time. If the target vCPU is runnable, it will sync PIR to
-         * vIRR next time it is chose to run. In this case, a IPI and a
-         * softirq is sent to a wrong vCPU which will not have any adverse
-         * effect. If the target vCPU is blocked, since vcpu_block() checks
-         * whether there is an event to be delivered through
-         * local_events_need_delivery() just after blocking, the vCPU must
-         * have synced PIR to vIRR. Similarly, there is a IPI and a softirq
-         * sent to a wrong vCPU.
-         */
-        if ( cpu != smp_processor_id() )
-            send_IPI_mask(cpumask_of(cpu), posted_intr_vector);
-        /*
-         * For case 2, raising a softirq ensures PIR will be synced to vIRR.
-         * As any softirq will do, as an optimization we only raise one if
-         * none is pending already.
-         */
-        else if ( !softirq_pending(cpu) )
-            raise_softirq(VCPU_KICK_SOFTIRQ);
-    }
+    /*
+     * If the vCPU is not runnable or if it's the one currently running in this
+     * pCPU there's nothing to do, the vmentry code will already sync the PIR
+     * to IRR when resuming execution.
+     */
 }
 
 static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector)
@@ -2048,7 +2026,7 @@ static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector)
     vcpu_kick(v);
 }
 
-static void vmx_sync_pir_to_irr(struct vcpu *v)
+void vmx_sync_pir_to_irr(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
     unsigned int group, i;
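
The net effect of the rewrite above is that the delivery path reduces
to a single predicate. A minimal sketch, under toy types rather than
Xen's API, of the resulting decision and why each branch is safe:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the simplified notification decision; illustrative only. */
enum action { SEND_IPI, DO_NOTHING };

static enum action deliver_posted_intr(bool runnable, bool is_current)
{
    /*
     * Only a runnable vCPU other than the current one needs a notification
     * IPI. The current vCPU (and any vCPU that is not runnable) will have
     * PIR folded into IRR by vmx_intr_assist() on its next vmentry anyway.
     */
    return (runnable && !is_current) ? SEND_IPI : DO_NOTHING;
}

int main(void)
{
    printf("runnable on another pCPU -> %s\n",
           deliver_posted_intr(true, false) == SEND_IPI ? "IPI" : "nothing");
    printf("current vCPU             -> %s\n",
           deliver_posted_intr(true, true) == SEND_IPI ? "IPI" : "nothing");
    printf("not runnable             -> %s\n",
           deliver_posted_intr(false, false) == SEND_IPI ? "IPI" : "nothing");
    return 0;
}

Note the real code calls vcpu_unblock() before this test, so a blocked
vCPU may already have been made runnable by the time the predicate is
evaluated; the toy model leaves that out and only shows the branch
logic.
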
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index ebaa74449b..c43fab7c4f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -101,6 +101,7 @@ void vmx_update_debug_state(struct vcpu *v);
 void vmx_update_exception_bitmap(struct vcpu *v);
 void vmx_update_cpu_exec_control(struct vcpu *v);
 void vmx_update_secondary_exec_control(struct vcpu *v);
+void vmx_sync_pir_to_irr(struct vcpu *v);
 
 #define POSTED_INTR_ON  0
 #define POSTED_INTR_SN  1
--
2.24.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel