
[Xen-devel] [PATCH v8 2/2] xen/arm: trap guest WFI



Trap guest WFI and block the guest VCPU unless it has pending
interrupts (WFI should return whenever an interrupt arrives, even if
interrupts are disabled).

Wake the guest vcpu when a new interrupt arrives for it.
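
In outline, the new handling looks like this (a condensed sketch of
the traps.c and vgic.c hunks below, not additional code):

    /* do_trap_hypervisor(): WFI now traps into the hypervisor */
    case HSR_EC_WFI_WFE:
        vcpu_block();                    /* put the vcpu to sleep */
        /* WFI must still complete if an interrupt is pending, even with
         * CPSR.I set, so re-check after blocking to close the race with
         * vgic_vcpu_inject_irq() */
        if ( _local_events_need_delivery() )
            vcpu_unblock(current);
        regs->pc += hsr.len ? 4 : 2;     /* step over the WFI instruction */
        break;

    /* vgic_vcpu_inject_irq(): a newly injected irq wakes the vcpu */
    vcpu_unblock(v);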

Introduce gic_events_need_delivery: it checks whether the current vcpu
has any interrupts that need to be delivered, either in the LRs or on
the lr_pending list.

Properly implement local_events_need_delivery: first check whether the
guest has interrupts disabled; if so, return 0. If interrupts are
enabled, return positive if gic_events_need_delivery returns positive.
Otherwise we still need to check whether evtchn_upcall_pending is set
while no VGIC_IRQ_EVTCHN_CALLBACK irqs are inflight: that is the race
described by commit db453468d92369e7182663fb13e14d83ec4ce456 "arm:
vgic: fix race between evtchn upcall and evtchnop_send", and it means
that an event still needs to be injected.
If all of these tests are negative then no events need to be delivered.
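
For quick reference, the whole check collapses to something like the
sketch below; the actual patch splits it into local_events_need_delivery
and _local_events_need_delivery in the asm-arm/event.h hunk further down.

    static inline int local_events_need_delivery(void)
    {
        struct cpu_user_regs *regs = guest_cpu_user_regs();

        if ( regs->cpsr & PSR_IRQ_MASK )     /* guest has IRQs masked */
            return 0;
        if ( gic_events_need_delivery() )    /* pending in LRs or lr_pending */
            return 1;
        /* event channel upcall pending but no callback irq inflight:
         * the race fixed by db453468d923, an event must be injected */
        if ( vcpu_info(current, evtchn_upcall_pending) &&
             !vcpu_info(current, evtchn_upcall_mask) &&
             list_empty(&irq_to_pending(current,
                                        VGIC_IRQ_EVTCHN_CALLBACK)->inflight) )
            return 1;
        return 0;
    }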

Implement local_event_delivery_enable by clearing PSR_IRQ_MASK.


Changes in v8:
- remove the mask check from _local_events_need_delivery, add an
unconditional mask check in local_events_need_delivery.

Changes in v7:
- clear PSR_IRQ_MASK in the implementation of
local_event_delivery_enable.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/gic.c          |    8 +++++++-
 xen/arch/arm/traps.c        |   17 ++++++++++++++++-
 xen/arch/arm/vgic.c         |    1 +
 xen/include/asm-arm/event.h |   41 +++++++++++++++++++++++++++++++++--------
 xen/include/asm-arm/gic.h   |    1 +
 5 files changed, 58 insertions(+), 10 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 0d1ab5a..5e83c50 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -589,13 +589,19 @@ static void gic_inject_irq_stop(void)
     }
 }
 
+int gic_events_need_delivery(void)
+{
+    return (!list_empty(&current->arch.vgic.lr_pending) ||
+            this_cpu(lr_mask));
+}
+
 void gic_inject(void)
 {
     if ( vcpu_info(current, evtchn_upcall_pending) )
         vgic_vcpu_inject_irq(current, VGIC_IRQ_EVTCHN_CALLBACK, 1);
 
     gic_restore_pending_irqs(current);
-    if (!this_cpu(lr_mask))
+    if (!gic_events_need_delivery())
         gic_inject_irq_stop();
     else
         gic_inject_irq_start();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index b7487b7..b6dcd1e 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -29,7 +29,9 @@
 #include <xen/hypercall.h>
 #include <xen/softirq.h>
 #include <xen/domain_page.h>
+#include <public/sched.h>
 #include <public/xen.h>
+#include <asm/event.h>
 #include <asm/regs.h>
 #include <asm/cpregs.h>
 
@@ -59,7 +61,7 @@ void __cpuinit init_traps(void)
     WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);
 
     /* Setup hypervisor traps */
-    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR_EL2);
+    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2);
     isb();
 }
 
@@ -931,6 +933,19 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
     union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
 
     switch (hsr.ec) {
+    case HSR_EC_WFI_WFE:
+        /* at the moment we only trap WFI */
+        vcpu_block();
+        /* The ARM spec declares that even if local irqs are masked in
+         * the CPSR register, an irq should wake up a cpu from WFI anyway.
+         * For this reason we need to check for irqs that need delivery,
+         * ignoring the CPSR register, *after* calling SCHEDOP_block to
+         * avoid races with vgic_vcpu_inject_irq.
+         */
+        if ( _local_events_need_delivery() )
+            vcpu_unblock(current);
+        regs->pc += hsr.len ? 4 : 2;
+        break;
     case HSR_EC_CP15_32:
         if ( ! is_pv32_domain(current->domain) )
             goto bad_trap;
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 4d8da02..b30da78 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -618,6 +618,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
 out:
     spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
     /* we have a new higher priority irq, inject it into the guest */
+    vcpu_unblock(v);
 }
 
 /*
diff --git a/xen/include/asm-arm/event.h b/xen/include/asm-arm/event.h
index 10f58af..271f3ed 100644
--- a/xen/include/asm-arm/event.h
+++ b/xen/include/asm-arm/event.h
@@ -1,27 +1,52 @@
 #ifndef __ASM_EVENT_H__
 #define __ASM_EVENT_H__
 
+#include <asm/gic.h>
+#include <asm/domain.h>
+
 void vcpu_kick(struct vcpu *v);
 void vcpu_mark_events_pending(struct vcpu *v);
 
+static inline int _local_events_need_delivery(void)
+{
+    struct pending_irq *p = irq_to_pending(current, VGIC_IRQ_EVTCHN_CALLBACK);
+
+    /* XXX: if the first interrupt has already been delivered, we should
+     * check whether any higher priority interrupts are in the
+     * lr_pending queue or in the LR registers and return 1 only in that
+     * case.
+     * In practice the guest interrupt handler should run with
+     * interrupts disabled so this shouldn't be a problem in the general
+     * case.
+     */
+    if ( gic_events_need_delivery() )
+        return 1;
+
+    if ( vcpu_info(current, evtchn_upcall_pending) &&
+        !vcpu_info(current, evtchn_upcall_mask) &&
+        list_empty(&p->inflight) )
+        return 1;
+
+    return 0;
+}
+
 static inline int local_events_need_delivery(void)
 {
-    /* TODO
-     * return (vcpu_info(v, evtchn_upcall_pending) &&
-                        !vcpu_info(v, evtchn_upcall_mask)); */
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+    /* guest IRQs are masked */
+    if ( (regs->cpsr & PSR_IRQ_MASK) )
         return 0;
+    return _local_events_need_delivery();
 }
 
 int local_event_delivery_is_enabled(void);
 
-static inline void local_event_delivery_disable(void)
-{
-    /* TODO current->vcpu_info->evtchn_upcall_mask = 1; */
-}
-
 static inline void local_event_delivery_enable(void)
 {
     /* TODO current->vcpu_info->evtchn_upcall_mask = 0; */
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    regs->cpsr &= ~PSR_IRQ_MASK;
 }
 
 /* No arch specific virq definition now. Default to global. */
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 24c0d5c..92711d5 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -143,6 +143,7 @@ extern void gic_route_ppis(void);
 extern void gic_route_spis(void);
 
 extern void gic_inject(void);
+extern int gic_events_need_delivery(void);
 
 extern void __cpuinit init_maintenance_interrupt(void);
 extern void gic_set_guest_irq(struct vcpu *v, unsigned int irq,
-- 
1.7.2.5

