[Xen-devel] [PATCH v4] xen/arm: trap guest WFI
Trap guest WFI, block the guest VCPU unless it has pending interrupts.
Wake the guest VCPU when a new interrupt for it arrives.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

Changes in v4:
- local_events_need_delivery: check the lr_pending list rather than the
  inflight list;
- local_events_need_delivery: no need to return true if
  evtchn_upcall_pending is set and the VGIC_IRQ_EVTCHN_CALLBACK irq is
  currently inflight: it just means that events are being handled.

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index e9c84c7..e2a072b 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -457,7 +457,7 @@ int construct_dom0(struct domain *d)
 
     v->arch.sctlr = SCTLR_BASE;
 
-    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR_EL2);
+    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2);
     isb();
 
     local_abort_enable();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 600113c..0612f85 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -29,6 +29,7 @@
 #include <xen/hypercall.h>
 #include <xen/softirq.h>
 #include <xen/domain_page.h>
+#include <public/sched.h>
 #include <public/xen.h>
 #include <asm/regs.h>
 #include <asm/cpregs.h>
@@ -920,6 +921,11 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
     union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
 
     switch (hsr.ec) {
+    /* at the moment we only trap WFI */
+    case HSR_EC_WFI_WFE:
+        do_sched_op_compat(SCHEDOP_block, 0);
+        regs->pc += hsr.len ? 4 : 2;
+        break;
     case HSR_EC_CP15_32:
         if ( ! is_pv32_domain(current->domain) )
             goto bad_trap;
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 0d24df0..8efcefc 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -608,12 +608,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
         {
             list_add_tail(&n->inflight, &iter->inflight);
             spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
-            return;
+            goto out;
         }
     }
     list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
     spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
     /* we have a new higher priority irq, inject it into the guest */
+out:
+    vcpu_unblock(v);
 }
 
 /*
diff --git a/xen/include/asm-arm/event.h b/xen/include/asm-arm/event.h
index 10f58af..cbcaba8 100644
--- a/xen/include/asm-arm/event.h
+++ b/xen/include/asm-arm/event.h
@@ -1,15 +1,18 @@
 #ifndef __ASM_EVENT_H__
 #define __ASM_EVENT_H__
 
+#include <asm/gic.h>
+#include <asm/domain.h>
+
 void vcpu_kick(struct vcpu *v);
 void vcpu_mark_events_pending(struct vcpu *v);
 
 static inline int local_events_need_delivery(void)
 {
-    /* TODO
-     * return (vcpu_info(v, evtchn_upcall_pending) &&
-               !vcpu_info(v, evtchn_upcall_mask)); */
-    return 0;
+    struct pending_irq *p = irq_to_pending(current, VGIC_IRQ_EVTCHN_CALLBACK);
+    return ( !list_empty(&current->arch.vgic.lr_pending) ||
+             (vcpu_info(current, evtchn_upcall_pending) &&
+              list_empty(&p->inflight)));
 }
 
 int local_event_delivery_is_enabled(void);
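
For context (not part of the patch): the reason vgic_vcpu_inject_irq() now
reaches vcpu_unblock() on every path, and local_events_need_delivery()
inspects the vGIC lists, is that the WFI block path must not put the vCPU to
sleep when an interrupt is already queued or arrives concurrently. The
standalone sketch below only models that ordering; all names in it (toy_vcpu,
toy_block, toy_inject_irq) are made up for illustration, and it assumes
SCHEDOP_block follows Xen's usual pattern of marking the vCPU blocked and
then re-checking local_events_need_delivery() before descheduling.

/*
 * Standalone model of the block/wake ordering the patch relies on.
 * Not Xen code: toy_vcpu, toy_block and toy_inject_irq are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
    bool blocked;       /* set while the vCPU waits after a WFI trap */
    int  pending_irqs;  /* stand-in for the vGIC lr_pending/inflight lists */
};

/* stand-in for local_events_need_delivery() */
static bool events_pending(struct toy_vcpu *v)
{
    return v->pending_irqs > 0;
}

/* stand-in for the SCHEDOP_block path taken from the WFI trap handler */
static void toy_block(struct toy_vcpu *v)
{
    v->blocked = true;
    /* re-check after marking the vCPU blocked: an interrupt injected in
     * between would otherwise be missed and the vCPU would sleep on it */
    if (events_pending(v)) {
        v->blocked = false;
        printf("WFI completes immediately: interrupt already pending\n");
    } else {
        printf("vCPU blocks until the next interrupt\n");
    }
}

/* stand-in for vgic_vcpu_inject_irq() followed by vcpu_unblock() */
static void toy_inject_irq(struct toy_vcpu *v)
{
    v->pending_irqs++;
    if (v->blocked) {
        v->blocked = false;
        printf("vCPU woken by new interrupt\n");
    }
}

int main(void)
{
    struct toy_vcpu v = { .blocked = false, .pending_irqs = 0 };

    toy_block(&v);       /* guest executed WFI with nothing pending */
    toy_inject_irq(&v);  /* device interrupt arrives, vCPU is unblocked */

    v.pending_irqs = 1;  /* an interrupt is already queued this time */
    toy_block(&v);       /* WFI must not put the vCPU to sleep */
    return 0;
}

This is also why the v4 change to local_events_need_delivery() matters: if it
reported false while interrupts sit in lr_pending, a WFI issued at the wrong
moment could block the vCPU with work still queued.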