Trap guest WFI, block the guest VCPU unless it has pending interrupts.
Wake up the guest vcpu when a new interrupt for it arrives.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Changes in v3:
- rebased;
- implement local_events_need_delivery;
- move the case HSR_EC_WFI_WFE to do_trap_hypervisor.
---
xen/arch/arm/domain_build.c | 2 +-
xen/arch/arm/traps.c | 6 ++++++
xen/arch/arm/vgic.c | 4 +++-
xen/include/asm-arm/event.h | 8 ++++----
4 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index e9c84c7..e2a072b 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -457,7 +457,7 @@ int construct_dom0(struct domain *d)
v->arch.sctlr = SCTLR_BASE;
- WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR_EL2);
+ WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI,
HCR_EL2);
isb();
local_abort_enable();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 600113c..0612f85 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -29,6 +29,7 @@
#include <xen/hypercall.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
+#include <public/sched.h>
#include <public/xen.h>
#include <asm/regs.h>
#include <asm/cpregs.h>
@@ -920,6 +921,11 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs
*regs)
union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
switch (hsr.ec) {
+ /* at the moment we only trap WFI */
+ case HSR_EC_WFI_WFE:
+ do_sched_op_compat(SCHEDOP_block, 0);
+ regs->pc += hsr.len ? 4 : 2;
+ break;
case HSR_EC_CP15_32:
if ( ! is_pv32_domain(current->domain) )
goto bad_trap;
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 0d24df0..8efcefc 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -608,12 +608,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int
irq, int virtual)
{
list_add_tail(&n->inflight, &iter->inflight);
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
- return;
+ goto out;
}
}
list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
/* we have a new higher priority irq, inject it into the guest */
+out:
+ vcpu_unblock(v);
}
/*
diff --git a/xen/include/asm-arm/event.h b/xen/include/asm-arm/event.h
index 10f58af..6f5b5e4 100644
--- a/xen/include/asm-arm/event.h
+++ b/xen/include/asm-arm/event.h
@@ -1,15 +1,15 @@
#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__
+#include <asm/gic.h>
+
void vcpu_kick(struct vcpu *v);
void vcpu_mark_events_pending(struct vcpu *v);
static inline int local_events_need_delivery(void)
{
- /* TODO
- * return (vcpu_info(v, evtchn_upcall_pending) &&
- !vcpu_info(v, evtchn_upcall_mask)); */
- return 0;
+ return ( !list_empty(&current->arch.vgic.inflight_irqs) ||
+ vcpu_info(current, evtchn_upcall_pending) );
}
int local_event_delivery_is_enabled(void);
--
1.7.2.5