Most of v1 went in already. This rebases the remainder of v1 and addresses the
comments by dropping the use of the GROUP1 bit.

I think SMP is a substantial feature of Xen on ARM for 4.3 and it is therefore
worthy of a freeze exception. It touches only ARM specific code.

I would like to apply Julien's "implement smp_call_function" series[0] on top
of it which fixes reboot/shutdown but which makes the x86 smp_call_function
generic and so necessarily touches x86 and generic code. If that is considered
too risky (I personally think it should be OK, it's a pretty obvious move)
then we could duplicate that code for ARM.

[0] v2 at <1366119508-9227-3-git-send-email-julien.grall@citrix.com>

Ian.
Ian Campbell
2013-Apr-17 12:52 UTC
[PATCH 1/3] arm: gic: implement IPIs using SGI mechanism
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Drop GROUP1 bit, it has no meaning unless you are in secure mode.
printk not panic in smp_call_function to avoid infinite loop.
---
xen/arch/arm/arm32/mode_switch.S | 2 +-
xen/arch/arm/gic.c | 85 +++++++++++++++++++++++++++++++++++---
xen/arch/arm/smp.c | 14 ++----
xen/include/asm-arm/gic.h | 22 +++++++++-
4 files changed, 105 insertions(+), 18 deletions(-)
diff --git a/xen/arch/arm/arm32/mode_switch.S b/xen/arch/arm/arm32/mode_switch.S
index bc2be74..d6741d0 100644
--- a/xen/arch/arm/arm32/mode_switch.S
+++ b/xen/arch/arm/arm32/mode_switch.S
@@ -43,7 +43,7 @@ kick_cpus:
mov r2, #0x1
str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */
mov r2, #0xfe0000
- str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */
+ str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody, SGI0 = Event check */
dsb
str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */
mov pc, lr
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 12f2e7f..07c816b 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -356,10 +356,52 @@ void __init gic_init(void)
spin_unlock(&gic.lock);
}
+void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
+{
+ unsigned long mask = cpumask_bits(cpumask)[0];
+
+ ASSERT(sgi < 16); /* There are only 16 SGIs */
+
+ mask &= cpumask_bits(&cpu_online_map)[0];
+
+ ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */
+
+ dsb();
+
+ GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST
+ | (mask<<GICD_SGI_TARGET_SHIFT)
+ | sgi;
+}
+
+void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
+{
+ ASSERT(cpu < 7); /* Targets bitmap only supports 8 CPUs */
+ send_SGI_mask(cpumask_of(cpu), sgi);
+}
+
+void send_SGI_self(enum gic_sgi sgi)
+{
+ ASSERT(sgi < 16); /* There are only 16 SGIs */
+
+ dsb();
+
+ GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF
+ | sgi;
+}
+
+void send_SGI_allbutself(enum gic_sgi sgi)
+{
+ ASSERT(sgi < 16); /* There are only 16 SGIs */
+
+ dsb();
+
+ GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS
+ | sgi;
+}
+
void smp_send_state_dump(unsigned int cpu)
{
- printk("WARNING: unable to send state dump request to CPU%d\n",
cpu);
- /* XXX TODO -- send an SGI */
+ send_SGI_one(cpu, GIC_SGI_DUMP_STATE);
}
/* Set up the per-CPU parts of the GIC for a secondary CPU */
@@ -592,6 +634,28 @@ out:
return retval;
}
+static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi)
+{
+ /* Lower the priority */
+ GICC[GICC_EOIR] = sgi;
+
+ switch (sgi)
+ {
+ case GIC_SGI_EVENT_CHECK:
+ /* Nothing to do, will check for events on return path */
+ break;
+ case GIC_SGI_DUMP_STATE:
+ dump_execstate(regs);
+ break;
+ default:
+ panic("Unhandled SGI %d on CPU%d\n", sgi,
smp_processor_id());
+ break;
+ }
+
+ /* Deactivate */
+ GICC[GICC_DIR] = sgi;
+}
+
/* Accept an interrupt from the GIC and dispatch its handler */
void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
{
@@ -602,14 +666,23 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
do {
intack = GICC[GICC_IAR];
irq = intack & GICC_IA_IRQ;
- local_irq_enable();
- if (likely(irq < 1021))
+ if ( likely(irq >= 16 && irq < 1021) )
+ {
+ local_irq_enable();
do_IRQ(regs, irq, is_fiq);
+ local_irq_disable();
+ }
+ else if (unlikely(irq < 16))
+ {
+ unsigned int cpu = (intack & GICC_IA_CPU_MASK) >> GICC_IA_CPU_SHIFT;
+ do_sgi(regs, cpu, irq);
+ }
else
+ {
+ local_irq_disable();
break;
-
- local_irq_disable();
+ }
} while (1);
}
diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
index 12260f4..2a429bd 100644
--- a/xen/arch/arm/smp.c
+++ b/xen/arch/arm/smp.c
@@ -3,10 +3,11 @@
#include <asm/smp.h>
#include <asm/cpregs.h>
#include <asm/page.h>
+#include <asm/gic.h>
void flush_tlb_mask(const cpumask_t *mask)
{
- /* XXX IPI other processors */
+ /* No need to IPI other processors on ARM, the processor takes care of it. */
flush_xen_data_tlb();
}
@@ -15,17 +16,12 @@ void smp_call_function(
void *info,
int wait)
{
- /* TODO: No SMP just now, does not include self so nothing to do.
- cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- on_selected_cpus(&allbutself, func, info, wait);
- */
+ printk("%s not implmented\n", __func__);
}
+
void smp_send_event_check_mask(const cpumask_t *mask)
{
- /* TODO: No SMP just now, does not include self so nothing to do.
- send_IPI_mask(mask, EVENT_CHECK_VECTOR);
- */
+ send_SGI_mask(mask, GIC_SGI_EVENT_CHECK);
}
/*
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 6bf50bb..24c0d5c 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -51,6 +51,13 @@
#define GICD_SPENDSGIRN (0xF2C/4)
#define GICD_ICPIDR2 (0xFE8/4)
+#define GICD_SGI_TARGET_LIST (0UL<<24)
+#define GICD_SGI_TARGET_OTHERS (1UL<<24)
+#define GICD_SGI_TARGET_SELF (2UL<<24)
+#define GICD_SGI_TARGET_SHIFT (16)
+#define GICD_SGI_TARGET_MASK (0xFFUL<<GICD_SGI_TARGET_SHIFT)
+#define GICD_SGI_GROUP1 (1UL<<15)
+
#define GICC_CTLR (0x0000/4)
#define GICC_PMR (0x0004/4)
#define GICC_BPR (0x0008/4)
@@ -83,8 +90,9 @@
#define GICC_CTL_ENABLE 0x1
#define GICC_CTL_EOI (0x1 << 9)
-#define GICC_IA_IRQ 0x03ff
-#define GICC_IA_CPU 0x1c00
+#define GICC_IA_IRQ 0x03ff
+#define GICC_IA_CPU_MASK 0x1c00
+#define GICC_IA_CPU_SHIFT 10
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
@@ -157,6 +165,16 @@ extern int gicv_setup(struct domain *d);
extern void gic_save_state(struct vcpu *v);
extern void gic_restore_state(struct vcpu *v);
+/* SGI (AKA IPIs) */
+enum gic_sgi {
+ GIC_SGI_EVENT_CHECK = 0,
+ GIC_SGI_DUMP_STATE = 1,
+};
+extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi);
+extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi);
+extern void send_SGI_self(enum gic_sgi sgi);
+extern void send_SGI_allbutself(enum gic_sgi sgi);
+
/* print useful debug info */
extern void gic_dump_info(struct vcpu *v);
--
1.7.2.5
The current cpuid is held in x22 not x12.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
xen/arch/arm/arm64/head.S | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index bbde419..c18ef2b 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -307,7 +307,7 @@ paging:
dsb sy
ldr x0, =smp_up_cpu
ldr x1, [x0] /* Which CPU is being booted? */
- cmp x1, x12 /* Is it us? */
+ cmp x1, x22 /* Is it us? */
b.ne 1b
launch:
--
1.7.2.5
Ian Campbell
2013-Apr-17 12:52 UTC
[PATCH 3/3] arm: vgic: fix race in vgic_vcpu_inject_irq
The initial check for a still pending interrupt (!list_empty(&n->inflight))
needs to be covered by the vgic lock to avoid trying to insert the IRQ into the
inflight list simultaneously on 2 pCPUS. Expand the area covered by the lock
appropriately.
Also consolidate the unlocks on the exit path into one location.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/vgic.c | 11 +++++++----
1 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index dbfcd04..b30da78 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -584,9 +584,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
struct pending_irq *iter, *n = irq_to_pending(v, irq);
unsigned long flags;
- /* irq still pending */
+ spin_lock_irqsave(&v->arch.vgic.lock, flags);
+
+ /* irq already pending */
if (!list_empty(&n->inflight))
+ {
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
return;
+ }
priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte);
@@ -601,20 +606,18 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
if ( rank->ienable & (1 << (irq % 32)) )
gic_set_guest_irq(v, irq, GICH_LR_PENDING, priority);
- spin_lock_irqsave(&v->arch.vgic.lock, flags);
list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
{
if ( iter->priority > priority )
{
list_add_tail(&n->inflight, &iter->inflight);
- spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
goto out;
}
}
list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
+out:
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
/* we have a new higher priority irq, inject it into the guest */
-out:
vcpu_unblock(v);
}
--
1.7.2.5
On 17/04/13 13:52, Ian Campbell wrote:
> Most of v1 went in already. This rebases the remainder of v1
> and addresses the comments by dropping the use of the GROUP1 bit.
>
> I think SMP is a substantial feature of Xen on ARM for 4.3 and it is
> therefore worthy of a freeze exception. It touches only ARM specific
> code.

Yeah, I think ARM without SMP is almost a bug. :-) So for this series:

Acked-by: George Dunlap <george.dunlap@eu.citrix.com>

> I would like to apply Julien's "implement smp_call_function" series[0]
> on top of it which fixes reboot/shutdown but which makes the x86
> smp_call_function generic and so necessarily touches x86 and generic
> code. If that is considered too risky (I personally think it should be
> OK, it's a pretty obvious move) then we could duplicate that code for
> ARM.
>
> [0] v2 at <1366119508-9227-3-git-send-email-julien.grall@citrix.com>

Let me take a look. The most important question is, if there's a bug,
will we find it before the release? Since smp_call_function() is called
by basically everyone all the time, I think we can be pretty confident
that we'll find any bugs in x86 code. Would you agree?

 -George
On Wed, 2013-04-17 at 14:20 +0100, George Dunlap wrote:
> On 17/04/13 13:52, Ian Campbell wrote:
> > Most of v1 went in already. This rebases the remainder of v1
> > and addresses the comments by dropping the use of the GROUP1 bit.
> >
> > I think SMP is a substantial feature of Xen on ARM for 4.3 and it is
> > therefore worthy of a freeze exception. It touches only ARM specific
> > code.
>
> Yeah, I think ARM without SMP is almost a bug. :-) So for this series:
>
> Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
>
> > I would like to apply Julien's "implement smp_call_function" series[0]
> > on top of it which fixes reboot/shutdown but which makes the x86
> > smp_call_function generic and so necessarily touches x86 and generic
> > code. If that is considered too risky (I personally think it should be
> > OK, it's a pretty obvious move) then we could duplicate that code for
> > ARM.
> >
> > [0] v2 at <1366119508-9227-3-git-send-email-julien.grall@citrix.com>
>
> Let me take a look. The most important question is, if there's a bug,
> will we find it before the release? Since smp_call_function() is called
> by basically everyone all the time, I think we can be pretty confident
> that we'll find any bugs in x86 code. Would you agree?

It's not called much on ARM but I believe it is on x86, so yes.

It is also a pretty simple move with the only change being to refactor
the one arch specific line into a per arch function.

Ian.
On 17/04/13 14:28, Ian Campbell wrote:
> On Wed, 2013-04-17 at 14:20 +0100, George Dunlap wrote:
>> On 17/04/13 13:52, Ian Campbell wrote:
>>> Most of v1 went in already. This rebases the remainder of v1
>>> and addresses the comments by dropping the use of the GROUP1 bit.
>>>
>>> I think SMP is a substantial feature of Xen on ARM for 4.3 and it is
>>> therefore worthy of a freeze exception. It touches only ARM specific
>>> code.
>> Yeah, I think ARM without SMP is almost a bug. :-) So for this series:
>>
>> Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
>>
>>> I would like to apply Julien's "implement smp_call_function" series[0]
>>> on top of it which fixes reboot/shutdown but which makes the x86
>>> smp_call_function generic and so necessarily touches x86 and generic
>>> code. If that is considered too risky (I personally think it should be
>>> OK, it's a pretty obvious move) then we could duplicate that code for
>>> ARM.
>>>
>>> [0] v2 at <1366119508-9227-3-git-send-email-julien.grall@citrix.com>
>> Let me take a look. The most important question is, if there's a bug,
>> will we find it before the release? Since smp_call_function() is called
>> by basically everyone all the time, I think we can be pretty confident
>> that we'll find any bugs in x86 code. Would you agree?
> It's not called much on ARM but I believe it is on x86, so yes.
>
> It is also a pretty simple move with the only change being to refactor
> the one arch specific line into a per arch function.

If Keir or Jan think that, if there is a regression, we are highly likely to
catch it before the release, I'm OK with Julien's series.

 -George
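To make the refactoring under discussion more concrete: the idea is to keep
the body of smp_call_function() in common code and delegate only the sending
of the IPI to a per-architecture hook. The following is an illustrative
sketch only, not Julien's actual series; the hook name
arch_send_call_function_ipi(), the GIC_SGI_CALL_FUNCTION id and the
bookkeeping details are all assumptions made for the example.

/* Hypothetical common code (sketch only; names and layout assumed). */
#include <xen/cpumask.h>
#include <xen/smp.h>
#include <xen/spinlock.h>

void arch_send_call_function_ipi(const cpumask_t *mask);

static DEFINE_SPINLOCK(call_lock);
static void (*call_func)(void *);
static void *call_data;
static cpumask_t call_pending;

void on_selected_cpus(const cpumask_t *selected,
                      void (*func)(void *info), void *info, int wait)
{
    spin_lock(&call_lock);

    cpumask_copy(&call_pending, selected);
    call_func = func;
    call_data = info;
    smp_wmb();

    /* The one architecture-specific step: raise the IPI. */
    arch_send_call_function_ipi(selected);

    /* Simplified: the wait argument is ignored and we always wait. */
    while ( !cpumask_empty(&call_pending) )
        cpu_relax();

    spin_unlock(&call_lock);
}

void smp_call_function(void (*func)(void *info), void *info, int wait)
{
    cpumask_t allbutself;

    cpumask_andnot(&allbutself, &cpu_online_map,
                   cpumask_of(smp_processor_id()));
    on_selected_cpus(&allbutself, func, info, wait);
}

/* Run from the IPI handler on each target CPU. */
void smp_call_function_interrupt(void)
{
    if ( !cpumask_test_cpu(smp_processor_id(), &call_pending) )
        return;

    call_func(call_data);
    smp_mb();
    cpumask_clear_cpu(smp_processor_id(), &call_pending);
}

/*
 * Hypothetical ARM side of the hook, built on the SGI primitives added by
 * patch 1/3 (GIC_SGI_CALL_FUNCTION would be a new entry in enum gic_sgi,
 * dispatched from do_sgi()).
 */
#include <asm/gic.h>

void arch_send_call_function_ipi(const cpumask_t *mask)
{
    send_SGI_mask(mask, GIC_SGI_CALL_FUNCTION);
}

On x86 the hook would presumably just wrap the existing send_IPI_mask() path,
which is why the move is described as largely mechanical.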
Stefano Stabellini
2013-Apr-18 11:38 UTC
Re: [PATCH 1/3] arm: gic: implement IPIs using SGI mechanism
On Wed, 17 Apr 2013, Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> ---
> v2: Drop GROUP1 bit, it has no meaning unless you are in secure mode.
>     printk not panic in smp_call_function to avoid infinite loop.

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Ian Campbell
2013-Apr-18 14:16 UTC
Re: [PATCH 3/3] arm: vgic: fix race in vgic_vcpu_inject_irq
On Wed, 2013-04-17 at 13:52 +0100, Ian Campbell wrote:
> The initial check for a still pending interrupt (!list_empty(&n->inflight))
> needs to be covered by the vgic lock to avoid trying to insert the IRQ into the
> inflight list simultaneously on 2 pCPUS. Expand the area covered by the lock
> appropriately.
>
> Also consolidate the unlocks on the exit path into one location.
>
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Thanks, I've applied this series.

I got some rejects when applying this particular patch since it was based on
Stefano's "xen/arm: trap guest WFI"; the rejects were down to the lack of the
out: label and vcpu_kick at the end of vgic_vcpu_inject_irq. What actually got
applied is:

commit e83d6b9432af603200f065b499b8e4b78e92842d
Author: Ian Campbell <ian.campbell@citrix.com>
Date: Wed Apr 17 13:52:34 2013 +0100

    arm: vgic: fix race in vgic_vcpu_inject_irq

    The initial check for a still pending interrupt (!list_empty(&n->inflight))
    needs to be covered by the vgic lock to avoid trying to insert the IRQ into the
    inflight list simultaneously on 2 pCPUS. Expand the area covered by the lock
    appropriately.

    Also consolidate the unlocks on the exit path into one location.

    Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
    Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index d9ceaaa..4d8da02 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -584,9 +584,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
 struct pending_irq *iter, *n = irq_to_pending(v, irq);
 unsigned long flags;
- /* irq still pending */
+ spin_lock_irqsave(&v->arch.vgic.lock, flags);
+
+ /* irq already pending */
 if (!list_empty(&n->inflight))
+ {
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
 return;
+ }
 priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte);
@@ -601,17 +606,16 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
 if ( rank->ienable & (1 << (irq % 32)) )
 gic_set_guest_irq(v, irq, GICH_LR_PENDING, priority);
- spin_lock_irqsave(&v->arch.vgic.lock, flags);
 list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
 {
 if ( iter->priority > priority )
 {
 list_add_tail(&n->inflight, &iter->inflight);
- spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
- return;
+ goto out;
 }
 }
 list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
+out:
 spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
 /* we have a new higher priority irq, inject it into the guest */
 }