Ian Campbell
2010-Oct-29 08:33 UTC
[Xen-devel] [PATCH] xen: events: do not unmask polled ipis on restore.
The Xen PV spinlock backend attempts to setup an IPI to IRQ binding which is only to be used via xen_poll_irq rather received directly. Unfortunately restore_cpu_ipis unconditionally unmasks each IPI leading to: [ 21.970432] ------------[ cut here ]------------ [ 21.970432] kernel BUG at /local/scratch/ianc/devel/kernels/linux-2.6/arch/x86/xen/spinlock.c:343! [ 21.970432] invalid opcode: 0000 [#1] SMP [ 21.970432] last sysfs file: /sys/devices/virtual/net/lo/operstate [ 21.970432] Modules linked in: [ 21.970432] [ 21.970432] Pid: 0, comm: swapper Not tainted (2.6.32.24-x86_32p-xen-01034-g787c727 #34) [ 21.970432] EIP: 0061:[<c102e209>] EFLAGS: 00010046 CPU: 3 [ 21.970432] EIP is at dummy_handler+0x3/0x7 [ 21.970432] EAX: 0000021c EBX: dfc16880 ECX: 0000001a EDX: 00000000 [ 21.970432] ESI: dfc02c00 EDI: 00000001 EBP: dfc47e10 ESP: dfc47e10 [ 21.970432] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0069 [ 21.970432] Process swapper (pid: 0, ti=dfc46000 task=dfc39440 task.ti=dfc46000) [ 21.970432] Stack: [ 21.970432] dfc47e30 c10a39f0 0000021c 00000000 00000000 dfc16880 0000021c 00000001 [ 21.970432] <0> dfc47e40 c10a4f08 0000021c 00000000 dfc47e78 c12240a7 c1839284 c1839284 [ 21.970432] <0> 00000200 00000000 00000000 f5720000 c1f3d028 c1f3d02c 00000180 dfc47e90 [ 21.970432] Call Trace: [ 21.970432] [<c10a39f0>] ? handle_IRQ_event+0x5f/0x122 [ 21.970432] [<c10a4f08>] ? handle_percpu_irq+0x2f/0x55 [ 21.970432] [<c12240a7>] ? __xen_evtchn_do_upcall+0xdb/0x15f [ 21.970432] [<c122481e>] ? xen_evtchn_do_upcall+0x20/0x30 [ 21.970432] [<c1030d47>] ? xen_do_upcall+0x7/0xc [ 21.970432] [<c102007b>] ? apic_reg_read+0xd3/0x22d [ 21.970432] [<c1002227>] ? hypercall_page+0x227/0x1005 [ 21.970432] [<c102d30b>] ? xen_force_evtchn_callback+0xf/0x14 [ 21.970432] [<c102da7c>] ? check_events+0x8/0xc [ 21.970432] [<c102da3b>] ? xen_irq_enable_direct_end+0x0/0x1 [ 21.970432] [<c105e485>] ? finish_task_switch+0x62/0xba [ 21.970432] [<c14e3f84>] ? schedule+0x808/0x89d [ 21.970432] [<c1084dc5>] ? 
hrtimer_start_expires+0x1a/0x22 [ 21.970432] [<c1085154>] ? tick_nohz_restart_sched_tick+0x15a/0x162 [ 21.970432] [<c102f43a>] ? cpu_idle+0x6d/0x6f [ 21.970432] [<c14db29e>] ? cpu_bringup_and_idle+0xd/0xf [ 21.970432] Code: 5d 0f 95 c0 0f b6 c0 c3 55 66 83 78 02 00 89 e5 5d 0f 95 c0 0f b6 c0 c3 55 b2 01 86 10 31 c0 84 d2 89 e5 0f 94 c0 5d c3 55 89 e5 <0f> 0b eb fe 55 80 3d 4c ce 84 c1 00 89 e5 57 56 89 c6 53 74 15 [ 21.970432] EIP: [<c102e209>] dummy_handler+0x3/0x7 SS:ESP 0069:dfc47e10 [ 21.970432] ---[ end trace c0b71f7e12cf3011 ]--- Add a new bind function which explicitly binds a polled-only IPI and track this state in the event channel core so that we can do the right thing on restore. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- arch/x86/xen/spinlock.c | 16 +++------------- drivers/xen/events.c | 45 ++++++++++++++++++++++++++++++++++++++++----- include/xen/events.h | 4 ++++ 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 36a5141..09655ca 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -338,29 +338,19 @@ static void xen_spin_unlock(struct raw_spinlock *lock) xen_spin_unlock_slow(xl); } -static irqreturn_t dummy_handler(int irq, void *dev_id) -{ - BUG(); - return IRQ_HANDLED; -} - void __cpuinit xen_init_lock_cpu(int cpu) { int irq; const char *name; name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); - irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, + irq = bind_polled_ipi_to_irq(XEN_SPIN_UNLOCK_VECTOR, cpu, - dummy_handler, IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, - name, - NULL); + name); - if (irq >= 0) { - disable_irq(irq); /* make sure it''s never delivered */ + if (irq >= 0) per_cpu(lock_kicker_irq, cpu) = irq; - } printk("cpu %d spinlock event irq %d\n", cpu, irq); } diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 7b29ae1..f8b53b5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -94,7 +94,10 @@ struct irq_info 
union { unsigned short virq; - enum ipi_vector ipi; + struct { + enum ipi_vector vector; + unsigned char polled; + } ipi; struct { unsigned short gsi; unsigned char vector; @@ -148,7 +151,7 @@ static struct irq_info mk_evtchn_info(unsigned short evtchn) static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi) { return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn, - .cpu = 0, .u.ipi = ipi }; + .cpu = 0, .u.ipi.vector = ipi, .u.ipi.polled = 0 }; } static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq) @@ -191,7 +194,7 @@ static enum ipi_vector ipi_from_irq(unsigned irq) BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); - return info->u.ipi; + return info->u.ipi.vector; } static unsigned virq_from_irq(unsigned irq) @@ -971,6 +974,33 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, return irq; } + +static irqreturn_t polled_ipi_handler(int irq, void *dev_id) +{ + BUG(); + return IRQ_HANDLED; +} + +int bind_polled_ipi_to_irq(enum ipi_vector ipi, + unsigned int cpu, + unsigned long irqflags, + const char *devname) +{ + int irq, retval; + + irq = bind_ipi_to_irqhandler(ipi, cpu, polled_ipi_handler, + irqflags, devname, NULL); + if (irq < 0) + return irq; + + info_for_irq(irq)->u.ipi.polled = 1; + + disable_irq(irq); /* make sure it''s never delivered */ + + return irq; + +} + void unbind_from_irqhandler(unsigned int irq, void *dev_id) { free_irq(irq, dev_id); @@ -1290,6 +1320,7 @@ static void restore_cpu_virqs(unsigned int cpu) static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; + int polled; int ipi, irq, evtchn; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { @@ -1306,12 +1337,16 @@ static void restore_cpu_ipis(unsigned int cpu) evtchn = bind_ipi.port; /* Record the new mapping. */ + polled = info_for_irq(irq)->u.ipi.polled; evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_ipi_info(evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); - /* Ready for use. 
*/ - unmask_evtchn(evtchn); + /* Ready for use. Polled IPIs remain masked */ + if (polled) + info_for_irq(irq)->u.ipi.polled = 1; + else + unmask_evtchn(evtchn); } } diff --git a/include/xen/events.h b/include/xen/events.h index 7e17e2a..b2f09ad 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -24,6 +24,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned long irqflags, const char *devname, void *dev_id); +int bind_polled_ipi_to_irq(enum ipi_vector ipi, + unsigned int cpu, + unsigned long irqflags, + const char *devname); int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, -- 1.5.6.5 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Jeremy Fitzhardinge
2010-Oct-29 16:44 UTC
[Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
On 10/29/2010 01:33 AM, Ian Campbell wrote:> The Xen PV spinlock backend attempts to setup an IPI to IRQ binding > which is only to be used via xen_poll_irq rather received directly. > > Unfortunately restore_cpu_ipis unconditionally unmasks each IPI > leading to:Gosh I wonder why we hadn''t seen this before?> [ 21.970432] ------------[ cut here ]------------ > [ 21.970432] kernel BUG at /local/scratch/ianc/devel/kernels/linux-2.6/arch/x86/xen/spinlock.c:343! > [ 21.970432] invalid opcode: 0000 [#1] SMP > [ 21.970432] last sysfs file: /sys/devices/virtual/net/lo/operstate > [ 21.970432] Modules linked in: > [ 21.970432] > [ 21.970432] Pid: 0, comm: swapper Not tainted (2.6.32.24-x86_32p-xen-01034-g787c727 #34) > [ 21.970432] EIP: 0061:[<c102e209>] EFLAGS: 00010046 CPU: 3 > [ 21.970432] EIP is at dummy_handler+0x3/0x7 > [ 21.970432] EAX: 0000021c EBX: dfc16880 ECX: 0000001a EDX: 00000000 > [ 21.970432] ESI: dfc02c00 EDI: 00000001 EBP: dfc47e10 ESP: dfc47e10 > [ 21.970432] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0069 > [ 21.970432] Process swapper (pid: 0, ti=dfc46000 task=dfc39440 task.ti=dfc46000) > [ 21.970432] Stack: > [ 21.970432] dfc47e30 c10a39f0 0000021c 00000000 00000000 dfc16880 0000021c 00000001 > [ 21.970432] <0> dfc47e40 c10a4f08 0000021c 00000000 dfc47e78 c12240a7 c1839284 c1839284 > [ 21.970432] <0> 00000200 00000000 00000000 f5720000 c1f3d028 c1f3d02c 00000180 dfc47e90 > [ 21.970432] Call Trace: > [ 21.970432] [<c10a39f0>] ? handle_IRQ_event+0x5f/0x122 > [ 21.970432] [<c10a4f08>] ? handle_percpu_irq+0x2f/0x55 > [ 21.970432] [<c12240a7>] ? __xen_evtchn_do_upcall+0xdb/0x15f > [ 21.970432] [<c122481e>] ? xen_evtchn_do_upcall+0x20/0x30 > [ 21.970432] [<c1030d47>] ? xen_do_upcall+0x7/0xc > [ 21.970432] [<c102007b>] ? apic_reg_read+0xd3/0x22d > [ 21.970432] [<c1002227>] ? hypercall_page+0x227/0x1005 > [ 21.970432] [<c102d30b>] ? xen_force_evtchn_callback+0xf/0x14 > [ 21.970432] [<c102da7c>] ? check_events+0x8/0xc > [ 21.970432] [<c102da3b>] ? 
xen_irq_enable_direct_end+0x0/0x1 > [ 21.970432] [<c105e485>] ? finish_task_switch+0x62/0xba > [ 21.970432] [<c14e3f84>] ? schedule+0x808/0x89d > [ 21.970432] [<c1084dc5>] ? hrtimer_start_expires+0x1a/0x22 > [ 21.970432] [<c1085154>] ? tick_nohz_restart_sched_tick+0x15a/0x162 > [ 21.970432] [<c102f43a>] ? cpu_idle+0x6d/0x6f > [ 21.970432] [<c14db29e>] ? cpu_bringup_and_idle+0xd/0xf > [ 21.970432] Code: 5d 0f 95 c0 0f b6 c0 c3 55 66 83 78 02 00 89 e5 5d 0f 95 c0 0f b6 c0 c3 55 b2 01 86 10 31 c0 84 d2 89 e5 0f 94 c0 5d c3 55 89 e5 <0f> 0b eb fe 55 80 3d 4c ce 84 c1 00 89 e5 57 56 89 c6 53 74 15 > [ 21.970432] EIP: [<c102e209>] dummy_handler+0x3/0x7 SS:ESP 0069:dfc47e10 > [ 21.970432] ---[ end trace c0b71f7e12cf3011 ]--- > > Add a new bind function which explicitly binds a polled-only IPI and > track this state in the event channel core so that we can do the right > thing on restore.This doesn''t seem to be the right fix though. What if an IPI happens to be blocked at suspend time? I wonder if this shouldn''t be done at the irq layer, based on the desc''s irq state? 
J> Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > --- > arch/x86/xen/spinlock.c | 16 +++------------- > drivers/xen/events.c | 45 ++++++++++++++++++++++++++++++++++++++++----- > include/xen/events.h | 4 ++++ > 3 files changed, 47 insertions(+), 18 deletions(-) > > diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c > index 36a5141..09655ca 100644 > --- a/arch/x86/xen/spinlock.c > +++ b/arch/x86/xen/spinlock.c > @@ -338,29 +338,19 @@ static void xen_spin_unlock(struct raw_spinlock *lock) > xen_spin_unlock_slow(xl); > } > > -static irqreturn_t dummy_handler(int irq, void *dev_id) > -{ > - BUG(); > - return IRQ_HANDLED; > -} > - > void __cpuinit xen_init_lock_cpu(int cpu) > { > int irq; > const char *name; > > name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); > - irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, > + irq = bind_polled_ipi_to_irq(XEN_SPIN_UNLOCK_VECTOR, > cpu, > - dummy_handler, > IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, > - name, > - NULL); > + name); > > - if (irq >= 0) { > - disable_irq(irq); /* make sure it''s never delivered */ > + if (irq >= 0) > per_cpu(lock_kicker_irq, cpu) = irq; > - } > > printk("cpu %d spinlock event irq %d\n", cpu, irq); > } > diff --git a/drivers/xen/events.c b/drivers/xen/events.c > index 7b29ae1..f8b53b5 100644 > --- a/drivers/xen/events.c > +++ b/drivers/xen/events.c > @@ -94,7 +94,10 @@ struct irq_info > > union { > unsigned short virq; > - enum ipi_vector ipi; > + struct { > + enum ipi_vector vector; > + unsigned char polled; > + } ipi; > struct { > unsigned short gsi; > unsigned char vector; > @@ -148,7 +151,7 @@ static struct irq_info mk_evtchn_info(unsigned short evtchn) > static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi) > { > return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn, > - .cpu = 0, .u.ipi = ipi }; > + .cpu = 0, .u.ipi.vector = ipi, .u.ipi.polled = 0 }; > } > > static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq) > 
@@ -191,7 +194,7 @@ static enum ipi_vector ipi_from_irq(unsigned irq) > BUG_ON(info == NULL); > BUG_ON(info->type != IRQT_IPI); > > - return info->u.ipi; > + return info->u.ipi.vector; > } > > static unsigned virq_from_irq(unsigned irq) > @@ -971,6 +974,33 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, > return irq; > } > > + > +static irqreturn_t polled_ipi_handler(int irq, void *dev_id) > +{ > + BUG(); > + return IRQ_HANDLED; > +} > + > +int bind_polled_ipi_to_irq(enum ipi_vector ipi, > + unsigned int cpu, > + unsigned long irqflags, > + const char *devname) > +{ > + int irq, retval; > + > + irq = bind_ipi_to_irqhandler(ipi, cpu, polled_ipi_handler, > + irqflags, devname, NULL); > + if (irq < 0) > + return irq; > + > + info_for_irq(irq)->u.ipi.polled = 1; > + > + disable_irq(irq); /* make sure it''s never delivered */ > + > + return irq; > + > +} > + > void unbind_from_irqhandler(unsigned int irq, void *dev_id) > { > free_irq(irq, dev_id); > @@ -1290,6 +1320,7 @@ static void restore_cpu_virqs(unsigned int cpu) > static void restore_cpu_ipis(unsigned int cpu) > { > struct evtchn_bind_ipi bind_ipi; > + int polled; > int ipi, irq, evtchn; > > for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { > @@ -1306,12 +1337,16 @@ static void restore_cpu_ipis(unsigned int cpu) > evtchn = bind_ipi.port; > > /* Record the new mapping. */ > + polled = info_for_irq(irq)->u.ipi.polled; > evtchn_to_irq[evtchn] = irq; > irq_info[irq] = mk_ipi_info(evtchn, ipi); > bind_evtchn_to_cpu(evtchn, cpu); > > - /* Ready for use. */ > - unmask_evtchn(evtchn); > + /* Ready for use. 
Polled IPIs remain masked */ > + if (polled) > + info_for_irq(irq)->u.ipi.polled = 1; > + else > + unmask_evtchn(evtchn); > > } > } > diff --git a/include/xen/events.h b/include/xen/events.h > index 7e17e2a..b2f09ad 100644 > --- a/include/xen/events.h > +++ b/include/xen/events.h > @@ -24,6 +24,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, > unsigned long irqflags, > const char *devname, > void *dev_id); > +int bind_polled_ipi_to_irq(enum ipi_vector ipi, > + unsigned int cpu, > + unsigned long irqflags, > + const char *devname); > int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, > unsigned int remote_port, > irq_handler_t handler,_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ian Campbell
2010-Oct-29 17:35 UTC
[Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
On Fri, 2010-10-29 at 17:44 +0100, Jeremy Fitzhardinge wrote:> On 10/29/2010 01:33 AM, Ian Campbell wrote: > > The Xen PV spinlock backend attempts to setup an IPI to IRQ binding > > which is only to be used via xen_poll_irq rather received directly. > > > > Unfortunately restore_cpu_ipis unconditionally unmasks each IPI > > leading to: > > Gosh I wonder why we hadn't seen this before? I must admit I don't normally enable CONFIG_PARAVIRT_SPINLOCKS I happened to be running someone else's config when I noticed this. Possibly relates to switching to the edge triggered handler and the related mask/unmask vs clear etc frobbing? _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Jeremy Fitzhardinge
2010-Oct-29 17:42 UTC
Re: [Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
On 10/29/2010 10:35 AM, Ian Campbell wrote:> On Fri, 2010-10-29 at 17:44 +0100, Jeremy Fitzhardinge wrote: >> On 10/29/2010 01:33 AM, Ian Campbell wrote: >>> The Xen PV spinlock backend attempts to setup an IPI to IRQ binding >>> which is only to be used via xen_poll_irq rather received directly. >>> >>> Unfortunately restore_cpu_ipis unconditionally unmasks each IPI >>> leading to: >> Gosh I wonder why we hadn't seen this before? > I must admit I don't normally enable CONFIG_PARAVIRT_SPINLOCKS I > happened to be running someone else's config when I noticed this. > > Possibly relates to switching to the edge triggered handler and the > related mask/unmask vs clear etc frobbing? Possibly. Did you see my comment further down? J> > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xensource.com > http://lists.xensource.com/xen-devel >_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ian Campbell
2010-Oct-29 17:45 UTC
Re: [Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
On Fri, 2010-10-29 at 18:42 +0100, Jeremy Fitzhardinge wrote:> On 10/29/2010 10:35 AM, Ian Campbell wrote: > > On Fri, 2010-10-29 at 17:44 +0100, Jeremy Fitzhardinge wrote: > >> On 10/29/2010 01:33 AM, Ian Campbell wrote: > >>> The Xen PV spinlock backend attempts to setup an IPI to IRQ binding > >>> which is only to be used via xen_poll_irq rather received directly. > >>> > >>> Unfortunately restore_cpu_ipis unconditionally unmasks each IPI > >>> leading to: > >> Gosh I wonder why we hadn't seen this before? > > I must admit I don't normally enable CONFIG_PARAVIRT_SPINLOCKS I > > happened to be running someone else's config when I noticed this. > > > > Possibly relates to switching to the edge triggered handler and the > > related mask/unmask vs clear etc frobbing? > > Possibly. Did you see my comment further down? No! Going back now... Ian. _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ian Campbell
2010-Oct-29 17:52 UTC
[Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
On Fri, 2010-10-29 at 17:44 +0100, Jeremy Fitzhardinge wrote:> On 10/29/2010 01:33 AM, Ian Campbell wrote:> > Add a new bind function which explicitly binds a polled-only IPI and > > track this state in the event channel core so that we can do the right > > thing on restore. > > This doesn't seem to be the right fix though. What if an IPI happens to > be blocked at suspend time? Not just IPIs but other types of IRQ too? I guess in the majority of cases a spurious evtchn firing doesn't do any real harm. IIRC the VCPU placement stuff (which gets redone on resume IIRC) causes all evtchns to retrigger so we must be getting away with it pretty regularly...> I wonder if this shouldn't be done at the irq layer, based on the desc's > irq state? It looks like suspend_device_irqs/resume_device_irqs takes care of the mask/unmask element of restore for us (including unmasking irqs marked with IRQF_NO_SUSPEND when appropriate). So we know the evtchn will be masked on save and Xen brings us back up with all evtchns masked so all restore_cpu_ipis needs to do is the rebinding of ipi to evtchn? Ian. _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ian Campbell
2010-Nov-01 14:51 UTC
Re: [Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
On Fri, 2010-10-29 at 18:52 +0100, Ian Campbell wrote:> > > > I wonder if this shouldn't be done at the irq layer, based on the > > desc's irq state? > > It looks like suspend_device_irqs/resume_device_irqs takes care of the > mask/unmask element of restore for us (including unmasking irqs marked > with IRQF_NO_SUSPEND when appropriate). So we know the evtchn will be > masked on save and Xen brings us back up with all evtchns masked so > all restore_cpu_ipis needs to do is the rebinding of ipi to evtchn? A naive attempt at this (i.e. remove the unmask_evtchn calls from restore_cpu_{ipis,virqs}) doesn't work, since we (unsurprisingly) end up with some evtchn's remaining masked... I'll take another look. It's possible that this will also interact with Stefano's changes to the irq_chip interactions since he is trying to ensure that our callbacks have the semantics expected by the core. BTW, do you think the polled-only IPI are unusual/special enough to have their own interface with the event channel core (e.g. bind_polled_ipi_to_irq) even if the internals of the implementation doesn't turn into anything particularly unusual? Ian. _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ian Campbell
2010-Nov-01 16:31 UTC
Re: [Xen-devel] Re: [PATCH] xen: events: do not unmask polled ipis on restore.
We only need to consider IRQF_NO_SUSPEND interrupts manually in the Xen event channel code. 8<--------------------->From 47bac02c4664fdd1037ad73bdf457fa07a0a31b7 Mon Sep 17 00:00:00 2001From: Ian Campbell <ian.campbell@citrix.com> Date: Mon, 1 Nov 2010 16:30:09 +0000 Subject: [PATCH] xen: events: do not unmask event channels on resume The IRQ core code will take care of disabling and reenabling interrupts over suspend resume automatically, therefore we do not need to do this in the Xen event channel code. The only exception is those event channels marked IRQF_NO_SUSPEND which the IRQ core ignores. We must unmask these ourselves, taking care to obey the current IRQ_DISABLED status. Failure check for IRQ_DISABLED leads to enabling polled only event channels, such as that associated with the pv spinlocks, which must never be enabled: [ 21.970432] ------------[ cut here ]------------ [ 21.970432] kernel BUG at arch/x86/xen/spinlock.c:343! [ 21.970432] invalid opcode: 0000 [#1] SMP [ 21.970432] last sysfs file: /sys/devices/virtual/net/lo/operstate [ 21.970432] Modules linked in: [ 21.970432] [ 21.970432] Pid: 0, comm: swapper Not tainted (2.6.32.24-x86_32p-xen-01034-g787c727 #34) [ 21.970432] EIP: 0061:[<c102e209>] EFLAGS: 00010046 CPU: 3 [ 21.970432] EIP is at dummy_handler+0x3/0x7 [ 21.970432] EAX: 0000021c EBX: dfc16880 ECX: 0000001a EDX: 00000000 [ 21.970432] ESI: dfc02c00 EDI: 00000001 EBP: dfc47e10 ESP: dfc47e10 [ 21.970432] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0069 [ 21.970432] Process swapper (pid: 0, ti=dfc46000 task=dfc39440 task.ti=dfc46000) [ 21.970432] Stack: [ 21.970432] dfc47e30 c10a39f0 0000021c 00000000 00000000 dfc16880 0000021c 00000001 [ 21.970432] <0> dfc47e40 c10a4f08 0000021c 00000000 dfc47e78 c12240a7 c1839284 c1839284 [ 21.970432] <0> 00000200 00000000 00000000 f5720000 c1f3d028 c1f3d02c 00000180 dfc47e90 [ 21.970432] Call Trace: [ 21.970432] [<c10a39f0>] ? handle_IRQ_event+0x5f/0x122 [ 21.970432] [<c10a4f08>] ? 
handle_percpu_irq+0x2f/0x55 [ 21.970432] [<c12240a7>] ? __xen_evtchn_do_upcall+0xdb/0x15f [ 21.970432] [<c122481e>] ? xen_evtchn_do_upcall+0x20/0x30 [ 21.970432] [<c1030d47>] ? xen_do_upcall+0x7/0xc [ 21.970432] [<c102007b>] ? apic_reg_read+0xd3/0x22d [ 21.970432] [<c1002227>] ? hypercall_page+0x227/0x1005 [ 21.970432] [<c102d30b>] ? xen_force_evtchn_callback+0xf/0x14 [ 21.970432] [<c102da7c>] ? check_events+0x8/0xc [ 21.970432] [<c102da3b>] ? xen_irq_enable_direct_end+0x0/0x1 [ 21.970432] [<c105e485>] ? finish_task_switch+0x62/0xba [ 21.970432] [<c14e3f84>] ? schedule+0x808/0x89d [ 21.970432] [<c1084dc5>] ? hrtimer_start_expires+0x1a/0x22 [ 21.970432] [<c1085154>] ? tick_nohz_restart_sched_tick+0x15a/0x162 [ 21.970432] [<c102f43a>] ? cpu_idle+0x6d/0x6f [ 21.970432] [<c14db29e>] ? cpu_bringup_and_idle+0xd/0xf [ 21.970432] Code: 5d 0f 95 c0 0f b6 c0 c3 55 66 83 78 02 00 89 e5 5d 0f 95 \ c0 0f b6 c0 c3 55 b2 01 86 10 31 c0 84 d2 89 e5 0f 94 c0 5d c3 55 89 e5 <0f> 0b \ eb fe 55 80 3d 4c ce 84 c1 00 89 e5 57 56 89 c6 53 74 15 [ 21.970432] EIP: [<c102e209>] dummy_handler+0x3/0x7 SS:ESP 0069:dfc47e10 [ 21.970432] ---[ end trace c0b71f7e12cf3011 ]--- Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- drivers/xen/events.c | 25 ++++++++++++++++++------- 1 files changed, 18 insertions(+), 7 deletions(-) diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 7b29ae1..8682331 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -1281,9 +1281,6 @@ static void restore_cpu_virqs(unsigned int cpu) evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_virq_info(evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); - - /* Ready for use. */ - unmask_evtchn(evtchn); } } @@ -1309,10 +1306,6 @@ static void restore_cpu_ipis(unsigned int cpu) evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_ipi_info(evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); - - /* Ready for use. 
*/ - unmask_evtchn(evtchn); - } } @@ -1385,6 +1378,7 @@ EXPORT_SYMBOL_GPL(xen_ignore_irq); void xen_irq_resume(void) { unsigned int cpu, irq, evtchn; + struct irq_desc *desc; init_evtchn_cpu_bindings(); @@ -1404,6 +1398,23 @@ void xen_irq_resume(void) restore_cpu_ipis(cpu); } + /* + * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These + * are not handled by the IRQ core. + */ + for_each_irq_desc(irq, desc) { + if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) + continue; + if (desc->status & IRQ_DISABLED) + continue; + + evtchn = evtchn_from_irq(irq); + if (evtchn == -1) + continue; + + unmask_evtchn(evtchn); + } + if (pirq_eoi_does_unmask) { struct physdev_pirq_eoi_gmfn eoi_gmfn; -- 1.5.6.5 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel