Here are a couple of patches to improve the softlockup watchdog.

The first changes the softlockup timer from using jiffies to sched_clock() as a timebase. Xen and VMI implement sched_clock() as counting unstolen time, so time stolen by the hypervisor won't cause the watchdog to bite.

The second adds per-cpu enable flags for the watchdog timer. This allows the timer to be disabled when the CPU goes into a (potentially unbounded) tickless sleep.

I know this conflicts with fix-bogus-softlockup-warning-with-sysrq-t.patch in -mm2. I think that patch incorrectly changes the behaviour of the softlockup watchdog; a better solution is to temporarily disable the watchdog while doing something known to be CPU-consuming, like a long sysrq output.

J
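For background, Xen and VMI implement sched_clock() as roughly "raw time minus stolen time". Here is a minimal userspace sketch of that idea; read_raw_ns() and read_stolen_ns() are hypothetical stand-ins for the counters a hypervisor exposes (e.g. via its shared-info area), not real kernel or Xen APIs:

/*
 * Sketch only: models how a paravirtualized sched_clock() can count
 * unstolen time.  read_raw_ns() and read_stolen_ns() are hypothetical
 * stand-ins for hypervisor-provided counters, not real APIs.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t raw_ns;		/* wall-clock ns since boot */
static uint64_t stolen_ns;	/* ns the vcpu spent descheduled */

static uint64_t read_raw_ns(void)    { return raw_ns; }
static uint64_t read_stolen_ns(void) { return stolen_ns; }

/* Unstolen time: advances only while the guest actually runs. */
static uint64_t sched_clock_unstolen(void)
{
	return read_raw_ns() - read_stolen_ns();
}

int main(void)
{
	/* 15s of wall-clock time pass, 12s of it stolen by the hypervisor. */
	raw_ns    = 15ull * 1000000000;
	stolen_ns = 12ull * 1000000000;

	/* A wall-clock timebase sees 15s and would trip a 10s watchdog;
	 * the unstolen clock sees only 3s of guest execution. */
	printf("raw:      %llu ns\n", (unsigned long long)read_raw_ns());
	printf("unstolen: %llu ns\n", (unsigned long long)sched_clock_unstolen());
	return 0;
}

--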
Jeremy Fitzhardinge
2007-Apr-18 13:02 UTC
[patch 2/2] percpu enable flag for softlockup watchdog
On a NO_HZ system, there may be an arbitrarily long delay between
ticks on a CPU. When we're disabling ticks for a CPU, also disable
the softlockup watchdog timer.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: James Morris <jmorris@namei.org>
Cc: Dan Hecht <dhecht@vmware.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
include/linux/sched.h | 8 ++++++++
kernel/softlockup.c | 23 +++++++++++++++++++----
kernel/time/tick-sched.c | 34 +++++++++++++++-------------------
3 files changed, 42 insertions(+), 23 deletions(-)
==================================================================
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -232,10 +232,18 @@ extern void scheduler_tick(void);
#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
+extern void softlockup_enable(void);
+extern void softlockup_disable(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
static inline void softlockup_tick(void)
+{
+}
+static inline void softlockup_enable(void)
+{
+}
+static inline void softlockup_disable(void)
{
}
static inline void spawn_softlockup_task(void)
==================================================================
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -20,6 +20,7 @@ static DEFINE_PER_CPU(unsigned long long
static DEFINE_PER_CPU(unsigned long long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(int, enabled);
static int did_panic = 0;
@@ -41,6 +42,18 @@ void touch_softlockup_watchdog(void)
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
+void softlockup_enable(void)
+{
+ touch_softlockup_watchdog();
+ wmb(); /* update timestamp before enable */
+ __get_cpu_var(enabled) = 1;
+}
+
+void softlockup_disable(void)
+{
+ __get_cpu_var(enabled) = 0;
+}
+
/*
* This callback runs from the timer interrupt, and checks
* whether the watchdog thread has hung or not:
@@ -51,8 +64,8 @@ void softlockup_tick(void)
unsigned long long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
unsigned long long now;
- /* watchdog task hasn't updated timestamp yet */
- if (touch_timestamp == 0)
+ /* return if not enabled */
+ if (!__get_cpu_var(enabled))
return;
/* report at most once a second */
@@ -95,8 +108,8 @@ static int watchdog(void * __bind_cpu)
sched_setscheduler(current, SCHED_FIFO, &param);
current->flags |= PF_NOFREEZE;
- /* initialize timestamp */
- touch_softlockup_watchdog();
+ /* enable on this cpu */
+ softlockup_enable();
/*
* Run briefly once per second to reset the softlockup timestamp.
@@ -109,6 +122,8 @@ static int watchdog(void * __bind_cpu)
touch_softlockup_watchdog();
schedule();
}
+
+ softlockup_disable();
return 0;
}
==================================================================
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -228,6 +228,8 @@ void tick_nohz_stop_sched_tick(void)
ts->idle_tick = ts->sched_timer.expires;
ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies;
+
+ softlockup_disable();
}
/*
* calculate the expiry time for the next timer wheel
@@ -255,6 +257,7 @@ void tick_nohz_stop_sched_tick(void)
cpu_clear(cpu, nohz_cpu_mask);
}
raise_softirq_irqoff(TIMER_SOFTIRQ);
+
out:
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
@@ -311,6 +314,8 @@ void tick_nohz_restart_sched_tick(void)
ts->tick_stopped = 0;
hrtimer_cancel(&ts->sched_timer);
ts->sched_timer.expires = ts->idle_tick;
+
+ softlockup_enable();
while (1) {
/* Forward the time to expire in the future */
@@ -355,17 +360,12 @@ static void tick_nohz_handler(struct clo
tick_do_update_jiffies64(now);
/*
- * When we are idle and the tick is stopped, we have to touch
- * the watchdog as we might not schedule for a really long
- * time. This happens on complete idle SMP systems while
- * waiting on the login prompt. We also increment the "start
- * of idle" jiffy stamp so the idle accounting adjustment we
- * do when we go busy again does not account too much ticks.
- */
- if (ts->tick_stopped) {
- touch_softlockup_watchdog();
+ * Increment the "start of idle" jiffy stamp so the idle
+ * accounting adjustment we do when we go busy again does not
+ * account too much ticks.
+ */
+ if (ts->tick_stopped)
ts->idle_jiffies++;
- }
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
@@ -450,17 +450,12 @@ static enum hrtimer_restart tick_sched_t
*/
if (regs) {
/*
- * When we are idle and the tick is stopped, we have to touch
- * the watchdog as we might not schedule for a really long
- * time. This happens on complete idle SMP systems while
- * waiting on the login prompt. We also increment the "start of
- * idle" jiffy stamp so the idle accounting adjustment we do
- * when we go busy again does not account too much ticks.
+ * Increment the "start of idle" jiffy stamp so the
+ * idle accounting adjustment we do when we go busy
+ * again does not account too much ticks.
*/
- if (ts->tick_stopped) {
- touch_softlockup_watchdog();
+ if (ts->tick_stopped)
ts->idle_jiffies++;
- }
/*
* update_process_times() might take tasklist_lock, hence
* drop the base lock. sched-tick hrtimers are per-CPU and
@@ -522,6 +517,7 @@ void tick_cancel_sched_timer(int cpu)
if (ts->sched_timer.base)
hrtimer_cancel(&ts->sched_timer);
ts->tick_stopped = 0;
+ softlockup_enable();
ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif /* HIGH_RES_TIMERS */
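A note on the wmb() in softlockup_enable() above: it orders the timestamp store before the store to the per-cpu enabled flag, so a tick that observes enabled == 1 also observes a fresh timestamp. Below is a rough userspace analogue of that publish pattern, with C11 release/acquire atomics standing in for the kernel's per-cpu variables and barriers; the names are illustrative only, not kernel APIs:

/* Sketch of the "write data, then set enable flag" pattern used by
 * softlockup_enable()/softlockup_tick(), with C11 release/acquire
 * ordering playing the role of wmb(). */
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static _Atomic uint64_t touch_timestamp;
static atomic_int enabled;

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void watchdog_enable(void)
{
	/* Publish a fresh timestamp... */
	atomic_store_explicit(&touch_timestamp, now_ns(),
			      memory_order_relaxed);
	/* ...and only then set the flag (the release orders the two
	 * stores, as wmb() does in the patch). */
	atomic_store_explicit(&enabled, 1, memory_order_release);
}

static void watchdog_tick(void)
{
	/* Acquire pairs with the release above: seeing enabled == 1
	 * guarantees we also see the timestamp written before it. */
	if (!atomic_load_explicit(&enabled, memory_order_acquire))
		return;

	uint64_t touched = atomic_load_explicit(&touch_timestamp,
						memory_order_relaxed);
	if (now_ns() > touched + 10ull * 1000000000)
		/* report a lockup */ ;
}

int main(void)
{
	watchdog_enable();
	watchdog_tick();
	return 0;
}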
--
Jeremy Fitzhardinge
2007-Apr-18 13:02 UTC
[patch 1/2] Ignore stolen time in the softlockup watchdog
The softlockup watchdog is currently a nuisance in a virtual machine,
since the whole system could have the CPU stolen from it for a long
period of time. While it would be unlikely for a guest domain to be
denied timer interrupts for over 10s, it could happen, and any
resulting softlockup message would be completely spurious.
Earlier I proposed that sched_clock() return time in unstolen
nanoseconds, which is how Xen and VMI currently implement it. If the
softlockup watchdog uses sched_clock() to measure time, it will
automatically ignore stolen time, and therefore only report when the
guest itself locks up. When running native, sched_clock() returns
real-time nanoseconds, so the behaviour is unchanged.
Note that sched_clock() used this way is inherently per-cpu, so this
patch makes sure that the per-processor watchdog thread initializes
its own timestamp.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: James Morris <jmorris@namei.org>
Cc: Dan Hecht <dhecht@vmware.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Chris Lalancette <clalance@redhat.com>
Cc: Rick Lindsley <ricklind@us.ibm.com>
---
kernel/softlockup.c | 28 +++++++++++++++++++---------
1 file changed, 19 insertions(+), 9 deletions(-)
==================================================================
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -17,8 +17,8 @@
static DEFINE_SPINLOCK(print_lock);
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
+static DEFINE_PER_CPU(unsigned long long, touch_timestamp);
+static DEFINE_PER_CPU(unsigned long long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
static int did_panic = 0;
@@ -37,7 +37,7 @@ static struct notifier_block panic_block
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(touch_timestamp) = jiffies;
+ __raw_get_cpu_var(touch_timestamp) = sched_clock();
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -48,10 +48,15 @@ void softlockup_tick(void)
void softlockup_tick(void)
{
int this_cpu = smp_processor_id();
- unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
+ unsigned long long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
+ unsigned long long now;
- /* prevent double reports: */
- if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
+ /* watchdog task hasn't updated timestamp yet */
+ if (touch_timestamp == 0)
+ return;
+
+ /* report at most once a second */
+ if (per_cpu(print_timestamp, this_cpu) < (touch_timestamp + NSEC_PER_SEC) ||
did_panic ||
!per_cpu(watchdog_task, this_cpu))
return;
@@ -62,12 +67,14 @@ void softlockup_tick(void)
return;
}
+ now = sched_clock();
+
/* Wake up the high-prio watchdog task every second: */
- if (time_after(jiffies, touch_timestamp + HZ))
+ if (now > (touch_timestamp + NSEC_PER_SEC))
wake_up_process(per_cpu(watchdog_task, this_cpu));
/* Warn about unreasonable 10+ seconds delays: */
- if (time_after(jiffies, touch_timestamp + 10*HZ)) {
+ if (now > (touch_timestamp + 10ull*NSEC_PER_SEC)) {
per_cpu(print_timestamp, this_cpu) = touch_timestamp;
spin_lock(&print_lock);
@@ -87,6 +94,9 @@ static int watchdog(void * __bind_cpu)
sched_setscheduler(current, SCHED_FIFO, &param);
current->flags |= PF_NOFREEZE;
+
+ /* initialize timestamp */
+ touch_softlockup_watchdog();
/*
* Run briefly once per second to reset the softlockup timestamp.
@@ -120,7 +130,7 @@ cpu_callback(struct notifier_block *nfb,
printk("watchdog for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
- per_cpu(touch_timestamp, hotcpu) = jiffies;
+ per_cpu(touch_timestamp, hotcpu) = 0;
per_cpu(watchdog_task, hotcpu) = p;
kthread_bind(p, hotcpu);
break;
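One detail worth calling out in the softlockup_tick() hunk above is the ull suffix in 10ull*NSEC_PER_SEC, which is presumably there because NSEC_PER_SEC is defined as 1000000000L: on a 32-bit kernel a plain 10*NSEC_PER_SEC would wrap in 32-bit long arithmetic before being widened for the comparison. A small standalone demonstration, using int32_t/uint32_t to simulate 32-bit arithmetic regardless of host width:

/* Why the patch writes 10ull*NSEC_PER_SEC: without the ull suffix the
 * multiplication is done in (32-bit) long arithmetic on 32-bit kernels
 * and wraps, shrinking the 10s threshold to about 1.41s. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	/* Correct: 64-bit arithmetic throughout. */
	uint64_t good = 10ull * NSEC_PER_SEC;

	/* Wrapped: what 10 * NSEC_PER_SEC yields in 32-bit math
	 * (10,000,000,000 mod 2^32 = 1,410,065,408). */
	int32_t bad = (int32_t)(10u * (uint32_t)NSEC_PER_SEC);

	printf("10ull * NSEC_PER_SEC = %llu ns (10s)\n",
	       (unsigned long long)good);
	printf("32-bit 10 * NSEC_PER_SEC wraps to %d ns (~1.41s)\n",
	       (int)bad);
	return 0;
}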
--