Displaying 20 results from an estimated 38 matches for "nohz_cpu_mask".
2007 Apr 18
1
[RFC, PATCH 24/24] i386 Vmi no idle hz
..._stop_hz_timer(void)
+{
+	/* Note that cpu_set, cpu_clear are (SMP safe) atomic on x86. */
+
+	unsigned long seq, next;
+	unsigned long long real_cycles_expiry;
+	int cpu = smp_processor_id();
+
+	/* Allow disabling via /proc/sys/kernel/hz_timer. */
+	if (sysctl_hz_timer != 0)
+		return;
+
+	/* Set nohz_cpu_mask, check rcu_pending in same order as S390. */
+	cpu_set(cpu, nohz_cpu_mask);
+	if (rcu_pending(cpu) || local_softirq_pending()) {
+		cpu_clear(cpu, nohz_cpu_mask);
+		return;
+	}
+
+	next = next_timer_interrupt();
+
+	if (jiffies + VMI_MIN_NO_IDLE_HZ_SKIPPED_TICKS >= next) {
+		cpu_clear(cpu, noh...
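For context on the search term: nohz_cpu_mask was the global cpumask that the RCU implementation of that era consulted when starting a grace-period batch, which is why the function above publishes the CPU in the mask before re-checking for pending RCU or softirq work. A rough sketch of the consumer side, with the surrounding logic elided and field names as remembered from kernel/rcupdate.c of roughly that vintage, so details may differ:

static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	/* ... begin the next grace period ... */
	rcp->cur++;
	smp_mb();
	/* CPUs that have parked themselves in nohz_cpu_mask are not waited on */
	cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
	/* ... */
}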
2007 Apr 18
0
[PATCH 1/9] Vmi timer fixes round two.patch
...time_lock);
 }
@@ -380,7 +377,6 @@ int vmi_stop_hz_timer(void)
 	unsigned long seq, next;
 	unsigned long long real_cycles_expiry;
 	int cpu = smp_processor_id();
-	int idle;
 	BUG_ON(!irqs_disabled());
 	if (sysctl_hz_timer != 0)
@@ -388,13 +384,13 @@ int vmi_stop_hz_timer(void)
 	cpu_set(cpu, nohz_cpu_mask);
 	smp_mb();
+
 	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
-	    (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
+	    (next = next_timer_interrupt(),
+	     time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) {
 		cpu_clear(cpu, nohz_cpu_mask);
-		next = jiffies...
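The hunk above tightens the "is it worth stopping the tick?" test: instead of refusing only when the next timer has already expired, it also refuses when the next timer falls within one VMI alarm period. With hypothetical values HZ = 1000 and CONFIG_VMI_ALARM_HZ = 100, that margin is HZ/CONFIG_VMI_ALARM_HZ = 10 jiffies; written out without the comma-operator packing, the check looks roughly like:

	next = next_timer_interrupt();
	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
	    time_before_eq(next, jiffies + HZ / CONFIG_VMI_ALARM_HZ)) {
		/* pending work, or next timer too close: keep the periodic tick */
		cpu_clear(cpu, nohz_cpu_mask);
		return 0;
	}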
2007 Apr 18
0
[PATCH 6/6] VMI timer patches
...int vmi_stop_hz_timer(void)
+{
+	/* Note that cpu_set, cpu_clear are (SMP safe) atomic on x86. */
+
+	unsigned long seq, next;
+	unsigned long long real_cycles_expiry;
+	int cpu = smp_processor_id();
+	int idle;
+
+	BUG_ON(!irqs_disabled());
+	if (sysctl_hz_timer != 0)
+		return 0;
+
+	cpu_set(cpu, nohz_cpu_mask);
+	smp_mb();
+	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
+	    (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
+		cpu_clear(cpu, nohz_cpu_mask);
+		next = jiffies;
+		idle = 0;
+	} else
+		idle = 1;
+
+	/* Convert jiffies to the real cycle counter. */
+	do {
+		seq = r...
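The snippet cuts off inside the seqlock read loop ("seq = r..."). Its shape is the standard read_seqbegin()/read_seqretry() retry loop over xtime_lock; the cycle-accounting names below (cycles_per_jiffy, real_cycles_accounted_system) are placeholders for illustration, not necessarily the patch's actual identifiers:

	/* Convert the jiffies deadline into the real cycle counter, reading the
	 * timekeeping state consistently under xtime_lock. */
	do {
		seq = read_seqbegin(&xtime_lock);
		real_cycles_expiry = real_cycles_accounted_system +
			(unsigned long long)(next - jiffies) * cycles_per_jiffy;
	} while (read_seqretry(&xtime_lock, seq));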
2007 Apr 18
0
[PATCH 5/5] Vmi timer.patch
...int vmi_stop_hz_timer(void)
+{
+	/* Note that cpu_set, cpu_clear are (SMP safe) atomic on x86. */
+
+	unsigned long seq, next;
+	unsigned long long real_cycles_expiry;
+	int cpu = smp_processor_id();
+	int idle;
+
+	BUG_ON(!irqs_disabled());
+	if (sysctl_hz_timer != 0)
+		return 0;
+
+	cpu_set(cpu, nohz_cpu_mask);
+	smp_mb();
+	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
+	    (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
+		cpu_clear(cpu, nohz_cpu_mask);
+		next = jiffies;
+		idle = 0;
+	} else
+		idle = 1;
+
+	/* Convert jiffies to the real cycle counter. */
+	do {
+		seq = r...
2007 Apr 18
1
[PATCH 9/10] Vmi timer update.patch
...sabled. */
-int vmi_stop_hz_timer(void)
-{
-	/* Note that cpu_set, cpu_clear are (SMP safe) atomic on x86. */
-
-	unsigned long seq, next;
-	unsigned long long real_cycles_expiry;
-	int cpu = smp_processor_id();
-
-	BUG_ON(!irqs_disabled());
-	if (sysctl_hz_timer != 0)
-		return 0;
-
-	cpu_set(cpu, nohz_cpu_mask);
-	smp_mb();
-
-	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
-	    (next = next_timer_interrupt(),
-	     time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) {
-		cpu_clear(cpu, nohz_cpu_mask);
-		return 0;
-	}
-
-	/* Convert jiffies to the real cycle counter. */
-	do {
-		seq = read...
2007 Apr 18
2
[patch 0/2] softlockup watchdog improvements
Here's a couple of patches to improve the softlockup watchdog.
The first changes the softlockup timer from using jiffies to sched_clock()
as a timebase. Xen and VMI implement sched_clock() as counting unstolen
time, so time stolen by the hypervisor won't cause the watchdog to bite.
The second adds per-cpu enable flags for the watchdog timer. This allows
the timer to be disabled when the
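A minimal sketch of the first change, assuming hypothetical per-CPU variable names (the series has its own): timestamps are taken with sched_clock(), which Xen and VMI back with unstolen time, so a vCPU preempted by the hypervisor does not appear to have stalled:

static DEFINE_PER_CPU(unsigned long long, touch_ts);	/* last "still alive" stamp, ns */

void touch_softlockup_watchdog(void)
{
	__get_cpu_var(touch_ts) = sched_clock();	/* ns of unstolen time */
}

/* called from the per-CPU timer path */
static void softlockup_tick(void)
{
	unsigned long long now = sched_clock();

	/* complain only after ~10s of *unstolen* time without progress */
	if (now - __get_cpu_var(touch_ts) > 10ULL * NSEC_PER_SEC)
		printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
		       smp_processor_id());
}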
2007 Apr 18
5
[patch 0/4] Revised softlockup watchdog improvement patches
Hi Ingo,
This series of patches implements a number of improvements to the
softlockup watchdog and its users.
They are:
1. Make the watchdog ignore stolen time
When running under a hypervisor, the kernel may lose an arbitrary amount
of time as "stolen time". This may cause the softlockup watchdog to
trigger spuriously. Xen and VMI implement sched_clock() as measuring
unstolen time,
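The "unstolen time" point is the crux: a paravirtualized sched_clock() counts only nanoseconds the vCPU actually executed, so a sketch of the backend (with a hypothetical accessor standing in for the Xen/VMI per-vCPU runstate accounting) is simply:

unsigned long long sched_clock(void)
{
	/* time this vCPU spent running, excluding time stolen by the host */
	return vcpu_running_ns(smp_processor_id());	/* hypothetical helper */
}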
2007 Apr 18
31
[PATCH 00/28] Updates for firstfloor paravirt-ops patches
Hi Andi,
This is a set of updates for the firstfloor patch queue.
Quick rundown:
revert-mm-x86_64-mm-account-for-module-percpu-space-separately-from-kernel-percpu.patch
separate-module-percpu-space.patch
Update the module percpu accounting patch
fix-ff-allow-percpu-variables-to-be-page-aligned.patch
Make sure the percpu memory allocation is page-aligned
2007 Apr 18
34
[patch 00/34] Xen-pv_ops: Xen guest implementation for paravirt_ops interface
Hi Andi,
This patch series implements the Linux Xen guest as a paravirt_ops
backend. The features implemented in this patch series are:
* domU only
* UP and SMP guest support (NEW!)
* dynamic ticks (NEW!)
* writable pagetables, with late pinning/early unpinning
(no shadow pagetable support)
* supports both PAE and non-PAE modes
(non-PAE may be broken at the moment)
* xen hvc console
2007 Apr 18
20
[patch 00/20] XEN-paravirt: Xen guest implementation for paravirt_ops interface
This patch series implements the Linux Xen guest in terms of the
paravirt-ops interface. The features implemented in this patch series
are:
* domU only
* UP only (most code is SMP-safe, but there's no way to create a new vcpu)
* writable pagetables, with late pinning/early unpinning
(no shadow pagetable support)
* supports both PAE and non-PAE modes
* xen console
* virtual block