Displaying 20 results from an estimated 24 matches for "_q_pending_loops".
2020 Jul 02
2
[PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...7b6bb7
> --- /dev/null
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -0,0 +1,20 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_POWERPC_QSPINLOCK_H
> +#define _ASM_POWERPC_QSPINLOCK_H
> +
> +#include <asm-generic/qspinlock_types.h>
> +
> +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
> +
> +#define smp_mb__after_spinlock() smp_mb()
> +
> +static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
> +{
> + smp_mb();
> + return atomic_read(&lock->val);
> +}
Why do you need the smp_mb() here?
Will
2020 Jul 02
3
[PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...h
> >> @@ -0,0 +1,20 @@
> >> +/* SPDX-License-Identifier: GPL-2.0 */
> >> +#ifndef _ASM_POWERPC_QSPINLOCK_H
> >> +#define _ASM_POWERPC_QSPINLOCK_H
> >> +
> >> +#include <asm-generic/qspinlock_types.h>
> >> +
> >> +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
> >> +
> >> +#define smp_mb__after_spinlock() smp_mb()
> >> +
> >> +static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
> >> +{
> >> + smp_mb();
> >> + return atomic_read(&lock->...
2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
..."cr0", "memory"
+                : again );
+
+                atomic_cond_read_acquire(a, !(VAL & _Q_LOCKED_MASK));
+//              clear_pending_set_locked(lock);
+                WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+//              lockevent_inc(lock_pending);
+                return;
+        }
+
+        if (val == _Q_PENDING_VAL) {
+                int cnt = _Q_PENDING_LOOPS;
+                val = atomic_cond_read_relaxed(a,
+                                (VAL != _Q_PENDING_VAL) || !cnt--);
+                if (!(val & ~_Q_LOCKED_MASK))
+                        goto again;
+        }
+        queued_spin_lock_slowpath_queue(lock);
}
#define queued_spin_lock queued_spin_lock
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qs...
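For orientation, the hunk above is the bounded pending-bit wait that _Q_PENDING_LOOPS controls: spin a fixed number of times while the lock word shows only the pending bit, retry the fast path if the word drains down to the locked bit, otherwise fall back to queueing. The stand-alone C sketch below illustrates the same idea; the names and bit values (Q_LOCKED_VAL, Q_PENDING_VAL, PENDING_LOOPS, wait_for_pending_owner) are illustrative and not the kernel's identifiers.

#include <stdatomic.h>
#include <stdbool.h>

#define Q_LOCKED_VAL   1u            /* illustrative bit layout */
#define Q_PENDING_VAL  (1u << 8)
#define PENDING_LOOPS  (1 << 9)      /* mirrors the "not tuned" _Q_PENDING_LOOPS */

/*
 * Bounded wait on a lock word that currently shows only the pending bit.
 * Returns true if the caller may retry the fast path (nothing is queued),
 * false if it should fall back to queueing.
 */
static bool wait_for_pending_owner(atomic_uint *val)
{
        int cnt = PENDING_LOOPS;
        unsigned int v;

        do {
                v = atomic_load_explicit(val, memory_order_relaxed);
        } while (v == Q_PENDING_VAL && --cnt > 0);

        return (v & ~Q_LOCKED_VAL) == 0;
}

The relaxed load matches the atomic_cond_read_relaxed() in the patch: no ordering is needed while merely polling, because the retried lock acquisition supplies the acquire barrier.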
2020 Jul 05
1
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...960a0de2467 100644
> --- a/arch/powerpc/include/asm/qspinlock.h
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -3,9 +3,36 @@
> #define _ASM_POWERPC_QSPINLOCK_H
>
> #include <asm-generic/qspinlock_types.h>
> +#include <asm/paravirt.h>
>
> #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
>
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +
> +static __always_inline void queued_spin_...
2020 Jul 02
0
[PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...++ b/arch/powerpc/include/asm/qspinlock.h
>> @@ -0,0 +1,20 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +#ifndef _ASM_POWERPC_QSPINLOCK_H
>> +#define _ASM_POWERPC_QSPINLOCK_H
>> +
>> +#include <asm-generic/qspinlock_types.h>
>> +
>> +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
>> +
>> +#define smp_mb__after_spinlock() smp_mb()
>> +
>> +static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
>> +{
>> + smp_mb();
>> + return atomic_read(&lock->val);
>> +}
>
> Why...
2020 Jul 02
0
[PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...0 @@
>> >> +/* SPDX-License-Identifier: GPL-2.0 */
>> >> +#ifndef _ASM_POWERPC_QSPINLOCK_H
>> >> +#define _ASM_POWERPC_QSPINLOCK_H
>> >> +
>> >> +#include <asm-generic/qspinlock_types.h>
>> >> +
>> >> +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
>> >> +
>> >> +#define smp_mb__after_spinlock() smp_mb()
>> >> +
>> >> +static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
>> >> +{
>> >> + smp_mb();
>> >> + re...
2020 Jul 21
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...> +
> +                atomic_cond_read_acquire(a, !(VAL & _Q_LOCKED_MASK));
> +//              clear_pending_set_locked(lock);
> +                WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
> +//              lockevent_inc(lock_pending);
> +                return;
> +        }
> +
> +        if (val == _Q_PENDING_VAL) {
> +                int cnt = _Q_PENDING_LOOPS;
> +                val = atomic_cond_read_relaxed(a,
> +                                (VAL != _Q_PENDING_VAL) || !cnt--);
> +                if (!(val & ~_Q_LOCKED_MASK))
> +                        goto again;
> +        }
> +        queued_spin_lock_slowpath_queue(lock);
> }
> #define queued_spin_lock queued_spin_lock
>
I am fine...
2020 Jul 02
0
[PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
....h
new file mode 100644
index 000000000000..f84da77b6bb7
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+
+#define smp_mb__after_spinlock() smp_mb()
+
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ smp_mb();
+ return atomic_read(&lock->val);
+}
+#define queued_spin_is_locked queued_spin_is_locked
+
+#include <asm-generic/qspin...
2020 Jul 06
0
[PATCH v3 4/6] powerpc/64s: implement queued spinlocks and rwlocks
....h
new file mode 100644
index 000000000000..c49e33e24edd
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+
+#define smp_mb__after_spinlock() smp_mb()
+
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+        /*
+         * This barrier was added to simple spinlocks by commit 51d7d5205d338,
+         * but it should now be possible to remove it, as arm64 has d...
2020 Jul 02
0
[PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...erpc/include/asm/qspinlock.h
index f84da77b6bb7..997a9a32df77 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,36 @@
#define _ASM_POWERPC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock...
2020 Jul 03
0
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...erpc/include/asm/qspinlock.h
index c49e33e24edd..0960a0de2467 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,36 @@
#define _ASM_POWERPC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock...
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...erpc/include/asm/qspinlock.h
index c49e33e24edd..f5066f00a08c 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,47 @@
#define _ASM_POWERPC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __...
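The declarations above set up a runtime choice between the native and the paravirt slowpath. The stand-alone sketch below shows that dispatch pattern; the body of queued_spin_lock_slowpath() is a guess at the likely shape since the hunk is truncated, is_shared_processor() is assumed to be the SPLPAR check this series keeps in asm/paravirt.h, and the stubs are placeholders rather than kernel code.

#include <stdbool.h>

struct qspinlock;   /* opaque here; the real type comes from qspinlock_types.h */

/* Placeholder stubs standing in for the real slowpath implementations. */
static void native_queued_spin_lock_slowpath(struct qspinlock *lock, unsigned int val)
{
        (void)lock; (void)val;
}

static void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, unsigned int val)
{
        (void)lock; (void)val;
}

/* Assumed SPLPAR check; hard-coded so the sketch compiles on its own. */
static bool is_shared_processor(void)
{
        return false;
}

/* Probable shape of the wrapper: take the paravirt slowpath only when
 * running as a shared-processor guest, otherwise the native one. */
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, unsigned int val)
{
        if (is_shared_processor())
                __pv_queued_spin_lock_slowpath(lock, val);
        else
                native_queued_spin_lock_slowpath(lock, val);
}

In the kernel the check depends on whether the guest runs on shared processors, which is only known at run time; in this sketch it is a constant, so the dispatch folds away.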
2020 Jul 23
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...acquire(a, !(VAL & _Q_LOCKED_MASK));
>> +//              clear_pending_set_locked(lock);
>> +                WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
>> +//              lockevent_inc(lock_pending);
>> +                return;
>> +        }
>> +
>> +        if (val == _Q_PENDING_VAL) {
>> +                int cnt = _Q_PENDING_LOOPS;
>> +                val = atomic_cond_read_relaxed(a,
>> +                                (VAL != _Q_PENDING_VAL) || !cnt--);
>> +                if (!(val & ~_Q_LOCKED_MASK))
>> +                        goto again;
>> +        }
>> +        queued_spin_lock_slowpath_queue(lock);
>> }
>> #define queued_spin_lock queu...
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and
Waiman (thank you), and trims off a couple of RFC and unrelated
patches.
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and
makes it the default for the Book3S-64 subarch.
This effort starts with the generic code so it's very simple but
still very performant. There are optimisations that can be made to
slowpaths, but I think it's better to attack those incrementally
if/when we find things, and try to add the improvements to generic
code as
2020 Jul 24
8
[PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and more performance
results.
What I've found is I might have been measuring the worst load point for
the paravirt case, and by looking at a range of loads it's clear that
queued spinlocks are overall better even on PV, doubly so when you look
at the generally much improved worst case latencies.
I have defaulted it to N even though