Displaying 20 results from an estimated 23 matches for "queued_spin_is_locked".
2020 Jul 02 (0 replies) [PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+
+#define smp_mb__after_spinlock() smp_mb()
+
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+	smp_mb();
+	return atomic_read(&lock->val);
+}
+#define queued_spin_is_locked queued_spin_is_locked
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_POWERPC_QSPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/sp...
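For comparison, the generic fallback this override replaces does the same read without the barrier. A sketch of the asm-generic version, from memory (see include/asm-generic/qspinlock.h in the same tree for the authoritative copy):

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/* Generic version: plain atomic read, no smp_mb(). */
	return atomic_read(&lock->val);
}

The extra smp_mb() in the powerpc override is exactly what the review replies further down question.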
2020 Jul 06 (0 replies) [PATCH v3 4/6] powerpc/64s: implement queued spinlocks and rwlocks
...25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+
+#define smp_mb__after_spinlock() smp_mb()
+
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+	/*
+	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
+	 * but it should now be possible to remove it, as arm64 has done with
+	 * commit c6f5d02b6a0f.
+	 */
+	smp_mb();
+	return atomic_read(&lock->val);
+}
+#define queued_spin_is_locked qu...
2020 Jul 02 (0 replies) [PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...qspinlock *lock)
+{
+	u32 val = 0;
+
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+		return;
+
+	queued_spin_lock_slowpath(lock, val);
+}
+#define queued_spin_lock queued_spin_lock
+
#define smp_mb__after_spinlock() smp_mb()
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
@@ -15,6 +42,34 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
}
#define queued_spin_is_locked queued_spin_is_locked
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define SPIN_THRESHOLD (1<<15) /* not tuned */
+
+static __always_inline void pv_wai...
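The queued_spin_lock() fastpath above is the standard pattern: try an acquire cmpxchg of 0 -> _Q_LOCKED_VAL and fall back to the slowpath on contention. A minimal user-space sketch of the same pattern using C11 atomics (the names my_qspinlock, my_lock, and my_slowpath are hypothetical, and the slowpath here is a naive spin rather than the kernel's MCS queue):

#include <stdatomic.h>
#include <stdint.h>

#define MY_LOCKED_VAL 1u

struct my_qspinlock { _Atomic uint32_t val; };

static void my_slowpath(struct my_qspinlock *lock, uint32_t val)
{
	/* Naive stand-in for queued_spin_lock_slowpath(): wait until
	 * the word is clear, then retry the acquire cmpxchg. */
	do {
		while (atomic_load_explicit(&lock->val, memory_order_relaxed))
			;
		val = 0;
	} while (!atomic_compare_exchange_strong_explicit(&lock->val, &val,
			MY_LOCKED_VAL, memory_order_acquire,
			memory_order_relaxed));
}

static void my_lock(struct my_qspinlock *lock)
{
	uint32_t val = 0;

	/* Fastpath: one acquire cmpxchg, as in the patch above. */
	if (atomic_compare_exchange_strong_explicit(&lock->val, &val,
			MY_LOCKED_VAL, memory_order_acquire,
			memory_order_relaxed))
		return;

	my_slowpath(lock, val);
}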
2020 Jul 03 (0 replies) [PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...qspinlock *lock)
+{
+	u32 val = 0;
+
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+		return;
+
+	queued_spin_lock_slowpath(lock, val);
+}
+#define queued_spin_lock queued_spin_lock
+
#define smp_mb__after_spinlock() smp_mb()
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
@@ -20,6 +47,34 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
}
#define queued_spin_is_locked queued_spin_is_locked
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define SPIN_THRESHOLD (1<<15) /* not tuned */
+
+static __always_inline void pv_wai...
2020 Jul 06 (0 replies) [PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...qspinlock *lock)
+{
+	u32 val = 0;
+
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+		return;
+
+	queued_spin_lock_slowpath(lock, val);
+}
+#define queued_spin_lock queued_spin_lock
+
#define smp_mb__after_spinlock() smp_mb()
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
@@ -20,6 +58,34 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
}
#define queued_spin_is_locked queued_spin_is_locked
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define SPIN_THRESHOLD (1<<15) /* not tuned */
+
+static __always_inline void pv_wai...
2020 Jul 02 (2 replies) [PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...+#ifndef _ASM_POWERPC_QSPINLOCK_H
> +#define _ASM_POWERPC_QSPINLOCK_H
> +
> +#include <asm-generic/qspinlock_types.h>
> +
> +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
> +
> +#define smp_mb__after_spinlock() smp_mb()
> +
> +static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
> +{
> +	smp_mb();
> +	return atomic_read(&lock->val);
> +}
Why do you need the smp_mb() here?
Will
2020 Jul 02 (3 replies) [PATCH 5/8] powerpc/64s: implement queued spinlocks and rwlocks
...H
> >> +
> >> +#include <asm-generic/qspinlock_types.h>
> >> +
> >> +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
> >> +
> >> +#define smp_mb__after_spinlock() smp_mb()
> >> +
> >> +static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
> >> +{
> >> +	smp_mb();
> >> +	return atomic_read(&lock->val);
> >> +}
> >
> > Why do you need the smp_mb() here?
>
> A long and sad tale that ends here 51d7d5205d338
>
> Should probably at least refer to...
2016 Dec 06 (1 reply) [PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...clude/asm/qspinlock.h
> @@ -0,0 +1,66 @@
> +#ifndef _ASM_POWERPC_QSPINLOCK_H
> +#define _ASM_POWERPC_QSPINLOCK_H
> +
> +#include <asm-generic/qspinlock_types.h>
> +
> +#define SPIN_THRESHOLD (1 << 15)
> +#define queued_spin_unlock queued_spin_unlock
> +#define queued_spin_is_locked queued_spin_is_locked
> +#define queued_spin_unlock_wait queued_spin_unlock_wait
> +
> +extern void queued_spin_unlock_wait(struct qspinlock *lock);
> +
> +static inline u8 *__qspinlock_lock_byte(struct qspinlock *lock)
> +{
> +	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG...
2020 Jul 03 (7 replies) [PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and
Waiman (thank you), and trims off a couple of RFC and unrelated
patches.
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued
2016 Dec 05 (0 replies) [PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
.....4c89256
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define SPIN_THRESHOLD (1 << 15)
+#define queued_spin_unlock queued_spin_unlock
+#define queued_spin_is_locked queued_spin_is_locked
+#define queued_spin_unlock_wait queued_spin_unlock_wait
+
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+
+static inline u8 *__qspinlock_lock_byte(struct qspinlock *lock)
+{
+	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
+}
+
+static inline void...
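__qspinlock_lock_byte() above picks out the byte of the 32-bit lock word that holds the locked flag: the least significant byte, which sits at offset 0 on little-endian and offset 3 on big-endian, hence the 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN). A hedged user-space illustration of the same arithmetic (lsb_of() is a hypothetical helper that detects endianness at run time instead of via Kconfig):

#include <stdint.h>

static uint8_t *lsb_of(uint32_t *word)
{
	const union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };
	int big_endian = (probe.b[0] == 0);	/* 1 on big-endian hosts */

	/* The LSB lives at byte offset 0 (LE) or 3 (BE) of the word. */
	return (uint8_t *)word + 3 * big_endian;
}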
2020 Jul 24 (8 replies) [PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and with more performance
results.
What I've found is that I may have been measuring the worst load point for
the paravirt case; looking at a range of loads, it's clear that queued
spinlocks are better overall even on PV, doubly so when you look at the
generally much-improved worst-case latencies.
I have defaulted it to N even though
2020 Jul 02 (12 replies) [PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and
makes it the default for the Book3S-64 subarch.
This effort starts with the generic code so it's very simple but
still very performant. There are optimisations that can be made to
slowpaths, but I think it's better to attack those incrementally
if/when we find things, and try to add the improvements to generic
code as
2020 Jul 06 (13 replies) [PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued spinlocks and rwlocks
powerpc/pseries: implement paravirt
2016 Dec 05 (9 replies) [PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fair-lock patchset. You can apply the patches and build
successfully. The patches are based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed
single-threaded, and it can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure to be
protected.
v7 -> v8:
add one patch to drop a function call
2016 Dec 06 (6 replies) [PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fair-lock patchset. You can apply the patches and build
successfully. The patches are based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed
single-threaded, and it can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure to be
protected.
v8 -> v9:
move qspinlock config entry to