search for: queued_spin_lock

Displaying 15 results from an estimated 26 matches for "queued_spin_lock".

2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...owerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b752d34517b3..26d8766a1106 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -31,16 +31,57 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 #else
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void queued_spin_lock_slowpath_queue(struct qspinlock *lock);
 #endif
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val = 0;
-
-	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+	a...
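The snippet cuts off mid-hunk. For orientation, the pre-patch fastpath that the "-" lines remove reconstructs as follows; the trailing slowpath call is inferred from the shape of the generic qspinlock fastpath rather than visible in the excerpt, so treat this as a sketch:

/*
 * Pre-patch powerpc fastpath, reconstructed from the removed lines in
 * the quoted hunk. The final slowpath call is an assumption: it is not
 * visible in the truncated snippet.
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	/* Uncontended case: 0 -> locked in a single atomic cmpxchg. */
	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Contended: fall back to the out-of-line slowpath. */
	queued_spin_lock_slowpath(lock, val);
}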
2020 Jul 23
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...k.h
>> index b752d34517b3..26d8766a1106 100644
>> --- a/arch/powerpc/include/asm/qspinlock.h
>> +++ b/arch/powerpc/include/asm/qspinlock.h
>> @@ -31,16 +31,57 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
>>
>> #else
>> extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>> +extern void queued_spin_lock_slowpath_queue(struct qspinlock *lock);
>> #endif
>>
>> static __always_inline void queued_spin_lock(struct qspinlock *lock)
>> {
>> -	u32 val = 0;
>> -
>> -	if (like...
2020 Jul 02
0
[PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...nlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,36 @@
 #define _ASM_POWERPC_QSPINLOCK_H
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
 #define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_processor())
+		native_queued_spin_lock_slowpath(lock, val);
+	else...
2020 Jul 03
0
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...nlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,36 @@
 #define _ASM_POWERPC_QSPINLOCK_H
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
 #define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_processor())
+		native_queued_spin_lock_slowpath(lock, val);
+	else...
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...nlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,47 @@
 #define _ASM_POWERPC_QSPINLOCK_H
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
 #define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_proces...
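The hunk truncates inside the dispatch helper. Completing it from the declarations visible above (the else branch is filled in by assumption, mirroring the native call):

/*
 * Sketch of the completed dispatch. Everything up to the else branch is
 * visible in the quoted hunk; the __pv_ call in the else branch is an
 * assumption based on the extern declaration above it.
 */
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		/* Dedicated-CPU partition: plain queued spinlock slowpath. */
		native_queued_spin_lock_slowpath(lock, val);
	else
		/* Shared processor (SPLPAR): paravirt slowpath that can yield. */
		__pv_queued_spin_lock_slowpath(lock, val);
}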
2020 Jul 21
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...powerpc/include/asm/qspinlock.h
> index b752d34517b3..26d8766a1106 100644
> --- a/arch/powerpc/include/asm/qspinlock.h
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -31,16 +31,57 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
>
> #else
> extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +extern void queued_spin_lock_slowpath_queue(struct qspinlock *lock);
> #endif
>
> static __always_inline void queued_spin_lock(struct qspinlock *lock)
> {
> -	u32 val = 0;
> -
> -	if (likely(atomic_try_cmpxchg_lock(&...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...#define arch_spin_unlock arch_spin_unlock
> +
> +static inline int arch_spin_trylock(arch_spinlock_t *lock)
> +{
> +	CLEAR_IO_SYNC;
> +	return queued_spin_trylock(lock);
> +}
> +
> +static inline void arch_spin_lock(arch_spinlock_t *lock)
> +{
> +	CLEAR_IO_SYNC;
> +	queued_spin_lock(lock);
> +}
> +
> +static inline
> +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
> +{
> +	CLEAR_IO_SYNC;
> +	queued_spin_lock(lock);
> +}
> +
> +static inline void arch_spin_unlock(arch_spinlock_t *lock)
> +{
> +	SYNC_IO;
> +	queued_sp...
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and Waiman (thank you), and trims off a couple of RFC and unrelated patches.

Thanks,
Nick

Nicholas Piggin (6):
  powerpc/powernv: must include hvcall.h to get PAPR defines
  powerpc/pseries: move some PAPR paravirt functions to their own file
  powerpc: move spinlock implementation to simple_spinlock
  powerpc/64s: implement queued
2020 Jul 07
6
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
Excerpts from Waiman Long's message of July 7, 2020 4:39 am:
> On 7/6/20 12:35 AM, Nicholas Piggin wrote:
>> v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
>>
>> Thanks,
>> Nick
>>
>> Nicholas Piggin (6):
>>   powerpc/powernv: must include hvcall.h to get PAPR defines
>>   powerpc/pseries: move some PAPR
2020 Jul 24
8
[PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and more performance results. What I've found is I might have been measuring the worst load point for the paravirt case, and by looking at a range of loads it's clear that queued spinlocks are overall better even on PV, doubly so when you look at the generally much improved worst case latencies. I have defaulted it to N even though
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...ck
+#define arch_spin_lock_flags arch_spin_lock_flags
+#define arch_spin_unlock arch_spin_unlock
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	CLEAR_IO_SYNC;
+	return queued_spin_trylock(lock);
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	CLEAR_IO_SYNC;
+	queued_spin_lock(lock);
+}
+
+static inline
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+{
+	CLEAR_IO_SYNC;
+	queued_spin_lock(lock);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	SYNC_IO;
+	queued_spin_unlock(lock);
+}
+#endif /* _ASM_POWERPC_QSPINLOCK_H */
diff -...
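CLEAR_IO_SYNC and SYNC_IO bracket the generic queued-lock calls with powerpc's MMIO ordering hooks. As context (not part of the quoted patch), the 64-bit definitions of that era looked roughly like this sketch:

/*
 * Sketch of the historical io_sync hooks from the powerpc spinlock
 * header; shown for context only. MMIO accessors set io_sync, and
 * SYNC_IO issues a barrier at unlock so MMIO cannot leak out of the
 * critical section.
 */
#ifdef CONFIG_PPC64
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif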
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and makes it the default for the Book3S-64 subarch. This effort starts with the generic code so it's very simple but still very performant. There are optimisations that can be made to slowpaths, but I think it's better to attack those incrementally if/when we find things, and try to add the improvements to generic code as
2020 Jul 06
13
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).

Thanks,
Nick

Nicholas Piggin (6):
  powerpc/powernv: must include hvcall.h to get PAPR defines
  powerpc/pseries: move some PAPR paravirt functions to their own file
  powerpc: move spinlock implementation to simple_spinlock
  powerpc/64s: implement queued spinlocks and rwlocks
  powerpc/pseries: implement paravirt
2020 Jul 23
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
On 7/23/20 9:30 AM, Nicholas Piggin wrote:
>> I would prefer to extract out the pending bit handling code out into a
>> separate helper function which can be overridden by the arch code
>> instead of breaking the slowpath into 2 pieces.
> You mean have the arch provide a queued_spin_lock_slowpath_pending
> function that the slow path calls?
>
> I would actually prefer the pending handling can be made inline in
> the queued_spin_lock function, especially with out-of-line locks it
> makes sense to put it there.
>
> We could ifdef out queued_spin_lock_slowpath_que...
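The alternative Nick describes (pending handling inline in queued_spin_lock(), with only the queueing path out of line) would look roughly like this hypothetical sketch; the pending-wait loop is elided and the names follow the quoted patch:

/*
 * Hypothetical shape of the inline-pending variant under discussion.
 * Only the structure is meaningful; the actual pending-bit spin is
 * omitted.
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	/* Fastpath: uncontended acquisition. */
	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* ... inline pending-bit handling would go here ... */

	/* Queueing remains out of line. */
	queued_spin_lock_slowpath_queue(lock);
}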
2020 Jul 09
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...rpc/include/asm/qspinlock_paravirt.h | 7 ++
>>  arch/powerpc/platforms/pseries/Kconfig  | 5 ++
>>  arch/powerpc/platforms/pseries/setup.c  | 6 +-
>>  include/asm-generic/qspinlock.h         | 2 +
> Another ack?
>
I am OK with adding the #ifdef around queued_spin_lock().

Acked-by: Waiman Long <longman at redhat.com>

>> diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
>> index 7a8546660a63..f2d51f929cf5 100644
>> --- a/arch/powerpc/include/asm/paravirt.h
>> +++ b/arch/powerpc/include/asm/paravi...
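The quoted paravirt.h diff is cut off right after the header. The is_shared_processor() test that the qspinlock dispatch relies on ends up defined along these lines (a sketch of the merged code's shape, not the exact hunk):

/*
 * Sketch of is_shared_processor(); an assumption based on the shape of
 * the merged powerpc code, since the quoted hunk is truncated. True
 * when running in a shared-processor (SPLPAR) partition, where vCPUs
 * can be preempted by the hypervisor.
 */
static inline bool is_shared_processor(void)
{
#ifdef CONFIG_PPC_SPLPAR
	return lppaca_shared_proc(local_paca->lppaca_ptr);
#else
	return false;
#endif
}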