search for: _q_tail_cpu_bits

Displaying 20 results from an estimated 85 matches for "_q_tail_cpu_bits".

2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...tructure @@ -314,12 +318,6 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, */ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) { - struct mcs_spinlock *prev, *next, *node; - u32 old, tail; - int idx; - - BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); - if (pv_enabled()) goto pv_queue; @@ -397,6 +395,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) queue: lockevent_inc(lock_slowpath); pv_queue: + __queued_spin_lock_slowpath_queue(lock); +} +EXPORT_SYMBOL(queued_spin_lock_slowpath); + +void queued_spin_lock_slow...
2020 Jul 21
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...ys_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, > */ > void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) > { > - struct mcs_spinlock *prev, *next, *node; > - u32 old, tail; > - int idx; > - > - BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); > - > if (pv_enabled()) > goto pv_queue; > > @@ -397,6 +395,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) > queue: > lockevent_inc(lock_slowpath); > pv_queue: > + __queued_spin_lock_slowpath_queue(lock); > +} > +EXPORT_SYM...
2014 May 08
1
[PATCH v10 03/19] qspinlock: Add pending bit
...n a separate function, but you don't need the pointer thing. Note how after you fail the trylock_pending() you touch the second (node) cacheline. > @@ -110,6 +184,9 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val) > > BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); > > + if (trylock_pending(lock, &val)) > + return; /* Lock acquired */ > + > node = this_cpu_ptr(&mcs_nodes[0]); > idx = node->count++; > tail = encode_tail(smp_processor_id(), idx); > @@ -119,15 +196,18 @@ void queue_spin_lock_slowpath(struct qspinloc...
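The review above is about pulling the "trylock || pending" step out into trylock_pending(), so a contender only touches the second (MCS node) cacheline once that fast path fails. A rough userspace sketch of those transitions, using C11 atomics, illustrative bit values rather than the kernel's exact layout, a hypothetical name trylock_or_pend(), and with the pending->locked hand-over elided:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define Q_LOCKED_VAL   (1U << 0)   /* lock byte */
#define Q_PENDING_VAL  (1U << 8)   /* pending bit */
#define Q_LOCKED_MASK  0xffU

/* Returns true if the caller may skip queueing: it either owns the lock
 * or holds the single pending slot (and would then spin for the
 * hand-over, elided here); false means "go join the MCS queue". */
static bool trylock_or_pend(_Atomic uint32_t *lock)
{
        uint32_t val = atomic_load(lock);
        uint32_t new;

        for (;;) {
                /* Pending already set, or a tail present: queue. */
                if (val & ~Q_LOCKED_MASK)
                        return false;

                /* 0,0,0 -> 0,0,1 : trylock
                 * 0,0,1 -> 0,1,1 : become the pending waiter */
                new = Q_LOCKED_VAL;
                if (val == new)
                        new |= Q_PENDING_VAL;

                if (atomic_compare_exchange_weak(lock, &val, new))
                        return true;
                /* A failed CAS reloads val; retry with the fresh value. */
        }
}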
2020 Jul 23
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
..._lock(struct qspinlock *lock, >> */ >> void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) >> { >> - struct mcs_spinlock *prev, *next, *node; >> - u32 old, tail; >> - int idx; >> - >> - BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); >> - >> if (pv_enabled()) >> goto pv_queue; >> >> @@ -397,6 +395,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) >> queue: >> lockevent_inc(lock_slowpath); >> pv_queue: >> + __queued_spin_lock_slowpath_q...
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...CCESS_ONCE(l->locked) = _Q_LOCKED_VAL; > barrier(); Why? If we have a simple test-and-set lock like below, we'll never get here at all. > @@ -252,6 +260,18 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val) > > BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); > > +#ifdef CONFIG_VIRT_UNFAIR_LOCKS > + /* > + * A simple test and set unfair lock > + */ > + if (static_key_false(&virt_unfairlocks_enabled)) { > + cpu_relax(); /* Relax after a failed lock attempt */ Meh, I don't think anybody can tell the difference if you...
2014 Jun 15
28
[PATCH 00/11] qspinlock with paravirt support
Since Waiman seems incapable of doing simple things, here's my take on the paravirt crap. The first few patches are taken from Waiman's latest series, but the virt support is completely new. Its primary aim is to not mess up the native code. I've not stress tested it, but the virt and paravirt (kvm) cases boot on simple SMP guests. I've not done Xen, but the patch should be
2014 Jun 17
3
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...l/locking/qspinlock.c | 58 +++++++++++++++++++++------------- > 2 files changed, 38 insertions(+), 22 deletions(-) > > --- a/include/asm-generic/qspinlock_types.h > +++ b/include/asm-generic/qspinlock_types.h > @@ -61,6 +61,8 @@ typedef struct qspinlock { > #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) > #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) > > +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) > + > #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) > #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) > > --- a/...
2014 Jun 15
0
[PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors
...always_inline bool virt_queue_spin_lock(struct qspinlock *lock) +{ + return false; +} +#endif + /* * Initializier */ --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); + if (virt_queue_spin_lock(lock)) + return; + /* * wait for in-progress pending->locked hand-overs *
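virt_queue_spin_lock() above defaults to false; when it reports a hypervisor, the slowpath returns early and the lock degenerates to a plain test-and-set spin, since fair queueing behaves badly once a queued waiter's vCPU is preempted. A minimal userspace sketch of such a fallback (hypothetical name tas_spin_lock(), C11 atomics):

#include <stdatomic.h>
#include <stdint.h>

#define Q_LOCKED_VAL 1U

static void tas_spin_lock(_Atomic uint32_t *lock)
{
        for (;;) {
                uint32_t expected = 0;

                /* 0 -> LOCKED: try to take the whole lock word. */
                if (atomic_compare_exchange_weak(lock, &expected, Q_LOCKED_VAL))
                        return;

                /* Test before the next test-and-set so the cacheline is
                 * not hammered while the lock is held. */
                while (atomic_load(lock))
                        ;       /* cpu_relax() in the kernel */
        }
}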
2014 Jun 15
0
[PATCH 06/11] qspinlock: Optimize pending bit
...ux-2.6/kernel/locking/qspinlock.c =================================================================== --- linux-2.6.orig/kernel/locking/qspinlock.c +++ linux-2.6/kernel/locking/qspinlock.c @@ -226,6 +226,16 @@ void queue_spin_lock_slowpath(struct qsp BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); /* + * wait for in-progress pending->locked hand-overs + * + * 0,1,0 -> 0,0,1 + */ + if (val == _Q_PENDING_VAL) { + while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL) + cpu_relax(); + } + + /* * trylock || pending * * 0,0,0 -> 0,0,1 ; trylock
2014 Jun 15
0
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...inlock_types.h | 2 + kernel/locking/qspinlock.c | 58 +++++++++++++++++++++------------- 2 files changed, 38 insertions(+), 22 deletions(-) --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h @@ -61,6 +61,8 @@ typedef struct qspinlock { #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) --- a/kernel/locking/qspinlock.c +++ b/kernel/...
2014 Apr 17
0
[PATCH v9 04/19] qspinlock: Extract out the exchange of tail code word
..., 41 insertions(+), 22 deletions(-) diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h index bd25081..ed5d89a 100644 --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h @@ -61,6 +61,8 @@ typedef struct qspinlock { #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) diff --git a/kernel/locking/qspinlock.c b/kern...
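Both results above are versions of the patch that factors the tail-word exchange into its own helper built on the new _Q_TAIL_MASK: atomically install our encoded (cpu, idx) tail while preserving the low bits, and hand back the previous tail so the caller knows whether there is a predecessor MCS node to link behind. A userspace approximation, with an illustrative mask value and a stand-in name xchg_tail_sketch():

#include <stdatomic.h>
#include <stdint.h>

#define Q_TAIL_MASK    0xffffff00U   /* illustrative: tail idx + tail cpu fields */

static uint32_t xchg_tail_sketch(_Atomic uint32_t *lock, uint32_t tail)
{
        uint32_t val = atomic_load(lock);
        uint32_t new;

        for (;;) {
                /* Keep everything below the tail, install the new tail. */
                new = (val & ~Q_TAIL_MASK) | tail;
                if (atomic_compare_exchange_weak(lock, &val, new))
                        break;
                /* A failed CAS refreshed val; retry against it. */
        }

        /* val still holds the pre-exchange word: a zero tail means we are
         * the first queued waiter, non-zero names the predecessor. */
        return val & Q_TAIL_MASK;
}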
2014 Jun 15
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...FSET 0 +#define _Q_LOCKED_BITS 8 +#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) + +#define _Q_TAIL_IDX_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) +#define _Q_TAIL_IDX_BITS 2 +#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) + +#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) +#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) + +#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ Index: linux-2.6/kernel/Kconfig.locks =================================================================== --- linux-2.6....
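The defines above fix the word layout that the recurring BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)) guards: the tail field stores cpu + 1 so that a zero tail can mean "no queued waiters", and that value must fit in the bits left above the locked byte and the 2-bit index. A standalone sketch of the check, using the field widths from this excerpt, an encode_tail() modelled on the kernel helper, and a hypothetical CONFIG_NR_CPUS value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _Q_LOCKED_OFFSET    0
#define _Q_LOCKED_BITS      8
#define _Q_TAIL_IDX_OFFSET  (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_TAIL_IDX_BITS    2
#define _Q_TAIL_CPU_OFFSET  (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS    (32 - _Q_TAIL_CPU_OFFSET)

#define CONFIG_NR_CPUS      4096    /* hypothetical build-time value */

static uint32_t encode_tail(int cpu, int idx)
{
        uint32_t tail;

        tail  = (uint32_t)(cpu + 1) << _Q_TAIL_CPU_OFFSET;
        tail |= (uint32_t)idx << _Q_TAIL_IDX_OFFSET;
        return tail;
}

int main(void)
{
        /* The condition the BUILD_BUG_ON enforces at compile time:
         * cpu + 1 must fit in the _Q_TAIL_CPU_BITS-wide field. */
        assert(CONFIG_NR_CPUS < (1U << _Q_TAIL_CPU_BITS));

        printf("tail cpu field: %d bits, tail for cpu 5 idx 1 = 0x%08x\n",
               _Q_TAIL_CPU_BITS, encode_tail(5, 1));
        return 0;
}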
2014 Jun 15
0
[PATCH 03/11] qspinlock: Add pending bit
...-> (*,0,0) ---> (*,0,1) -' : + * queue : ^--' : */ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val) { @@ -110,6 +114,65 @@ void queue_spin_lock_slowpath(struct qsp BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); + /* + * trylock || pending + * + * 0,0,0 -> 0,0,1 ; trylock + * 0,0,1 -> 0,1,1 ; pending + */ + for (;;) { + /* + * If we observe any contention; queue. + */ + if (val & ~_Q_LOCKED_MASK) + goto queue; + + new = _Q_LOCKED_VAL; + if (val == new) + new |= _Q_PENDING_VA...
2015 Mar 16
0
[PATCH 1/9] qspinlock: A simple generic 4-byte queue spinlock
...FSET 0 +#define _Q_LOCKED_BITS 8 +#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) + +#define _Q_TAIL_IDX_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) +#define _Q_TAIL_IDX_BITS 2 +#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) + +#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) +#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) + +#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -235,6 +235,13 @@ config LOCK_SPIN_ON_OWNER def_bool y...