search for: _q_locked_val

Displaying 20 results from an estimated 206 matches for "_q_locked_val".

2014 Jun 17
5
[PATCH 03/11] qspinlock: Add pending bit
...generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queue_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val, new;

 	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
+
+	/* One more attempt - but if we fail mark it as pending. */
+	if (val == _Q_LOCKED_VAL) {
+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == _Q_LOCKED_VAL) /* YEEY! */
+			return;
+		val = old;
+	}...
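As posted, the hunk does not compile: Q_LOCKED_VAL has lost its leading underscore and old is never declared. A minimal compile-fixed sketch of the same proposal, keeping the logic exactly as quoted (compare Peter's loop-based version quoted further down), with the slowpath call taken from the surrounding function:

	static __always_inline void queue_spin_lock(struct qspinlock *lock)
	{
		u32 val, old, new;

		val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;

		/* One more attempt - but if we fail mark it as pending. */
		if (val == _Q_LOCKED_VAL) {
			new = _Q_LOCKED_VAL | _Q_PENDING_VAL;	/* was: Q_LOCKED_VAL */

			old = atomic_cmpxchg(&lock->val, val, new);
			if (old == _Q_LOCKED_VAL) /* YEEY! */
				return;
			val = old;
		}

		queue_spin_lock_slowpath(lock, val);
	}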
2016 May 26
2
[PATCH v3 5/6] pv-qspinlock: use cmpxchg_release in __pv_queued_spin_unlock
...> +++ b/kernel/locking/qspinlock_paravirt.h
> @@ -614,7 +614,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
>  	 * unhash. Otherwise it would be possible to have multiple @lock
>  	 * entries, which would be BAD.
>  	 */
> -	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
> +	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
>  	if (likely(locked == _Q_LOCKED_VAL))
>  		return;

This patch fails to explain _why_ it can be relaxed. And seeing how this cmpxchg() can actually unlock the lock, I don't see how this can possibly be correct....
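For readers skimming the objection: per the kernel's atomic ordering rules, cmpxchg() is fully ordered on success, while the _release variant gives release semantics only. A two-line sketch (illustrative, not part of the patch):

	/* fully ordered on success: no accesses are reordered across it */
	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);

	/* RELEASE: accesses before it stay before the store, but later
	 * accesses may be reordered up past it */
	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);

The debate is whether release-only ordering suffices when this cmpxchg is the store that actually releases the lock.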
2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...oid queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void queued_spin_lock_slowpath_queue(struct qspinlock *lock);
 #endif

 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val = 0;
-
-	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+	atomic_t *a = &lock->val;
+	u32 val;
+
+again:
+	asm volatile(
+"1:\t" PPC_LWARX(%0,0,%1,1) " # queued_spin_lock \n"
+	: "=&r" (val)
+	: "r" (&a->counter)
+	: "memory");
+
+	if (likely(val == 0)) {
+		asm_volatile_goto(
+		&q...
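The '-' lines are the generic fast path this powerpc patch replaces. Restated as a compilable sketch, assuming mainline's atomic_try_cmpxchg_acquire() in place of the series' atomic_try_cmpxchg_lock():

	static __always_inline void queued_spin_lock(struct qspinlock *lock)
	{
		u32 val = 0;

		/* 0 -> _Q_LOCKED_VAL: uncontended acquire */
		if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val,
						      _Q_LOCKED_VAL)))
			return;

		/* on failure, 'val' holds the observed value for the slowpath */
		queued_spin_lock_slowpath(lock, val);
	}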
2014 Jun 23
1
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...;lock->val, val, new);
> > > +		if (old == val)
> > > +			break;
> > > +
> > > +		val = old;
> > > +	}
> > > +
> > > +	/*
> > > +	 * we won the trylock; forget about queueing.
> > > +	 */
> > > +	if (new == _Q_LOCKED_VAL)
> > > +		goto release;
> > > +
> > > +	/*
> > > +	 * if there was a previous node; link it and wait.
> > > +	 */
> > > +	if (old & ~_Q_LOCKED_MASK) {
> > > +		prev = decode_tail(old);
> > > +		ACCESS_ONCE(prev->next) =...
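The decode_tail() in the excerpt unpacks the tail code word into the previous waiter's MCS node. A sketch of the encode/decode pair, reconstructed from the posted series (mcs_nodes is the per-cpu MCS node array defined in qspinlock.c):

	static inline u32 encode_tail(int cpu, int idx)
	{
		u32 tail;

		tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET; /* +1 so 0 = no tail */
		tail |= idx << _Q_TAIL_IDX_OFFSET;       /* nesting level 0-3 */

		return tail;
	}

	static inline struct mcs_spinlock *decode_tail(u32 tail)
	{
		int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
		int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

		return per_cpu_ptr(&mcs_nodes[idx], cpu);
	}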
2014 Jun 18
0
[PATCH 03/11] qspinlock: Add pending bit
On 17/06/2014 22:36, Konrad Rzeszutek Wilk wrote:
> +	/* One more attempt - but if we fail mark it as pending. */
> +	if (val == _Q_LOCKED_VAL) {
> +		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == _Q_LOCKED_VAL) /* YEEY! */
> +			return;
> +		val = old;
> +	}

Note that Peter's code is in a for(;;) loop:

+	for (;;) {
+		/*
+		 * If we observe any...
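Completed, the loop Paolo is pointing at reads roughly as follows (a reconstruction of the trylock-or-pending step from Peter's patch in this series):

	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		/*
		 * trylock || pending: take the lock if it is free,
		 * otherwise set the pending bit alongside the holder.
		 */
		new = _Q_LOCKED_VAL;
		if (val)
			new = val | _Q_PENDING_VAL;

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}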
2014 Jun 16
4
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...to) I presume what you mean is that if we are the next after the lock-holder we need only to update the 'next' (or the composite value of smp_processor_idx | idx) to point to us. As in, swap the 'L' with 'I' (looking at the doc)

> +	 */
> +	for (;;) {
> +		new = _Q_LOCKED_VAL;
> +		if (val)

Could you add a comment here, like this:

/*
 * N.B. Initially 'val' will have some value (as we are called
 * after the _Q_LOCKED_VAL could not be set by queue_spin_lock).
 * But on subsequent iterations, either the lock holder will
 * decrement the val (queue_spin_unloc...
2014 Jun 12
2
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...ve any contention; queue.
> >>+	 * If we observe that the queue is not empty or both
> >>+	 * the pending and lock bits are set, queue
> >>	 */
> >>-	if (val & ~_Q_LOCKED_MASK)
> >>+	if ((val & _Q_TAIL_MASK) ||
> >>+	    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
> >>		goto queue;
> >>+	if (val == _Q_PENDING_VAL) {
> >>+		/*
> >>+		 * Pending bit is set, but not the lock bit.
> >>+		 * Assuming that the pending bit holder is going to
> >>+		 * set the lock bit and clear the pend...
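The truncated branch continues by spinning briefly instead of queuing, since the pending-bit holder is expected to take the lock and clear pending shortly. A sketch of the idea (the threshold name and exact loop shape are reconstructed, not quoted from the patch):

	if (val == _Q_PENDING_VAL) {
		/*
		 * Pending set, lock clear: wait a bounded number of
		 * iterations for the pending holder to take the lock,
		 * rather than joining the queue immediately.
		 */
		int cnt = _Q_PENDING_SPIN_THRESHOLD;

		while (((val = atomic_read(&lock->val)) == _Q_PENDING_VAL) &&
		       cnt--)
			arch_mutex_cpu_relax();
	}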
2014 Apr 23
2
[PATCH v9 05/19] qspinlock: Optimize for smaller NR_CPUS
On 04/18/2014 05:40 PM, Waiman Long wrote:
> On 04/18/2014 03:05 PM, Peter Zijlstra wrote:
>> On Fri, Apr 18, 2014 at 01:52:50PM -0400, Waiman Long wrote:
>>> I am confused by your notation.
>> Nah, I think I was confused :-) Make the 1 _Q_LOCKED_VAL though, as
>> that's the proper constant to use.
>
> Everyone gets confused once in a while:-) I have plenty of that myself.
>
> I will change 1 to _Q_LOCKED_VAL as suggested.
>
> -Longman

The attached patch file contains the additional changes that I had made to qspi...
2014 Jun 15
0
[PATCH 03/11] qspinlock: Add pending bit
...1
+#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
 #define _Q_TAIL_IDX_BITS	2
 #define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)
@@ -57,5 +62,6 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

 #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -83,24 +83,28 @@ static inline struct mcs_spinlock *decod
 	return per_cpu_ptr(&mcs_nodes...
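Putting these defines together, the 32-bit lock word in this version of the series lays out as follows (a sketch derived from the offsets above, with a 1-bit pending field; wider NR_CPUS-dependent variants came later):

	/*
	 *  bits  0- 7: locked byte    _Q_LOCKED_VAL  = 1U << 0
	 *  bit      8: pending        _Q_PENDING_VAL = 1U << 8
	 *  bits  9-10: tail index     (which of the 4 per-cpu MCS nodes)
	 *  bits 11-31: tail cpu (+1)  (0 means the queue is empty)
	 */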
2014 Jun 12
2
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...ACCESS_ONCE(*(u8 *)lock) = 0;
>  	barrier();
>  }
>
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +/*
> + * The lock byte can have a value of _Q_LOCKED_SLOWPATH to indicate
> + * that it needs to go through the slowpath to do the unlocking.
> + */
> +#define _Q_LOCKED_SLOWPATH	(_Q_LOCKED_VAL | 2)
> +
> +extern void queue_spin_unlock_slowpath(struct qspinlock *lock);
> +
> +static inline void queue_spin_unlock(struct qspinlock *lock)
> +{
> +	barrier();
> +	if (static_key_false(&paravirt_spinlocks_enabled)) {
> +		/*
> +		 * Need to atomically clear the lo...
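The truncated function continues by atomically clearing the lock byte and taking the unlock slowpath if a halted waiter marked it. A sketch of the whole path, reconstructed from the quoted v11 patch:

	static inline void queue_spin_unlock(struct qspinlock *lock)
	{
		barrier();
		if (static_key_false(&paravirt_spinlocks_enabled)) {
			/*
			 * Atomically clear the lock byte: if a waiter
			 * replaced _Q_LOCKED_VAL with _Q_LOCKED_SLOWPATH,
			 * the cmpxchg fails and we must kick it.
			 */
			if (unlikely(cmpxchg((u8 *)lock, _Q_LOCKED_VAL, 0)
					!= _Q_LOCKED_VAL))
				queue_spin_unlock_slowpath(lock);
		} else {
			ACCESS_ONCE(*(u8 *)lock) = 0;
		}
		barrier();
	}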
2014 Jun 17
3
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...; +++ b/include/asm-generic/qspinlock_types.h
> @@ -61,6 +61,8 @@ typedef struct qspinlock {
>  #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
>  #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
>
> +#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
> +
>  #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
>  #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
>
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -86,6 +86,31 @@ static inline struct mcs_spinlock *decod
>  #define _Q_LOCKED_PENDING_MASK	(_Q_LOCKED_MASK | _Q...
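The "exchange of tail code word" the subject refers to ends up as a helper like this (a sketch of the cmpxchg-loop form used before the later NR_CPUS < 16K xchg() optimization):

	/*
	 * Publish our MCS node as the new queue tail, preserving the
	 * locked and pending bits, and return the previous lock word so
	 * the caller can find (and link behind) the old tail.
	 */
	static u32 xchg_tail(struct qspinlock *lock, u32 tail)
	{
		u32 old, new, val = atomic_read(&lock->val);

		for (;;) {
			new = (val & _Q_LOCKED_PENDING_MASK) | tail;
			old = atomic_cmpxchg(&lock->val, val, new);
			if (old == val)
				break;

			val = old;
		}
		return old;
	}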