search for: _q_pending_val

Displaying 14 results from an estimated 70 matches for "_q_pending_val".

2014 Jun 12
2
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...ion; queue.
> >>+	 * If we observe that the queue is not empty or both
> >>+	 * the pending and lock bits are set, queue
> >>	 */
> >>-	if (val & ~_Q_LOCKED_MASK)
> >>+	if ((val & _Q_TAIL_MASK) ||
> >>+	    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
> >>		goto queue;
> >>+	if (val == _Q_PENDING_VAL) {
> >>+		/*
> >>+		 * Pending bit is set, but not the lock bit.
> >>+		 * Assuming that the pending bit holder is going to
> >>+		 * set the lock bit and clear the pending bit soon, ...
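For context, the v11 logic under discussion fits in a few lines. Here is a minimal user-space C sketch of the same decision, assuming illustrative bit values and a hypothetical pending_path_worthwhile() helper rather than the kernel's real API:

#include <stdatomic.h>

/* Illustrative bit values; the kernel derives these from field offsets
 * in asm-generic/qspinlock_types.h. */
#define _Q_LOCKED_VAL	(1U << 0)	/* owner holds the lock byte */
#define _Q_PENDING_VAL	(1U << 8)	/* one waiter is spinning */
#define _Q_TAIL_MASK	(~0U << 16)	/* MCS queue tail encoding */

static atomic_uint lock_val;	/* stand-in for lock->val */

/* Returns 0 if the caller should fall back to the MCS queue,
 * 1 if the pending-bit fast path is still worth attempting. */
static int pending_path_worthwhile(void)
{
	unsigned int val = atomic_load_explicit(&lock_val,
						memory_order_relaxed);
	for (;;) {
		/* Queue if the tail is occupied or both bits are set. */
		if ((val & _Q_TAIL_MASK) ||
		    val == (_Q_LOCKED_VAL | _Q_PENDING_VAL))
			return 0;

		if (val != _Q_PENDING_VAL)
			return 1;

		/*
		 * Pending set but lock bit clear: the pending holder is
		 * about to take the lock and clear pending, so re-read
		 * and keep waiting instead of queuing.
		 */
		val = atomic_load_explicit(&lock_val,
					   memory_order_relaxed);
	}
}

The thread's concern is the middle branch: how long a waiter may legitimately spin on val == _Q_PENDING_VAL before giving up and queuing.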
2014 Jun 11
2
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...{
> 	/*
> -	 * If we observe any contention; queue.
> +	 * If we observe that the queue is not empty or both
> +	 * the pending and lock bits are set, queue
> 	 */
> -	if (val & ~_Q_LOCKED_MASK)
> +	if ((val & _Q_TAIL_MASK) ||
> +	    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
> 		goto queue;
>
> +	if (val == _Q_PENDING_VAL) {
> +		/*
> +		 * Pending bit is set, but not the lock bit.
> +		 * Assuming that the pending bit holder is going to
> +		 * set the lock bit and clear the pending bit soon,
> +		 * it is better to wait than to ex...
2014 Apr 17
2
[PATCH v9 06/19] qspinlock: prolong the stay in the pending bit path
...any contention; queue.
> +	 * If we observe that the queue is not empty,
> +	 * return and be queued.
> 	 */
> -	if (val & ~_Q_LOCKED_MASK)
> +	if (val & _Q_TAIL_MASK)
> 		return 0;
>
> +	if ((val & _Q_LOCKED_PENDING_MASK) ==
> +	    (_Q_LOCKED_VAL|_Q_PENDING_VAL)) {
> +		/*
> +		 * If both the lock and pending bits are set, we wait
> +		 * a while to see if that either bit will be cleared.
> +		 * If that is no change, we return and be queued.
> +		 */
> +		if (!retry)
> +			return 0;
> +		retry--;
> +		cpu_relax();
>...
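The v9/v10 variant bounds that same wait with a retry counter instead of spinning indefinitely. A sketch of that shape, again with illustrative constants and a user-space stand-in for cpu_relax():

#include <stdatomic.h>

#define _Q_LOCKED_VAL		(1U << 0)
#define _Q_PENDING_VAL		(1U << 8)
#define _Q_TAIL_MASK		(~0U << 16)
#define _Q_LOCKED_PENDING_MASK	(_Q_LOCKED_VAL | _Q_PENDING_VAL)

/* User-space stand-in for the kernel's cpu_relax() busy-wait hint. */
#define cpu_relax()	atomic_signal_fence(memory_order_seq_cst)

static atomic_uint lock_val;	/* stand-in for lock->val */

/* 0 = give up and join the MCS queue, 1 = keep trying the fast path. */
static int bounded_pending_wait(void)
{
	unsigned int val = atomic_load(&lock_val);
	int retry = 1;			/* wait at most one extra round */

	for (;;) {
		if (val & _Q_TAIL_MASK)	/* queue is not empty */
			return 0;

		if ((val & _Q_LOCKED_PENDING_MASK) == _Q_LOCKED_PENDING_MASK) {
			/* Both bits set: wait a bounded while, then queue. */
			if (!retry)
				return 0;
			retry--;
			cpu_relax();
			val = atomic_load(&lock_val);
			continue;
		}
		return 1;	/* room to set pending or take the lock */
	}
}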
2014 Jun 12
0
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...> +	 * If we observe that the queue is not empty or both
>>>>+	 * the pending and lock bits are set, queue
>>>>	 */
>>>>-	if (val & ~_Q_LOCKED_MASK)
>>>>+	if ((val & _Q_TAIL_MASK) ||
>>>>+	    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
>>>>		goto queue;
>>>>+	if (val == _Q_PENDING_VAL) {
>>>>+		/*
>>>>+		 * Pending bit is set, but not the lock bit.
>>>>+		 * Assuming that the pending bit holder is going to
>>>>+		 * set the lock bit and clear...
2014 May 08
2
[PATCH v10 06/19] qspinlock: prolong the stay in the pending bit path
...(;;) {
> 	/*
> -	 * If we observe any contention; queue.
> +	 * If we observe that the queue is not empty,
> +	 * return and be queued.
> 	 */
> -	if (val & ~_Q_LOCKED_MASK)
> +	if (val & _Q_TAIL_MASK)
> 		return 0;
>
> +	if (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)) {
> +		/*
> +		 * If both the lock and pending bits are set, we wait
> +		 * a while to see if that either bit will be cleared.
> +		 * If that is no change, we return and be queued.
> +		 */
> +		if (!retry)
> +			return 0;
> +		retry--;
> +		cpu_relax();
>...
2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...counter)
+	: "cr0", "memory"
+	: again );
 	return;
-
-	queued_spin_lock_slowpath(lock, val);
+	}
+
+	if (likely(val == _Q_LOCKED_VAL)) {
+		asm_volatile_goto(
+"	stwcx.	%0,0,%1		\n"
+"	bne-	%l[again]	\n"
+		:
+		: "r"(_Q_LOCKED_VAL | _Q_PENDING_VAL), "r" (&a->counter)
+		: "cr0", "memory"
+		: again );
+
+		atomic_cond_read_acquire(a, !(VAL & _Q_LOCKED_MASK));
+//		clear_pending_set_locked(lock);
+		WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+//		lockevent_inc(lock_pending);
+		return;
+	}
+
+...
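Stripped of the powerpc lwarx/stwcx. details, the control flow this experiment implements is roughly the following portable sketch. C11 compare-exchange only approximates the load-reserve/store-conditional pairing, and the final whole-word store assumes no waiter has queued a tail meanwhile (the patch's WRITE_ONCE touches only the 16-bit locked_pending half for that reason). All names here are illustrative:

#include <stdatomic.h>

#define _Q_LOCKED_VAL	(1U << 0)
#define _Q_PENDING_VAL	(1U << 8)
#define _Q_LOCKED_MASK	0xffU		/* the whole lock byte */

static atomic_uint lock_val;	/* stand-in for lock->val */

static void lock_with_pending_sketch(void)
{
	for (;;) {
		unsigned int val = 0;

		/* Uncontended: 0 -> locked (the first larx/stcx. pair). */
		if (atomic_compare_exchange_strong(&lock_val, &val,
						   _Q_LOCKED_VAL))
			return;

		if (val == _Q_LOCKED_VAL) {
			/* Locked, no pending, no queue: claim the pending
			 * bit (the second stwcx. in the patch). */
			if (!atomic_compare_exchange_strong(&lock_val, &val,
					_Q_LOCKED_VAL | _Q_PENDING_VAL))
				continue;	/* lost the race; retry */

			/* atomic_cond_read_acquire(): wait for the owner
			 * to drop the lock byte. */
			while (atomic_load_explicit(&lock_val,
					memory_order_acquire) & _Q_LOCKED_MASK)
				;

			/* Hand over: pending -> locked. Assumes no tail was
			 * queued concurrently; see the caveat above. */
			atomic_store(&lock_val, _Q_LOCKED_VAL);
			return;
		}
		return;	/* contended: the real code takes the slowpath */
	}
}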
2014 Jun 17
5
[PATCH 03/11] qspinlock: Add pending bit
...ys_inline void queue_spin_lock(struct qspinlock *lock)
{
-	u32 val;
+	u32 val, new, old;

	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
+
+	/* One more attempt - but if we fail, mark it as pending. */
+	if (val == _Q_LOCKED_VAL) {
+		new = _Q_LOCKED_VAL | _Q_PENDING_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == _Q_LOCKED_VAL) /* YEEY! */
+			return;
+		val = old;
+	}
	queue_spin_lock_slowpath(lock, val);
}

and then the slowpath preserves most of the old logic path (with the pending bit stuff)?

>
> Signed-off-by: Peter Zijl...
2014 Apr 18
0
[PATCH v9 06/19] qspinlock: prolong the stay in the pending bit path
...* If we observe that the queue is not empty,
>> +	 * return and be queued.
>> 	 */
>> -	if (val & ~_Q_LOCKED_MASK)
>> +	if (val & _Q_TAIL_MASK)
>> 		return 0;
>>
>> +	if ((val & _Q_LOCKED_PENDING_MASK) ==
>> +	    (_Q_LOCKED_VAL|_Q_PENDING_VAL)) {
>> +		/*
>> +		 * If both the lock and pending bits are set, we wait
>> +		 * a while to see if that either bit will be cleared.
>> +		 * If that is no change, we return and be queued.
>> +		 */
>> +		if (!retry)
>> +			return 0;
>> +		ret...
2014 Jun 11
0
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...f we observe any contention; queue.
>> +	 * If we observe that the queue is not empty or both
>> +	 * the pending and lock bits are set, queue
>> 	 */
>> -	if (val & ~_Q_LOCKED_MASK)
>> +	if ((val & _Q_TAIL_MASK) ||
>> +	    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
>> 		goto queue;
>>
>> +	if (val == _Q_PENDING_VAL) {
>> +		/*
>> +		 * Pending bit is set, but not the lock bit.
>> +		 * Assuming that the pending bit holder is going to
>> +		 * set the lock bit and clear the pending bit soon,
>> +...
2014 Apr 23
0
[PATCH v9 05/19] qspinlock: Optimize for smaller NR_CPUS
...*pval;
> +	u32 old, new;
> 	int retry = 1;
>
> 	/*
> @@ -593,8 +589,7 @@ static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
> 	if (val & _Q_TAIL_MASK)
> 		return 0;
>
> -	if ((val & _Q_LOCKED_PENDING_MASK) ==
> -	    (_Q_LOCKED_VAL|_Q_PENDING_VAL)) {
> +	if (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)) {
> 		/*
> 		 * If both the lock and pending bits are set, we wait
> @@ -605,9 +600,9 @@ static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
> 	r...
2015 Mar 16
0
[PATCH 3/9] qspinlock: Add pending bit
...efine _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
 #define _Q_TAIL_IDX_BITS	2
 #define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)
@@ -54,5 +59,6 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

 #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -94,24 +94,28 @@ static inline struct mcs_spinlock *decod
 	return per_cpu_ptr(&mcs_nodes[idx], cpu);
 }

+#define _Q_LOCKED_PENDING_MASK (_Q_LO...
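For orientation, the defines in this hunk slot into the following overall layout of the 32-bit lock word, reconstructed here from the shape of asm-generic/qspinlock_types.h; the 8-bit pending field applies when NR_CPUS fits the 14-bit tail-CPU encoding:

/* Layout of the 32-bit qspinlock word:
 *
 *  31            18 17  16 15       8 7         0
 * +----------------+------+----------+-----------+
 * |    tail CPU    | tail | pending  |  locked   |
 * |    (14 bits)   | idx  | (8 bits) | (8 bits)  |
 * +----------------+------+----------+-----------+
 */
#define _Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1) \
				 << _Q_ ## type ## _OFFSET)

#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_PENDING_BITS		8
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)	/* 0x001 */
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)	/* 0x100 */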
2020 Jul 21
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...gain );
> 	return;
> -
> -	queued_spin_lock_slowpath(lock, val);
> +	}
> +
> +	if (likely(val == _Q_LOCKED_VAL)) {
> +		asm_volatile_goto(
> +"	stwcx.	%0,0,%1		\n"
> +"	bne-	%l[again]	\n"
> +		:
> +		: "r"(_Q_LOCKED_VAL | _Q_PENDING_VAL), "r" (&a->counter)
> +		: "cr0", "memory"
> +		: again );
> +
> +		atomic_cond_read_acquire(a, !(VAL & _Q_LOCKED_MASK));
> +//		clear_pending_set_locked(lock);
> +		WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
> +//		lockevent_in...
2014 May 30
0
[PATCH v11 06/16] qspinlock: prolong the stay in the pending bit path
...inlock *lock, u32 val)
 	 */
 	for (;;) {
 		/*
-		 * If we observe any contention; queue.
+		 * If we observe that the queue is not empty or both
+		 * the pending and lock bits are set, queue
 		 */
-		if (val & ~_Q_LOCKED_MASK)
+		if ((val & _Q_TAIL_MASK) ||
+		    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
 			goto queue;

+		if (val == _Q_PENDING_VAL) {
+			/*
+			 * Pending bit is set, but not the lock bit.
+			 * Assuming that the pending bit holder is going to
+			 * set the lock bit and clear the pending bit soon,
+			 * it is better to wait than to exit at this point.
+			 */
+			cpu_relax(...
2014 May 07
0
[PATCH v10 06/19] qspinlock: prolong the stay in the pending bit path
...ng(struct qspinlock *lock, u32 *pval)
 	 */
 	for (;;) {
 		/*
-		 * If we observe any contention; queue.
+		 * If we observe that the queue is not empty,
+		 * return and be queued.
 		 */
-		if (val & ~_Q_LOCKED_MASK)
+		if (val & _Q_TAIL_MASK)
 			return 0;

+		if (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)) {
+			/*
+			 * If both the lock and pending bits are set, we wait
+			 * a while to see if that either bit will be cleared.
+			 * If that is no change, we return and be queued.
+			 */
+			if (!retry)
+				return 0;
+			retry--;
+			cpu_relax();
+			cpu_relax();
+			*pval = val = atomic_read(&a...