search for: atomic_cmpxchg

Displaying 20 results from an estimated 168 matches for "atomic_cmpxchg".

2014 Jun 17
5
[PATCH 03/11] qspinlock: Add pending bit
...e8..29cc9c7 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queue_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val, new;

 	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
+
+	/* One more attempt - but if we fail mark it as pending. */
+	if (val == _Q_LOCKED_VAL) {
+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == _Q_LOCKED_VAL) /* YEEY!...
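For readers new to the primitive: atomic_cmpxchg(v, old, new) atomically stores new into *v only if *v still equals old, and always returns the value it found, so success is detected by comparing the return value against old. The quoted attempt also carries a stray typo (Q_LOCKED_VAL is presumably _Q_LOCKED_VAL) and an undeclared old, and, as Paolo notes downthread, needs to live in a loop. A minimal sketch of the underlying retry idiom, illustrative only and not taken from the patch:

/*
 * Illustrative helper -- not from the series: the standard
 * atomic_cmpxchg() retry idiom the snippets above build on.
 */
static inline void atomic_set_bits(atomic_t *v, int bits)
{
	int old, val = atomic_read(v);

	for (;;) {
		old = atomic_cmpxchg(v, val, val | bits);
		if (old == val)		/* nobody raced us; bits are set */
			break;
		val = old;		/* lost the race; retry with what we saw */
	}
}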
2014 Jun 18
2
[PATCH 05/11] qspinlock: Optimize for smaller NR_CPUS
On 15/06/2014 14:47, Peter Zijlstra wrote:
> -	for (;;) {
> -		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
> -
> -		old = atomic_cmpxchg(&lock->val, val, new);
> -		if (old == val)
> -			break;
> -
> -		val = old;
> -	}
> +	clear_pending_set_locked(lock, val);
> 	return;

Might as well add clear_pending_set_locked already in patch 3.

Paolo
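The loop being removed here performs the (tail, pending=1, locked=0) -> (tail, 0, 1) transition: clear the pending bit and take the lock in one atomic step. Two plausible shapes for clear_pending_set_locked(), sketched from the discussion; the __qspinlock overlay and the locked_pending field name are assumptions, not the series verbatim:

/* Generic form: the same cmpxchg loop, just given a name. */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock, u32 val)
{
	u32 old, new;

	for (;;) {
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;
		val = old;
	}
}

/* NR_CPUS < 16K form: pending and locked share their own 16-bit
 * halfword, so the transition is a single plain store -- no other
 * bits live in that halfword. */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock, u32 val)
{
	struct __qspinlock *l = (void *)lock;	/* assumed overlay */

	ACCESS_ONCE(l->locked_pending) = _Q_LOCKED_VAL;
}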
2014 Jul 07
2
[PATCH 05/11] qspinlock: Optimize for smaller NR_CPUS
...35, Peter Zijlstra wrote:
> On Wed, Jun 18, 2014 at 01:39:52PM +0200, Paolo Bonzini wrote:
>> On 15/06/2014 14:47, Peter Zijlstra wrote:
>>>
>>> -	for (;;) {
>>> -		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
>>> -
>>> -		old = atomic_cmpxchg(&lock->val, val, new);
>>> -		if (old == val)
>>> -			break;
>>> -
>>> -		val = old;
>>> -	}
>>> +	clear_pending_set_locked(lock, val);
>>> 	return;
>>
>> Might as well add clear_pending_set_locked already in...
2014 Jun 17
3
[PATCH 03/11] qspinlock: Add pending bit
...generic/qspinlock.h
> >@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > */
> > static __always_inline void queue_spin_lock(struct qspinlock *lock)
> > {
> >-	u32 val;
> >+	u32 val, new;
> >
> >	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
> >	if (likely(val == 0))
> >		return;
> >+
> >+	/* One more attempt - but if we fail mark it as pending. */
> >+	if (val == _Q_LOCKED_VAL) {
> >+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> >+
> >+		old = atomic_c...
2014 Jun 18
1
[PATCH 07/11] qspinlock: Use a simple write to grab the lock, if applicable
On Sun, Jun 15, 2014 at 02:47:04PM +0200, Peter Zijlstra wrote:
> From: Waiman Long <Waiman.Long at hp.com>
>
> Currently, atomic_cmpxchg() is used to get the lock. However, this is
> not really necessary if there is more than one task in the queue and
> the queue head don't need to reset the queue code word. For that case,

s/queue code word/tail {number,value}/ ?

> a simple write to set the lock bit is enough as the...
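A hedged sketch of the idea under review: the queue head needs the full cmpxchg only when it is also the last queued task, because then taking the lock must clear the tail bits in the same atomic step; otherwise no one else may touch the locked byte, so a plain store suffices. The helper name, the __qspinlock overlay and its fields are assumptions for illustration:

static __always_inline void head_acquire(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;	/* assumed overlay */
	u32 val = atomic_read(&lock->val);

	/*
	 * We are the last queued task: locked and pending are 0 by now,
	 * so taking the lock must also clear our tail, atomically.
	 */
	if ((val & _Q_TAIL_MASK) == tail &&
	    atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL) == val)
		return;

	/*
	 * Someone queued behind us (the tail changed), so the tail must
	 * be preserved. Only the queue head stores to the locked byte at
	 * this point, so a simple write cannot lose an update.
	 */
	ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
}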
2014 Mar 03
5
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...0,0,1 ; trylock
 * 0,0,1 -> 0,1,1 ; pending
 */
val = atomic_read(&lock->val);
#if !OPT2
for (;;) {
	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	new = _QSPINLOCK_LOCKED;
	if (val == new)
		new |= _QSPINLOCK_PENDING;

	old = atomic_cmpxchg(&lock->val, val, new);
	if (old == val)
		break;

	val = old;
}

/*
 * we won the trylock
 */
if (new == _QSPINLOCK_LOCKED)
	return;
#else
/*
 * we can ignore the (unlikely) trylock case and have a fall-through on
 * the wait below.
 */
if (val & ~_Q_LOCKED_MASK)
	goto qu...
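The x,y,z triples in the comment read as (tail, pending, locked). A sketch of the 32-bit lock word layout they imply; the offsets are illustrative and the v5 names (_QSPINLOCK_*) differ slightly from later postings:

/*
 * One 32-bit word, three fields (illustrative offsets):
 *
 *  bits  0- 7: locked byte
 *  bits  8-15: pending
 *  bits 16-31: tail (encoded CPU + per-cpu queue-node index)
 */
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_OFFSET	8
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
#define _Q_TAIL_OFFSET		16
#define _Q_LOCKED_MASK		(0xffU << _Q_LOCKED_OFFSET)
#define _Q_PENDING_MASK		(0xffU << _Q_PENDING_OFFSET)
#define _Q_LOCKED_PENDING_MASK	(_Q_LOCKED_MASK | _Q_PENDING_MASK)
#define _Q_TAIL_MASK		(~_Q_LOCKED_PENDING_MASK)

So "0,0,0 -> 0,0,1" is the uncontended trylock, and "0,0,1 -> 0,1,1" marks a second task as pending behind a single owner.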
2014 Jun 17
3
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...;
> + * p,*,* -> n,*,* ; prev = xchg(lock, node)
> + */
> +static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
> +{
> +	u32 old, new, val = atomic_read(&lock->val);
> +
> +	for (;;) {
> +		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == val)
> +			break;
> +
> +		val = old;
> +	}
> +	return old;
> +}
> +
> +/**
> * queue_spin_lock_slowpath - acquire the queue spinlock
> * @lock: Pointer to queue spinlock structure
> * @val: Current value of the...
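The quoted loop is the generic xchg_tail: swap in the new tail while preserving the locked and pending bits. Patch 05 of the series observes that when the tail occupies its own 16-bit halfword (NR_CPUS < 16K), the cmpxchg loop collapses into one unconditional exchange; a sketch, with __qspinlock and its tail member as assumed names:

static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;	/* assumed overlay */

	/* Swap only the tail halfword; locked and pending are untouched. */
	return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}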
2014 Mar 04
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...inlock *lock, u32 *ocode, u32 ncode)
{
	while (true) {
		u32 qlcode = atomic_read(&lock->qlcode);

		if (qlcode == 0) {
			/*
			 * Try to get the lock
			 */
			if (atomic_cmpxchg(&lock->qlcode, 0,
					   _QSPINLOCK_LOCKED) == 0)
				return 1;
		} else if (qlcode & _QSPINLOCK_LOCKED) {
			*ocode = atomic_cmpxchg(&lock->qlcode, qlcode,...
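The qlcode == 0 branch is the classic cmpxchg trylock: succeed only when the whole lock word is zero, meaning no owner, no pending bit, and an empty queue. As a standalone helper with an atomic_read() prefilter, which avoids a pointless locked bus cycle when the lock is visibly contended; a sketch in the v5 naming, not the exact series code:

static __always_inline int queue_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->qlcode) &&
	    atomic_cmpxchg(&lock->qlcode, 0, _QSPINLOCK_LOCKED) == 0)
		return 1;	/* we did the 0 -> locked transition */
	return 0;		/* someone else owns or is queued */
}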
2014 Jun 18
0
[PATCH 03/11] qspinlock: Add pending bit
On 17/06/2014 22:36, Konrad Rzeszutek Wilk wrote:
> +	/* One more attempt - but if we fail mark it as pending. */
> +	if (val == _Q_LOCKED_VAL) {
> +		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == _Q_LOCKED_VAL) /* YEEY! */
> +			return;
> +		val = old;
> +	}

Note that Peter's code is in a for(;;) loop:

+	for (;;) {
+		/*
+		 * If we observe any contention; queue.
+		 */
+		if (val & ~_Q_LOCKED_MASK)
+			goto queue;
+
+		ne...
2011 Apr 01
4
[PATCH 1/5] staging: hv: change camel case funct names to lower case funct in hv_mouse
...@@ -211,7 +211,7 @@ static struct mousevsc_dev *AllocInputDevice(struct hv_device *Device)
 	/*
 	 * Set to 2 to allow both inbound and outbound traffics
-	 * (ie GetInputDevice() and MustGetInputDevice()) to proceed.
+	 * (ie get_input_device() and must_get_input_device()) to proceed.
 	 */
 	atomic_cmpxchg(&inputDevice->RefCount, 0, 2);
@@ -221,7 +221,7 @@ static struct mousevsc_dev *AllocInputDevice(struct hv_device *Device)
 	return inputDevice;
 }
-static void FreeInputDevice(struct mousevsc_dev *Device)
+static void free_input_device(struct mousevsc_dev *Device)
 {
 	WARN_ON(atomic_rea...
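What the atomic_cmpxchg() call above relies on: the store of 2 happens only if RefCount is still 0, and the value actually found comes back either way, so a caller can tell whether its initialization won. A tiny illustrative helper, not part of the driver:

/* Returns true if this caller performed the 0 -> 2 initialization. */
static bool ref_init_once(atomic_t *refcount)
{
	return atomic_cmpxchg(refcount, 0, 2) == 0;
}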
2014 Jun 15
28
[PATCH 00/11] qspinlock with paravirt support
Since Waiman seems incapable of doing simple things; here's my take on the paravirt crap. The first few patches are taken from Waiman's latest series, but the virt support is completely new. Its primary aim is to not mess up the native code. I've not stress tested it, but the virt and paravirt (kvm) cases boot on simple smp guests. I've not done Xen, but the patch should be