search for: arch_mcs_spin_unlock_contended

Displaying 20 results from an estimated 55 matches for "arch_mcs_spin_unlock_contended".

2014 May 07
0
[PATCH v10 09/19] qspinlock: Prepare for unfair lock support
...			goto release;	/* No contention */
+		else if (old & _Q_LOCKED_MASK)
+			goto retry_queue_wait;
 		val = old;
 	}
@@ -435,7 +447,7 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	while (!(next = (struct qnode *)ACCESS_ONCE(node->mcs.next)))
 		arch_mutex_cpu_relax();
-	arch_mcs_spin_unlock_contended(&next->mcs.locked);
+	arch_mcs_spin_unlock_contended(&next->qhead);
 release:
 	/*
-- 
1.7.1
2014 May 07
0
[PATCH v10 08/19] qspinlock: Make a new qnode structure to support virtualization
...);
 }
 
 /*
@@ -422,15 +432,15 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * contended path; wait for next, release.
 	 */
-	while (!(next = ACCESS_ONCE(node->next)))
+	while (!(next = (struct qnode *)ACCESS_ONCE(node->mcs.next)))
 		arch_mutex_cpu_relax();
-	arch_mcs_spin_unlock_contended(&next->locked);
+	arch_mcs_spin_unlock_contended(&next->mcs.locked);
 release:
 	/*
 	 * release the node
 	 */
-	this_cpu_dec(mcs_nodes[0].count);
+	this_cpu_dec(qnodes[0].mcs.count);
 }
 EXPORT_SYMBOL(queue_spin_lock_slowpath);
-- 
1.7.1
2014 Jun 15
0
[PATCH 07/11] qspinlock: Use a simple write to grab the lock, if applicable
...val = old;
 	}
@@ -355,12 +384,10 @@ void queue_spin_lock_slowpath(struct qsp
 	/*
 	 * contended path; wait for next, release.
 	 */
-	if (new != _Q_LOCKED_VAL) {
-		while (!(next = ACCESS_ONCE(node->next)))
-			cpu_relax();
+	while (!(next = ACCESS_ONCE(node->next)))
+		cpu_relax();
 
-		arch_mcs_spin_unlock_contended(&next->locked);
-	}
+	arch_mcs_spin_unlock_contended(&next->locked);
 
 release:
 	/*
2014 Apr 17
0
[PATCH v9 07/19] qspinlock: Use a simple write to grab the lock, if applicable
...queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * contended path; wait for next, release.
 	 */
-	if (new != _Q_LOCKED_VAL) {
-		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
+	while (!(next = ACCESS_ONCE(node->next)))
+		arch_mutex_cpu_relax();
 
-		arch_mcs_spin_unlock_contended(&next->locked);
-	}
+	arch_mcs_spin_unlock_contended(&next->locked);
 
 release:
 	/*
-- 
1.7.1
2014 May 07
0
[PATCH v10 07/19] qspinlock: Use a simple write to grab the lock, if applicable
...queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * contended path; wait for next, release.
 	 */
-	if (new != _Q_LOCKED_VAL) {
-		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
+	while (!(next = ACCESS_ONCE(node->next)))
+		arch_mutex_cpu_relax();
 
-		arch_mcs_spin_unlock_contended(&next->locked);
-	}
+	arch_mcs_spin_unlock_contended(&next->locked);
 
 release:
 	/*
-- 
1.7.1
2014 Jun 18
1
[PATCH 07/11] qspinlock: Use a simple write to grab the lock, if applicable
..._spin_lock_slowpath(struct qsp
>  	/*
>  	 * contended path; wait for next, release.
>  	 */
> -	if (new != _Q_LOCKED_VAL) {
> -		while (!(next = ACCESS_ONCE(node->next)))
> -			cpu_relax();
> +	while (!(next = ACCESS_ONCE(node->next)))
> +		cpu_relax();
> 
> -		arch_mcs_spin_unlock_contended(&next->locked);
> -	}
> +	arch_mcs_spin_unlock_contended(&next->locked);
> 
> release:
> 	/*
2014 Jun 16
4
[PATCH 10/11] qspinlock: Paravirt support
...*
>  	 * *,x,y -> *,0,0
>  	 */
> +	pv_wait_head(lock);
>  	while ((val = smp_load_acquire(&lock->val.counter)) &
>  			_Q_LOCKED_PENDING_MASK)
>  		cpu_relax();
> @@ -391,6 +567,7 @@ void queue_spin_lock_slowpath(struct qsp
>  		cpu_relax();
> 
>  	arch_mcs_spin_unlock_contended(&next->locked);
> +	pv_kick_node(next);

pv_kick_node is an expensive operation and it can significantly slow down the locking operation if we have to do it for every subsequent task in the queue.

-Longman
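For context on the cost concern raised above: paravirtualized queue-lock designs usually bound this cost by recording whether the successor vCPU has actually halted, and issuing the expensive wake-up hypercall only in that case. The sketch below is a hypothetical, self-contained illustration of that idea only; the pv_node layout, state values, and pv_kick() stub are stand-ins, not the API of the patch being reviewed.

/*
 * Hypothetical illustration -- not the patch's implementation.
 * Idea: a still-spinning successor is released by the normal store to its
 * 'locked' field; only a successor whose vCPU has halted needs the kick.
 */
#include <stdatomic.h>

enum pv_state { PV_RUNNING, PV_HALTED };

struct pv_node {
	atomic_int locked;	/* stand-in for mcs_spinlock::locked */
	atomic_int state;	/* PV_RUNNING or PV_HALTED */
	int cpu;		/* vCPU id to kick */
};

/* Stand-in for a wake-up hypercall (e.g. a KVM-style kick). */
static void pv_kick(int cpu)
{
	(void)cpu;		/* would trap into the hypervisor here */
}

static void pv_hand_over(struct pv_node *next)
{
	/* Normal MCS handoff: release the successor. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);

	/* Kick only if the successor vCPU actually went to sleep. */
	if (atomic_load_explicit(&next->state, memory_order_acquire) == PV_HALTED)
		pv_kick(next->cpu);
}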
2015 Apr 08
2
[PATCH v15 16/16] unfair qspinlock: a queue based unfair lock
.../*
+	 * Step 3
+	 * A next node has to be present if the lock has a different
+	 * tail code. So wait until the next pointer is set.
+	 */
+	while (!(next = (struct uf_node *)READ_ONCE(node->next)))
+		cpu_relax();
+
+	if (isqhead) {
+		struct mcs_spinlock *nxt = (struct mcs_spinlock *)next;
+
+		arch_mcs_spin_unlock_contended(&nxt->locked);
+		return true;	/* Done for queue head */
+	}
+
+	WRITE_ONCE(pn->prev->mcs.next, (struct mcs_spinlock *)next);
+
+	/*
+	 * Need to make sure that prev and prev_tail of the next node
+	 * are set up before modifying them.
+	 */
+	while (!READ_ONCE(next->prev) || !READ_...
2014 Jun 16
4
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...ave done: "prev->next = node;" And then exited out of 'val = atomic_read(&lock->val))' which suggests that queue_spin_unlock has called us. How can we be contended again?

Thanks!

> +		while (!(next = ACCESS_ONCE(node->next)))
> +			cpu_relax();
> +
> +		arch_mcs_spin_unlock_contended(&next->locked);
> +	}
> +
> +release:
> +	/*
> +	 * release the node
> +	 */
> +	this_cpu_dec(mcs_nodes[0].count);
> +}
> +EXPORT_SYMBOL(queue_spin_lock_slowpath);
2014 Jun 15
28
[PATCH 00/11] qspinlock with paravirt support
Since Waiman seems incapable of doing simple things, here's my take on the paravirt crap. The first few patches are taken from Waiman's latest series, but the virt support is completely new. Its primary aim is to not mess up the native code. I've not stress tested it, but the virt and paravirt (kvm) cases boot on simple SMP guests. I've not done Xen, but the patch should be
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...lways_inline void __pv_wait_head(struct qspinlock *lock,
 #define pv_init_node	__pv_init_node
 #define pv_wait_node	__pv_wait_node
-#define pv_kick_node	__pv_kick_node
+#define pv_scan_next	__pv_scan_next
 #define pv_wait_head	__pv_wait_head
@@ -441,7 +441,7 @@ queue:
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(next);
+	pv_scan_next(lock, next);
 release:
 	/*
@@ -462,7 +462,7 @@ EXPORT_SYMBOL(queue_spin_lock_slowpath);
 #undef pv_init_node
 #undef pv_wait_node
-#undef pv_kick_node
+#undef pv_scan_next
 #undef pv_wait_head
 #undef queue_spin_lock_slowpath
 diff...
2014 Jun 23
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...h_mcs_spin_lock_contended() and goes wait on the 'locked' state.

So what we do here is wait for 'node->next' to be set; it might still be NULL if the other cpu is between:

	prev = xchg(lock->tail, node);

and:

	prev->next = node;

Once we observe the next node, we call arch_mcs_spin_unlock_contended() on it, which sets its mcs_spinlock::locked and makes the new 'top of queue' drop out of arch_mcs_spin_lock_contended and spin on the 'locked' state as said above.
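For readers outside the kernel tree, the handoff described here is the classic MCS pattern. Below is a minimal, self-contained sketch of that pattern written with C11 atomics; it is an illustration only (illustrative names, no per-cpu node pool, a plain busy-wait instead of cpu_relax()), not the kernel's arch_mcs_spin_{lock,unlock}_contended() implementation.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;		/* 0 = keep waiting, 1 = lock handed over */
};

struct mcs_lock {
	_Atomic(struct mcs_node *) tail;
};

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

	/* Equivalent of: prev = xchg(lock->tail, node); */
	prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
	if (!prev)
		return;			/* queue was empty: lock acquired */

	/* Equivalent of: prev->next = node;  (the window mentioned above) */
	atomic_store_explicit(&prev->next, node, memory_order_release);

	/* arch_mcs_spin_lock_contended(): spin until the predecessor hands over. */
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;			/* cpu_relax() in the kernel */
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next =
		atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No visible successor: try to reset the tail to empty. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
			return;

		/* A successor is between the xchg and prev->next = node; wait. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;		/* cpu_relax() in the kernel */
	}

	/* arch_mcs_spin_unlock_contended(): make the successor the queue head. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}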
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...is is because the set_locked() function below
 	 * does not imply a full barrier.
 	 */
+	pv_wait_head(lock);
 	while ((val = smp_load_acquire(&lock->val.counter)) &
 			_Q_LOCKED_PENDING_MASK)
 		cpu_relax();
@@ -406,6 +449,7 @@ void queue_spin_lock_slowpath(struct qsp
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
+	pv_kick_node(next);
 
 release:
 	/*
@@ -414,3 +458,26 @@ void queue_spin_lock_slowpath(struct qsp
 	this_cpu_dec(mcs_nodes[0].count);
 }
 EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_unlock_slowpath().
+ */
+#if !defined(_GEN...
2014 Apr 17
33
[PATCH v9 00/19] qspinlock: a 4-byte queue spinlock with PV support
v8->v9:
 - Integrate PeterZ's version of the queue spinlock patch with some modification:
   http://lkml.kernel.org/r/20140310154236.038181843 at infradead.org
 - Break the more complex patches into smaller ones to ease review effort.
 - Fix a racing condition in the PV qspinlock code.

v7->v8:
 - Remove one unneeded atomic operation from the slowpath, thus improving