Displaying results from an estimated 65 matches for "this_cpu_dec".
2014 Feb 26
1
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
On Wed, Feb 26, 2014 at 10:14:21AM -0500, Waiman Long wrote:
> +static void put_qnode(void)
> +{
> + struct qnode_set *qset = this_cpu_ptr(&qnset);
> +
> + qset->node_idx--;
> +}
That very much wants to be: this_cpu_dec().
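The rewrite Peter is asking for would look roughly like the sketch below, assuming qnset is the per-CPU struct qnode_set from the quoted patch and node_idx is a plain integer member. this_cpu_dec() folds the this_cpu_ptr() lookup and the decrement into one preempt-safe per-CPU operation (a single segment-relative decrement on x86).

/* sketch of the suggested rewrite, under the assumptions above */
static void put_qnode(void)
{
	this_cpu_dec(qnset.node_idx);
}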
2014 May 07
0
[PATCH v10 08/19] qspinlock: Make a new qnode structure to support virtualization
...e (!(next = ACCESS_ONCE(node->next)))
+ while (!(next = (struct qnode *)ACCESS_ONCE(node->mcs.next)))
arch_mutex_cpu_relax();
- arch_mcs_spin_unlock_contended(&next->locked);
+ arch_mcs_spin_unlock_contended(&next->mcs.locked);
release:
/*
* release the node
*/
- this_cpu_dec(mcs_nodes[0].count);
+ this_cpu_dec(qnodes[0].mcs.count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
--
1.7.1
2014 Oct 27
2
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
...TAIL_CPU_BITS));
- if (pv_enabled())
+ if (pv_enabled()) {
+ pv_queue_spin_lock_slowpath(lock, val);
+ return;
+ }
+
+ if (in_pv_code())
goto queue;
if (virt_queue_spin_lock(lock))
@@ -474,3 +502,23 @@ release:
this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+/*
+ * Generate the PV version of the queue_spin_lock_slowpath function
+ */
+#undef pv_init_node
+#undef pv_wait_check
+#undef pv_link_and_wait_...
2014 May 21
0
[RFC 08/07] qspinlock: integrate pending bit into queue
...15 +433,18 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
* attempt the trylock once more in the hope someone let go while we
* weren't watching.
*/
- if (queue_spin_trylock(lock))
+ // is some of the re-checking counterproductive?
+ if (trylock(lock, &val)) {
+ this_cpu_dec(mcs_nodes[0].count); // ugly
+ return;
+ }
+ if (trypending(lock, &val))
goto release;
/*
- * we already touched the queueing cacheline; don't bother with pending
- * stuff.
- *
* p,*,* -> n,*,*
*/
+ // racing for pending/queue till here; safe
old = xchg_tail(lock, tai...
2014 Oct 27
0
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
...If you have two functions you might as well use the PV stuff to patch in
the right function call at the usage sites and avoid:
> + if (pv_enabled()) {
> + pv_queue_spin_lock_slowpath(lock, val);
> + return;
> + }
this altogether.
> this_cpu_dec(mcs_nodes[0].count);
> }
> EXPORT_SYMBOL(queue_spin_lock_slowpath);
> +
> +#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
> +/*
> + * Generate the PV version of the queue_spin_lock_slowpath function
> + */
> +#undef pv_init_node
> +#un...
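The alternative Peter sketches here is to make the choice once, at the call site, instead of testing pv_enabled() inside the hot path. Below is a simplified illustration of that idea using a plain function pointer selected at boot; the kernel itself does this with paravirt call-site patching rather than an indirect call, and pv_lock_init() and the _fn pointer are hypothetical names for the sketch.

static void native_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
static void pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/* hypothetical: chosen once at boot, so the slowpath has no pv_enabled() test */
static void (*queue_spin_lock_slowpath_fn)(struct qspinlock *, u32) =
	native_queue_spin_lock_slowpath;

void __init pv_lock_init(void)
{
	if (pv_enabled())
		queue_spin_lock_slowpath_fn = pv_queue_spin_lock_slowpath;
}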
2014 May 14
2
[PATCH v10 03/19] qspinlock: Add pending bit
2014-05-14 19:00+0200, Peter Zijlstra:
> On Wed, May 14, 2014 at 06:51:24PM +0200, Radim Krčmář wrote:
> > Ok.
> > I've seen merit in pvqspinlock even with slightly slower first-waiter,
> > so I would have happily sacrificed those horrible branches.
> > (I prefer elegant to optimized code, but I can see why we want to be
> > strictly better than ticketlock.)
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...val.counter)) & _Q_LOCKED_PENDING_MASK)
cpu_relax();
@@ -406,6 +449,7 @@ void queue_spin_lock_slowpath(struct qsp
cpu_relax();
arch_mcs_spin_unlock_contended(&next->locked);
+ pv_kick_node(next);
release:
/*
@@ -414,3 +458,26 @@ void queue_spin_lock_slowpath(struct qsp
this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_unlock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef pv_enabled
+#define pv_enabled()...
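The "#undef pv_enabled ... #define pv_enabled()" tail quoted above is the self-include trick these patches use: the slowpath file is compiled twice, first with empty pv_* stubs for the native version, then re-included with the hooks redefined to emit the paravirt flavour. A condensed sketch of the pattern follows, with hook names taken from the excerpts; the exact hook list and the generated symbol name vary between patch versions.

#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef  pv_init_node			/* redefined to the real PV hooks ... */
#undef  pv_kick_node

#undef  queue_spin_lock_slowpath
#define queue_spin_lock_slowpath	__pv_queue_spin_lock_slowpath

#include "qspinlock.c"			/* second pass compiles the PV slowpath */

#endif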
2014 Oct 24
3
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
On 10/24/2014 04:47 AM, Peter Zijlstra wrote:
> On Thu, Oct 16, 2014 at 02:10:38PM -0400, Waiman Long wrote:
>> +static inline void pv_init_node(struct mcs_spinlock *node)
>> +{
>> + struct pv_qnode *pn = (struct pv_qnode *)node;
>> +
>> + BUILD_BUG_ON(sizeof(struct pv_qnode)> 5*sizeof(struct mcs_spinlock));
>> +
>> + if (!pv_enabled())
>> +
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
..._head(lock, node);
while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
cpu_relax();
@@ -397,6 +441,7 @@ queue:
cpu_relax();
arch_mcs_spin_unlock_contended(&next->locked);
+ pv_kick_node(next);
release:
/*
@@ -405,3 +450,25 @@ release:
this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_unlock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef pv_enabled
+#define pv_enabled()...
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
..._head(lock, node);
while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
cpu_relax();
@@ -397,6 +440,7 @@ queue:
cpu_relax();
arch_mcs_spin_unlock_contended(&next->locked);
+ pv_kick_node(next);
release:
/*
@@ -405,3 +449,25 @@ release:
this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_unlock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef pv_enabled
+#define pv_enabled()...
2014 Jun 15
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...val)
+ break;
+
+ val = old;
+ }
+
+ /*
+ * contended path; wait for next, release.
+ */
+ if (new != _Q_LOCKED_VAL) {
+ while (!(next = ACCESS_ONCE(node->next)))
+ cpu_relax();
+
+ arch_mcs_spin_unlock_contended(&next->locked);
+ }
+
+release:
+ /*
+ * release the node
+ */
+ this_cpu_dec(mcs_nodes[0].count);
+}
+EXPORT_SYMBOL(queue_spin_lock_slowpath);
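For context on the this_cpu_dec() in the release path: the slowpath draws its queue node from a small per-CPU pool, one slot per nesting context (task, soft-irq, hard-irq, NMI), and only mcs_nodes[0].count tracks how many slots are live. A condensed sketch of that inc/dec pairing, following the quoted patch:

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked;	/* 1 if lock acquired */
	int count;	/* nesting count, only used on mcs_nodes[0] */
};

#define MAX_NODES	4
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);

	/* queueing entry in queue_spin_lock_slowpath(): */
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx  = node->count++;		/* pick the slot for this nesting level */
	node += idx;
	node->locked = 0;
	node->next = NULL;

	/* ... queue behind the tail, spin, take the lock, hand off ... */

release:
	this_cpu_dec(mcs_nodes[0].count);	/* free the slot for this CPU */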
2014 Mar 03
5
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...ontended path; wait for next, release.
*/
if (new != _QSPINLOCK_LOCKED) {
while (!(next = ACCESS_ONCE(node->next)))
arch_mutex_cpu_relax();
arch_mcs_spin_unlock_contended(&next->locked);
}
release:
/*
* release the node
*/
this_cpu_ptr(&mcs_nodes[0])->count--;
// this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
2015 Mar 16
0
[PATCH 1/9] qspinlock: A simple generic 4-byte queue spinlock
...== val)
+ break;
+
+ val = old;
+ }
+
+ /*
+ * contended path; wait for next, release.
+ */
+ if (new != _Q_LOCKED_VAL) {
+ while (!(next = READ_ONCE(node->next)))
+ cpu_relax();
+
+ arch_mcs_spin_unlock_contended(&next->locked);
+ }
+
+release:
+ /*
+ * release the node
+ */
+ this_cpu_dec(mcs_nodes[0].count);
+}
+EXPORT_SYMBOL(queue_spin_lock_slowpath);
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...val.counter)) & _Q_LOCKED_PENDING_MASK)
cpu_relax();
@@ -397,6 +440,7 @@ void queue_spin_lock_slowpath(struct qsp
cpu_relax();
arch_mcs_spin_unlock_contended(&next->locked);
+ pv_kick_node(next);
release:
/*
@@ -405,3 +449,25 @@ void queue_spin_lock_slowpath(struct qsp
this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queue_spin_lock_slowpath);
+
+/*
+ * Generate the paravirt code for queue_spin_unlock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef pv_enabled
+#define pv_enabled()...