Displaying 20 results from an estimated 224 matches for "trylock".
2014 Jun 17
3
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...re
> * @val: Current value of the queue spinlock 32-bit word
> @@ -182,36 +207,25 @@ void queue_spin_lock_slowpath(struct qsp
> node->next = NULL;
>
> /*
> - * we already touched the queueing cacheline; don't bother with pending
> - * stuff.
> - *
> - * trylock || xchg(lock, node)
> - *
> - * 0,0,0 -> 0,0,1 ; trylock
> - * p,y,x -> n,y,x ; prev = xchg(lock, node)
> + * We touched a (possibly) cold cacheline in the per-cpu queue node;
> + * attempt the trylock once more in the hope someone let go while we
> + * weren't wat...
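Pieced together from the hunks quoted in these entries, the replacement queue path looks roughly like this (a sketch, not the exact patch; queue_spin_trylock() and xchg_tail() are the helpers this series factors out, and the xchg_tail() signature varies between versions of the series):

	/* One more opportunistic attempt before committing to the queue. */
	if (queue_spin_trylock(lock))
		goto release;

	/*
	 * Publish this CPU's node as the new queue tail. A single
	 * unconditional exchange replaces the old cmpxchg loop that tried
	 * "trylock || xchg(lock, node)" in one step:
	 *
	 * p,y,x -> n,y,x ; prev = xchg_tail(lock, tail)
	 */
	old = xchg_tail(lock, tail);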
2014 Jun 18
3
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...> - for (;;) {
> - new = _Q_LOCKED_VAL;
> - if (val)
> - new = tail | (val & _Q_LOCKED_PENDING_MASK);
> -
> - old = atomic_cmpxchg(&lock->val, val, new);
> - if (old == val)
> - break;
> -
> - val = old;
> - }
>
> /*
> - * we won the trylock; forget about queueing.
> */
> - if (new == _Q_LOCKED_VAL)
> - goto release;
>
> The trylock happens if the "if (val)" hits the else branch.
>
> What the patch does is change it from attempting two transitions with a
> single cmpxchg:
>
> - * 0,0,0 -> ...
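For reading the comma triples in these comments: each spells out the (tail, pending, locked) fields packed into the 32-bit lock word, so "0,0,0 -> 0,0,1" is the uncontended trylock setting the locked byte, and "p,y,x -> n,y,x" swaps tail p for tail n while leaving pending and locked untouched. The layout, using the offsets the code eventually settled on (an assumption; early versions of the series differ):

	#define _Q_LOCKED_OFFSET	0	/* bits  0- 7: locked byte */
	#define _Q_PENDING_OFFSET	8	/* bits  8-15: pending (a full byte when NR_CPUS < 16K) */
	#define _Q_TAIL_OFFSET		16	/* bits 16-31: tail, CPU number + node index */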
2014 Jun 18
0
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...rate patch.
It doesn't really add a new try; the old code is:
- for (;;) {
- new = _Q_LOCKED_VAL;
- if (val)
- new = tail | (val & _Q_LOCKED_PENDING_MASK);
-
- old = atomic_cmpxchg(&lock->val, val, new);
- if (old == val)
- break;
-
- val = old;
- }
/*
- * we won the trylock; forget about queueing.
*/
- if (new == _Q_LOCKED_VAL)
- goto release;
The trylock happens if the "if (val)" hits the else branch.
What the patch does is change it from attempting two transitions with a
single cmpxchg:
- * 0,0,0 -> 0,0,1 ; trylock
- * p,y,x -> n,y,x ; prev...
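Annotated, the loop under discussion (a sketch of the same quoted code, with comments added to mark the two transitions the single cmpxchg attempts):

	for (;;) {
		new = _Q_LOCKED_VAL;	/* val == 0: 0,0,0 -> 0,0,1, the trylock */
		if (val)		/* contended: p,y,x -> n,y,x, queue behind tail */
			new = tail | (val & _Q_LOCKED_PENDING_MASK);

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	if (new == _Q_LOCKED_VAL)	/* we won the trylock; forget about queueing */
		goto release;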
2014 May 08
1
[PATCH v10 03/19] qspinlock: Add pending bit
On Wed, May 07, 2014 at 11:01:31AM -0400, Waiman Long wrote:
> +/**
> + * trylock_pending - try to acquire queue spinlock using the pending bit
> + * @lock : Pointer to queue spinlock structure
> + * @pval : Pointer to value of the queue spinlock 32-bit word
> + * Return: 1 if lock acquired, 0 otherwise
> + */
> +static inline int trylock_pending(struct qspinlock...
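The body is truncated above; a sketch of what it does, assuming the v9/v10 naming (the real function also handles a few more interleavings, so treat this as illustrative):

	static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
	{
		u32 old, new, val = *pval;

		for (;;) {
			/* Any tail or pending bit already set? Give up and queue. */
			if (val & ~_Q_LOCKED_MASK)
				return 0;

			new = _Q_LOCKED_VAL;		/* 0,0,0 -> 0,0,1 : trylock */
			if (val == new)
				new |= _Q_PENDING_VAL;	/* 0,0,1 -> 0,1,1 : pending */

			old = atomic_cmpxchg(&lock->val, val, new);
			if (old == val)
				break;

			*pval = val = old;
		}

		if (new == _Q_LOCKED_VAL)	/* won the trylock outright */
			return 1;

		/* We hold pending: spin until the owner releases (*,1,1 -> *,1,0). */
		while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
			cpu_relax();

		/* Take ownership and clear the pending bit (*,1,0 -> *,0,1). */
		for (;;) {
			new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
			old = atomic_cmpxchg(&lock->val, val, new);
			if (old == val)
				break;
			val = old;
		}
		return 1;
	}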
2017 Apr 20
1
[PATCH net-next v2 2/5] virtio-net: transmit napi
...;
>
> I'm not sure this is best or even correct. Since we clean xmit packets
> speculatively in virtnet_poll_tx(), we need to call free_old_xmit_skbs()
> unconditionally. This can also help to reduce the possibility of napi
> rescheduling in virtnet_poll_tx().
Because of the use of trylock there. Absolutely, thanks! Perhaps I should
only use trylock in the opportunistic clean path from the rx softirq and
full locking in the tx softirq.
I previously observed that cleaning here would, counterintuitively,
reduce efficiency. It reverted the improvements of cleaning transmit
completions...
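The split being proposed, sketched with the net stack's __netif_tx_trylock()/__netif_tx_lock() helpers (free_old_xmit_skbs() and the send queue are from the series; the exact shape here is illustrative, not the posted patch):

	/* rx softirq, opportunistic clean: skip if the tx lock is contended */
	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq);
		__netif_tx_unlock(txq);
	}

	/* tx softirq (virtnet_poll_tx): must reclaim, so lock unconditionally */
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);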
2014 Jun 18
0
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...>> - if (val)
>> - new = tail | (val & _Q_LOCKED_PENDING_MASK);
>> -
>> - old = atomic_cmpxchg(&lock->val, val, new);
>> - if (old == val)
>> - break;
>> -
>> - val = old;
>> - }
>>
>> /*
>> - * we won the trylock; forget about queueing.
>> */
>> - if (new == _Q_LOCKED_VAL)
>> - goto release;
>>
>> The trylock happens if the "if (val)" hits the else branch.
>>
>> What the patch does is change it from attempting two transitions with a
>> single cmp...
2014 Apr 17
2
[PATCH v9 03/19] qspinlock: Add pending bit
On Thu, Apr 17, 2014 at 11:03:55AM -0400, Waiman Long wrote:
> +/**
> + * trylock_pending - try to acquire queue spinlock using the pending bit
> + * @lock : Pointer to queue spinlock structure
> + * @pval : Pointer to value of the queue spinlock 32-bit word
> + * Return: 1 if lock acquired, 0 otherwise
> + */
> +static inline int trylock_pending(struct qspinlock...
2014 Jun 15
0
[PATCH 03/11] qspinlock: Add pending bit
...' :
+ * queue : ^--' :
*/
void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
@@ -110,6 +114,65 @@ void queue_spin_lock_slowpath(struct qsp
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+ /*
+ * trylock || pending
+ *
+ * 0,0,0 -> 0,0,1 ; trylock
+ * 0,0,1 -> 0,1,1 ; pending
+ */
+ for (;;) {
+ /*
+ * If we observe any contention; queue.
+ */
+ if (val & ~_Q_LOCKED_MASK)
+ goto queue;
+
+ new = _Q_LOCKED_VAL;
+ if (val == new)
+ new |= _Q_PENDING_VAL;
+
+ old = atomic_cm...
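The snippet cuts off inside the loop; picking up at the cmpxchg, the rest of this hunk reads roughly as follows (a reconstruction from the same series, matching the transitions in the comment above):

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * We won the trylock (0,0,0 -> 0,0,1); the slowpath is done.
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * We set pending (0,0,1 -> 0,1,1): spin until the owner releases,
	 * then take over, clearing pending and setting locked - the same
	 * steps sketched for trylock_pending() above.
	 */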
2014 Apr 17
0
[PATCH v9 03/19] qspinlock: Add pending bit
...x b97a1ad..d35362a 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -83,23 +83,93 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
+#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+
+/**
+ * trylock_pending - try to acquire queue spinlock using the pending bit
+ * @lock : Pointer to queue spinlock structure
+ * @pval : Pointer to value of the queue spinlock 32-bit word
+ * Return: 1 if lock acquired, 0 otherwise
+ */
+static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
+{
+ u3...
2014 May 07
0
[PATCH v10 03/19] qspinlock: Add pending bit
...x b97a1ad..6467bfc 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -83,23 +83,97 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
+#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+
+/**
+ * trylock_pending - try to acquire queue spinlock using the pending bit
+ * @lock : Pointer to queue spinlock structure
+ * @pval : Pointer to value of the queue spinlock 32-bit word
+ * Return: 1 if lock acquired, 0 otherwise
+ */
+static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
+{
+ u3...
2014 Jun 15
0
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...* @lock: Pointer to queue spinlock structure
* @val: Current value of the queue spinlock 32-bit word
@@ -182,36 +207,25 @@ void queue_spin_lock_slowpath(struct qsp
node->next = NULL;
/*
- * we already touched the queueing cacheline; don't bother with pending
- * stuff.
- *
- * trylock || xchg(lock, node)
- *
- * 0,0,0 -> 0,0,1 ; trylock
- * p,y,x -> n,y,x ; prev = xchg(lock, node)
+ * We touched a (possibly) cold cacheline in the per-cpu queue node;
+ * attempt the trylock once more in the hope someone let go while we
+ * weren't watching.
*/
- for (;;) {
- n...
2014 Apr 17
0
[PATCH v9 04/19] qspinlock: Extract out the exchange of tail code word
...chg_tail(struct qspinlock *lock, u32 tail, u32 *pval)
+{
+ u32 old, new, val = *pval;
+
+ for (;;) {
+ new = (val & _Q_LOCKED_PENDING_MASK) | tail;
+ old = atomic_cmpxchg(&lock->val, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+ *pval = new;
+ return old;
+}
+
+/**
* trylock_pending - try to acquire queue spinlock using the pending bit
* @lock : Pointer to queue spinlock structure
* @pval : Pointer to value of the queue spinlock 32-bit word
@@ -192,36 +220,25 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
node->next = NULL;
/*
- * we al...
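The point of factoring out xchg_tail() shows up later in the series: once the tail lives in its own halfword of the lock word, the whole cmpxchg loop above collapses into one unconditional exchange. A sketch of that variant, assuming a union exposing a u16 tail as the eventual mainline qspinlock defines it (and NR_CPUS small enough for the tail to fit 16 bits):

	static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
	{
		/*
		 * Swap in the new tail; the locked and pending bytes live in
		 * the other halfword, so they are untouched and no retry loop
		 * is needed.
		 */
		return (u32)xchg(&lock->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
	}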
2014 Mar 03
5
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...: (*,x,y) +--> (*,0,0) ---> (*,0,1) -' :
* queue : :
*
*/
void queue_spin_lock_slowpath(struct qspinlock *lock)
{
struct mcs_spinlock *prev, *next, *node;
u32 val, new, old, code;
int idx;
#if PENDING
/*
* trylock || pending
*
* 0,0,0 -> 0,0,1 ; trylock
* 0,0,1 -> 0,1,1 ; pending
*/
val = atomic_read(&lock->val);
#if !OPT2
for (;;) {
/*
* If we observe any contention; queue.
*/
if (val & ~_Q_LOCKED_MASK)
goto queue;
new = _QSPINLOCK_LOCKED;
if (val == new)
new...
2014 Jun 17
5
[PATCH 03/11] qspinlock: Add pending bit
...--' :
> */
> void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> {
> @@ -110,6 +114,65 @@ void queue_spin_lock_slowpath(struct qsp
>
> BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> + /*
> + * trylock || pending
> + *
> + * 0,0,0 -> 0,0,1 ; trylock
> + * 0,0,1 -> 0,1,1 ; pending
> + */
> + for (;;) {
> + /*
> + * If we observe any contention; queue.
> + */
> + if (val & ~_Q_LOCKED_MASK)
> + goto queue;
> +
> + new = _Q_LOCKED_VAL;
> +...