Displaying 20 results from an estimated 49 matches for "arch_mcs_spin_lock_contend".
2014 Jun 23
1
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...> > > +
> > > + /*
> > > + * if there was a previous node; link it and wait.
> > > + */
> > > + if (old & ~_Q_LOCKED_MASK) {
> > > + prev = decode_tail(old);
> > > + ACCESS_ONCE(prev->next) = node;
> > > +
> > > + arch_mcs_spin_lock_contended(&node->locked);
>
> Could you add a comment here:
>
> /* We are spinning forever until the previous node updates locked - which
it does once it has updated lock->val with our tail number. */
That's incorrect -- or at least, I understand that to be incorrect....
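For reference, the wait/handoff pair being argued about above, as a minimal sketch modelled on the generic mcs_spinlock.h helper quoted further down in these results; the unlock-side counterpart does not appear in any of these excerpts and is an assumption here, shown only to make the comment's meaning concrete: each queued CPU spins on its own node->locked with an acquire load, and its predecessor hands the wait over with a single store-release.

/* Sketch only: the generic contended-wait helper (matching the
 * mcs_spinlock.h hunk shown later in these results) and an assumed
 * unlock-side counterpart that releases the next waiter. */
#ifndef arch_mcs_spin_lock_contended
#define arch_mcs_spin_lock_contended(l)                         \
do {                                                            \
        while (!(smp_load_acquire(l)))                          \
                cpu_relax();                                    \
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
#define arch_mcs_spin_unlock_contended(l)                       \
        smp_store_release((l), 1)       /* wakes the spinner above */
#endif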
2014 Jun 16
4
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...linux-2.6.orig/kernel/locking/mcs_spinlock.h
> +++ linux-2.6/kernel/locking/mcs_spinlock.h
> @@ -17,6 +17,7 @@
> struct mcs_spinlock {
> struct mcs_spinlock *next;
> int locked; /* 1 if lock acquired */
> + int count;
This could use a comment.
> };
>
> #ifndef arch_mcs_spin_lock_contended
> Index: linux-2.6/kernel/locking/qspinlock.c
> ===================================================================
> --- /dev/null
> +++ linux-2.6/kernel/locking/qspinlock.c
> @@ -0,0 +1,197 @@
> +/*
> + * Queue spinlock
> + *
> + * This program is free software; you c...
2014 Jun 17
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...OCKED_VAL)
> > + goto release;
> > +
> > + /*
> > + * if there was a previous node; link it and wait.
> > + */
> > + if (old & ~_Q_LOCKED_MASK) {
> > + prev = decode_tail(old);
> > + ACCESS_ONCE(prev->next) = node;
> > +
> > + arch_mcs_spin_lock_contended(&node->locked);
Could you add a comment here:
/* We are spinning forever until the previous node updates locked - which
it does once it has updated lock->val with our tail number. */
> > + }
> > +
> > + /*
> > + * we're at the head of the waitqueue, w...
2014 Jun 23
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...OCKED_VAL)
> > + goto release;
> > +
> > + /*
> > + * if there was a previous node; link it and wait.
> > + */
> > + if (old & ~_Q_LOCKED_MASK) {
> > + prev = decode_tail(old);
> > + ACCESS_ONCE(prev->next) = node;
> > +
> > + arch_mcs_spin_lock_contended(&node->locked);
> > + }
> > +
> > + /*
> > + * we're at the head of the waitqueue, wait for the owner to go away.
> > + *
> > + * *,x -> *,0
> > + */
> > + while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
> ...
2014 May 07
0
[PATCH v10 08/19] qspinlock: Make a new qnode structure to support virtualization
...virtualization features like unfair lock
and para-virtualized spinlock, it is necessary to store additional
CPU specific data into the queue node structure. As a result, a new
qnode structure is created and the mcs_spinlock structure is now part
of the new structure.
It is also necessary to expand arch_mcs_spin_lock_contended() into the
underlying while loop, as additional code will need to be inserted
into the loop.
Signed-off-by: Waiman Long <Waiman.Long at hp.com>
---
kernel/locking/qspinlock.c | 36 +++++++++++++++++++++++-------------
1 files changed, 23 insertions(+), 13 deletions(-)
diff --git a/kernel/...
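As a rough illustration of what "expanding arch_mcs_spin_lock_contended() into the underlying while loop" means, a hedged sketch follows: the default macro simply spins with an acquire load (see the mcs_spinlock.h hunk elsewhere in these results), and open-coding it lets per-iteration work be dropped into the loop body. The pv hook named below is a hypothetical placeholder, not code taken from this patch.

/* Default form: the helper hides the spin loop. */
arch_mcs_spin_lock_contended(&node->locked);

/* Expanded form (sketch): the same acquire-load spin, written out so
 * virtualization-specific work can run on every iteration.
 * pv_queue_spin_check() is a hypothetical stand-in for whatever the
 * patch actually inserts here. */
while (!smp_load_acquire(&node->locked)) {
        pv_queue_spin_check(node);      /* hypothetical hook */
        cpu_relax();
}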
2014 Jun 15
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...h
===================================================================
--- linux-2.6.orig/kernel/locking/mcs_spinlock.h
+++ linux-2.6/kernel/locking/mcs_spinlock.h
@@ -17,6 +17,7 @@
struct mcs_spinlock {
struct mcs_spinlock *next;
int locked; /* 1 if lock acquired */
+ int count;
};
#ifndef arch_mcs_spin_lock_contended
Index: linux-2.6/kernel/locking/qspinlock.c
===================================================================
--- /dev/null
+++ linux-2.6/kernel/locking/qspinlock.c
@@ -0,0 +1,197 @@
+/*
+ * Queue spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under...
2015 Mar 16
0
[PATCH 1/9] qspinlock: A simple generic 4-byte queue spinlock
...o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -17,6 +17,7 @@
struct mcs_spinlock {
struct mcs_spinlock *next;
int locked; /* 1 if lock acquired */
+ int count; /* nesting count, see qspinlock.c */
};
#ifndef arch_mcs_spin_lock_contended
--- /dev/null
+++ b/kernel/locking/qspinlock.c
@@ -0,0 +1,209 @@
+/*
+ * Queue spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the L...
2016 Oct 25
0
[GIT PULL v2 4/5] processor.h: Remove cpu_relax_lowlatency users
...q))
- cpu_relax_lowlatency();
+ cpu_relax();
preempt_enable();
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index c835270..6a385aa 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -28,7 +28,7 @@ struct mcs_spinlock {
#define arch_mcs_spin_lock_contended(l) \
do { \
while (!(smp_load_acquire(l))) \
- cpu_relax_lowlatency(); \
+ cpu_relax(); \
} while (0)
#endif
@@ -108,7 +108,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
return;
/* Wait until the next pointer is set */...
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
...*node) { }
#define pv_enabled() false
@@ -399,7 +399,7 @@ void queue_spin_lock_slowpath(struct qsp
prev = decode_tail(old);
WRITE_ONCE(prev->next, node);
- pv_wait_node(node);
+ pv_wait_node(old, node);
arch_mcs_spin_lock_contended(&node->locked);
}
@@ -414,7 +414,7 @@ void queue_spin_lock_slowpath(struct qsp
* sequentiality; this is because the set_locked() function below
* does not imply a full barrier.
*/
- pv_wait_head(lock);
+ pv_wait_head(lock, node);
whi...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...node->locked = 0;
node->next = NULL;
+ pv_init_node(node);
/*
* We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -360,6 +401,7 @@ void queue_spin_lock_slowpath(struct qsp
prev = decode_tail(old);
WRITE_ONCE(prev->next, node);
+ pv_wait_node(node);
arch_mcs_spin_lock_contended(&node->locked);
}
@@ -374,6 +416,7 @@ void queue_spin_lock_slowpath(struct qsp
* sequentiality; this is because the set_locked() function below
* does not imply a full barrier.
*/
+ pv_wait_head(lock);
while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKE...
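The pv_init_node()/pv_wait_node()/pv_wait_head() calls added in the hunks above cost nothing on bare metal; the "#define pv_enabled() false" fragment in this excerpt suggests they collapse to empty stubs in a native build. A minimal sketch of that pattern, assuming the usual CONFIG_PARAVIRT_SPINLOCKS guard (the guard name and exact signatures are assumptions, not taken from these excerpts):

/* Sketch of the native-build stubs: empty inlines that the compiler
 * optimizes away, so the slowpath pays no penalty without paravirt. */
#ifndef CONFIG_PARAVIRT_SPINLOCKS
static __always_inline void pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void pv_wait_node(struct mcs_spinlock *node) { }
static __always_inline void pv_wait_head(struct qspinlock *lock) { }
#define pv_enabled()    false
#endif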
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...node->locked = 0;
node->next = NULL;
+ pv_init_node(node);
/*
* We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -360,6 +401,7 @@ void queue_spin_lock_slowpath(struct qsp
prev = decode_tail(old);
WRITE_ONCE(prev->next, node);
+ pv_wait_node(node);
arch_mcs_spin_lock_contended(&node->locked);
}
@@ -374,6 +416,7 @@ void queue_spin_lock_slowpath(struct qsp
* sequentiality; this is because the set_locked() function below
* does not imply a full barrier.
*/
+ pv_wait_head(lock);
while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKE...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...+ pv_init_node(node);
>
> /*
> * We touched a (possibly) cold cacheline in the per-cpu queue node;
> @@ -360,6 +401,7 @@ void queue_spin_lock_slowpath(struct qsp
> prev = decode_tail(old);
> WRITE_ONCE(prev->next, node);
>
> + pv_wait_node(node);
> arch_mcs_spin_lock_contended(&node->locked);
> }
>
> @@ -374,6 +416,7 @@ void queue_spin_lock_slowpath(struct qsp
> * sequentiality; this is because the set_locked() function below
> * does not imply a full barrier.
> */
> + pv_wait_head(lock);
> while ((val = smp_load_acquire...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...6 +366,7 @@ queue:
node += idx;
node->locked = 0;
node->next = NULL;
+ pv_init_node(node);
/*
* We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -350,6 +392,7 @@ queue:
prev = decode_tail(old);
WRITE_ONCE(prev->next, node);
+ pv_wait_node(node);
arch_mcs_spin_lock_contended(&node->locked);
}
@@ -365,6 +408,7 @@ queue:
* does not imply a full barrier.
*
*/
+ pv_wait_head(lock, node);
while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
cpu_relax();
@@ -397,6 +441,7 @@ queue:
cpu_relax();
arch_mcs_s...
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...6 +365,7 @@ queue:
node += idx;
node->locked = 0;
node->next = NULL;
+ pv_init_node(node);
/*
* We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -350,6 +391,7 @@ queue:
prev = decode_tail(old);
WRITE_ONCE(prev->next, node);
+ pv_wait_node(node);
arch_mcs_spin_lock_contended(&node->locked);
}
@@ -365,6 +407,7 @@ queue:
* does not imply a full barrier.
*
*/
+ pv_wait_head(lock, node);
while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
cpu_relax();
@@ -397,6 +440,7 @@ queue:
cpu_relax();
arch_mcs_s...
2014 May 30
19
[PATCH v11 00/16] qspinlock: a 4-byte queue spinlock with PV support
v10->v11:
- Use a simple test-and-set unfair lock to simplify the code,
but performance may suffer a bit for large guests with many CPUs.
- Take out Raghavendra KT's test results as the unfair lock changes
may render some of his results invalid.
- Add PV support without increasing the size of the core queue node
structure.
- Other minor changes to address some of the