Displaying 20 results from an estimated 73 matches for "queue_spin_lock".
2014 Mar 13
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> +static inline void arch_spin_lock(struct qspinlock *lock)
> +{
> +	if (static_key_false(&paravirt_unfairlocks_enabled))
> +		queue_spin_lock_unfair(lock);
> +	else
> +		queue_spin_lock(lock);
> +}

So I would have expected something like:

	if (static_key_false(&paravirt_spinlock)) {
		while (!queue_spin_trylock(lock))
			cpu_relax();
		return;
	}
At the top of queue_spin_lock_slowpath().
> +static inline int arch_spin...
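Peter's point is that the unfair variant does not need its own
arch_spin_lock() entry; a test-and-set loop at the top of the slowpath
covers it. A sketch of that placement, assuming the series' qspinlock
API (the static key name paravirt_spinlock comes from his reply, not
from the posted patch):

void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	/*
	 * In a PV guest, bypass the fair MCS queue: a queued waiter
	 * whose vCPU is preempted stalls every waiter behind it, so
	 * just spin on trylock until the lock is free.
	 */
	if (static_key_false(&paravirt_spinlock)) {
		while (!queue_spin_trylock(lock))
			cpu_relax();
		return;
	}

	/* ... the fair queued slowpath continues here ... */
}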
2014 Oct 29
1
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...RO_FENCE */
#define queue_spin_unlock queue_spin_unlock
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+/*
+ * The lock byte can have a value of _Q_LOCKED_SLOWPATH to indicate
+ * that it needs to go through the slowpath to do the unlocking.
+ */
+#define _Q_LOCKED_SLOWPATH (_Q_LOCKED_VAL | 2)
+
+extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/*
+ * Paravirtualized versions of queue_spin_lock and queue_spin_unlock
+ */
+
+#define queue_spin_lock queue_spin_lock
+/**
+ * queue_spin_lock - acquire a queue spinlock
+ * @...
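What the _Q_LOCKED_SLOWPATH marker buys, as a sketch pieced together
from the comment above (pv_queue_spin_unlock_slowpath is a hypothetical
helper name; the posted patch may structure this differently):

static inline void queue_spin_unlock(struct qspinlock *lock)
{
	u8 *lbyte = (u8 *)lock;	/* lock byte; assumes little-endian */

	/*
	 * A waiter rewrites the lock byte from _Q_LOCKED_VAL to
	 * _Q_LOCKED_SLOWPATH before halting its vCPU.  If the cmpxchg
	 * succeeds, nobody is halted and the lock is released on the
	 * fast path; otherwise the unlocker must take the slowpath to
	 * kick the halted vCPU.
	 */
	if (likely(cmpxchg(lbyte, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
		return;
	pv_queue_spin_unlock_slowpath(lock);	/* hypothetical */
}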
2014 Jun 15
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock(struct qspinlock *lock)
+{
+	if (!atomic_read(&lock->val) &&
+	    (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+		return 1;
+	return 0;
+}
+
+extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queue_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock(struct qspinlock *lock)
+{
+	u32 val;
+
+	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	if...
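The excerpt cuts the function off; judging from later postings of the
series, the fastpath presumably completes as follows (a sketch, not the
verbatim hunk):

static __always_inline void queue_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* A single cmpxchg 0 -> _Q_LOCKED_VAL takes an uncontended lock. */
	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	/* Contended: hand the observed value to the queued slowpath. */
	queue_spin_lock_slowpath(lock, val);
}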
2014 Nov 03
0
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...Each CPU waiting for the lock will spin until it
> + * reaches a threshold. When that happens, it will put itself to a halt state
> + * so that the hypervisor can reuse the CPU cycles in some other guests as
> + * well as returning other hold-up CPUs faster.
> +/**
> + * queue_spin_lock - acquire a queue spinlock
> + * @lock: Pointer to queue spinlock structure
> + *
> + * N.B. INLINE_SPIN_LOCK should not be enabled when PARAVIRT_SPINLOCK is on.
One should write a compile time fail for that, not a comment.
> + */
> +static __always_inline void queue_spin_lock(stru...
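One way to turn that comment into the compile-time failure Konrad asks
for (a sketch using the mainline config spellings, which the patch
itself may not match):

#if defined(CONFIG_INLINE_SPIN_LOCK) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#error "INLINE_SPIN_LOCK must not be enabled with PARAVIRT_SPINLOCKS"
#endif

The same constraint could instead live in Kconfig, e.g. by making
INLINE_SPIN_LOCK depend on !PARAVIRT_SPINLOCKS.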
2014 Jun 17
5
[PATCH 03/11] qspinlock: Add pending bit
...nother cmpxchg in case
the unlocker has just unlocked itself.
So something like:
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index e8a7ae8..29cc9c7 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queue_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val, new;
 
 	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
+
+	/* One more attempt - but if we fail mark it as pe...
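A reconstruction of where the truncated hunk is heading (not the
verbatim posting; _Q_PENDING_VAL is the pending-bit value this patch
introduces, and the sketch ignores the tail field that the real
slowpath must honour):

	/* One more attempt - but if we fail, mark it as pending. */
	if (val == _Q_LOCKED_VAL) {
		new = _Q_LOCKED_VAL | _Q_PENDING_VAL;
		if (atomic_cmpxchg(&lock->val, val, new) == val) {
			/* Pending bit claimed: wait for the unlock... */
			while (atomic_read(&lock->val) & _Q_LOCKED_VAL)
				cpu_relax();
			/* ...then clear pending and set locked at once. */
			atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL,
				   &lock->val);
			return;
		}
	}
	queue_spin_lock_slowpath(lock, val);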
2015 Mar 16
0
[PATCH 1/9] qspinlock: A simple generic 4-byte queue spinlock
...spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock(struct qspinlock *lock)
+{
+	if (!atomic_read(&lock->val) &&
+	    (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+		return 1;
+	return 0;
+}
+
+extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queue_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock(struct qspinlock *lock)
+{
+	u32 val;
+
+	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	if...
2014 Jun 17
3
[PATCH 03/11] qspinlock: Add pending bit
...> >So something like:
> >
> >diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> >index e8a7ae8..29cc9c7 100644
> >--- a/include/asm-generic/qspinlock.h
> >+++ b/include/asm-generic/qspinlock.h
> >@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > */
> > static __always_inline void queue_spin_lock(struct qspinlock *lock)
> > {
> >-	u32 val;
> >+	u32 val, new;
> >
> > 	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
> > 	if (likely(...
2014 Mar 14
4
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...e:
> On 03/13/2014 11:15 AM, Peter Zijlstra wrote:
> >On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> >>+static inline void arch_spin_lock(struct qspinlock *lock)
> >>+{
> >>+	if (static_key_false(&paravirt_unfairlocks_enabled))
> >>+		queue_spin_lock_unfair(lock);
> >>+	else
> >>+		queue_spin_lock(lock);
> >>+}
> >So I would have expected something like:
> >
> >	if (static_key_false(&paravirt_spinlock)) {
> >		while (!queue_spin_trylock(lock))
> >			cpu_relax();
> >		return;
>...
2014 Mar 17
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...jlstra wrote:
> >>>On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> >>>>+static inline void arch_spin_lock(struct qspinlock *lock)
> >>>>+{
> >>>>+	if (static_key_false(&paravirt_unfairlocks_enabled))
> >>>>+		queue_spin_lock_unfair(lock);
> >>>>+	else
> >>>>+		queue_spin_lock(lock);
> >>>>+}
> >>>So I would have expected something like:
> >>>
> >>>	if (static_key_false(&paravirt_spinlock)) {
> >>>		while (!queue_spin_tryloc...
2014 Apr 02
0
[PATCH v8 01/10] qspinlock: A generic 4-byte queue spinlock implementation
...ode's i_lock.
With the patch, the perf profile at 1500 users became:
  26.82%  swapper  [kernel.kallsyms]  [k] cpu_idle_loop
   4.66%  reaim    [kernel.kallsyms]  [k] mutex_spin_on_owner
   3.97%  reaim    [kernel.kallsyms]  [k] update_cfs_rq_blocked_load
   2.40%  reaim    [kernel.kallsyms]  [k] queue_spin_lock_slowpath
           |--88.31%-- _raw_spin_lock
           |          |--36.02%-- inode_sb_list_add
           |          |--35.09%-- evict
           |          |--16.89%-- xlog_cil_insert_items
           |          |--6.30%-- try_to_wake_up
           |          |--2.20%-- _xfs_...
2014 Feb 26
0
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
...ode's i_lock.
With the patch, the perf profile at 1500 users became:
  26.82%  swapper  [kernel.kallsyms]  [k] cpu_idle_loop
   4.66%  reaim    [kernel.kallsyms]  [k] mutex_spin_on_owner
   3.97%  reaim    [kernel.kallsyms]  [k] update_cfs_rq_blocked_load
   2.40%  reaim    [kernel.kallsyms]  [k] queue_spin_lock_slowpath
           |--88.31%-- _raw_spin_lock
           |          |--36.02%-- inode_sb_list_add
           |          |--35.09%-- evict
           |          |--16.89%-- xlog_cil_insert_items
           |          |--6.30%-- try_to_wake_up
           |          |--2.20%-- _xfs_...
2014 Feb 27
0
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
...ode's i_lock.
With the patch, the perf profile at 1500 users became:
  26.82%  swapper  [kernel.kallsyms]  [k] cpu_idle_loop
   4.66%  reaim    [kernel.kallsyms]  [k] mutex_spin_on_owner
   3.97%  reaim    [kernel.kallsyms]  [k] update_cfs_rq_blocked_load
   2.40%  reaim    [kernel.kallsyms]  [k] queue_spin_lock_slowpath
           |--88.31%-- _raw_spin_lock
           |          |--36.02%-- inode_sb_list_add
           |          |--35.09%-- evict
           |          |--16.89%-- xlog_cil_insert_items
           |          |--6.30%-- try_to_wake_up
           |          |--2.20%-- _xfs_...
2014 Oct 29
15
[PATCH v13 00/11] qspinlock: a 4-byte queue spinlock with PV support
v12->v13:
- Change patch 9 to generate separate versions of the
queue_spin_lock_slowpath functions for bare metal and PV guest. This
reduces the performance impact of the PV code on bare metal systems.
v11->v12:
- Based on PeterZ's version of the qspinlock patch
(https://lkml.org/lkml/2014/6/15/63).
- Incorporated many of the review comments from Konrad Wilk an...