Displaying 11 results from an estimated 11 matches for "native_spin_unlock".
2014 Oct 29
1
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...fter finding the queue head to avoid a race
+ * condition between the queue head and the lock holder.
+ */
+void queue_spin_unlock_slowpath(struct qspinlock *lock)
+{
+ struct mcs_spinlock *node = pv_get_qhead(lock);
+
+ /*
+ * Found the queue head, now release the lock before waking it up
+ */
+ native_spin_unlock(lock);
+ pv_kick_node(node);
+}
+EXPORT_SYMBOL(queue_spin_unlock_slowpath);
+
+#endif /* _ASM_X86_PVQSPINLOCK_H */
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 05a77fe..28daa2b 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlo...
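
The snippet above releases the lock with native_spin_unlock() before kicking the queue-head vCPU with pv_kick_node(), so the woken waiter never finds the lock still held. The following userspace C11 sketch only mirrors that unlock-then-kick ordering; the names fake_qspinlock, fake_waiter and fake_unlock_slowpath are illustrative stand-ins, not part of the patch.

/*
 * Userspace sketch (not the kernel code) of the ordering in the snippet:
 * release the lock word first, then wake ("kick") the queued waiter.
 */
#include <stdatomic.h>
#include <pthread.h>

struct fake_qspinlock {
    atomic_uchar locked;              /* stands in for the low lock byte */
};

struct fake_waiter {                  /* stands in for the MCS queue head */
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             kicked;
};

static void fake_unlock_slowpath(struct fake_qspinlock *lock,
                                 struct fake_waiter *head)
{
    /* Release the lock word first, as native_spin_unlock() does... */
    atomic_store_explicit(&lock->locked, 0, memory_order_release);

    /* ...then wake the queue head, mirroring pv_kick_node(). */
    pthread_mutex_lock(&head->lock);
    head->kicked = 1;
    pthread_cond_signal(&head->cond);
    pthread_mutex_unlock(&head->lock);
}
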
2014 Oct 16
2
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
.../x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 05a77fe..e267943 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -5,21 +5,59 @@
#include <asm-generic/qspinlock_types.h>
#ifndef CONFIG_X86_PPRO_FENCE
+static __always_inline void native_spin_unlock(struct qspinlock *lock)
+{
+ barrier();
+ ACCESS_ONCE(*(u8 *)lock) = 0;
+}
+#else
+static __always_inline void native_spin_unlock(struct qspinlock *lock)
+{
+ atomic_dec(&lock->val);
+}
+#endif /* !CONFIG_X86_PPRO_FENCE */
#define queue_spin_unlock queue_spin_unlock
+#ifdef CONFIG_PARAVIR...
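
The two native_spin_unlock() variants above differ only in how the lock byte is cleared: a plain byte store behind a compiler barrier when PPRO-style store reordering is not a concern, and a full atomic operation on the 32-bit lock word otherwise. A minimal userspace C11 analogue, assuming a little-endian lock word whose low byte is the locked flag; identifiers such as sketch_qspinlock are placeholders, not the kernel API.

#include <stdatomic.h>

struct sketch_qspinlock {
    atomic_uint val;                  /* low 8 bits act as the locked byte */
};

/* Fast variant: a single release store of the locked byte, analogous to
 * "ACCESS_ONCE(*(u8 *)lock) = 0".  The cast mirrors the kernel's u8 cast
 * and assumes a little-endian lock word. */
static inline void sketch_unlock_bytestore(struct sketch_qspinlock *lock)
{
    atomic_store_explicit((atomic_uchar *)&lock->val, 0,
                          memory_order_release);
}

/* Conservative variant: an atomic read-modify-write on the whole word,
 * analogous to the atomic_dec(&lock->val) fallback under PPRO_FENCE. */
static inline void sketch_unlock_atomic(struct sketch_qspinlock *lock)
{
    atomic_fetch_sub_explicit(&lock->val, 1u, memory_order_release);
}
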
2014 Nov 03
0
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...l site that will
> + * have to be patched.
Again, if you hard-rely on this not being inlined, make it a build
failure, not a comment.
> */
> static inline void queue_spin_unlock(struct qspinlock *lock)
> {
> barrier();
> + if (!static_key_false(&paravirt_spinlocks_enabled)) {
> + native_spin_unlock(lock);
> + return;
> + }
>
> + /*
> + * Need to atomically clear the lock byte to avoid racing with
> + * the queue head waiter trying to set _QLOCK_LOCKED_SLOWPATH.
> + */
> + if (unlikely(cmpxchg((u8 *)lock, _Q_LOCKED_VAL, 0) != _Q_LOCKED_VAL))
> + queue_spin_unlock_...
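
The unlock path being reviewed here first checks the paravirt static key: on bare metal it falls back to the plain byte store, while a PV guest must clear the lock byte with a cmpxchg so that a halted queue head which has already overwritten the byte with the slow-path marker is not missed. Below is a hedged C11 sketch of that logic; SKETCH_LOCKED, SKETCH_SLOWPATH and sketch_unlock_slowpath() are placeholders standing in for _Q_LOCKED_VAL, the slow-path marker and queue_spin_unlock_slowpath(), not the patch's actual definitions.

#include <stdatomic.h>
#include <stdbool.h>

#define SKETCH_LOCKED    1u   /* lock byte value for a plainly held lock  */
#define SKETCH_SLOWPATH  3u   /* value a halted queue head may have set   */

extern void sketch_unlock_slowpath(atomic_uchar *lock_byte);

static inline void sketch_queue_spin_unlock(atomic_uchar *lock_byte,
                                            bool pv_enabled)
{
    unsigned char expected = SKETCH_LOCKED;

    if (!pv_enabled) {
        /* Bare metal: a plain release store of the lock byte suffices. */
        atomic_store_explicit(lock_byte, 0, memory_order_release);
        return;
    }

    /*
     * PV guest: clear the byte atomically.  If a halted queue head has
     * already changed it to SKETCH_SLOWPATH, the cmpxchg fails and the
     * slow path must find and kick that waiter.
     */
    if (!atomic_compare_exchange_strong_explicit(lock_byte, &expected, 0,
                                                 memory_order_release,
                                                 memory_order_relaxed))
        sketch_unlock_slowpath(lock_byte);
}
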
2014 Oct 29
15
[PATCH v13 00/11] qspinlock: a 4-byte queue spinlock with PV support
v12->v13:
- Change patch 9 to generate separate versions of the
queue_spin_lock_slowpath functions for bare metal and PV guest. This
reduces the performance impact of the PV code on bare metal systems.
v11->v12:
- Based on PeterZ's version of the qspinlock patch
(https://lkml.org/lkml/2014/6/15/63).
- Incorporated many of the review comments from Konrad Wilk and
Paolo
2014 Oct 16
15
[PATCH v12 00/11] qspinlock: a 4-byte queue spinlock with PV support
v11->v12:
- Based on PeterZ's version of the qspinlock patch
(https://lkml.org/lkml/2014/6/15/63).
- Incorporated many of the review comments from Konrad Wilk and
Paolo Bonzini.
- The pvqspinlock code is largely from my previous version with
PeterZ's way of going from queue tail to head and his idea of
using callee saved calls to KVM and XEN codes.
v10->v11:
- Use a
2015 Jan 20
13
[PATCH v14 00/11] qspinlock: a 4-byte queue spinlock with PV support
v13->v14:
- Patches 1 & 2: Add queue_spin_unlock_wait() to accommodate commit
78bff1c86 from Oleg Nesterov.
- Fix the system hang problem when using PV qspinlock in an
over-committed guest due to a race condition in the
pv_set_head_in_tail() function.
- Increase the MAYHALT_THRESHOLD from 10 to 1024.
- Change kick_cpu into a regular function pointer instead of a