Displaying 20 results from an estimated 31 matches for "pv_queue_spin_unlock".
2015 Apr 02
3
[PATCH 8/9] qspinlock: Generic paravirt support
...value.
> So we need to have
> some kind of synchronization mechanism to let the lookup CPU know when is a
> good time to look up.
No, its all already ordered and working.
pv_wait_head():
pv_hash()
/* MB as per cmpxchg */
cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL);
VS
__pv_queue_spin_unlock():
if (xchg(&l->locked, 0) != _Q_SLOW_VAL)
return;
/* MB as per xchg */
pv_hash_find(lock);
2015 Apr 02
3
[PATCH 8/9] qspinlock: Generic paravirt support
...value.
> So we need to have
> some kind of synchronization mechanism to let the lookup CPU know when is a
> good time to look up.
No, its all already ordered and working.
pv_wait_head():
pv_hash()
/* MB as per cmpxchg */
cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL);
VS
__pv_queue_spin_unlock():
if (xchg(&l->locked, 0) != _Q_SLOW_VAL)
return;
/* MB as per xchg */
pv_hash_find(lock);
2014 Jun 12
2
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...+}
> +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
Ideally we'd make all this use alternatives or so, such that the actual
function remains short enough to actually inline;
static inline void queue_spin_unlock(struct qspinlock *lock)
{
pv_spinlock_alternative(
ACCESS_ONCE(*(u8 *)lock) = 0,
pv_queue_spin_unlock(lock));
}
Or however that trickery works.
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 836 bytes
Desc: not available
URL: <http://lists.linuxfoundation.org/pipermail/virtualization/attachments/20140612/a2...
2014 Jun 12
2
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...+}
> +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
Ideally we'd make all this use alternatives or so, such that the actual
function remains short enough to actually inline;
static inline void queue_spin_unlock(struct qspinlock *lock)
{
pv_spinlock_alternative(
ACCESS_ONCE(*(u8 *)lock) = 0,
pv_queue_spin_unlock(lock));
}
Or however that trickery works.
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 836 bytes
Desc: not available
URL: <http://lists.linuxfoundation.org/pipermail/virtualization/attachments/20140612/a2...
2015 Apr 02
0
[PATCH 8/9] qspinlock: Generic paravirt support
On Thu, Apr 02, 2015 at 07:20:57PM +0200, Peter Zijlstra wrote:
> pv_wait_head():
>
> pv_hash()
> /* MB as per cmpxchg */
> cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL);
>
> VS
>
> __pv_queue_spin_unlock():
>
> if (xchg(&l->locked, 0) != _Q_SLOW_VAL)
> return;
>
> /* MB as per xchg */
> pv_hash_find(lock);
>
>
Something like so.. compile tested only.
I took out the LFSR because that was likely over engineering from my
side :-)
--- a/kernel/locking/qspinlo...
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...efore setting _Q_SLOW_VAL
> >>+ *
> >>+ * [S] lp = lock [RmW] l = l->locked = 0
> >>+ * MB MB
> >>+ * [S] l->locked = _Q_SLOW_VAL [L] lp
> >>+ *
> >>+ * Matches the cmpxchg() in pv_queue_spin_unlock().
> >>+ */
> >>+ if (!slow_set &&
> >>+ !cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
> >>+ /*
> >>+ * The lock is free and _Q_SLOW_VAL has never been
> >>+ * set. Need to clear the hash bucket before getting...
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...efore setting _Q_SLOW_VAL
> >>+ *
> >>+ * [S] lp = lock [RmW] l = l->locked = 0
> >>+ * MB MB
> >>+ * [S] l->locked = _Q_SLOW_VAL [L] lp
> >>+ *
> >>+ * Matches the cmpxchg() in pv_queue_spin_unlock().
> >>+ */
> >>+ if (!slow_set &&
> >>+ !cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
> >>+ /*
> >>+ * The lock is free and _Q_SLOW_VAL has never been
> >>+ * set. Need to clear the hash bucket before getting...
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
...MB
+ * [L] lock->tail [L] tail->head
+ */
+ new_tail = pv_decode_tail(atomic_read(&lock->val));
+ } while (tail != new_tail);
+}
/*
* Wait for l->locked to become clear; halt the vcpu after a short spin.
* __pv_queue_spin_unlock() will wake us.
*/
-static void pv_wait_head(struct qspinlock *lock)
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
{
struct __qspinlock *l = (void *)lock;
int loop;
@@ -121,28 +179,24 @@ static void pv_wait_head(struct qspinloc
for (;;) {...
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
...MB
+ * [L] lock->tail [L] tail->head
+ */
+ new_tail = pv_decode_tail(atomic_read(&lock->val));
+ } while (tail != new_tail);
+}
/*
* Wait for l->locked to become clear; halt the vcpu after a short spin.
* __pv_queue_spin_unlock() will wake us.
*/
-static void pv_wait_head(struct qspinlock *lock)
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
{
struct __qspinlock *l = (void *)lock;
int loop;
@@ -121,28 +179,24 @@ static void pv_wait_head(struct qspinloc
for (;;) {...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...deletion(-)
>
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -118,6 +118,9 @@ static __always_inline bool virt_queue_s
> }
> #endif
>
> +extern void __pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +extern void __pv_queue_spin_unlock(struct qspinlock *lock);
> +
> /*
> * Initializier
> */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -18,6 +18,9 @@
> * Authors: Waiman Long <waiman.long at hp.com>
> * Peter Zijlstra <peterz at infradead.org>...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...deletion(-)
>
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -118,6 +118,9 @@ static __always_inline bool virt_queue_s
> }
> #endif
>
> +extern void __pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +extern void __pv_queue_spin_unlock(struct qspinlock *lock);
> +
> /*
> * Initializier
> */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -18,6 +18,9 @@
> * Authors: Waiman Long <waiman.long at hp.com>
> * Peter Zijlstra <peterz at infradead.org>...
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
Implement the paravirt qspinlock for x86-kvm.
We use the regular paravirt call patching to switch between:
native_queue_spin_lock_slowpath() __pv_queue_spin_lock_slowpath()
native_queue_spin_unlock() __pv_queue_spin_unlock()
We use a callee saved call for the unlock function which reduces the
i-cache footprint and allows 'inlining' of SPIN_UNLOCK functions
again.
We further optimize the unlock path by patching the direct call with a
"movb $0,%arg1" if we are indeed using the native unlock code. Th...
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
Implement the paravirt qspinlock for x86-kvm.
We use the regular paravirt call patching to switch between:
native_queue_spin_lock_slowpath() __pv_queue_spin_lock_slowpath()
native_queue_spin_unlock() __pv_queue_spin_unlock()
We use a callee saved call for the unlock function which reduces the
i-cache footprint and allows 'inlining' of SPIN_UNLOCK functions
again.
We further optimize the unlock path by patching the direct call with a
"movb $0,%arg1" if we are indeed using the native unlock code. Th...
2014 Jun 12
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...INLOCKS */
> Ideally we'd make all this use alternatives or so, such that the actual
> function remains short enough to actually inline;
>
> static inline void queue_spin_unlock(struct qspinlock *lock)
> {
> pv_spinlock_alternative(
> ACCESS_ONCE(*(u8 *)lock) = 0,
> pv_queue_spin_unlock(lock));
> }
>
> Or however that trickery works.
I think the paravirt version of the unlock function is already short
enough. In addition, whenever PARAVIRT_SPINLOCKS is enabled, the
inlining of the unlock function is disabled so that the jump label
paravirt_spinlocks_enabled won't...
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...his relies on the architecture to provide two paravirt hypercalls:
> + *
> + * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
> + * pv_kick(cpu) -- wakes a suspended vcpu
> + *
> + * Using these we implement __pv_queue_spin_lock_slowpath() and
> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
> + * native_queue_spin_unlock().
> + */
> +
> +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
> +
> +enum vcpu_state {
> + vcpu_running = 0,
> + vcpu_halted,
> +};
> +
> +struct pv_node {
> + struct mcs_spin...
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...his relies on the architecture to provide two paravirt hypercalls:
> + *
> + * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
> + * pv_kick(cpu) -- wakes a suspended vcpu
> + *
> + * Using these we implement __pv_queue_spin_lock_slowpath() and
> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
> + * native_queue_spin_unlock().
> + */
> +
> +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
> +
> +enum vcpu_state {
> + vcpu_running = 0,
> + vcpu_halted,
> +};
> +
> +struct pv_node {
> + struct mcs_spin...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...+++++
3 files changed, 248 insertions(+), 1 deletion(-)
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -118,6 +118,9 @@ static __always_inline bool virt_queue_s
}
#endif
+extern void __pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queue_spin_unlock(struct qspinlock *lock);
+
/*
* Initializier
*/
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -18,6 +18,9 @@
* Authors: Waiman Long <waiman.long at hp.com>
* Peter Zijlstra <peterz at infradead.org>
*/
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
#inclu...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...+++++
3 files changed, 248 insertions(+), 1 deletion(-)
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -118,6 +118,9 @@ static __always_inline bool virt_queue_s
}
#endif
+extern void __pv_queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queue_spin_unlock(struct qspinlock *lock);
+
/*
* Initializier
*/
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -18,6 +18,9 @@
* Authors: Waiman Long <waiman.long at hp.com>
* Peter Zijlstra <peterz at infradead.org>
*/
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
#inclu...
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...ecture to provide two paravirt hypercalls:
>> + *
>> + * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
>> + * pv_kick(cpu) -- wakes a suspended vcpu
>> + *
>> + * Using these we implement __pv_queue_spin_lock_slowpath() and
>> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
>> + * native_queue_spin_unlock().
>> + */
>> +
>> +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
>> +
>> +enum vcpu_state {
>> + vcpu_running = 0,
>> + vcpu_halted,
>> +};
>> +
>&...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...* of spinning them.
+ *
+ * This relies on the architecture to provide two paravirt hypercalls:
+ *
+ * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
+ * pv_kick(cpu) -- wakes a suspended vcpu
+ *
+ * Using these we implement __pv_queue_spin_lock_slowpath() and
+ * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
+ * native_queue_spin_unlock().
+ */
+
+#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET)
+
+enum vcpu_state {
+ vcpu_running = 0,
+ vcpu_halted,
+};
+
+struct pv_node {
+ struct mcs_spinlock mcs;
+ struct mcs_spinlock __res[3];
+
+ int cpu;
+ u...