Displaying 20 results from an estimated 99 matches for "slock".
2010 Nov 03
25
[PATCH 00/20] x86: ticket lock rewrite and paravirtualization
...checking waiters
x86/ticketlock: loosen ordering restraints on unlock
x86/ticketlock: prevent compiler reordering into locked region
x86/ticketlock: don't inline _spin_unlock when using paravirt
spinlocks
x86/ticketlock: clarify barrier in arch_spin_lock
x86/ticketlock: remove .slock
x86/ticketlocks: use overlapping read to eliminate mb()
x86/ticketlock: rename ticketpair to head_tail
arch/x86/Kconfig | 3 +
arch/x86/include/asm/paravirt.h | 30 +---
arch/x86/include/asm/paravirt_types.h | 8 +-
arch/x86/include/asm/spinlock.h | 2...
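As background for the series above: a ticket lock serves CPUs in FIFO order by handing out ticket numbers and advancing a "now serving" counter, and the kernel packs both counters into one word (the head_tail the last patch renames). A minimal self-contained C sketch of the idea, with illustrative names rather than the kernel's:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative ticket lock: the kernel packs head and tail into one
 * "head_tail" word so both can be read at once; they are kept as two
 * fields here for clarity. */
typedef struct {
    _Atomic uint16_t head;   /* ticket now being served */
    _Atomic uint16_t tail;   /* next ticket to hand out */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
    /* Take a ticket: atomically bump the tail, keep the old value. */
    uint16_t me = atomic_fetch_add_explicit(&lock->tail, 1,
                                            memory_order_relaxed);

    /* Spin until the "now serving" counter reaches our ticket. */
    while (atomic_load_explicit(&lock->head, memory_order_acquire) != me)
        ;   /* cpu_relax()/pause would go here */
}

static void ticket_unlock(ticket_lock_t *lock)
{
    /* Only the holder advances head, so a release increment hands the
     * lock to the next ticket in line. */
    atomic_fetch_add_explicit(&lock->head, 1, memory_order_release);
}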
2014 Feb 27
0
[PATCH RFC v5 7/8] pvqspinlock, x86: Add qspinlock para-virtualization support
...he qspinlock can work roughly the same as the
pvticketlock, using the same lock_spinning and unlock_lock hooks.
The x86-specific codepath can use bit 1 in the ->wait byte as "I have
halted, please kick me".
value = _QSPINLOCK_WAITING;
i = 0;
do
    cpu_relax();
while (ACCESS_ONCE(slock->lock) && i++ < BUSY_WAIT);
if (ACCESS_ONCE(slock->lock)) {
    value |= _QSPINLOCK_HALTED;
    xchg(&slock->wait, value >> 8);
    if (ACCESS_ONCE(slock->lock)) {
        ... call lock_spinning hook ...
    }
}
/*
 * Set the lock bit & clear the halted+waiting bits
 *...
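The unlock side is not shown in this excerpt. A rough self-contained sketch of what the scheme described above implies, with all names and bit values hypothetical (HALTED_BIT and pv_kick_waiter are illustrative, not from the patch), and with the stronger barrier the real code would need between the release store and the wait-byte read omitted for brevity:

#include <stdatomic.h>
#include <stdint.h>

#define HALTED_BIT 0x02   /* hypothetical: bit 1 of ->wait, "I have halted, please kick me" */

struct pv_qspinlock {
    _Atomic uint8_t lock;   /* lock byte           */
    _Atomic uint8_t wait;   /* waiting/halted byte */
};

/* Hypothetical kick hook: a real pv implementation would ask the
 * hypervisor to wake the halted vCPU here. */
static void pv_kick_waiter(struct pv_qspinlock *slock) { (void)slock; }

static void pv_queue_spin_unlock(struct pv_qspinlock *slock)
{
    /* Drop the lock byte with release semantics. */
    atomic_store_explicit(&slock->lock, 0, memory_order_release);

    /* If a waiter advertised that it halted, kick it awake. */
    if (atomic_load_explicit(&slock->wait, memory_order_acquire) & HALTED_BIT)
        pv_kick_waiter(slock);
}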
2020 Jul 06
0
[PATCH v3 3/6] powerpc: move spinlock implementation to simple_spinlock
...r */
+#ifdef __BIG_ENDIAN__
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
+#define LOCK_TOKEN 1
+#endif
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ smp_mb();
+ return !arch_spin_value_unlocked(*lock);
+}
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline unsigned long __arch_spin_try...
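The truncated helper above follows the "return the old value, 0 means the lock was taken" contract described in the comment. The real powerpc code does this with an lwarx/stwcx. loop; a portable sketch of the same contract using the GCC/Clang atomic builtins (names are illustrative):

static inline unsigned long spin_trylock_retold(unsigned int *slock,
                                                unsigned int token)
{
    unsigned int old = 0;

    /* Try to change 0 -> token; on failure 'old' is updated to the
     * value currently held in the lock word. */
    __atomic_compare_exchange_n(slock, &old, token, 0,
                                __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);

    return old;   /* 0: we got the lock; non-zero: current owner's token */
}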
2014 Feb 27
3
[PATCH RFC v5 7/8] pvqspinlock, x86: Add qspinlock para-virtualization support
On 02/27/2014 08:15 PM, Paolo Bonzini wrote:
[...]
>> But neither of the VCPUs being kicked here are halted -- they're either
>> running or runnable (descheduled by the hypervisor).
>
> /me actually looks at Waiman's code...
>
> Right, this is really different from pvticketlocks, where the *unlock*
> primitive wakes up a sleeping VCPU. It is more similar to PLE
2014 Feb 26
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
..._OPS
+#define smp_u8_store_release(p, v) \
+do { \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+/*
+ * As the qcode will be accessed as a 16-bit word, no offset is needed
+ */
+#define _QCODE_VAL_OFFSET 0
+
/*
* x86-64 specific queue spinlock union structure
+ * Besides the slock and lock fields, the other fields are only
+ * valid with less than 16K CPUs.
*/
union arch_qspinlock {
struct qspinlock slock;
- u8 lock; /* Lock bit */
+ struct {
+ u8 lock; /* Lock bit */
+ u8 wait; /* Waiting bit */
+ u16 qcode; /* Queue code */
+ };
+ u16 lock_wait; /* Lock and wa...
2014 Feb 27
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
..._OPS
+#define smp_u8_store_release(p, v) \
+do { \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+/*
+ * As the qcode will be accessed as a 16-bit word, no offset is needed
+ */
+#define _QCODE_VAL_OFFSET 0
+
/*
* x86-64 specific queue spinlock union structure
+ * Besides the slock and lock fields, the other fields are only
+ * valid with less than 16K CPUs.
*/
union arch_qspinlock {
struct qspinlock slock;
- u8 lock; /* Lock bit */
+ struct {
+ u8 lock; /* Lock bit */
+ u8 wait; /* Waiting bit */
+ u16 qcode; /* Queue code */
+ };
+ u16 lock_wait; /* Lock and wa...
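The point of the overlapping fields in the union above is that the lock and wait bytes can also be touched as a single 16-bit word, so two contending tasks can be sorted out without going through the queue. A self-contained sketch of the layout and of a fast path that claims an uncontended lock with one 16-bit compare-exchange (names and the little-endian byte order are assumptions, not taken from the patch):

#include <stdatomic.h>
#include <stdint.h>

/* Little-endian layout assumed: byte 0 = lock, byte 1 = wait. */
union qspin_word {
    struct {
        uint8_t lock;         /* lock bit    */
        uint8_t wait;         /* waiting bit */
        uint16_t qcode;       /* queue code  */
    };
    _Atomic uint16_t lock_wait;   /* lock + wait as one 16-bit word */
};

/* Fast path: if nobody holds the lock and nobody is waiting
 * (lock_wait == 0), take it with a single 16-bit compare-exchange. */
static int qspin_trylock_fast(union qspin_word *l)
{
    uint16_t expect = 0;
    return atomic_compare_exchange_strong_explicit(&l->lock_wait,
                                                   &expect, 1,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}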
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and
makes it the default for the Book3S-64 subarch.
This effort starts with the generic code so it's very simple but
still very performant. There are optimisations that can be made to
slowpaths, but I think it's better to attack those incrementally
if/when we find things, and try to add the improvements to generic
code as
2010 Nov 16
23
[PATCH 00/14] PV ticket locks without expanding spinlock
From: Jeremy Fitzhardinge <jeremy.fitzhardinge at citrix.com>
Hi all,
This is a revised version of the pvticket lock series.
The early part of the series is mostly unchanged: it converts the bulk
of the ticket lock code into C and makes the "small" and "large"
ticket code common. The only changes are the incorporation of various
review comments.
The latter part of
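The "small"/"large" ticket code the cover letter mentions differs only in the width of the ticket counters: a byte suffices for fewer than 256 CPUs, otherwise 16 bits are needed. A rough sketch of how one C implementation can cover both cases (type and macro names are illustrative, not the kernel's):

#include <stdint.h>

#ifndef NR_CPUS
#define NR_CPUS 64                /* illustrative default */
#endif

#if NR_CPUS < 256
typedef uint8_t  ticket_t;        /* "small" tickets fit in a byte */
typedef uint16_t ticketpair_t;
#else
typedef uint16_t ticket_t;        /* "large" tickets need 16 bits  */
typedef uint32_t ticketpair_t;
#endif

typedef struct {
    union {
        ticketpair_t head_tail;   /* both counters as one word     */
        struct {
            ticket_t head;        /* ticket now being served       */
            ticket_t tail;        /* next ticket to hand out       */
        };
    };
} arch_ticketlock_t;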
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and
Waiman (thank you), and trims off a couple of RFC and unrelated
patches.
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued
2020 Jul 24
8
[PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and more performance
results.
What I've found is I might have been measuring the worst load point for
the paravirt case, and by looking at a range of loads it's clear that
queued spinlocks are overall better even on PV, doubly so when you look
at the generally much improved worst case latencies.
I have defaulted it to N even though
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...arrier()
> +#define __rw_yield(x) barrier()
> +#define SHARED_PROCESSOR 0
> +#endif
> +
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> +#include <asm/qspinlock.h>
> +#else
>  static __always_inline int arch_spin_value_unlocked(arch_spinlock_t
> lock)
>  {
>  	return lock.slock == 0;
> @@ -106,18 +120,6 @@ static inline int
> arch_spin_trylock(arch_spinlock_t *lock)
>   * held.  Conveniently, we have a word in the paca that holds this
>   * value.
>   */
> -
> -#if defined(CONFIG_PPC_SPLPAR)
> -/* We only yield to the hypervisor if we are in shared...
2007 Apr 18
2
[PATCH] Fix CONFIG_PARAVIRT for 2.6.19-rc5-mm1
...6 @@
#define CLI_STRING "cli"
#define STI_STRING "sti"
#define CLI_STI_CLOBBERS
-#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */
/*
@@ -59,27 +58,26 @@ static inline void __raw_spin_lock_flags
{
asm volatile(
"\n1:\t"
- LOCK_PREFIX " ; decb %[slock]\n\t"
+ LOCK_PREFIX " ; decb %0\n\t"
"jns 5f\n"
"2:\t"
- "testl $0x200, %[flags]\n\t"
+ "testl $0x200, %1\n\t"
"jz 4f\n\t"
STI_STRING "\n"
"3:\t"
"rep;nop\n\t"
- "cmpb $0, %[slo...
2020 Jul 06
13
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued spinlocks and rwlocks
powerpc/pseries: implement paravirt