Displaying 20 results from an estimated 90 matches for "lock_wait".
2014 Mar 12
2
[PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
...; + arch_mutex_cpu_relax();
> +
> + /*
> + * Set the lock bit & clear the waiting bit simultaneously
> + * It is assumed that there is no lock stealing with this
> + * quick path active.
> + *
> + * A direct memory store of _QSPINLOCK_LOCKED into the
> + * lock_wait field causes a problem with the lockref code, e.g.
> + * ACCESS_ONCE(qlock->lock_wait) = _QSPINLOCK_LOCKED;
> + *
> + * It is not currently clear why this happens. A workaround
> + * is to use an atomic instruction to store the new value.
> + */
> + {
> + u16 lw =...
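A rough sketch of the workaround the quoted comment describes: replace the plain byte store with an atomic exchange on the combined 16-bit lock_wait field, so the lock bit is set and the waiting bit cleared in one operation. The constants and the lock_wait overlay come from the quoted hunks; everything else here is illustrative, not the actual patch.

    union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
    u16 lw;

    /*
     * Atomically publish "locked, not waiting" in both bytes at once
     * instead of ACCESS_ONCE(qlock->lock_wait) = _QSPINLOCK_LOCKED,
     * which the quoted comment says interacts badly with lockref.
     */
    lw = xchg(&qlock->lock_wait, _QSPINLOCK_LOCKED);
    /* the waiting bit we set earlier should still have been there */
    WARN_ON_ONCE(!(lw & _QSPINLOCK_WAITING));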
2014 Mar 12
0
[PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
...acbe155..7f3129c 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -21,9 +21,10 @@ union arch_qspinlock {
struct qspinlock slock;
struct {
u8 lock; /* Lock bit */
- u8 reserved;
+ u8 wait; /* Waiting bit */
u16 qcode; /* Queue code */
};
+ u16 lock_wait; /* Lock and wait bits */
u32 qlcode; /* Complete lock word */
};
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 52d3580..0030fad 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -112,6 +112,8 @@ static DEFINE_PER_CPU_ALIGNED(struct qnod...
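To see how the quoted union lets the same 32-bit word be addressed as individual bytes, as a 16-bit lock_wait pair, or as the whole lock word, here is a small stand-alone user-space approximation (illustrative only; the real definition is the kernel one quoted above, and the printed values assume a little-endian layout as on x86):

    #include <stdint.h>
    #include <stdio.h>

    union arch_qspinlock_demo {
        struct {
            uint8_t  lock;      /* lock bit lives in byte 0 */
            uint8_t  wait;      /* waiting bit in byte 1 */
            uint16_t qcode;     /* queue code in bytes 2-3 */
        };
        uint16_t lock_wait;     /* lock + wait bytes viewed as one halfword */
        uint32_t qlcode;        /* the complete 32-bit lock word */
    };

    int main(void)
    {
        union arch_qspinlock_demo l = { .qlcode = 0 };

        l.lock = 1;
        l.wait = 1;
        /* both byte stores are visible through the wider views */
        printf("lock_wait = 0x%04x, qlcode = 0x%08x\n",
               (unsigned)l.lock_wait, (unsigned)l.qlcode);
        return 0;
    }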
2015 Feb 16
1
[Xen-devel] [PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...ts);
> + u8 old = READ_ONCE(zero_stats);
> if (unlikely(old)) {
> ret = cmpxchg(&zero_stats, old, 0);
> /* This ensures only one fellow resets the stat */
> @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
> struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
> int cpu = smp_processor_id();
> u64 start;
> + __ticket_t head;
> unsigned long flags;
>
> /* If kicker interrupts not initialized yet, just spin */
> @@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinloc...
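The READ_ONCE()/cmpxchg() pair in the hunk above is the usual "only one CPU resets" idiom: every CPU may read a non-zero flag, but only the one whose cmpxchg() still observes the value it read gets to clear the statistics. A minimal sketch of that pattern (zero_stats comes from the quoted hunk; reset_stats() is a hypothetical stand-in for the elided statistics-clearing code):

    static u8 zero_stats;

    static inline void check_zero(void)
    {
        u8 old = READ_ONCE(zero_stats);

        if (unlikely(old)) {
            /* only the winner of the cmpxchg clears the stats */
            if (cmpxchg(&zero_stats, old, 0) == old)
                reset_stats();
        }
    }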
2014 Feb 26
2
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
..._qspinlock *qlock = (union arch_qspinlock *)lock;
> + u16 old;
> +
> + /*
> + * Fall into the quick spinning code path only if no one is waiting
> + * or the lock is available.
> + */
> + if (unlikely((qsval != _QSPINLOCK_LOCKED) &&
> + (qsval != _QSPINLOCK_WAITING)))
> + return 0;
> +
> + old = xchg(&qlock->lock_wait, _QSPINLOCK_WAITING|_QSPINLOCK_LOCKED);
> +
> + if (old == 0) {
> + /*
> + * Got the lock, can clear the waiting bit now
> + */
> + smp_u8_store_release(&qlock->wait, 0);
So we just did an ato...
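For reference, the two constants used above are bits of the 16-bit lock_wait halfword, one per byte, which is why the xchg() publishes "locked and waiting" in a single operation and the later byte-sized store drops only the waiting half. A sketch of the encoding this implies (the numeric values are assumed from the byte layout, not copied from the patch):

    #define _QSPINLOCK_LOCKED   0x001U  /* bit 0 -> the "lock" byte */
    #define _QSPINLOCK_WAITING  0x100U  /* bit 8 -> the "wait" byte */

    /*
     * After xchg(&qlock->lock_wait, _QSPINLOCK_WAITING|_QSPINLOCK_LOCKED):
     *     qlock->lock == 1, qlock->wait == 1
     * After smp_u8_store_release(&qlock->wait, 0):
     *     qlock->lock == 1, qlock->wait == 0   (only the lock bit is left)
     */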
2014 Mar 13
0
[PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
...g wrote:
> >+ /*
> >+ * Set the lock bit & clear the waiting bit simultaneously
> >+ * It is assumed that there is no lock stealing with this
> >+ * quick path active.
> >+ *
> >+ * A direct memory store of _QSPINLOCK_LOCKED into the
> >+ * lock_wait field causes a problem with the lockref code, e.g.
> >+ * ACCESS_ONCE(qlock->lock_wait) = _QSPINLOCK_LOCKED;
> >+ *
> >+ * It is not currently clear why this happens. A workaround
> >+ * is to use an atomic instruction to store the new value.
> >+ */
> >...
2014 Feb 26
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...ture
+ * Besides the slock and lock fields, the other fields are only
+ * valid with less than 16K CPUs.
*/
union arch_qspinlock {
struct qspinlock slock;
- u8 lock; /* Lock bit */
+ struct {
+ u8 lock; /* Lock bit */
+ u8 wait; /* Waiting bit */
+ u16 qcode; /* Queue code */
+ };
+ u16 lock_wait; /* Lock and wait bits */
};
#define queue_spin_unlock queue_spin_unlock
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index df981d0..3a02a9e 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -48,7 +48...
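The "less than 16K CPUs" remark follows from the field sizes: the queue code is a 16-bit value, and if two of those bits are reserved for the per-CPU queue-node index (as elsewhere in this series), only 14 bits are left to identify a CPU, i.e. at most 2^14 = 16384 of them. A sketch of that arithmetic (the exact bit split is an assumption for illustration, not taken from the quoted hunk):

    /* qcode = ((cpu + 1) << 2) | idx : 2 bits of node index, 14 bits of cpu+1 */
    #define QCODE_IDX_BITS  2
    #define QCODE_CPU_BITS  (16 - QCODE_IDX_BITS)   /* 14 */
    #define QCODE_MAX_CPUS  (1U << QCODE_CPU_BITS)  /* 16K */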
2014 Feb 27
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...ture
+ * Besides the slock and lock fields, the other fields are only
+ * valid with less than 16K CPUs.
*/
union arch_qspinlock {
struct qspinlock slock;
- u8 lock; /* Lock bit */
+ struct {
+ u8 lock; /* Lock bit */
+ u8 wait; /* Waiting bit */
+ u16 qcode; /* Queue code */
+ };
+ u16 lock_wait; /* Lock and wait bits */
};
#define queue_spin_unlock queue_spin_unlock
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index df981d0..3a02a9e 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -48,7 +48...
2014 Feb 27
0
[PATCH RFC v5 7/8] pvqspinlock, x86: Add qspinlock para-virtualization support
...ut this may result in some ping ponging?
Actually, I think the qspinlock can work roughly the same as the
pvticketlock, using the same lock_spinning and unlock_lock hooks.
The x86-specific codepath can use bit 1 in the ->wait byte as "I have
halted, please kick me".
    value = _QSPINLOCK_WAITING;
    i = 0;
    do
        cpu_relax();
    while (ACCESS_ONCE(slock->lock) && i++ < BUSY_WAIT);
    if (ACCESS_ONCE(slock->lock)) {
        value |= _QSPINLOCK_HALTED;
        xchg(&slock->wait, value >> 8);
        if (ACCESS_ONCE(slock->lock)) {
            ... call lock_spinning hook ...
        }
    }
    /*
     *...
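The unlock half of that scheme would be symmetric: drop the lock byte, then look at the wait byte and kick only if the waiter has advertised that it halted. A hypothetical sketch of that side (only the ->lock/->wait bytes and the _QSPINLOCK_HALTED idea come from the text above; unlock_kick() stands in for whatever PV unlock hook is wired up):

    ACCESS_ONCE(slock->lock) = 0;       /* release the lock byte */
    smp_mb();                           /* order the release against reading ->wait */
    if (ACCESS_ONCE(slock->wait) & (_QSPINLOCK_HALTED >> 8))
        unlock_kick(slock);             /* wake the halted vCPU */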
2014 Feb 27
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
..._qspinlock *)lock;
>> + u16 old;
>> +
>> + /*
>> + * Fall into the quick spinning code path only if no one is waiting
>> + * or the lock is available.
>> + */
>> + if (unlikely((qsval != _QSPINLOCK_LOCKED) &&
>> + (qsval != _QSPINLOCK_WAITING)))
>> + return 0;
>> +
>> + old = xchg(&qlock->lock_wait, _QSPINLOCK_WAITING|_QSPINLOCK_LOCKED);
>> +
>> + if (old == 0) {
>> + /*
>> + * Got the lock, can clear the waiting bit now
>> + */
>> + smp_u8_store_release(&qlock-&...
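For context, smp_u8_store_release() is this series' byte-sized store-release. On a TSO architecture such as x86 a release store needs no hardware fence, so a plausible definition is just a compiler barrier followed by a volatile byte store (a sketch of the idea, not necessarily the exact macro from the patch):

    #define smp_u8_store_release(p, v)          \
    do {                                        \
        barrier();  /* keep earlier accesses before the store */ \
        ACCESS_ONCE(*(p)) = (v);                \
    } while (0)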
2014 Feb 27
3
[PATCH RFC v5 7/8] pvqspinlock, x86: Add qspinlock para-virtualization support
On 02/27/2014 08:15 PM, Paolo Bonzini wrote:
[...]
>> But neither of the VCPUs being kicked here are halted -- they're either
>> running or runnable (descheduled by the hypervisor).
>
> /me actually looks at Waiman's code...
>
> Right, this is really different from pvticketlocks, where the *unlock*
> primitive wakes up a sleeping VCPU. It is more similar to PLE
2015 Feb 15
7
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...n problem ??
Changes since V4:
- one more usage of tickets_equal() (Oleg)
- head > tail situation can lead to false contended check (Oleg)
Changes since V3:
- Detailed changelog (PeterZ)
- Replace ACCESS_ONCE with READ_ONCE (oleg)
- Add xen changes (Oleg)
- Correct break logic in unlock_wait() (Oleg)
Changes since V2:
- Move the slowpath flag to head, this enables xadd usage in unlock code
and in turn we can get rid of read/write after unlock (Oleg)
- usage of ticket_equals (Oleg)
Changes since V1:
- Add missing TICKET_LOCK_INC before unlock kick (fixes hang in overcommit:...
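The "xadd usage in unlock code" item is the heart of the fix: with the slowpath flag kept in head, a single xadd both releases the ticket and hands back the old head, so the unlock path can decide whether to kick without reading the lock word again after it has been released. A rough sketch of that shape (not the actual patch; the helper names mirror the existing x86 ticketlock code, and TICKET_LOCK_INC is the increment mentioned above):

    static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
    {
        if (TICKET_SLOWPATH_FLAG &&
            static_key_false(&paravirt_ticketlocks_enabled)) {
            __ticket_t head;

            /* release the lock and fetch the old head in one xadd */
            head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

            if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
                head &= ~TICKET_SLOWPATH_FLAG;
                __ticket_unlock_kick(lock, head + TICKET_LOCK_INC);
            }
        } else {
            __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
        }
    }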
2014 Mar 12
17
[PATCH v6 00/11] qspinlock: a 4-byte queue spinlock with PV support
v5->v6:
- Change the optimized 2-task contending code to make it fairer at the
expense of a bit of performance.
- Add a patch to support unfair queue spinlock for Xen.
- Modify the PV qspinlock code to follow what was done in the PV
ticketlock.
- Add performance data for the unfair lock as well as the PV
support code.
v4->v5:
- Move the optimized 2-task contending code to the
2013 Aug 06
16
[PATCH V12 0/14] Paravirtualized ticket spinlocks
This series replaces the existing paravirtualized spinlock mechanism
with a paravirtualized ticketlock mechanism. The series provides
implementation for both Xen and KVM.
The current set of patches is for the Xen/x86 spinlock/KVM guest side, to be included
against -tip.
I'll be sending a separate patchset for the KVM host based on the kvm tree.
Please note I have added the below performance result