Displaying 20 results from an estimated 46 matches for "arch_qspinlock".
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
.../**
> + * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
> + * @lock : Pointer to queue spinlock structure
> + * Return: 1 if lock acquired, 0 if failed
> + */
> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> +{
> +	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> +
> +	if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
> +		return 1;
> +	return 0;
> +}
> +
> +/**
> + * queue_spin_lock_unfair - acquire a queue spinlock unfairly
> + * @lock: Pointer to q...
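To make the pattern in this excerpt concrete, here is a small standalone C11 program (an illustration only, not kernel code: the type demo_qspinlock, the helper demo_trylock_unfair() and the constant Q_LOCKED_VAL are made-up names) showing the same "peek at the lock byte, then try one compare-and-swap" unfair trylock idea:

/*
 * Standalone userspace illustration of the unfair trylock pattern:
 * a cheap non-atomic peek at the lock byte, then a single CAS.
 */
#include <stdatomic.h>
#include <stdio.h>

#define Q_LOCKED_VAL 1

struct demo_qspinlock { _Atomic unsigned char locked; };

static int demo_trylock_unfair(struct demo_qspinlock *lock)
{
	unsigned char expect = 0;

	/* Cheap read avoids a wasted atomic when the lock is already held. */
	if (atomic_load_explicit(&lock->locked, memory_order_relaxed))
		return 0;
	return atomic_compare_exchange_strong(&lock->locked, &expect,
					      Q_LOCKED_VAL);
}

int main(void)
{
	struct demo_qspinlock lock = { 0 };

	printf("first trylock:  %d\n", demo_trylock_unfair(&lock));	/* 1 */
	printf("second trylock: %d\n", demo_trylock_unfair(&lock));	/* 0 */
	return 0;
}

The non-atomic read in front of the cmpxchg is what keeps the path cheap when the lock is busy; it mirrors the !qlock->locked test in the quoted patch.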
2014 Jun 12
2
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...acquire the queue spinlock unfairly
> >>+ * @lock : Pointer to queue spinlock structure
> >>+ * Return: 1 if lock acquired, 0 if failed
> >>+ */
> >>+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> >>+{
> >>+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> >>+
> >>+	if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
> >>+		return 1;
> >>+	return 0;
> >>+}
> >>+
> >>+/**
> >>+ * queue_spin_lock_unfair...
2014 Mar 12
0
[PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
...+++++++++++++++++++++++++++++++-
2 files changed, 136 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index acbe155..7f3129c 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -21,9 +21,10 @@ union arch_qspinlock {
 	struct qspinlock slock;
 	struct {
 		u8 lock;	/* Lock bit */
-		u8 reserved;
+		u8 wait;	/* Waiting bit */
 		u16 qcode;	/* Queue code */
 	};
+	u16 lock_wait;	/* Lock and wait bits */
 	u32 qlcode;	/* Complete lock word */
 };
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qs...
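The hunk above overlays several views of the same 32-bit lock word. A standalone sketch (illustration only, using fixed-width types instead of the kernel's struct qspinlock; the field names are mirrored from the patch but demo_arch_qspinlock itself is hypothetical) shows why a single 16-bit access through lock_wait covers exactly the lock and wait bytes on little-endian x86:

#include <stdint.h>
#include <stdio.h>

union demo_arch_qspinlock {
	uint32_t qlcode;		/* complete lock word */
	struct {
		uint8_t  lock;		/* lock byte */
		uint8_t  wait;		/* waiting byte */
		uint16_t qcode;		/* queue code */
	};
	uint16_t lock_wait;		/* lock + wait bytes as one halfword */
};

int main(void)
{
	union demo_arch_qspinlock l = { .qlcode = 0 };

	l.lock = 1;
	l.wait = 1;
	/* On little-endian x86 this prints lock_wait = 0x0101, qlcode = 0x00000101 */
	printf("lock_wait = 0x%04x, qlcode = 0x%08x\n",
	       (unsigned)l.lock_wait, (unsigned)l.qlcode);
	return 0;
}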
2014 Feb 26
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...); \
+} while (0)
+
+/*
+ * As the qcode will be accessed as a 16-bit word, no offset is needed
+ */
+#define _QCODE_VAL_OFFSET 0
+
/*
* x86-64 specific queue spinlock union structure
+ * Besides the slock and lock fields, the other fields are only
+ * valid with less than 16K CPUs.
*/
 union arch_qspinlock {
 	struct qspinlock slock;
-	u8 lock;	/* Lock bit */
+	struct {
+		u8 lock;	/* Lock bit */
+		u8 wait;	/* Waiting bit */
+		u16 qcode;	/* Queue code */
+	};
+	u16 lock_wait;	/* Lock and wait bits */
 };
#define queue_spin_unlock queue_spin_unlock
diff --git a/include/asm-generic/qspinlock_...
2014 Feb 27
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...); \
+} while (0)
+
+/*
+ * As the qcode will be accessed as a 16-bit word, no offset is needed
+ */
+#define _QCODE_VAL_OFFSET 0
+
/*
* x86-64 specific queue spinlock union structure
+ * Besides the slock and lock fields, the other fields are only
+ * valid with less than 16K CPUs.
*/
 union arch_qspinlock {
 	struct qspinlock slock;
-	u8 lock;	/* Lock bit */
+	struct {
+		u8 lock;	/* Lock bit */
+		u8 wait;	/* Waiting bit */
+		u16 qcode;	/* Queue code */
+	};
+	u16 lock_wait;	/* Lock and wait bits */
 };
#define queue_spin_unlock queue_spin_unlock
diff --git a/include/asm-generic/qspinlock_...
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...ck unfairly
>>>> + * @lock : Pointer to queue spinlock structure
>>>> + * Return: 1 if lock acquired, 0 if failed
>>>> + */
>>>> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
>>>> +{
>>>> +	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
>>>> +
>>>> +	if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
>>>> +		return 1;
>>>> +	return 0;
>>>> +}
>>>> +
>>>> +/**
>>>...
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...trylock_unfair - try to acquire the queue spinlock unfairly
>> + * @lock : Pointer to queue spinlock structure
>> + * Return: 1 if lock acquired, 0 if failed
>> + */
>> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
>> +{
>> +	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
>> +
>> +	if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
>> +		return 1;
>> +	return 0;
>> +}
>> +
>> +/**
>> + * queue_spin_lock_unfair - acquire a queue spinlock unfai...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...LOCKS
+extern struct static_key virt_unfairlocks_enabled;
+#endif
+
#define queue_spin_unlock queue_spin_unlock
/**
* queue_spin_unlock - release a queue spinlock
@@ -26,4 +30,79 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
#include <asm-generic/qspinlock.h>
+union arch_qspinlock {
+	atomic_t val;
+	u8 locked;
+};
+
+#ifdef CONFIG_VIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfa...
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...ck *lock)
#include <asm-generic/qspinlock.h>
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
+{
+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
+		return;
+	/*
+	 * Since the lock is now unfair, there is no need to activate
+	 * the 2-task quick spinning code path.
+	 */
+	queue_spin_lock_slowpath(lock, -1);
+}
+
+/**
+ * queue_s...
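The excerpts above declare an unfair-lock static key and the unfair acquire function, but the glue that selects between the fair and unfair paths is cut off. Below is a hedged sketch of how such a switch could be wired up; it is not the verbatim patch, and queue_spin_lock_fair() is a made-up placeholder name for the regular fair path:

#define queue_spin_lock queue_spin_lock
/*
 * Illustrative wiring only: when the unfair-lock static key is enabled
 * (e.g. running as a PV guest), take the unfair path; otherwise fall
 * back to the normal fair queue spinlock acquire.
 */
static __always_inline void queue_spin_lock(struct qspinlock *lock)
{
	if (static_key_false(&paravirt_unfairlocks_enabled))
		queue_spin_lock_unfair(lock);
	else
		queue_spin_lock_fair(lock);	/* hypothetical name for the fair path */
}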
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...ck *lock)
#include <asm-generic/qspinlock.h>
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
+{
+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
+		return;
+	/*
+	 * Since the lock is now unfair, we should not activate the 2-task
+	 * quick spinning code path which disallows lock stealing.
+	 */
+	queue_spin_lock_slowpath(lock, -1...
2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...S
+extern struct static_key paravirt_unfairlocks_enabled;
+#endif
+
#define queue_spin_unlock queue_spin_unlock
/**
* queue_spin_unlock - release a queue spinlock
@@ -26,4 +30,79 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
#include <asm-generic/qspinlock.h>
+union arch_qspinlock {
+	atomic_t val;
+	u8 locked;
+};
+
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_...
2014 Mar 12
0
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...to _QSPINLOCK_LOCKED_SLOWPATH before
+	 * trying to hibernate itself. It is possible that the
+	 * lock byte had been set to _QSPINLOCK_LOCKED_SLOWPATH
+	 * already. In this case, just proceeds to sleeping.
+	 */
+	ACCESS_ONCE(pv->cpustate) = PV_CPU_HALTED;
+	lockval = cmpxchg(&((union arch_qspinlock *)lock)->lock,
+			  _QSPINLOCK_LOCKED, _QSPINLOCK_LOCKED_SLOWPATH);
+	if (lockval == 0) {
+		/*
+		 * Can exit now as the lock is free
+		 */
+		ACCESS_ONCE(pv->cpustate) = PV_CPU_ACTIVE;
+		*count = 0;
+		return;
+	}
+	__queue_hibernate();
+	ACCESS_ONCE(pv->cpustate) = PV_CPU_A...
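Once a halting waiter has flipped the lock byte to _QSPINLOCK_LOCKED_SLOWPATH as in the excerpt above, a plain byte-store unlock is no longer sufficient. Below is a hedged sketch of the unlock-side counterpart such a scheme implies, not code taken from the patch; the helper __queue_kick_cpu() is a made-up name for whatever wakes the halted CPU:

static inline void queue_spin_unlock_pv(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
	u8 lockval;

	/* Fast path: nobody halted, the byte is still plain _QSPINLOCK_LOCKED. */
	lockval = cmpxchg(&qlock->lock, _QSPINLOCK_LOCKED, 0);
	if (likely(lockval == _QSPINLOCK_LOCKED))
		return;

	/* A waiter marked the slowpath before halting: clear the byte and kick it. */
	ACCESS_ONCE(qlock->lock) = 0;
	__queue_kick_cpu(lock);		/* hypothetical wake-up helper */
}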
2014 Feb 26
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
...def _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
+
+/*
+ * x86-64 specific queue spinlock union structure
+ */
+union arch_qspinlock {
+	struct qspinlock slock;
+	u8 lock;	/* Lock bit */
+};
+
+#define queue_spin_unlock queue_spin_unlock
+/**
+ * queue_spin_unlock - release a queue spinlock
+ * @lock : Pointer to queue spinlock structure
+ *
+ * No special memory barrier other than a compiler one is needed for the
+ * x86 arch...
2014 Feb 27
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
...def _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
+
+/*
+ * x86-64 specific queue spinlock union structure
+ */
+union arch_qspinlock {
+	struct qspinlock slock;
+	u8 lock;	/* Lock bit */
+};
+
+#define queue_spin_unlock queue_spin_unlock
+/**
+ * queue_spin_unlock - release a queue spinlock
+ * @lock : Pointer to queue spinlock structure
+ *
+ * No special memory barrier other than a compiler one is needed for the
+ * x86 arch...
2014 Mar 19
0
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
...def _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
+
+/*
+ * x86-64 specific queue spinlock union structure
+ */
+union arch_qspinlock {
+	struct qspinlock slock;
+	u8 lock;	/* Lock bit */
+};
+
+#define queue_spin_unlock queue_spin_unlock
+/**
+ * queue_spin_unlock - release a queue spinlock
+ * @lock : Pointer to queue spinlock structure
+ *
+ * No special memory barrier other than a compiler one is needed for the
+ * x86 arch...
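The truncated comment notes that only a compiler barrier is needed on x86. A sketch of what the unlock body plausibly looks like under that comment, stated as an assumption rather than the verbatim patch:

static inline void queue_spin_unlock(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	barrier();			/* compiler barrier: keep critical-section
					   stores ahead of the release */
	ACCESS_ONCE(qlock->lock) = 0;	/* plain byte store releases the lock (x86 TSO) */
	barrier();
}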
2014 Apr 01
10
[PATCH v8 00/10] qspinlock: a 4-byte queue spinlock with PV support
v7->v8:
- Remove one unneeded atomic operation from the slowpath, thus
improving performance.
- Simplify some of the code and add more comments.
- Test for X86_FEATURE_HYPERVISOR CPU feature bit to enable/disable
unfair lock.
- Reduce unfair lock slowpath lock stealing frequency depending
on its distance from the queue head.
- Add performance data for IvyBridge-EX CPU.
2014 Mar 19
15
[PATCH v7 00/11] qspinlock: a 4-byte queue spinlock with PV support
v6->v7:
- Remove an atomic operation from the 2-task contending code
- Shorten the names of some macros
- Make the queue waiter attempt to steal the lock when the unfair lock is
  enabled.
- Remove lock holder kick from the PV code and fix a race condition
- Run the unfair lock & PV code on overcommitted KVM guests to collect
performance data.
v5->v6:
- Change the optimized