Search results for "queue_spin_trylock_unfair" (an estimated 36 matches in the archive).
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...> mainly due to the use of a static key. However, uncontended lock-unlock
> operations are really just a tiny percentage of a real workload. So
> there should be no noticeable change in application performance.
No, entirely unacceptable.
> +#ifdef CONFIG_VIRT_UNFAIR_LOCKS
> +/**
> + * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
> + * @lock : Pointer to queue spinlock structure
> + * Return: 1 if lock acquired, 0 if failed
> + */
> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> +{
> +	union arch_qspinlock *qlock = (union arch_qs...
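The snippet above is cut off by the archive. A minimal sketch of how such a byte-wide unfair trylock is typically completed, assuming the union arch_qspinlock overlay and the _QSPINLOCK_LOCKED value that appear in the other excerpts on this page (an illustration, not necessarily the patch's exact code):

static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/*
	 * Cheap non-atomic read first; only issue the atomic when the
	 * lock byte looks free. The field name follows the union shown
	 * in the v10/v11 excerpts below; earlier versions of the series
	 * spell it qlock->lock.
	 */
	return !qlock->locked &&
	       (cmpxchg(&qlock->locked, 0, _QSPINLOCK_LOCKED) == 0);
}

Because the cmpxchg touches only the lock byte, a latecomer can take the lock even while other CPUs sit queued behind the tail bits, which is what makes the acquisition unfair.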
2014 Jun 12
2
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...uncontended lock-unlock
> >>operations are really just a tiny percentage of a real workload. So
> >>there should be no noticeable change in application performance.
> >No, entirely unacceptable.
> >
> >>+#ifdef CONFIG_VIRT_UNFAIR_LOCKS
> >>+/**
> >>+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
> >>+ * @lock : Pointer to queue spinlock structure
> >>+ * Return: 1 if lock acquired, 0 if failed
> >>+ */
> >>+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> >>+{
> >>...
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...a static key. However, uncontended lock-unlock
>> operations are really just a tiny percentage of a real workload. So
>> there should be no noticeable change in application performance.
> No, entirely unacceptable.
>
>> +#ifdef CONFIG_VIRT_UNFAIR_LOCKS
>> +/**
>> + * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
>> + * @lock : Pointer to queue spinlock structure
>> + * Return: 1 if lock acquired, 0 if failed
>> + */
>> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
>> +{
>> +	union arch_qspinlock...
2015 Apr 08
2
[PATCH v15 16/16] unfair qspinlock: a queue based unfair lock
...queue_spin_unlock(struct qspinlock *lock)
 }
 #endif
-#define virt_queue_spin_lock virt_queue_spin_lock
+#ifndef static_cpu_has_hypervisor
+#define static_cpu_has_hypervisor static_cpu_has(X86_FEATURE_HYPERVISOR)
+#endif
-static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+#define queue_spin_trylock_unfair queue_spin_trylock_unfair
+static inline bool queue_spin_trylock_unfair(struct qspinlock *lock)
 {
-	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
-		return false;
-
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	u8 *l = (u8 *)lock;
-	return true;
+	return !RE...
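The final line of the excerpt is truncated. One plausible completion, in the simple test-and-set style the v11 cover letter below describes (the READ_ONCE/xchg pair is an assumption here, not confirmed by the snippet):

static inline bool queue_spin_trylock_unfair(struct qspinlock *lock)
{
	u8 *l = (u8 *)lock;

	/* Test, then test-and-set, on the low (locked) byte only */
	return !READ_ONCE(*l) && (xchg(l, _Q_LOCKED_VAL) == 0);
}

xchg() returning 0 means the byte was clear, i.e. the lock was free and is now held by the caller.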
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...pinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
+		return;
+	/*
+	 * Since the lock is now unfair, there is no need to activate
+	 * the 2-task quick spinning code path.
+	 */
+	queue_spin_lock_slowpath(lock, -1);
+}
+
+/**
+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (!qlock...
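Assembled from the fragment above for readability; the function header is cut off, so the name queue_spin_lock_unfair is inferred from the series and may not match exactly:

static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/* Fast path: byte-wide cmpxchg on the lock byte */
	if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
		return;
	/*
	 * Since the lock is now unfair, there is no need to activate
	 * the 2-task quick spinning code path.
	 */
	queue_spin_lock_slowpath(lock, -1);
}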
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...arch_qspinlock *)lock;
+
+	if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
+		return;
+	/*
+	 * Since the lock is now unfair, we should not activate the 2-task
+	 * quick spinning code path which disallows lock stealing.
+	 */
+	queue_spin_lock_slowpath(lock, -1);
+}
+
+/**
+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (!qlock...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...queue_spin_unlock
 /**
  * queue_spin_unlock - release a queue spinlock
@@ -26,4 +30,79 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
 #include <asm-generic/qspinlock.h>
+union arch_qspinlock {
+	atomic_t val;
+	u8 locked;
+};
+
+#ifdef CONFIG_VIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (!qlock...
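The union above is the key trick: on little-endian x86, the u8 locked aliases the least-significant byte of the 32-bit val, so byte-wide atomics acquire or release the lock without disturbing the queue-tail bits stored in the upper bytes. A sketch of the matching release, using the ACCESS_ONCE idiom of that era (queue_spin_unlock_unfair is a hypothetical name used here for illustration):

static inline void queue_spin_unlock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/* A plain byte store drops the lock; the tail word is untouched */
	barrier();
	ACCESS_ONCE(qlock->locked) = 0;
	barrier();
}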
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...>> operations are really just a tiny percentage of a real workload. So
>>>> there should be no noticeable change in application performance.
>>> No, entirely unacceptable.
>>>
>>>> +#ifdef CONFIG_VIRT_UNFAIR_LOCKS
>>>> +/**
>>>> + * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
>>>> + * @lock : Pointer to queue spinlock structure
>>>> + * Return: 1 if lock acquired, 0 if failed
>>>> + */
>>>> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
>>>...
2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...e_spin_unlock
 /**
  * queue_spin_unlock - release a queue spinlock
@@ -26,4 +30,79 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
 #include <asm-generic/qspinlock.h>
+union arch_qspinlock {
+	atomic_t val;
+	u8 locked;
+};
+
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+	if (!qlock...
2014 Mar 13
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
..._key_false(&paravirt_spinlock)) {
	while (!queue_spin_trylock(lock))
		cpu_relax();
	return;
}
At the top of queue_spin_lock_slowpath().
> +static inline int arch_spin_trylock(struct qspinlock *lock)
> +{
> +	if (static_key_false(&paravirt_unfairlocks_enabled))
> +		return queue_spin_trylock_unfair(lock);
> +	else
> +		return queue_spin_trylock(lock);
> +}
That just doesn't make any kind of sense; a trylock cannot be fair or
unfair.
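Spelling out the suggestion from the first half of the mail (a sketch; the rest of the slowpath body is elided, and paravirt_spinlock is the key name used in the quote):

void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (static_key_false(&paravirt_spinlock)) {
		/*
		 * Virtual guest: spin on the ordinary trylock and bypass
		 * the fair MCS-style queue entirely.
		 */
		while (!queue_spin_trylock(lock))
			cpu_relax();
		return;
	}
	/* ... normal queued-spinlock slowpath ... */
}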
2014 Feb 26
2
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
> +		return;
> +	/*
> +	 * Since the lock is now unfair, there is no need to activate
> +	 * the 2-task quick spinning code path.
> +	 */
> +	queue_spin_lock_slowpath(lock, -1);
> +}
> +
> +/**
> + * queue_spin_trylock_unfair - try to acquire the queue spinlock unfairly
> + * @lock : Pointer to queue spinlock structure
> + * Return: 1 if lock acquired, 0 if failed
> + */
> +static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
> +{
> +	union arch_qspinlock *qlock = (union arch_qs...
2014 Mar 13
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...schemes. It is a compromise to provide some lock unfairness
without sacrificing the good cacheline behavior of the queue spinlock.
>> +static inline int arch_spin_trylock(struct qspinlock *lock)
>> +{
>> +	if (static_key_false(&paravirt_unfairlocks_enabled))
>> +		return queue_spin_trylock_unfair(lock);
>> +	else
>> +		return queue_spin_trylock(lock);
>> +}
> That just doesn't make any kind of sense; a trylock cannot be fair or
> unfair.
Because I use a different cmpxchg for the fair and unfair versions, I
also need a different version for trylock.
-Longman
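Longman's point about "a different cmpxchg": the fair trylock operates on the whole 32-bit word and therefore fails whenever any tail bits are set, while the unfair one races on the lock byte alone. A side-by-side sketch (the fair version follows the asm-generic queue_spin_trylock of that era; treat both as illustrations):

/* Fair: whole-word cmpxchg, fails if anything (tail included) is set */
static __always_inline int queue_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

/* Unfair: byte-wide cmpxchg, can steal the lock past queued waiters */
static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	return !qlock->locked &&
	       (cmpxchg(&qlock->locked, 0, _QSPINLOCK_LOCKED) == 0);
}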
2014 May 07
0
[PATCH v10 12/19] unfair qspinlock: Variable frequency lock stealing mechanism
...r_get_lock(struct qspinlock *lock, struct qnode *node, u32 tail, int count)
+{
+	u32 prev_tail;
+	int isqhead;
+	struct qnode *next;
+
+	if (!static_key_false(&paravirt_unfairlocks_enabled) ||
+	    ((count & node->lsteal_mask) != node->lsteal_mask))
+		return false;
+
+	if (!queue_spin_trylock_unfair(lock)) {
+		/*
+		 * Lock stealing fails, re-adjust the lsteal mask so that
+		 * it is about double of the previous node.
+		 */
+		struct qnode *prev = node->qprev;
+
+		node->lsteal_mask = prev->qhead ? LSTEAL_MIN_MASK :
+				    (prev->lsteal_mask << 1) + 1;
+		if (node->ls...
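The fragment's backoff arithmetic, isolated as a standalone sketch (the helper names and the LSTEAL_MIN_MASK value are made up for illustration; the field names come from the fragment):

/* Assumed minimum stealing window, for illustration only */
#define LSTEAL_MIN_MASK	0x3

/* A node may attempt a steal only once every (lsteal_mask + 1) loops */
static bool may_attempt_steal(struct qnode *node, int count)
{
	return (count & node->lsteal_mask) == node->lsteal_mask;
}

/*
 * After a failed steal, roughly double the window, unless the
 * predecessor has become queue head, in which case reset it.
 */
static void steal_failed(struct qnode *node, struct qnode *prev)
{
	node->lsteal_mask = prev->qhead ? LSTEAL_MIN_MASK :
			    (prev->lsteal_mask << 1) + 1;
}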
2014 May 30
19
[PATCH v11 00/16] qspinlock: a 4-byte queue spinlock with PV support
v10->v11:
- Use a simple test-and-set unfair lock to simplify the code,
but performance may suffer a bit for large guests with many CPUs.
- Take out Raghavendra KT's test results as the unfair lock changes
may render some of his results invalid.
- Add PV support without increasing the size of the core queue node
structure.
- Other minor changes to address some of the