Displaying 14 results from an estimated 40 matches for "queue_spin_lock_unfair".
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...t queue_spin_trylock_unfair(struct qspinlock *lock)
> +{
> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> +
> + if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
> + return 1;
> + return 0;
> +}
> +
> +/**
> + * queue_spin_lock_unfair - acquire a queue spinlock unfairly
> + * @lock: Pointer to queue spinlock structure
> + */
> +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
> +{
> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> +
> + if (likely(cmpxchg(&qlock->...
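The excerpt is cut off mid-expression, but the pattern it shows is complete enough to restate: queue_spin_trylock_unfair() does a cheap read of the lock byte first, then a cmpxchg from 0 to locked, and queue_spin_lock_unfair() tries the same cmpxchg and, in the likely uncontended case, is done. Below is a minimal self-contained userspace model of that pattern, using C11 atomics in place of the kernel's cmpxchg(); all model_* names are ours, and the spin fallback merely stands in for the patch's queued slow path:

	#include <sched.h>
	#include <stdatomic.h>

	#define MODEL_LOCKED_VAL 1		/* stands in for _Q_LOCKED_VAL */

	struct model_qspinlock {
		atomic_uchar locked;		/* stands in for qlock->locked */
	};

	/* Shape of queue_spin_trylock_unfair(): relaxed read, then cmpxchg. */
	static inline int model_trylock_unfair(struct model_qspinlock *lock)
	{
		unsigned char expected = 0;

		if (atomic_load_explicit(&lock->locked, memory_order_relaxed) == 0 &&
		    atomic_compare_exchange_strong(&lock->locked, &expected,
						   MODEL_LOCKED_VAL))
			return 1;
		return 0;
	}

	/* Shape of queue_spin_lock_unfair(): fast path first; on failure the
	 * patch enters the queued slow path, which we replace with a spin. */
	static inline void model_lock_unfair(struct model_qspinlock *lock)
	{
		while (!model_trylock_unfair(lock))
			sched_yield();		/* stands in for cpu_relax() */
	}

The read before the cmpxchg is not redundant: a failed cmpxchg still pulls the cache line in exclusive state, so filtering on a plain load keeps contending CPUs from bouncing the line.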
2014 Jun 12
2
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...>+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> >>+
> >>+ if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
> >>+ return 1;
> >>+ return 0;
> >>+}
> >>+
> >>+/**
> >>+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
> >>+ * @lock: Pointer to queue spinlock structure
> >>+ */
> >>+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
> >>+{
> >>+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
>...
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...lock *lock)
>> +{
>> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
>> +
>> + if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
>> + return 1;
>> + return 0;
>> +}
>> +
>> +/**
>> + * queue_spin_lock_unfair - acquire a queue spinlock unfairly
>> + * @lock: Pointer to queue spinlock structure
>> + */
>> +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
>> +{
>> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
>> +
>> + if...
2014 Mar 13
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> +static inline void arch_spin_lock(struct qspinlock *lock)
> +{
> + if (static_key_false(&paravirt_unfairlocks_enabled))
> + queue_spin_lock_unfair(lock);
> + else
> + queue_spin_lock(lock);
> +}
So I would have expected something like:
if (static_key_false(&paravirt_spinlock)) {
while (!queue_spin_trylock(lock))
cpu_relax();
return;
}
At the top of queue_spin_lock_slowpath().
> +static inline int arch_spin_tryloc...
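The shape Peter is suggesting keeps arch_spin_lock() as a single unconditional call and pushes the paravirt decision down into the slow path. Continuing the userspace model sketched after the first result above (the flag is only a stand-in: a real static key patches the branch rather than reading a variable, and the queued path is elided):

	/* Stand-in for the paravirt_spinlock static key named above. */
	static _Bool model_paravirt_spinlock;

	static void model_lock_slowpath(struct model_qspinlock *lock)
	{
		if (model_paravirt_spinlock) {
			/* Virtual guest: forgo queueing, spin on trylock. */
			while (!model_trylock_unfair(lock))
				sched_yield();
			return;
		}
		/* ... normal MCS-style queueing would follow here ... */
	}

With this placement the fair fast path stays branch-free; only callers that already missed the uncontended cmpxchg pay for the paravirt check.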
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...ude/asm/qspinlock.h
index 98db42e..c278aed 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -56,4 +56,78 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
#include <asm-generic/qspinlock.h>
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0...
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...ude/asm/qspinlock.h
index 7f3129c..0e6740a 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -51,4 +51,76 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
#include <asm-generic/qspinlock.h>
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+/**
+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (likely(cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0...
2014 Feb 26
2
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...0644
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -56,4 +56,78 @@ static inline void queue_spin_unlock(struct qspinlock *lock)
>
> #include <asm-generic/qspinlock.h>
>
> +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
> +/**
> + * queue_spin_lock_unfair - acquire a queue spinlock unfairly
> + * @lock: Pointer to queue spinlock structure
> + */
> +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
> +{
> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> +
> + if (likely(cmpxchg(&qlock->...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...uired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+/**
+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (likely(cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))...
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...(union arch_qspinlock *)lock;
>>>> +
>>>> + if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
>>>> + return 1;
>>>> + return 0;
>>>> +}
>>>> +
>>>> +/**
>>>> + * queue_spin_lock_unfair - acquire a queue spinlock unfairly
>>>> + * @lock: Pointer to queue spinlock structure
>>>> + */
>>>> +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
>>>> +{
>>>> + union arch_qspinlock *qlock = (union arch_qsp...
2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...uired, 0 if failed
+ */
+static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (!qlock->locked && (cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+/**
+ * queue_spin_lock_unfair - acquire a queue spinlock unfairly
+ * @lock: Pointer to queue spinlock structure
+ */
+static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (likely(cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))...
2014 Mar 14
4
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...e:
> On 03/13/2014 11:15 AM, Peter Zijlstra wrote:
> >On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> >>+static inline void arch_spin_lock(struct qspinlock *lock)
> >>+{
> >>+ if (static_key_false(&paravirt_unfairlocks_enabled))
> >>+ queue_spin_lock_unfair(lock);
> >>+ else
> >>+ queue_spin_lock(lock);
> >>+}
> >So I would have expected something like:
> >
> > if (static_key_false(&paravirt_spinlock)) {
> > while (!queue_spin_trylock(lock))
> > cpu_relax();
> > return;
> >...
2014 Mar 17
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...jlstra wrote:
> >>>On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> >>>>+static inline void arch_spin_lock(struct qspinlock *lock)
> >>>>+{
> >>>>+ if (static_key_false(&paravirt_unfairlocks_enabled))
> >>>>+ queue_spin_lock_unfair(lock);
> >>>>+ else
> >>>>+ queue_spin_lock(lock);
> >>>>+}
> >>>So I would have expected something like:
> >>>
> >>> if (static_key_false(&paravirt_spinlock)) {
> >>> while (!queue_spin_trylock(lock)...
2014 Mar 13
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On 03/13/2014 11:15 AM, Peter Zijlstra wrote:
> On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
>> +static inline void arch_spin_lock(struct qspinlock *lock)
>> +{
>> + if (static_key_false(&paravirt_unfairlocks_enabled))
>> + queue_spin_lock_unfair(lock);
>> + else
>> + queue_spin_lock(lock);
>> +}
> So I would have expected something like:
>
> if (static_key_false(&paravirt_spinlock)) {
> while (!queue_spin_trylock(lock))
> cpu_relax();
> return;
> }
>
> At the top of queue_spin_lock...
2014 Feb 28
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...>> +/**
>> + * arch_spin_lock - acquire a queue spinlock
>> + * @lock: Pointer to queue spinlock structure
>> + */
>> +static inline void arch_spin_lock(struct qspinlock *lock)
>> +{
>> + if (static_key_false(&paravirt_unfairlocks_enabled)) {
>> + queue_spin_lock_unfair(lock);
>> + return;
>> + }
>> + queue_spin_lock(lock);
> What happens when you are booting and you are in the middle of using a
> ticketlock (say you are waiting for it and you are in the slow-path)
> and suddenly the unfairlocks_enabled is turned on.
The static key...
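For completeness, a tiny driver that closes out the userspace model from the earlier sketches: two threads contend on the model lock and the final count verifies mutual exclusion. It also illustrates why the question above matters: the implementation choice is consulted at each acquisition, so any flip of the real static key has to be finished before code like this starts taking locks.

	#include <pthread.h>
	#include <stdio.h>

	static struct model_qspinlock demo_lock;
	static long demo_counter;

	static void *demo_thread(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 100000; i++) {
			model_lock_unfair(&demo_lock);
			demo_counter++;		/* protected by the model lock */
			atomic_store(&demo_lock.locked, 0);	/* model unlock */
		}
		return NULL;
	}

	int main(void)	/* build with -pthread */
	{
		pthread_t a, b;

		pthread_create(&a, NULL, demo_thread, NULL);
		pthread_create(&b, NULL, demo_thread, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("counter = %ld (expect 200000)\n", demo_counter);
		return 0;
	}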