Displaying 20 results from an estimated 130 matches for "arch_spin_trylock".
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...0);
> +}
> +
> +static inline int queued_spin_is_locked(struct qspinlock *lock)
> +{
> + smp_mb();
> + return atomic_read(&lock->val);
> +}
> +
> +#include <asm-generic/qspinlock.h>
> +
> +/* we need to override these as ppc has io_sync stuff */
> +#undef arch_spin_trylock
> +#undef arch_spin_lock
> +#undef arch_spin_lock_flags
> +#undef arch_spin_unlock
> +#define arch_spin_trylock arch_spin_trylock
> +#define arch_spin_lock arch_spin_lock
> +#define arch_spin_lock_flags arch_spin_lock_flags
> +#define arch_spin_unlock arch_spin_unlock
> +
> ...
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...> + return;
> + /*
> + * Since the lock is now unfair, we should not activate the 2-task
> + * pending bit spinning code path which disallows lock stealing.
> + */
> + queue_spin_lock_slowpath(lock, -1);
> +}
Why is this needed?
> +/*
> + * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
> + * jump to the unfair versions if the static key virt_unfairlocks_enabled
> + * is true.
> + */
> +#undef arch_spin_lock
> +#undef arch_spin_trylock
> +#undef arch_spin_lock_flags
> +
> +/**
> + * arch_spin_lock - acquire a queue spinlock...
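The excerpt is cut off right where the redefined wrappers begin. Going by the trylock variant quoted verbatim further down in these results (the v6 posting), the redefinitions presumably look roughly like the sketch below; virt_unfairlocks_enabled is the static key named in the comment, and the exact bodies are an assumption rather than the patch text:

    /* sketch, not the actual patch: static-key dispatch to the unfair paths */
    static __always_inline void arch_spin_lock(struct qspinlock *lock)
    {
            if (static_key_false(&virt_unfairlocks_enabled))
                    queue_spin_lock_unfair(lock);   /* cmpxchg fast path, then unfair slowpath */
            else
                    queue_spin_lock(lock);          /* normal FIFO qspinlock */
    }

    static __always_inline int arch_spin_trylock(struct qspinlock *lock)
    {
            if (static_key_false(&virt_unfairlocks_enabled))
                    return queue_spin_trylock_unfair(lock);
            return queue_spin_trylock(lock);
    }

Peter Zijlstra's objection later in these results ("a trylock cannot be fair or unfair") is aimed at exactly the second wrapper.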
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...required */
+ smp_store_release(__qspinlock_lock_byte(lock), 0);
+}
+
+static inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ smp_mb();
+ return atomic_read(&lock->val);
+}
+
+#include <asm-generic/qspinlock.h>
+
+/* we need to override these as ppc has io_sync stuff */
+#undef arch_spin_trylock
+#undef arch_spin_lock
+#undef arch_spin_lock_flags
+#undef arch_spin_unlock
+#define arch_spin_trylock arch_spin_trylock
+#define arch_spin_lock arch_spin_lock
+#define arch_spin_lock_flags arch_spin_lock_flags
+#define arch_spin_unlock arch_spin_unlock
+
+static inline int arch_spin_trylock(arch_...
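The excerpt stops right where the overriding arch_spin_trylock() starts. The reason the generic versions are #undef'd at all is the ppc64 io_sync convention: the paca records whether MMIO was done while a lock was held, and the unlock path must publish that MMIO with a sync. A rough sketch of what such wrappers could look like, assuming CLEAR_IO_SYNC/SYNC_IO helpers in the style of the pre-qspinlock powerpc spinlock.h (illustrative only, not the patch body):

    static inline int arch_spin_trylock(arch_spinlock_t *lock)
    {
            CLEAR_IO_SYNC;                  /* forget MMIO done before we held the lock */
            return queued_spin_trylock(lock);
    }

    static inline void arch_spin_lock(arch_spinlock_t *lock)
    {
            CLEAR_IO_SYNC;
            queued_spin_lock(lock);
    }

    static inline void arch_spin_unlock(arch_spinlock_t *lock)
    {
            SYNC_IO;                        /* sync if MMIO was done under the lock */
            queued_spin_unlock(lock);
    }

The self-referential "#define arch_spin_trylock arch_spin_trylock" lines in the quoted hunk mark the symbols as already provided, so generic code guarded by #ifndef will not install its own definitions on top of these.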
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...in the fast path to get the best possible performance in
a virtual guest.
Yes, I could take that out to allow either unfair or paravirt spinlock,
but not both. I do think that a little bit of unfairness will help in
the virtual environment.
>> +/*
>> + * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
>> + * jump to the unfair versions if the static key virt_unfairlocks_enabled
>> + * is true.
>> + */
>> +#undef arch_spin_lock
>> +#undef arch_spin_trylock
>> +#undef arch_spin_lock_flags
>> +
>> +/**
>> + * arch_s...
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...__always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (!qlock->lock &&
+ (cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+extern struct static_key paravirt_unfairlocks_enabled;
+
+/**
+ * arch_spin_lock - acquire...
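The unfair trylock above boils down to "peek at the lock byte, then try to claim it with a single cmpxchg". A minimal, compilable userspace analogue of the same pattern, using C11 atomics in place of the kernel's cmpxchg() (toy names throughout, nothing here is kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_lock { atomic_uchar locked; };       /* 0 = free, 1 = held */

    /* same shape as queue_spin_trylock_unfair(): peek first, then one CAS */
    static bool toy_trylock_unfair(struct toy_lock *l)
    {
            unsigned char expected = 0;

            if (atomic_load_explicit(&l->locked, memory_order_relaxed) != 0)
                    return false;                   /* visibly held, skip the CAS */
            return atomic_compare_exchange_strong_explicit(&l->locked,
                            &expected, 1,
                            memory_order_acquire,   /* success: lock acquired */
                            memory_order_relaxed);  /* failure: someone beat us */
    }

    static void toy_unlock(struct toy_lock *l)
    {
            atomic_store_explicit(&l->locked, 0, memory_order_release);
    }

The steal itself is the single CAS on the lock byte, taken without queueing; the relaxed pre-check only avoids a useless cmpxchg when the lock is visibly held. That byte-steal bypassing the FIFO queue is precisely the unfairness the patch wants in a PV guest.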
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...atic __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
+{
+ union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
+
+ if (!qlock->lock && (cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+extern struct static_key paravirt_unfairlocks_enabled;
+
+/**
+ * arch_spin_lock - acquire...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...likely(cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
+ return;
+ /*
+ * Since the lock is now unfair, we should not activate the 2-task
+ * pending bit spinning code path which disallows lock stealing.
+ */
+ queue_spin_lock_slowpath(lock, -1);
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key virt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+/**
+ * arch_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structur...
2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...likely(cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
+ return;
+ /*
+ * Since the lock is now unfair, we should not activate the 2-task
+ * pending bit spinning code path which disallows lock stealing.
+ */
+ queue_spin_lock_slowpath(lock, -1);
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+/**
+ * arch_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock stru...
2014 Feb 26
2
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...pinlock *lock)
> +{
> + union arch_qspinlock *qlock = (union arch_qspinlock *)lock;
> +
> + if (!qlock->lock &&
> + (cmpxchg(&qlock->lock, 0, _QSPINLOCK_LOCKED) == 0))
> + return 1;
> + return 0;
> +}
> +
> +/*
> + * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
> + * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
> + * is true.
> + */
> +#undef arch_spin_lock
> +#undef arch_spin_trylock
> +#undef arch_spin_lock_flags
> +
> +extern struct static_key paravirt_unfairlocks_enabl...
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
This is the fair-lock patchset; you can apply the patches and build successfully.
The patches are based on linux-next.
qspinlock avoids waiter starvation: it has about the same speed in the
single-threaded case and can be much faster under high contention,
especially when the spinlock is embedded within the data structure it
protects.
v7 -> v8:
add one patch to drop a function call
2014 Mar 13
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...spin_lock_unfair(lock);
> + else
> + queue_spin_lock(lock);
> +}
So I would have expected something like:
	if (static_key_false(&paravirt_spinlock)) {
		while (!queue_spin_trylock(lock))
			cpu_relax();
		return;
	}
At the top of queue_spin_lock_slowpath().
> +static inline int arch_spin_trylock(struct qspinlock *lock)
> +{
> + if (static_key_false(&paravirt_unfairlocks_enabled))
> + return queue_spin_trylock_unfair(lock);
> + else
> + return queue_spin_trylock(lock);
> +}
That just doesn't make any kind of sense; a trylock cannot be fair or
unfair.
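Peter's counter-proposal keeps arch_spin_lock() untouched and instead makes the virtualised case degrade to a plain trylock spin at the entry of the slowpath. A small compilable analogue of that shape (a plain boolean stands in for the static key, and all names here are made up for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>                      /* sched_yield() stands in for cpu_relax() */

    static bool unfair_mode;                /* stand-in for static_key_false(&paravirt_spinlock) */

    struct toy_qlock { atomic_int val; };   /* 0 = free, nonzero = held */

    static bool toy_trylock(struct toy_qlock *l)
    {
            int expect = 0;
            return atomic_compare_exchange_strong_explicit(&l->val, &expect, 1,
                            memory_order_acquire, memory_order_relaxed);
    }

    static void toy_lock_slowpath(struct toy_qlock *l)
    {
            /* the suggestion: drop into a simple unfair spin when running as
             * a guest, before any of the MCS queueing machinery is touched */
            if (unfair_mode) {
                    while (!toy_trylock(l))
                            sched_yield();
                    return;
            }

            /* placeholder: a real qspinlock queues on an MCS node here
             * instead of spinning on the lock word */
            while (!toy_trylock(l))
                    sched_yield();
    }

This keeps the fair/unfair decision in one place (the slowpath) and leaves the trylock semantics alone, which is the substance of the "a trylock cannot be fair or unfair" remark above.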
2015 Feb 06
10
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...ock)
} while (--count);
__ticket_lock_spinning(lock, inc.tail);
}
-out: barrier(); /* make sure nothing creeps before the lock is taken */
+out:
+ __ticket_check_and_clear_slowpath(lock);
+
+ barrier(); /* make sure nothing creeps before the lock is taken */
}
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -115,47 +139,21 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
- arch_spinlock...
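The hunk above removes __ticket_unlock_slowpath() and instead clears the slowpath state from the locking side, at the "out:" label, before the compiler barrier. The underlying rule is that the unlocker must never touch the lock word again after the releasing store, because the new owner may immediately free the memory containing the lock (the completion case in the subject line). A hedged sketch of what a lock-side clear could look like, assuming the slowpath flag lives in the low bit of the tail ticket as in the pre-existing code; the field layout and the exact no-contender test are assumptions, not the patch:

    /* illustrative only: clear TICKET_SLOWPATH_FLAG with one cmpxchg on the
     * whole head_tail word, and only while we are the sole, uncontended owner */
    static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
    {
            arch_spinlock_t old, new;

            old.tickets = READ_ONCE(lock->tickets);
            if (!(old.tickets.tail & TICKET_SLOWPATH_FLAG))
                    return;                         /* nothing to clear */

            /* someone is queued behind us: leave the flag for them */
            if ((__ticket_t)(old.tickets.tail & ~TICKET_SLOWPATH_FLAG) !=
                (__ticket_t)(old.tickets.head + TICKET_LOCK_INC))
                    return;

            new = old;
            new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
            cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
    }

Because only the current lock holder runs this, a failed cmpxchg is harmless (a new contender may have queued and re-set the flag in the meantime); the point of the rework is that the clearing happens while the lock is still held, so nothing dereferences the lock after the releasing store in unlock, which is what corrupted freed memory before.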
2015 Feb 08
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...ck);
> +
> + barrier(); /* make sure nothing creeps before the lock is taken */
Which means that if the "goto out" path is only ever used for fastpath
locks, you can limit calling __ticket_check_and_clear_slowpath() to the
slowpath case.
> }
>
> static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
> @@ -115,47 +139,21 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
> return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
> }
>
> -static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock...