Displaying results from an estimated 124 matches for "arch_spin_unlock".
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...; +{
> + smp_mb();
> + return atomic_read(&lock->val);
> +}
> +
> +#include <asm-generic/qspinlock.h>
> +
> +/* we need to override these because ppc has io_sync handling */
> +#undef arch_spin_trylock
> +#undef arch_spin_lock
> +#undef arch_spin_lock_flags
> +#undef arch_spin_unlock
> +#define arch_spin_trylock arch_spin_trylock
> +#define arch_spin_lock arch_spin_lock
> +#define arch_spin_lock_flags arch_spin_lock_flags
> +#define arch_spin_unlock arch_spin_unlock
> +
> +static inline int arch_spin_trylock(arch_spinlock_t *lock)
> +{
> + CLEAR_IO_SYNC;...
2015 Feb 10
4
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...from head into tail which would be wrong).
>
> Unfortunately xadd could result in head overflow as tail is high.
>
> The other option was repeated cmpxchg which is bad I believe.
> Any suggestions?
Stupid question... what if we simply move SLOWPATH from .tail to .head?
In this case arch_spin_unlock() could do xadd(tickets.head) and check
the result.
In this case __ticket_check_and_clear_slowpath() really needs to cmpxchg
the whole .head_tail. Plus obviously more boring changes. This needs a
separate patch even _if_ this can work.
BTW. If we move "clear slowpath" into "lock"...
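As a reading aid, here is a rough C sketch of the scheme being proposed in this message, with TICKET_SLOWPATH_FLAG kept in .head. It is an illustration only, not the posted patch; the exact arguments to __ticket_unlock_kick() and the shape of __ticket_check_and_clear_slowpath() are assumptions inferred from the discussion above.

/*
 * Sketch only (not the posted patch): SLOWPATH lives in .head, so unlock
 * is a single xadd on the head and never reads the lock word afterwards.
 */
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_t head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

	if (unlikely(head & TICKET_SLOWPATH_FLAG))
		__ticket_unlock_kick(lock, (head & ~TICKET_SLOWPATH_FLAG) + TICKET_LOCK_INC);
}

/*
 * Clearing the flag, as noted above, would have to cmpxchg the whole
 * .head_tail so it only succeeds while no other waiter is queued.
 */
static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
						      __ticket_t head)
{
	arch_spinlock_t old, new;

	old.tickets.head = head;
	new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
	old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
	new.tickets.tail = old.tickets.tail;

	/* clear the flag only if the lock is otherwise uncontended */
	cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
}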
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ smp_mb();
+ return atomic_read(&lock->val);
+}
+
+#include <asm-generic/qspinlock.h>
+
+/* we need to override these because ppc has io_sync handling */
+#undef arch_spin_trylock
+#undef arch_spin_lock
+#undef arch_spin_lock_flags
+#undef arch_spin_unlock
+#define arch_spin_trylock arch_spin_trylock
+#define arch_spin_lock arch_spin_lock
+#define arch_spin_lock_flags arch_spin_lock_flags
+#define arch_spin_unlock arch_spin_unlock
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ CLEAR_IO_SYNC;
+ return queued_spin_trylock(lock);
+}...
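As a reading aid, a hedged sketch of how the remaining overrides would pair with the trylock shown above, assuming SYNC_IO is the flush counterpart of CLEAR_IO_SYNC; the actual patch is authoritative.

/*
 * Sketch based on the pattern visible in the snippet above: the lock-side
 * helpers clear the per-cpu io_sync flag, and unlock issues the deferred
 * I/O sync before releasing the lock.  SYNC_IO is assumed to be the
 * counterpart of CLEAR_IO_SYNC.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	queued_spin_lock(lock);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	CLEAR_IO_SYNC;
	queued_spin_lock(lock);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	queued_spin_unlock(lock);
}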
2015 Feb 06
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
On 02/06/2015 09:49 AM, Raghavendra K T wrote:
> static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
> {
> if (TICKET_SLOWPATH_FLAG &&
> - static_key_false(&paravirt_ticketlocks_enabled)) {
> - arch_spinlock_t prev;
> + static_key_false(&paravirt_ticketlocks_enabled)) {
> + __ticket_t prev_head;
>
> - prev = *lock;
> +...
2015 Feb 11
1
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...ing xadd in unlock,
> we would have to make sure the lsb bit is cleared so that we can live with a 1
> bit overflow to tail, which is unused. Now either or both of the head/tail
> lsb bits may be set after unlock.
Sorry, I can't understand... could you spell it out?
If TICKET_SLOWPATH_FLAG lives in .head, arch_spin_unlock() could simply do
	head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
	if (head & TICKET_SLOWPATH_FLAG)
		__ticket_unlock_kick(head);
so it can't overflow to .tail?
But probably I missed your concern.
And if we do this, it probably makes sense to add something like
bool tick...
2020 Jul 06
0
[PATCH v3 3/6] powerpc: move spinlock implementation to simple_spinlock
...(flags_dis);
+		local_irq_restore(flags);
+		do {
+			HMT_low();
+			if (is_shared_processor())
+				splpar_spin_yield(lock);
+		} while (unlikely(lock->slock != 0));
+		HMT_medium();
+		local_irq_restore(flags_dis);
+	}
+}
+#define arch_spin_lock_flags arch_spin_lock_flags
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	__asm__ __volatile__("# arch_spin_unlock\n\t"
+			PPC_RELEASE_BARRIER: : :"memory");
+	lock->slock = 0;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in inter...
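For context, a sketch of the plain arch_spin_lock() that the _flags variant above parallels; treating __arch_spin_trylock() as the larx/stwcx. fast path is an assumption, and the body is reconstructed for illustration rather than quoted from the patch.

/*
 * Sketch of the non-_flags lock path: spin with lowered SMT priority and,
 * on shared-processor LPARs, yield to the lock holder's vcpu.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();			/* drop SMT priority while spinning */
			if (is_shared_processor())
				splpar_spin_yield(lock);	/* direct cycles to the holder */
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}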
2015 Feb 06
10
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...new.tickets.tail ||
- cmpxchg(&lock->head_tail, old.head_tail,
- new.head_tail) != old.head_tail) {
- /*
- * Lock still has someone queued for it, so wake up an
- * appropriate waiter.
- */
- __ticket_unlock_kick(lock, old.tickets.head);
- }
-}
-
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
if (TICKET_SLOWPATH_FLAG &&
- static_key_false(&paravirt_ticketlocks_enabled)) {
- arch_spinlock_t prev;
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ __ticket_t prev_head;
- prev = *lock;
+ prev_head = lock->tickets.head;
add_sm...
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
This is the fairlock patchset. You can apply the patches and build successfully.
The patches are based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed in
the single-threaded case and can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure to be
protected.
v7 -> v8:
add one patch to drop a function call
2014 Mar 03
5
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
Hi,
Here are some numbers for my version -- also attached is the test code.
I found that booting big machines is tediously slow so I lifted the
whole lot to userspace.
I measure the cycles spent in arch_spin_lock() + arch_spin_unlock().
The machines used are a 4 node (2 socket) AMD Interlagos, and a 2 node
(2 socket) Intel Westmere-EP.
AMD (ticket) AMD (qspinlock + pending + opt)
Local: Local:
1: 324.425530 1: 324.102142
2: 17141.324050 2: 620.185930
3: 52212.232343 3: 2524...
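The attached test code is not part of this excerpt. A hypothetical userspace harness in the same spirit could look like the sketch below, using a pthread spinlock as a stand-in for the lock implementations under test and the TSC for cycle counts.

/*
 * Hypothetical harness, not the attached test code: each thread times
 * lock+unlock pairs with the TSC and reports the average cycle cost.
 * Build with: cc -O2 -pthread harness.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <x86intrin.h>

#define ITERS 1000000

static pthread_spinlock_t lock;

static void *worker(void *arg)
{
	uint64_t start, cycles = 0;
	int i;

	(void)arg;
	for (i = 0; i < ITERS; i++) {
		start = __rdtsc();
		pthread_spin_lock(&lock);
		pthread_spin_unlock(&lock);
		cycles += __rdtsc() - start;
	}
	printf("avg cycles per lock+unlock: %.6f\n", (double)cycles / ITERS);
	return NULL;
}

int main(int argc, char **argv)
{
	int nthreads = argc > 1 ? atoi(argv[1]) : 2;
	pthread_t tid[64];
	int i;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < nthreads && i < 64; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (i = 0; i < nthreads && i < 64; i++)
		pthread_join(tid[i], NULL);
	return 0;
}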
2015 Feb 09
2
[PATCH V2] x86 spinlock: Fix memory corruption on completing completions
...new.tickets.tail ||
- cmpxchg(&lock->head_tail, old.head_tail,
- new.head_tail) != old.head_tail) {
- /*
- * Lock still has someone queued for it, so wake up an
- * appropriate waiter.
- */
- __ticket_unlock_kick(lock, old.tickets.head);
- }
-}
-
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
if (TICKET_SLOWPATH_FLAG &&
- static_key_false(&paravirt_ticketlocks_enabled)) {
- arch_spinlock_t prev;
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ __ticket_t next_head;
- prev = *lock;
+ next_head = lock->tickets.head + TICKET_L...
2016 Dec 06
6
[PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
This is the fairlock patchset. You can apply the patches and build successfully.
The patches are based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed in
the single-threaded case and can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure to be
protected.
v8 -> v9:
move the qspinlock config entry to