search for: arch_spinlock_t

Displaying 20 results from an estimated 186 matches for "arch_spinlock_t".

2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...@@ -52,6 +52,20 @@
>  #define SYNC_IO
>  #endif
>
> +#if defined(CONFIG_PPC_SPLPAR)
> +/* We only yield to the hypervisor if we are in shared processor mode */
> +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
> +extern void __spin_yield(arch_spinlock_t *lock);
> +extern void __rw_yield(arch_rwlock_t *lock);
> +#else /* SPLPAR */
> +#define __spin_yield(x) barrier()
> +#define __rw_yield(x) barrier()
> +#define SHARED_PROCESSOR 0
> +#endif
> +
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> +#include <asm/qspinlock.h>
> +#e...
2015 Feb 09
2
[PATCH V2] x86 spinlock: Fix memory corruption on completing completions
...ry read. (Sasha) Patch has passed stress testing.

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f..7fc50d7 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -49,6 +49,23 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
 }

+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+	arch_spinlock_t old, new;
+	__ticket_t diff;
+
+	old.tickets = READ_ONCE(lock->tickets);
+	diff = (old.tickets.tail & ~TICKET_SLOW...
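The snippet cuts off mid-expression, but the logic it is building is visible from the declarations: clear the stolen slowpath bit only when exactly one ticket increment separates head and tail, i.e. the holder is alone and no waiter could be blocked in the hypervisor. A minimal userspace model of that check, with C11 atomics standing in for the kernel's READ_ONCE()/cmpxchg() and an assumed layout (head in the low byte, tail in the high byte, tickets advancing by 2) - a sketch, not the verbatim patch:

#include <stdatomic.h>
#include <stdint.h>

typedef uint8_t __ticket_t;

#define TICKET_LOCK_INC       ((__ticket_t)2)  /* tickets count in steps of 2...   */
#define TICKET_SLOWPATH_FLAG  ((__ticket_t)1)  /* ...so the LSB is free for a flag */

/* head in the low byte, tail in the high byte of one 16-bit lock word */
static _Atomic uint16_t head_tail;

static void check_and_clear_slowpath(void)
{
	uint16_t old = atomic_load(&head_tail);
	__ticket_t head = old & 0xff;
	__ticket_t tail = old >> 8;
	__ticket_t diff = (__ticket_t)((tail & ~TICKET_SLOWPATH_FLAG) - head);

	/* Only when the holder is the sole ticket owner may the flag be
	 * cleared; the compare-exchange catches a waiter racing in. */
	if ((tail & TICKET_SLOWPATH_FLAG) && diff == TICKET_LOCK_INC) {
		uint16_t cleared = (uint16_t)(((uint16_t)(tail & ~TICKET_SLOWPATH_FLAG) << 8) | head);
		atomic_compare_exchange_strong(&head_tail, &old, cleared);
	}
}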
2015 Feb 06
10
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...1 file changed, 34 insertions(+), 36 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f..0829f86 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -49,6 +49,23 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
 }

+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+	arch_spinlock_t old, new;
+	__ticket_t diff;
+
+	old.tickets = READ_ONCE(lock->tickets);
+	diff = (old.tickets.tail & ~TICKET_SLOW...
2015 Feb 08
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...ns(+), 36 deletions(-)
>
> diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
> index 625660f..0829f86 100644
> --- a/arch/x86/include/asm/spinlock.h
> +++ b/arch/x86/include/asm/spinlock.h
> @@ -49,6 +49,23 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
>  	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
> }
>
> +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
> +{
> +	arch_spinlock_t old, new;
> +	__ticket_t diff;
> +
> +	old.tickets = READ_ONCE(lock->tickets);...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...f arch_spin_lock_flags
> +#undef arch_spin_unlock
> +#define arch_spin_trylock arch_spin_trylock
> +#define arch_spin_lock arch_spin_lock
> +#define arch_spin_lock_flags arch_spin_lock_flags
> +#define arch_spin_unlock arch_spin_unlock
> +
> +static inline int arch_spin_trylock(arch_spinlock_t *lock)
> +{
> +	CLEAR_IO_SYNC;
> +	return queued_spin_trylock(lock);
> +}
> +
> +static inline void arch_spin_lock(arch_spinlock_t *lock)
> +{
> +	CLEAR_IO_SYNC;
> +	queued_spin_lock(lock);
> +}
> +
> +static inline
> +void arch_spin_lock_flags(arch_spinlock_t...
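The excerpt ends inside arch_spin_lock_flags. Judging from the pattern of the two visible wrappers, the remaining pair presumably completes the same way - this is a reconstruction from that pattern, not the verbatim patch (SYNC_IO being the release-side counterpart of CLEAR_IO_SYNC on powerpc, ordering MMIO done under the lock before it is dropped):

/* Reconstructed continuation (assumption, not the patch text) */
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	CLEAR_IO_SYNC;
	queued_spin_lock(lock);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;	/* order MMIO accesses before releasing the lock */
	queued_spin_unlock(lock);
}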
2020 Jul 06
0
[PATCH v3 3/6] powerpc: move spinlock implementation to simple_spinlock
...00yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
+#define LOCK_TOKEN 1
+#endif
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	smp_mb();
+	return !arch_spin_value_unlocked(*lock);
+}
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline...
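To make the token scheme concrete: the lock word is 0 when free and holds a per-CPU token when taken, so a spinner on a shared-processor LPAR can tell which vCPU holds the lock and yield to it. A minimal userspace model, with C11 atomics standing in for the kernel's larx/stcx. loop and an illustrative token layout (the real token comes from the paca, as in the LOCK_TOKEN macros above):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic uint32_t slock; } spinlock_model_t;

/* Illustrative token: top bit set plus the CPU number, matching the
 * "0x8000..yy when locked" shape described in the excerpt's comment. */
static inline uint32_t lock_token(uint32_t cpu)
{
	return 0x80000000u | cpu;
}

/* Returns the old lock value, so 0 means the lock was acquired --
 * the same convention as the trylock helper the excerpt leads into. */
static inline uint32_t trylock_model(spinlock_model_t *l, uint32_t cpu)
{
	uint32_t expected = 0;

	if (atomic_compare_exchange_strong_explicit(&l->slock, &expected,
						    lock_token(cpu),
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;
	return expected;	/* holder's token: identifies whom to yield to */
}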
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...ock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -52,6 +52,20 @@
 #define SYNC_IO
 #endif

+#if defined(CONFIG_PPC_SPLPAR)
+/* We only yield to the hypervisor if we are in shared processor mode */
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
 static __always_inline int arch_spin_value_un...
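For context on how these hooks get used: in the pre-qspinlock powerpc code, the spin loop lowers SMT thread priority and, on a shared-processor partition, donates its remaining timeslice to the lock holder's vCPU rather than spinning uselessly. Paraphrased from the classic arch_spin_lock of that era (background, not part of this patch):

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();		/* lower SMT thread priority */
			if (SHARED_PROCESSOR)
				__spin_yield(lock);	/* give cycles to the holder */
		} while (unlikely(lock->slock != 0));
		HMT_medium();			/* restore priority and retry */
	}
}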
2010 Nov 16
23
[PATCH 00/14] PV ticket locks without expanding spinlock
...orporation of various review comments. The latter part of the series converts from pv spinlocks to pv ticket locks (i.e., using the ticket lock fastpath as-is, but adding pv ops for the ticketlock slowpaths). The significant difference here is that rather than adding a new ticket_t-sized element to arch_spinlock_t - effectively doubling the size - I steal the LSB of the tickets themselves to store a bit. This allows the structure to remain the same size, but at the cost of halving the max number of CPUs (127 for an 8-bit ticket, and a hard max of 32767 overall). The extra bit (well, two, but one is unused)...
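Laid out as a struct, the scheme the cover letter describes looks roughly like this - a sketch under the stated assumptions (8-bit tickets whose LSB is stolen as the slowpath flag, so tickets count in steps of two):

#include <stdint.h>

typedef uint8_t __ticket_t;	/* the 8-bit "small ticket" case */

#define TICKET_LOCK_INC       ((__ticket_t)2)	/* tickets advance by 2...          */
#define TICKET_SLOWPATH_FLAG  ((__ticket_t)1)	/* ...freeing the LSB for the flag  */

typedef union {
	uint16_t head_tail;		/* lets cmpxchg update both halves at once */
	struct {
		__ticket_t head;	/* ticket currently being served */
		__ticket_t tail;	/* next ticket to hand out; LSB = "waiter blocked" */
	} tickets;
} arch_spinlock_t;			/* same 2-byte size as before; max 127 CPUs */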
2010 Nov 03
25
[PATCH 00/20] x86: ticket lock rewrite and paravirtualization
From: Jeremy Fitzhardinge <jeremy.fitzhardinge at citrix.com>

Hi all,

This series does two major things:

1. It converts the bulk of the implementation to C, and makes the "small ticket" and "large ticket" code common. Only the actual size-dependent asm instructions are specific to the ticket size. The resulting generated asm is very similar to the current
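The idea in point 1 - one C body, with only the ticket width varying - can be sketched in portable C11. This illustrates the approach, not the series' actual code, which uses size-switched asm for the xadd; CONFIG_NR_CPUS is stubbed for the sketch:

#include <stdatomic.h>
#include <stdint.h>

#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 64	/* assumption for this sketch */
#endif

#if CONFIG_NR_CPUS < 256
typedef uint8_t  __ticket_t;	/* "small ticket" */
#else
typedef uint16_t __ticket_t;	/* "large ticket" */
#endif

typedef struct {
	_Atomic __ticket_t head;	/* ticket now being served  */
	_Atomic __ticket_t tail;	/* next ticket to hand out  */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* The fetch-add (xadd on x86) is the only size-dependent
	 * instruction; everything else is common C. */
	__ticket_t me = atomic_fetch_add_explicit(&l->tail, 1,
						  memory_order_relaxed);
	while (atomic_load_explicit(&l->head, memory_order_acquire) != me)
		;	/* spin; the real series adds cpu_relax() and,
			 * later, pv slowpath hooks here */
}

static void ticket_unlock(ticket_lock_t *l)
{
	atomic_fetch_add_explicit(&l->head, 1, memory_order_release);
}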
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...dif
> >
> > +#if defined(CONFIG_PPC_SPLPAR)
> > +/* We only yield to the hypervisor if we are in shared processor mode */
> > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
> > +extern void __spin_yield(arch_spinlock_t *lock);
> > +extern void __rw_yield(arch_rwlock_t *lock);
> > +#else /* SPLPAR */
> > +#define __spin_yield(x) barrier()
> > +#define __rw_yield(x) barrier()
> > +#define SHARED_PROCESSOR 0
> > +#endif
> > +
> > +#ifdef CONFIG_QUEUED_SPINLOCKS
> ...