search for: _q_tail_cpu_offset

Displaying 20 results from an estimated 39 matches for "_q_tail_cpu_offset".

2014 Jun 15
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...)\
+					      << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET	0
+#define _Q_LOCKED_BITS		8
+#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)
+
+#define _Q_TAIL_IDX_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#define _Q_TAIL_IDX_BITS	2
+#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
+
+#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
Index: linux-2.6/kernel/Kconfig.locks
================...
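For orientation, a minimal standalone sketch (a hypothetical userspace
program, not part of the patch) of the lock-word layout these macros
produce: an 8-bit locked byte at bit 0, a 2-bit node index at bit 8, and
the remaining 22 bits for the tail CPU.

#include <stdint.h>
#include <stdio.h>

#define _Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1) \
				 << _Q_ ## type ## _OFFSET)

#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_TAIL_IDX_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

int main(void)
{
	/* Non-overlapping fields covering all 32 bits of the lock word. */
	printf("LOCKED_MASK   = 0x%08x\n", _Q_LOCKED_MASK);	/* 0x000000ff */
	printf("TAIL_IDX_MASK = 0x%08x\n", _Q_TAIL_IDX_MASK);	/* 0x00000300 */
	printf("TAIL_CPU_MASK = 0x%08x\n", _Q_TAIL_CPU_MASK);	/* 0xfffffc00 */
	return 0;
}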
2015 Mar 16
0
[PATCH 1/9] qspinlock: A simple generic 4-byte queue spinlock
...)\
+					      << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET	0
+#define _Q_LOCKED_BITS		8
+#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)
+
+#define _Q_TAIL_IDX_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#define _Q_TAIL_IDX_BITS	2
+#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
+
+#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks...
2014 Jun 17
3
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...| 58 +++++++++++++++++++++-------------
>  2 files changed, 38 insertions(+), 22 deletions(-)
>
> --- a/include/asm-generic/qspinlock_types.h
> +++ b/include/asm-generic/qspinlock_types.h
> @@ -61,6 +61,8 @@ typedef struct qspinlock {
>  #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
>  #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
>
> +#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
> +
>  #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
>  #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
>
> --- a/kernel/locking/qspinlock....
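The new mask is what lets the slowpath swap in a new queue tail without
disturbing the lock owner. A hedged userspace analogue (C11 atomics in
place of the kernel's atomic_t; the mask value assumes the 1-bit-pending
layout, so it is illustrative only) of the xchg_tail() helper this patch
factors out:

#include <stdatomic.h>
#include <stdint.h>

/* _Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK: bits 9..31 with 1-bit pending. */
#define _Q_TAIL_MASK	0xfffffe00u

/*
 * Publish our tail code word and return the previous one, leaving the
 * locked and pending bits untouched.
 */
static uint32_t xchg_tail(_Atomic uint32_t *lock, uint32_t tail)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t new;

	do {
		new = (old & ~_Q_TAIL_MASK) | tail;
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, new,
			memory_order_acq_rel, memory_order_relaxed));

	return old & _Q_TAIL_MASK;	/* 0 means the queue was empty */
}

int main(void)
{
	_Atomic uint32_t lock = 1;		/* locked byte held, no queue */
	uint32_t tail = 1u << 11;		/* cpu 0 (biased to 1), idx 0 */

	uint32_t prev = xchg_tail(&lock, tail);
	return (prev == 0 && atomic_load(&lock) == (tail | 1)) ? 0 : 1;
}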
2014 Jun 15
0
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...kernel/locking/qspinlock.c | 58 +++++++++++++++++++++-------------
 2 files changed, 38 insertions(+), 22 deletions(-)

--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -61,6 +61,8 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

+#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -8...
2014 Jun 16
4
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...k, mcs_nodes[4]);
> +
> +/*
> + * We must be able to distinguish between no-tail and the tail at 0:0,
> + * therefore increment the cpu number by one.
> + */
> +
> +static inline u32 encode_tail(int cpu, int idx)
> +{
> +	u32 tail;
> +
> +	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
> +	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

Should there be an ASSERT(idx < 4) here, just in case we screw up somehow?
(I can't figure out how, but that is partially why ASSERTs are added.)

> +
> +	return tail;
> +}
> +
> +static inline struct mcs_spinlock...
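The +1 bias is the whole trick: it keeps the encoding of (cpu 0, idx 0)
distinct from the all-zeroes "no tail" value. A self-contained sketch of
the round trip (offsets hardcoded to this version's layout, purely for
illustration):

#include <assert.h>
#include <stdint.h>

#define _Q_TAIL_IDX_OFFSET	8
#define _Q_TAIL_IDX_MASK	(3U << _Q_TAIL_IDX_OFFSET)
#define _Q_TAIL_CPU_OFFSET	10

/* cpu is stored biased by one so that tail == 0 always means "empty". */
static uint32_t encode_tail(int cpu, int idx)
{
	return ((uint32_t)(cpu + 1) << _Q_TAIL_CPU_OFFSET) |
	       ((uint32_t)idx << _Q_TAIL_IDX_OFFSET);
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> _Q_TAIL_CPU_OFFSET) - 1;
	*idx = (int)((tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET);
}

int main(void)
{
	int cpu, idx;

	decode_tail(encode_tail(0, 0), &cpu, &idx);
	assert(cpu == 0 && idx == 0);
	assert(encode_tail(0, 0) != 0);	/* distinguishable from "no tail" */
	return 0;
}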
2014 Apr 17
0
[PATCH v9 04/19] qspinlock: Extract out the exchange of tail code word
...deletions(-)

diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index bd25081..ed5d89a 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -61,6 +61,8 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

+#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
in...
2014 May 07
0
[PATCH v10 08/19] qspinlock: Make a new qnode structure to support virtualization
...* We must be able to distinguish between no-tail and the tail at 0:0,
@@ -79,12 +88,12 @@ static inline u32 encode_tail(int cpu, int idx)
 	return tail;
 }

-static inline struct mcs_spinlock *decode_tail(u32 tail)
+static inline struct qnode *decode_tail(u32 tail)
 {
 	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
 	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

-	return per_cpu_ptr(&mcs_nodes[idx], cpu);
+	return per_cpu_ptr(&qnodes[idx], cpu);
 }

 #define _Q_LOCKED_PENDING_MASK	(_Q_LOCKED_MASK | _Q_PENDING_MASK)
@@ -342,7 +351,7 @@ static inline int trylock_pending(...
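per_cpu_ptr() has no userspace equivalent, but a plain two-dimensional
array shows the shape of the lookup: the tail code word names exactly one
of the four queue nodes owned by one CPU. A sketch (NR_CPUS, the offsets
and the struct fields are illustrative, not the patch's definitions):

#include <stdint.h>

#define _Q_TAIL_IDX_OFFSET	8
#define _Q_TAIL_IDX_MASK	(3U << _Q_TAIL_IDX_OFFSET)
#define _Q_TAIL_CPU_OFFSET	10

#define NR_CPUS		64
#define MAX_NODES	4

struct qnode {
	struct qnode *next;
	int locked;
};

/* Stand-in for the per-CPU qnodes[] array in the patch. */
static struct qnode qnodes[NR_CPUS][MAX_NODES];

static struct qnode *decode_tail(uint32_t tail)
{
	int cpu = (int)(tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (int)((tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET);

	return &qnodes[cpu][idx];	/* ~ per_cpu_ptr(&qnodes[idx], cpu) */
}

int main(void)
{
	uint32_t tail = (uint32_t)(3 + 1) << _Q_TAIL_CPU_OFFSET; /* cpu 3, idx 0 */

	return decode_tail(tail) == &qnodes[3][0] ? 0 : 1;
}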
2014 Mar 03
5
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...y differs between our versions. I'll try and slot your version in
tomorrow.

---
/*
 * Exactly fills one cacheline on 64bit.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);

static inline u32 encode_tail(int cpu, int idx)
{
	u32 code;

	code  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	code |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return code;
}

static inline struct mcs_spinlock *decode_tail(u32 code)
{
	int cpu = (code >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (code >> _Q_TAIL_IDX_OFFSET) & _Q_TAIL_IDX_MASK;

	return per_cpu_ptr(&mcs_nodes[id...
2014 Jun 15
28
[PATCH 00/11] qspinlock with paravirt support
Since Waiman seems incapable of doing simple things; here's my take on the paravirt crap. The first few patches are taken from Waiman's latest series, but the virt support is completely new. Its primary aim is to not mess up the native code. I've not stress tested it, but the virt and paravirt (kvm) cases boot on simple smp guests. I've not done Xen, but the patch should be
2014 Feb 28
5
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
On Thu, Feb 27, 2014 at 03:42:19PM -0500, Waiman Long wrote:
> >>+	old = xchg(&qlock->lock_wait, _QSPINLOCK_WAITING|_QSPINLOCK_LOCKED);
> >>+
> >>+	if (old == 0) {
> >>+		/*
> >>+		 * Got the lock, can clear the waiting bit now
> >>+		 */
> >>+		smp_u8_store_release(&qlock->wait, 0);
> >
> > >So we just did an
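The exchange above grabs the lock and the waiting bit in one shot; when
the lock turns out to have been free (old == 0), only the waiting byte
needs to be cleared again. A userspace model of that two-byte dance
(little-endian layout as in the x86 patch; the union punning mirrors the
kernel trick and is a sketch, not strictly portable C):

#include <stdatomic.h>
#include <stdint.h>

#define _QSPINLOCK_LOCKED	0x0001u	/* low byte: lock holder */
#define _QSPINLOCK_WAITING	0x0100u	/* high byte: pending waiter */

union qlock {
	_Atomic uint16_t lock_wait;	/* both bytes in one access */
	struct {
		_Atomic uint8_t lock;
		_Atomic uint8_t wait;
	};
};

static void two_task_slowpath(union qlock *q)
{
	uint16_t old = atomic_exchange(&q->lock_wait,
				       _QSPINLOCK_WAITING | _QSPINLOCK_LOCKED);

	if (old == 0) {
		/* Got the lock; drop the waiting bit with a release store. */
		atomic_store_explicit(&q->wait, 0, memory_order_release);
		return;
	}
	/* ... otherwise spin until the owner releases (elided) ... */
}

int main(void)
{
	union qlock q = { .lock_wait = 0 };

	two_task_slowpath(&q);	/* uncontended: acquires, clears waiting */
	return atomic_load(&q.lock_wait) == _QSPINLOCK_LOCKED ? 0 : 1;
}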
2014 Jun 18
0
[PATCH 04/11] qspinlock: Extract out the exchange of tail code word
...+++++++++++++++-------------
>>  2 files changed, 38 insertions(+), 22 deletions(-)
>>
>> --- a/include/asm-generic/qspinlock_types.h
>> +++ b/include/asm-generic/qspinlock_types.h
>> @@ -61,6 +61,8 @@ typedef struct qspinlock {
>>  #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
>>  #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
>>
>> +#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
>> +
>>  #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
>>  #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
>>
>> ---...
2014 Jun 15
0
[PATCH 05/11] qspinlock: Optimize for smaller NR_CPUS
...CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS		8
+#else
 #define _Q_PENDING_BITS		1
+#endif
 #define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

 #define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
@@ -61,6 +73,7 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

+#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
 #define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -22,6 +22,7 @@
 #...
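The point of widening pending to a byte when NR_CPUS < 16384 is that the
tail then fits in the top 16 bits, so "clear pending, set locked" becomes
one plain halfword store instead of an atomic read-modify-write on the
whole word. A hedged sketch of that layout (little-endian, modeled on the
patch's struct overlay; WRITE_ONCE and barriers are omitted):

#include <stdint.h>

#define _Q_LOCKED_VAL	1u

/* byte 0 = locked, byte 1 = pending, bytes 2-3 = tail (idx + cpu) */
union qspinlock {
	uint32_t val;
	struct {
		uint16_t locked_pending;
		uint16_t tail;
	};
};

static void clear_pending_set_locked(union qspinlock *lock)
{
	/* pending byte -> 0 and locked byte -> 1, tail left untouched */
	lock->locked_pending = (uint16_t)_Q_LOCKED_VAL;
}

int main(void)
{
	union qspinlock lock = { .val = 0xabcd0100 };	/* tail=0xabcd, pending=1 */

	clear_pending_set_locked(&lock);
	return lock.val == 0xabcd0001 ? 0 : 1;	/* tail preserved, locked set */
}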
2014 Apr 17
0
[PATCH v9 05/19] qspinlock: Optimize for smaller NR_CPUS
...CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS		8
+#else
 #define _Q_PENDING_BITS		1
+#endif
 #define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

 #define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
@@ -61,6 +73,7 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

+#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
 #define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index fcf06cb..55...
2014 Jun 23
0
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...locked;	/* 1 if lock acquired */
> > +	int count;
>
> This could use a comment.

like so?

	int count; /* nesting count, see qspinlock.c */

> > +static inline u32 encode_tail(int cpu, int idx)
> > +{
> > +	u32 tail;
> > +
> > +	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
> > +	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
>
> Should there be an ASSERT(idx < 4) just in case we screw up somehow?
> (I can't figure out how, but that is partially why ASSERTs are added.)

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx...
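Putting both review suggestions together: a sketch in userspace C (the
four-context rationale is the standard one for sizing the per-CPU node
array; the assert stands in for the proposed CONFIG_DEBUG_SPINLOCK
BUG_ON, and the shift counts assume the layout of patch 01):

#include <assert.h>
#include <stdint.h>

/*
 * Four nodes per CPU because the slowpath can nest across at most four
 * contexts: task, softirq, hardirq and NMI. `count` tracks how many of
 * this CPU's nodes are currently in use.
 */
#define MAX_NODES	4

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked;	/* 1 if lock acquired */
	int count;	/* nesting count, see qspinlock.c */
};

static uint32_t encode_tail(int cpu, int idx)
{
	assert(idx < MAX_NODES);	/* debug-only bounds check */

	return ((uint32_t)(cpu + 1) << 10) | ((uint32_t)idx << 8);
}

int main(void)
{
	return encode_tail(0, 0) == (1u << 10) ? 0 : 1;	/* cpu 0 encodes as 1 */
}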
2014 Jun 18
1
[PATCH 05/11] qspinlock: Optimize for smaller NR_CPUS
...efine _Q_PENDING_BITS		8
> +#else
>  #define _Q_PENDING_BITS		1
> +#endif
>  #define _Q_PENDING_MASK	_Q_SET_MASK(PENDING)
>
>  #define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
> @@ -61,6 +73,7 @@ typedef struct qspinlock {
>  #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
>  #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
>
> +#define _Q_TAIL_OFFSET	_Q_TAIL_IDX_OFFSET
>  #define _Q_TAIL_MASK	(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
>
>  #define _Q_LOCKED_VAL	(1U << _Q_LOCKED_OFFSET)
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/...