search for: hash_align

Displaying 11 results from an estimated 16 matches for "hash_align".

2015 Mar 19
4
[PATCH 8/9] qspinlock: Generic paravirt support
..._LOCK_HASH_BITS
+#define PV_LOCK_HASH_BITS	6
+#endif
+
+#define PV_LOCK_HASH_SIZE	(1 << PV_LOCK_HASH_BITS)
+
+static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;
+
+#define PV_HB_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+
+static inline u32 hash_align(u32 hash)
+{
+	return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+static struct qspinlock **pv_hash(struct qspinlock *lock)
+{
+	u32 hash = hash_ptr(lock, PV_LOCK_HASH_BITS);
+	struct pv_hash_bucket *hb, *end;
+
+	if (!hash)
+		hash = 1;
+
+	hb = &__pv_lock_hash[hash_align(hash)];
+	for (;;) {
+		f...
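
To make the trick in this excerpt concrete, here is a minimal userspace sketch of hash_align(): it rounds a hash down to the first bucket of its cache line, so a probe can scan the PV_HB_PER_LINE buckets that share that line. The 64-byte line size and 16-byte bucket below are illustrative assumptions; the kernel derives these from SMP_CACHE_BYTES instead.

#include <stdint.h>
#include <stdio.h>

#define CACHE_BYTES	64	/* stands in for SMP_CACHE_BYTES, assumed */

struct bucket {
	void *lock;	/* key: lock address, NULL while the slot is free */
	void *node;	/* value: queue node for that lock */
};

#define HB_PER_LINE	(CACHE_BYTES / sizeof(struct bucket))

/* Round a hash down to the first bucket of its cache line. */
static inline uint32_t hash_align(uint32_t hash)
{
	return hash & ~(uint32_t)(HB_PER_LINE - 1);
}

int main(void)
{
	/* With 16-byte buckets, HB_PER_LINE is 4: hashes 4..7 share a line. */
	for (uint32_t h = 0; h < 8; h++)
		printf("hash %u -> first bucket of its line: %u\n",
		       (unsigned)h, (unsigned)hash_align(h));
	return 0;
}

The payoff of this layout is that after a hash collision the next candidate buckets usually sit on the cache line already fetched, so probing costs no extra misses.
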
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...("PV qspinlock", > + sizeof(struct pv_hash_bucket), > + pv_hash_size, 0, HASH_EARLY, > + &pv_lock_hash_bits, NULL, > + pv_hash_size, pv_hash_size); pv_taps = lfsr_taps(pv_lock_hash_bits); > +} > + > +static inline u32 hash_align(u32 hash) > +{ > + return hash & ~(PV_HB_PER_LINE - 1); > +} > + > +static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) > +{ > + unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits); > + struct pv_hash_bucket *hb, *end; > + &gt...
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...et),
>> +				       pv_hash_size, 0, HASH_EARLY,
>> +				       &pv_lock_hash_bits, NULL,
>> +				       pv_hash_size, pv_hash_size);
> pv_taps = lfsr_taps(pv_lock_hash_bits);

I don't understand what you meant here.

>> +}
>> +
>> +static inline u32 hash_align(u32 hash)
>> +{
>> +	return hash & ~(PV_HB_PER_LINE - 1);
>> +}
>> +
>> +static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
>> +{
>> +	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
>> +	struct pv_h...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...* and hence cacheline aligned.
+ */
+	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
+					       sizeof(struct pv_hash_bucket),
+					       pv_hash_size, 0, HASH_EARLY,
+					       &pv_lock_hash_bits, NULL,
+					       pv_hash_size, pv_hash_size);
+}
+
+static inline u32 hash_align(u32 hash)
+{
+	return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
+{
+	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
+	struct pv_hash_bucket *hb, *end;
+
+	if (!hash)
+		hash = 1;
+
+	init_hash = hash;...
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...lock, struct pv_node *node)
> > +{
> > +	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
> > +	struct pv_hash_bucket *hb, *end;
> > +
> > +	if (!hash)
> > +		hash = 1;
> > +
> > +	init_hash = hash;
> > +	hb = &pv_lock_hash[hash_align(hash)];
> > +	for (;;) {
> > +		for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
> > +			if (!cmpxchg(&hb->lock, NULL, lock)) {
> > +				WRITE_ONCE(hb->node, node);
> > +				/*
> > +				 * We haven't set the _Q_SLOW_VAL yet. So
> > +				 *...
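
The !cmpxchg(&hb->lock, NULL, lock) step above is what makes concurrent insertion safe: only one CPU can turn an empty slot into an owned one. A hedged userspace sketch of the same claim step, with C11 atomics standing in for the kernel's cmpxchg() and WRITE_ONCE() (struct and function names here are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct pvbucket {
	_Atomic(void *) lock;	/* claimed with compare-and-swap */
	void *node;		/* written only by the slot's owner */
};

/* Try to claim one bucket for (lock, node); returns nonzero on success. */
static int try_claim(struct pvbucket *hb, void *lock, void *node)
{
	void *expected = NULL;

	/*
	 * Succeeds only if the slot was still empty. This mirrors the
	 * "!cmpxchg(&hb->lock, NULL, lock)" test in the quoted patch,
	 * where cmpxchg() returns the old value of the slot.
	 */
	if (atomic_compare_exchange_strong(&hb->lock, &expected, lock)) {
		hb->node = node;	/* publish the value after owning the slot */
		return 1;
	}
	return 0;	/* someone else claimed it; caller probes the next slot */
}
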
2015 Apr 02
3
[PATCH 8/9] qspinlock: Generic paravirt support
On Thu, Apr 02, 2015 at 12:28:30PM -0400, Waiman Long wrote:
> On 04/01/2015 05:03 PM, Peter Zijlstra wrote:
> > On Wed, Apr 01, 2015 at 03:58:58PM -0400, Waiman Long wrote:
> > > On 04/01/2015 02:48 PM, Peter Zijlstra wrote:
> > > I am sorry that I don't quite get what you mean here. My point is that in
> > > the hashing step, a cpu will need to scan an empty
2015 Apr 02
0
[PATCH 8/9] qspinlock: Generic paravirt support
..._LOCK_HASH_BITS
+#define PV_LOCK_HASH_BITS	6
+#endif
+
+#define PV_LOCK_HASH_SIZE	(1 << PV_LOCK_HASH_BITS)
+
+static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;
+
+#define PV_HB_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+
+static inline u32 hash_align(u32 hash)
+{
+	return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+#define for_each_hash_bucket(hb, off, hash)					\
+	for (hash = hash_align(hash), off = 0, hb = &__pv_lock_hash[hash + off];\
+	     off < PV_LOCK_HASH_SIZE;						\
+	     off++, hb = &__pv_lock_hash[(hash + off) % PV_LOCK_HASH_...
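
The for_each_hash_bucket() macro above packs the whole traversal into one for statement: start at the cache-line-aligned hash, visit every bucket once, wrapping modulo the table size. A plain-loop sketch of the same visit order, under assumed sizes (a real table would use the patch's PV_LOCK_HASH_SIZE and SMP_CACHE_BYTES):

#include <stddef.h>

struct pvbucket { void *lock; void *node; };

#define HB_PER_LINE	4	/* assumed buckets per cache line */
#define HASH_SIZE	64	/* stands in for PV_LOCK_HASH_SIZE, assumed */

static struct pvbucket table[HASH_SIZE];

static inline unsigned int hash_align(unsigned int hash)
{
	return hash & ~(unsigned int)(HB_PER_LINE - 1);
}

/* Start at the aligned hash, visit every bucket once, wrapping around. */
static struct pvbucket *probe_free(unsigned int hash)
{
	unsigned int start = hash_align(hash);

	for (unsigned int off = 0; off < HASH_SIZE; off++) {
		struct pvbucket *hb = &table[(start + off) % HASH_SIZE];

		if (!hb->lock)	/* free slot found */
			return hb;
	}
	return NULL;		/* table full; the caller must handle this */
}
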
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
On 03/16/2015 09:16 AM, Peter Zijlstra wrote:
> Implement simple paravirt support for the qspinlock.
>
> Provide a separate (second) version of the spin_lock_slowpath for
> paravirt along with a special unlock path.
>
> The second slowpath is generated by adding a few pv hooks to the
> normal slowpath, but where those will compile away for the native
> case, they expand
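
A hedged sketch of the hook pattern this description refers to: in the native build the hooks are empty static inlines that the optimizer deletes entirely, while the paravirt build supplies real bodies before the slowpath is compiled a second time. The function names and the PARAVIRT_BUILD macro below are illustrative, not taken from the patch:

/* Native build: empty inlines, so the calls vanish at compile time. */
#ifndef PARAVIRT_BUILD
static inline void pv_wait_head(void *lock) { (void)lock; }
static inline void pv_kick_node(void *node) { (void)node; }
#else
/* Paravirt build: real hooks that halt and kick virtual CPUs. */
static void pv_wait_head(void *lock) { (void)lock; /* halt until kicked */ }
static void pv_kick_node(void *node) { (void)node; /* wake a halted vcpu */ }
#endif

void spin_lock_slowpath(void *lock, void *node)
{
	/* ... queue onto the MCS chain, spin on the predecessor ... */
	pv_wait_head(lock);	/* no-op in the native build */
	/* ... take the lock, hand off to the next waiter ... */
	pv_kick_node(node);	/* no-op in the native build */
}
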
2015 Apr 07
18
[PATCH v15 00/15] qspinlock: a 4-byte queue spinlock with PV support
v14->v15: - Incorporate PeterZ's v15 qspinlock patch and improve upon the PV qspinlock code by dynamically allocating the hash table as well as some other performance optimization. - Simplified the Xen PV qspinlock code as suggested by David Vrabel <david.vrabel at citrix.com>. - Add benchmarking data for 3.19 kernel to compare the performance of a spinlock heavy test
2015 Apr 01
0
[PATCH 8/9] qspinlock: Generic paravirt support
...ons(-)
>
> --- /dev/null
>
> +static int pv_hash_find(struct qspinlock *lock)
> +{
> +	u64 hash = hash_ptr(lock, PV_LOCK_HASH_BITS);
> +	struct pv_hash_bucket *hb, *end;
> +	int cpu = -1;
> +
> +	if (!hash)
> +		hash = 1;
> +
> +	hb = &__pv_lock_hash[hash_align(hash)];
> +	for (;;) {
> +		for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
> +			struct qspinlock *l = READ_ONCE(hb->lock);
> +
> +			/*
> +			 * If we hit an unused bucket, there is no match.
> +			 */
> +			if (!l)
> +				goto done;

After more careful reading,...
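
For reference, a self-contained sketch of the lookup logic quoted here, with illustrative sizes and names: the scan starts at the aligned hash and stops at the first empty bucket, on the premise that pv_hash() fills buckets in the same order.

#include <stddef.h>

struct pvbucket { void *lock; int cpu; };

#define HB_PER_LINE	4	/* assumed buckets per cache line */
#define HASH_SIZE	64	/* assumed table size */

static struct pvbucket table[HASH_SIZE];

static inline unsigned int hash_align(unsigned int hash)
{
	return hash & ~(unsigned int)(HB_PER_LINE - 1);
}

/* Return the cpu recorded for this lock, or -1 if it was never hashed. */
static int find_cpu(const void *lock, unsigned int hash)
{
	unsigned int start = hash_align(hash);

	for (unsigned int off = 0; off < HASH_SIZE; off++) {
		struct pvbucket *hb = &table[(start + off) % HASH_SIZE];

		if (!hb->lock)		/* unused bucket: no match can follow */
			return -1;
		if (hb->lock == lock)	/* entry inserted by the hashing side */
			return hb->cpu;
	}
	return -1;
}
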
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...OW_VAL is set and hash entry
+ * filled. With this state, the queue head CPU will always be kicked even
+ * if it is not halted to avoid potential racing condition.
+ */
 enum vcpu_state {
 	vcpu_running = 0,
 	vcpu_halted,
+	vcpu_hashed
 };

 struct pv_node {
@@ -97,7 +104,13 @@ static inline u32 hash_align(u32 hash)
 	return hash & ~(PV_HB_PER_LINE - 1);
 }

-static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
+/*
+ * Set up an entry in the lock hash table
+ * This is not inlined to reduce size of generated code as it is included
+ * twice and is used only in the slow...