search for: pv_lock_hash_bit

Displaying 13 unique results from an estimated 23 matches for "pv_lock_hash_bit".

Did you mean: pv_lock_hash_bits
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...page-size aligned > >>+ * and hence cacheline aligned. > >>+ */ > >>+ pv_lock_hash = alloc_large_system_hash("PV qspinlock", > >>+ sizeof(struct pv_hash_bucket), > >>+ pv_hash_size, 0, HASH_EARLY, > >>+ &pv_lock_hash_bits, NULL, > >>+ pv_hash_size, pv_hash_size); > > pv_taps = lfsr_taps(pv_lock_hash_bits); > > > > I don't understand what you meant here. Let me explain (even though I propose taking all the LFSR stuff out). pv_lock_hash_bit is a runtime variable, therefore...
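The point of contention in this thread: pv_lock_hash_bits is filled in at boot by alloc_large_system_hash(), so the number of hash bits is only known at runtime, and the LFSR taps cannot be precomputed at compile time. A minimal sketch of the quoted allocation, reassembled from the fragments in these results; the sizing line is borrowed from the v16 snippets further down and is an assumption for v15:

    static struct pv_hash_bucket *pv_lock_hash;
    static unsigned int pv_lock_hash_bits __read_mostly;

    void __init __pv_init_lock_hash(void)
    {
            /* roughly four hash entries per possible CPU (the v16 heuristic) */
            int pv_hash_size = 4 * num_possible_cpus() / PV_HB_PER_LINE;

            /*
             * Allocate space from bootmem, which is page-size aligned and
             * hence cacheline aligned; &pv_lock_hash_bits receives the
             * computed table shift -- a runtime value.
             */
            pv_lock_hash = alloc_large_system_hash("PV qspinlock",
                                                   sizeof(struct pv_hash_bucket),
                                                   pv_hash_size, 0, HASH_EARLY,
                                                   &pv_lock_hash_bits, NULL,
                                                   pv_hash_size, pv_hash_size);
    }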
2015 Mar 19
4
[PATCH 8/9] qspinlock: Generic paravirt support
...all buckets in the + * same cacheline. + * + * http://en.wikipedia.org/wiki/Hash_table#Open_addressing + * + */ + +#define HB_RESERVED ((struct qspinlock *)1) + +struct pv_hash_bucket { + struct qspinlock *lock; + int cpu; +}; + +/* + * XXX dynamic allocate using nr_cpu_ids instead... + */ +#define PV_LOCK_HASH_BITS (2 + NR_CPUS_BITS) + +#if PV_LOCK_HASH_BITS < 6 +#undef PV_LOCK_HASH_BITS +#define PB_LOCK_HASH_BITS 6 +#endif + +#define PV_LOCK_HASH_SIZE (1 << PV_LOCK_HASH_BITS) + +static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned; + +#define PV_HB_PER_LINE (SMP_CAC...
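Cleaned up for readability, the compile-time sizing being quoted is the following; note that "#define PB_LOCK_HASH_BITS 6" in the posted patch is evidently a typo for PV_LOCK_HASH_BITS (as posted, the #if branch would leave PV_LOCK_HASH_BITS undefined):

    #define PV_LOCK_HASH_BITS	(2 + NR_CPUS_BITS)

    #if PV_LOCK_HASH_BITS < 6
    #undef  PV_LOCK_HASH_BITS
    #define PV_LOCK_HASH_BITS	6	/* the posted patch typo'd this as PB_LOCK_HASH_BITS */
    #endif

    #define PV_LOCK_HASH_SIZE	(1 << PV_LOCK_HASH_BITS)

    static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;

    /* open addressing: probe all buckets that share one cacheline */
    #define PV_HB_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))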
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...> + struct qspinlock *lock; > + struct pv_node *node; > +}; > +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket)) > +#define HB_RESERVED ((struct qspinlock *)1) This is unused. > + > +static struct pv_hash_bucket *pv_lock_hash; > +static unsigned int pv_lock_hash_bits __read_mostly; static unsigned int pv_taps __read_mostly; > + > +#include <linux/hash.h> > +#include <linux/lfsr.h> > +#include <linux/bootmem.h> > + > +/* > + * Allocate memory for the PV qspinlock hash buckets > + * > + * This function should be cal...
2015 Apr 02
3
[PATCH 8/9] qspinlock: Generic paravirt support
On Thu, Apr 02, 2015 at 12:28:30PM -0400, Waiman Long wrote: > On 04/01/2015 05:03 PM, Peter Zijlstra wrote: > >On Wed, Apr 01, 2015 at 03:58:58PM -0400, Waiman Long wrote: > >>On 04/01/2015 02:48 PM, Peter Zijlstra wrote: > >>I am sorry that I don't quite get what you mean here. My point is that in > >>the hashing step, a cpu will need to scan an empty
2015 Apr 02
0
[PATCH 8/9] qspinlock: Generic paravirt support
...of probing just the immediate bucket we probe all buckets in the + * same cacheline. + * + * http://en.wikipedia.org/wiki/Hash_table#Open_addressing + * + */ + +struct pv_hash_bucket { + struct qspinlock *lock; + int cpu; +}; + +/* + * XXX dynamic allocate using nr_cpu_ids instead... + */ +#define PV_LOCK_HASH_BITS (2 + NR_CPUS_BITS) + +#if PV_LOCK_HASH_BITS < 6 +#undef PV_LOCK_HASH_BITS +#define PB_LOCK_HASH_BITS 6 +#endif + +#define PV_LOCK_HASH_SIZE (1 << PV_LOCK_HASH_BITS) + +static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned; + +#define PV_HB_PER_LINE (SMP_CAC...
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...>> +}; >> +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket)) >> +#define HB_RESERVED ((struct qspinlock *)1) > This is unused. You are right, I will remove that. >> + >> +static struct pv_hash_bucket *pv_lock_hash; >> +static unsigned int pv_lock_hash_bits __read_mostly; > static unsigned int pv_taps __read_mostly; It will depend on whether we keep the lfsr code or not. >> + >> +#include<linux/hash.h> >> +#include<linux/lfsr.h> >> +#include<linux/bootmem.h> >> + >> +/* >> + * Allocate...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...AX_BITS LFSR_MIN_BITS +#endif + +struct pv_hash_bucket { + struct qspinlock *lock; + struct pv_node *node; +}; +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket)) +#define HB_RESERVED ((struct qspinlock *)1) + +static struct pv_hash_bucket *pv_lock_hash; +static unsigned int pv_lock_hash_bits __read_mostly; + +#include <linux/hash.h> +#include <linux/lfsr.h> +#include <linux/bootmem.h> + +/* + * Allocate memory for the PV qspinlock hash buckets + * + * This function should be called from the paravirt spinlock initialization + * routine. + */ +void __init __pv_init_loc...
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...ash_entry)) +#define PV_HB_MIN (PAGE_SIZE / sizeof(struct pv_hash_bucket)) + +struct pv_hash_entry { + struct qspinlock *lock; + struct pv_node *node; +}; + +struct pv_hash_bucket { + struct pv_hash_entry ent[PV_HE_PER_LINE]; +}; + +static struct pv_hash_bucket *pv_lock_hash; +static unsigned int pv_lock_hash_bits __read_mostly; + +/* + * Allocate memory for the PV qspinlock hash buckets + * + * This function should be called from the paravirt spinlock initialization + * routine. + */ +void __init __pv_init_lock_hash(void) +{ + int pv_hash_size = 4 * num_possible_cpus() / PV_HE_PER_LINE; + + if (pv_hash_siz...
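A worked sizing example for the v16 layout quoted above, assuming 64-byte cachelines and 8-byte pointers (so sizeof(struct pv_hash_entry) == 16 and PV_HE_PER_LINE == 4): with 64 possible CPUs, 4 * num_possible_cpus() / PV_HE_PER_LINE == 64, presumably counting buckets given the ent[PV_HE_PER_LINE] grouping -- 64 cacheline-sized buckets holding 256 entries, i.e. four entries per possible CPU in 4KB of table. PV_HB_MIN is presumably a floor of one page's worth of buckets; the comparison itself is truncated in the snippet.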
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...pr 06, 2015 at 10:55:44PM -0400, Waiman Long wrote: > > +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket)) > > +static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) > > +{ > > + unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits); > > + struct pv_hash_bucket *hb, *end; > > + > > + if (!hash) > > + hash = 1; > > + > > + init_hash = hash; > > + hb = &pv_lock_hash[hash_align(hash)]; > > + for (;;) { > > + for (end = hb + PV_HB_PER_LINE; hb < end; hb++) { > >...
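Cleaned up, the v15 lookup loop under review reads as below; the inner-loop body that claims an empty bucket, and the step to the next cacheline, are cut off in the search snippet and left elided here:

    static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
    {
            unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
            struct pv_hash_bucket *hb, *end;

            if (!hash)
                    hash = 1;	/* a maximal-length LFSR never takes the value 0 */

            init_hash = hash;	/* remember the start to detect a full wrap */
            hb = &pv_lock_hash[hash_align(hash)];
            for (;;) {
                    /* probe every bucket sharing the hashed cacheline */
                    for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
                            /* ... claim an empty bucket and return &hb->lock;
                               truncated in the search snippet ... */
                    }
                    /* ... advance hash to the next cacheline ... */
            }
    }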
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
On 03/16/2015 09:16 AM, Peter Zijlstra wrote: > Implement simple paravirt support for the qspinlock. > > Provide a separate (second) version of the spin_lock_slowpath for > paravirt along with a special unlock path. > > The second slowpath is generated by adding a few pv hooks to the > normal slowpath, but where those will compile away for the native > case, they expand
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...down. + * + */ +#define PV_HE_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry)) +#define PV_HE_MIN (PAGE_SIZE / sizeof(struct pv_hash_entry)) + +struct pv_hash_entry { + struct qspinlock *lock; + struct pv_node *node; +}; + +static struct pv_hash_bucket *pv_lock_hash; +static unsigned int pv_lock_hash_bits __read_mostly; + +/* + * Allocate memory for the PV qspinlock hash buckets + * + * This function should be called from the paravirt spinlock initialization + * routine. + */ +void __init __pv_init_lock_hash(void) +{ + int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE); + + if (pv_ha...
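Note the sizing difference from the Apr 24 posting above: here pv_hash_size is rounded up with ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE) rather than divided by PV_HE_PER_LINE, which suggests it now counts entries rather than buckets. For example, assuming 128-byte cachelines and 16-byte entries (PV_HE_PER_LINE == 8) and 5 possible CPUs, ALIGN(20, 8) == 24 entries, i.e. exactly three full buckets.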
2015 Apr 07
18
[PATCH v15 00/15] qspinlock: a 4-byte queue spinlock with PV support
v14->v15: - Incorporate PeterZ's v15 qspinlock patch and improve upon the PV qspinlock code by dynamically allocating the hash table as well as some other performance optimization. - Simplified the Xen PV qspinlock code as suggested by David Vrabel <david.vrabel at citrix.com>. - Add benchmarking data for 3.19 kernel to compare the performance of a spinlock heavy test
2015 Apr 24
16
[PATCH v16 00/14] qspinlock: a 4-byte queue spinlock with PV support
v15->v16: - Remove the lfsr patch and use linear probing as lfsr is not really necessary in most cases. - Move the paravirt PV_CALLEE_SAVE_REGS_THUNK code to an asm header. - Add a patch to collect PV qspinlock statistics which also supersedes the PV lock hash debug patch. - Add PV qspinlock performance numbers. v14->v15: - Incorporate PeterZ's v15 qspinlock patch and improve