Search results for "pv_lock_hash"
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...f
> +
> +struct pv_hash_bucket {
> +	struct qspinlock *lock;
> +	struct pv_node *node;
> +};
> +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
> +#define HB_RESERVED ((struct qspinlock *)1)
This is unused.
> +
> +static struct pv_hash_bucket *pv_lock_hash;
> +static unsigned int pv_lock_hash_bits __read_mostly;
static unsigned int pv_taps __read_mostly;
> +
> +#include <linux/hash.h>
> +#include <linux/lfsr.h>
> +#include <linux/bootmem.h>
> +
> +/*
> + * Allocate memory for the PV qspinlock hash buckets
&...
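For reference, the pv_hash_bucket layout quoted above is sized so that a whole cache line's worth of lock/node pairs can be scanned per probe. A minimal standalone sketch of that packing arithmetic (userspace C; the 64-byte line size and the stand-in struct bodies are assumptions, not taken from the patch):

/* Standalone sketch of the cache-line packing used above (not kernel code). */
#include <stdio.h>

#define SMP_CACHE_BYTES 64		/* assumed L1 cache-line size */

struct qspinlock { int val; };		/* stand-in for the real lock type */
struct pv_node   { int cpu; };		/* stand-in for the real MCS node  */

struct pv_hash_bucket {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))

int main(void)
{
	/* On a 64-byte line with 8-byte pointers, 4 buckets fit per line. */
	printf("buckets per cache line: %zu\n", PV_HB_PER_LINE);
	return 0;
}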
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...lock;
>> +	struct pv_node *node;
>> +};
>> +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
>> +#define HB_RESERVED ((struct qspinlock *)1)
> This is unused.
You are right, I will remove that.
>> +
>> +static struct pv_hash_bucket *pv_lock_hash;
>> +static unsigned int pv_lock_hash_bits __read_mostly;
> static unsigned int pv_taps __read_mostly;
It will depend on whether we keep the lfsr code or not.
>> +
>> +#include <linux/hash.h>
>> +#include <linux/lfsr.h>
>> +#include <linux/bootmem.h>...
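The "lfsr code" being discussed is the pseudo-random probe-step generator used in the earlier revisions of this series. As a rough illustration only, a Galois LFSR step looks like the standalone sketch below; the taps value is a textbook 8-bit example, not whatever the proposed <linux/lfsr.h> would compute:

/* Illustrative Galois LFSR step; the taps below are a maximal-length
 * example for an 8-bit register, chosen purely for demonstration. */
#include <stdio.h>
#include <stdint.h>

static uint32_t lfsr_step(uint32_t v, uint32_t taps)
{
	uint32_t lsb = v & 1;

	v >>= 1;
	if (lsb)
		v ^= taps;
	return v;
}

int main(void)
{
	uint32_t v = 1, taps = 0xB8;	/* maximal-length taps for 8 bits */
	int i;

	/* Print a few probe values; a maximal LFSR never reaches 0. */
	for (i = 0; i < 8; i++) {
		v = lfsr_step(v, taps);
		printf("%u ", v);
	}
	printf("\n");
	return 0;
}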
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...ndef LFSR_MAX_BITS
+#define LFSR_MAX_BITS LFSR_MIN_BITS
+#endif
+
+struct pv_hash_bucket {
+	struct qspinlock *lock;
+	struct pv_node *node;
+};
+#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+#define HB_RESERVED ((struct qspinlock *)1)
+
+static struct pv_hash_bucket *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+#include <linux/hash.h>
+#include <linux/lfsr.h>
+#include <linux/bootmem.h>
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * r...
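The table above is a power-of-two array indexed by a hash of the lock pointer. A standalone sketch of that mapping (userspace C; the multiplicative constant is the usual 64-bit golden ratio and merely stands in for the kernel's hash_ptr()):

/* Sketch of mapping a lock pointer to a bucket index; the multiplicative
 * constant stands in for what hash_ptr() does inside the kernel. */
#include <stdio.h>
#include <stdint.h>

static unsigned int hash_ptr_sketch(const void *ptr, unsigned int bits)
{
	uint64_t v = (uint64_t)(uintptr_t)ptr;

	return (unsigned int)((v * 0x61C8864680B583EBULL) >> (64 - bits));
}

int main(void)
{
	int some_lock;				/* pretend this is a qspinlock */
	unsigned int bits = 8;			/* pv_lock_hash_bits analogue  */
	unsigned int idx = hash_ptr_sketch(&some_lock, bits);

	printf("bucket index: %u of %u\n", idx, 1U << bits);
	return 0;
}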
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...MP_CACHE_BYTES / sizeof(struct pv_hash_entry))
+#define PV_HB_MIN (PAGE_SIZE / sizeof(struct pv_hash_bucket))
+
+struct pv_hash_entry {
+	struct qspinlock *lock;
+	struct pv_node *node;
+};
+
+struct pv_hash_bucket {
+	struct pv_hash_entry ent[PV_HE_PER_LINE];
+};
+
+static struct pv_hash_bucket *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * routine.
+ */
+void __init __pv_init_lock_hash(void)
+{
+	int pv_hash_size = 4 * num_possible_cpus(...
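The sizing above allots roughly four hash entries per possible CPU, with a page-sized floor. A standalone sketch of that arithmetic (constants hard-coded; num_possible_cpus() and the exact floor macro are kernel-side details):

/* Standalone sketch of the sizing quoted above: four hash entries per
 * possible CPU, with a floor of one page worth of entries. */
#include <stdio.h>

#define PAGE_SIZE	4096
#define ENTRY_SIZE	16	/* sizeof(struct pv_hash_entry) on 64-bit */
#define PV_HE_MIN	(PAGE_SIZE / ENTRY_SIZE)

int main(void)
{
	unsigned int ncpus = 64;	/* stand-in for num_possible_cpus() */
	unsigned int nentries = 4 * ncpus;

	if (nentries < PV_HE_MIN)
		nentries = PV_HE_MIN;
	printf("PV lock hash entries: %u\n", nentries);
	return 0;
}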
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...pr 06, 2015 at 10:55:44PM -0400, Waiman Long wrote:
> > +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
> > +static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
> > +{
> > +	unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
> > +	struct pv_hash_bucket *hb, *end;
> > +
> > +	if (!hash)
> > +		hash = 1;
> > +
> > +	init_hash = hash;
> > +	hb = &pv_lock_hash[hash_align(hash)];
> > +	for (;;) {
> > +		for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
>...
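To make the probing scheme above concrete, the toy below walks a flat bucket array one cache line's worth of entries at a time and claims the first empty slot. It is userspace, single-threaded C with hypothetical sizes; the real code claims a slot atomically (e.g. with cmpxchg()) rather than a plain store:

/* Toy version of the open-addressing insert sketched in the quote above:
 * scan PER_LINE buckets at a time, wrap at the end of the table. */
#include <stdio.h>
#include <stddef.h>

#define HASH_BITS	4
#define HASH_SIZE	(1U << HASH_BITS)
#define PER_LINE	4			/* stand-in for PV_HB_PER_LINE */

struct bucket {
	void *lock;
	void *node;
};

static struct bucket table[HASH_SIZE];

/* Align a hash to the first bucket of its cache line, like hash_align(). */
static unsigned int hash_align(unsigned int hash)
{
	return (hash & (HASH_SIZE - 1)) & ~(PER_LINE - 1);
}

static struct bucket *toy_pv_hash(void *lock, void *node, unsigned int hash)
{
	unsigned int idx = hash_align(hash);
	unsigned int i, j;

	for (i = 0; i < HASH_SIZE; i += PER_LINE) {
		unsigned int base = (idx + i) & (HASH_SIZE - 1);

		for (j = 0; j < PER_LINE; j++) {
			struct bucket *b = &table[base + j];

			if (!b->lock) {		/* empty slot: claim it */
				b->lock = lock;
				b->node = node;
				return b;
			}
		}
	}
	return NULL;				/* table full (toy only) */
}

int main(void)
{
	int lock, node;
	struct bucket *b = toy_pv_hash(&lock, &node, 7);

	printf("stored at index %td\n", b - table);
	return 0;
}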
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...>>+	if (pv_hash_size < (1U << LFSR_MIN_BITS))
> >>+		pv_hash_size = (1U << LFSR_MIN_BITS);
> >>+	/*
> >>+	 * Allocate space from bootmem which should be page-size aligned
> >>+	 * and hence cacheline aligned.
> >>+	 */
> >>+	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
> >>+					       sizeof(struct pv_hash_bucket),
> >>+					       pv_hash_size, 0, HASH_EARLY,
> >>+					       &pv_lock_hash_bits, NULL,
> >>+					       pv_hash_size, pv_hash_size);
> > pv_taps = lfsr_taps(p...
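The snippets here only show the insert path; the matching lookup that a woken vCPU would use is not quoted. Under the same linear-scan assumptions it could look roughly like the toy below, which is a guess for illustration and not the patch's actual pv_unhash():

/* Toy lookup ("unhash") matching the toy insert earlier: rescan from the
 * aligned start position until the lock pointer is found, then clear it. */
#include <stdio.h>

#define HASH_SIZE	16
#define PER_LINE	4

struct bucket { void *lock; void *node; };
static struct bucket table[HASH_SIZE];

static void *toy_pv_unhash(void *lock, unsigned int hash)
{
	unsigned int start = (hash & (HASH_SIZE - 1)) & ~(PER_LINE - 1);
	unsigned int i;

	for (i = 0; i < HASH_SIZE; i++) {
		struct bucket *b = &table[(start + i) & (HASH_SIZE - 1)];

		if (b->lock == lock) {
			void *node = b->node;

			b->lock = NULL;		/* free the slot for reuse */
			return node;
		}
	}
	return NULL;				/* not hashed (toy only) */
}

int main(void)
{
	int lock, node;

	table[5].lock = &lock;			/* pretend the insert put it here */
	table[5].node = &node;
	printf("found node? %s\n", toy_pv_unhash(&lock, 7) == &node ? "yes" : "no");
	return 0;
}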
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...t where open addressing
+ * breaks down.
+ *
+ */
+#define PV_HE_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
+#define PV_HE_MIN (PAGE_SIZE / sizeof(struct pv_hash_entry))
+
+struct pv_hash_entry {
+	struct qspinlock *lock;
+	struct pv_node *node;
+};
+
+static struct pv_hash_bucket *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * routine.
+ */
+void __init __pv_init_lock_hash(void)
+{
+	int pv_hash_size = ALIGN(4 * num_possible...
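In the v16 layout quoted above, a bucket is a full cache line of pv_hash_entry slots, so probing walks buckets and then the entries within each. A standalone toy of that double loop (userspace C, sizes hard-coded, no atomics):

/* Toy of the v16 bucket-of-entries layout: each bucket is one cache line
 * holding PER_LINE entries; probing walks buckets, then entries. */
#include <stdio.h>

#define NBUCKETS	8
#define PER_LINE	4		/* stand-in for PV_HE_PER_LINE */

struct entry  { void *lock; void *node; };
struct bucket { struct entry ent[PER_LINE]; };

static struct bucket table[NBUCKETS];

static struct entry *toy_hash_insert(void *lock, void *node, unsigned int hash)
{
	unsigned int b, e;

	for (b = 0; b < NBUCKETS; b++) {
		struct bucket *hb = &table[(hash + b) & (NBUCKETS - 1)];

		for (e = 0; e < PER_LINE; e++) {
			if (!hb->ent[e].lock) {
				hb->ent[e].lock = lock;
				hb->ent[e].node = node;
				return &hb->ent[e];
			}
		}
	}
	return NULL;			/* table full (toy only) */
}

int main(void)
{
	int lock, node;
	struct entry *ent = toy_hash_insert(&lock, &node, 3);

	printf("inserted: %s\n", ent ? "yes" : "no");
	return 0;
}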
2015 Apr 07
18
[PATCH v15 00/15] qspinlock: a 4-byte queue spinlock with PV support
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve upon the PV
qspinlock code by dynamically allocating the hash table as well
as making some other performance optimizations.
- Simplified the Xen PV qspinlock code as suggested by David Vrabel
<david.vrabel at citrix.com>.
- Add benchmarking data for 3.19 kernel to compare the performance
of a spinlock heavy test
2015 Apr 24
16
[PATCH v16 00/14] qspinlock: a 4-byte queue spinlock with PV support
v15->v16:
- Remove the lfsr patch and use linear probing as lfsr is not really
necessary in most cases.
- Move the paravirt PV_CALLEE_SAVE_REGS_THUNK code to an asm header.
- Add a patch to collect PV qspinlock statistics which also
supersedes the PV lock hash debug patch.
- Add PV qspinlock performance numbers.
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve