Results matching "pv_hb_per_lin":
2015 Mar 19 · 4 · [PATCH 8/9] qspinlock: Generic paravirt support
...#define PV_LOCK_HASH_BITS (2 + NR_CPUS_BITS)
+
+#if PV_LOCK_HASH_BITS < 6
+#undef PV_LOCK_HASH_BITS
+#define PV_LOCK_HASH_BITS 6
+#endif
+
+#define PV_LOCK_HASH_SIZE (1 << PV_LOCK_HASH_BITS)
+
+static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;
+
+#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+
+static inline u32 hash_align(u32 hash)
+{
+ return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+static struct qspinlock **pv_hash(struct qspinlock *lock)
+{
+ u32 hash = hash_ptr(lock, PV_LOCK_HASH_BITS);
+ struct pv_hash_bucket *hb, *end;
+
+ if (!...
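As a quick gloss on the snippet above: buckets are grouped so that one probe window never crosses a cacheline. A minimal user-space sketch, assuming 64-byte cachelines, a 64-entry table, and a fixed value standing in for hash_ptr() (all three are assumptions for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SMP_CACHE_BYTES 64	/* assumption: 64-byte cachelines */

struct pv_hash_bucket {
	void *lock;
	void *node;
};

#define PV_LOCK_HASH_BITS 6	/* the floor the #if above enforces */
#define PV_LOCK_HASH_SIZE (1 << PV_LOCK_HASH_BITS)
#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))

static struct pv_hash_bucket pv_lock_hash[PV_LOCK_HASH_SIZE];

/* Round a hash down to the first bucket of its cacheline. */
static uint32_t hash_align(uint32_t hash)
{
	return hash & ~(uint32_t)(PV_HB_PER_LINE - 1);
}

int main(void)
{
	uint32_t hash = 13;	/* stand-in for hash_ptr(lock, bits) */
	struct pv_hash_bucket *hb = &pv_lock_hash[hash_align(hash)];
	struct pv_hash_bucket *end = hb + PV_HB_PER_LINE;

	/* The whole window shares one cacheline: at most one cache miss. */
	for (; hb < end; hb++)
		printf("probe bucket %td\n", hb - pv_lock_hash);
	return 0;
}

On a 64-bit build the two-pointer bucket is 16 bytes, so PV_HB_PER_LINE is 4 (buckets 12..15 for hash 13 here); the mask in hash_align() silently relies on that count being a power of two.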
2015 Apr 09 · 6 · [PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...TS 8
> +#define LFSR_MAX_BITS (2 + NR_CPUS_BITS)
> +#if LFSR_MAX_BITS < LFSR_MIN_BITS
> +#undef LFSR_MAX_BITS
> +#define LFSR_MAX_BITS LFSR_MIN_BITS
> +#endif
> +
> +struct pv_hash_bucket {
> + struct qspinlock *lock;
> + struct pv_node *node;
> +};
> +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
> +#define HB_RESERVED ((struct qspinlock *)1)
This is unused.
> +
> +static struct pv_hash_bucket *pv_lock_hash;
> +static unsigned int pv_lock_hash_bits __read_mostly;
static unsigned int pv_taps __read_mostly;
> +
> +#inclu...
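One property the definitions above rely on but never check: hash_align() masks with PV_HB_PER_LINE - 1, which is only exact when the per-line bucket count is a power of two. A compile-time guard, sketched under C11 (the static_assert is my addition, not the patch's):

#include <assert.h>

#define SMP_CACHE_BYTES 64	/* assumption: 64-byte cachelines */

struct pv_hash_bucket {
	struct qspinlock *lock;	/* pointers to incomplete types suffice here */
	struct pv_node *node;
};

#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))

/* Two pointer-sized fields give 4 buckets per line on 64-bit and 8 on
 * 32-bit; both are powers of two, so the alignment mask is exact. */
static_assert((PV_HB_PER_LINE & (PV_HB_PER_LINE - 1)) == 0,
	      "PV_HB_PER_LINE must be a power of two");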
2015 Apr 09 · 0 · [PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
..._CPUS_BITS)
>> +#if LFSR_MAX_BITS < LFSR_MIN_BITS
>> +#undef LFSR_MAX_BITS
>> +#define LFSR_MAX_BITS LFSR_MIN_BITS
>> +#endif
>> +
>> +struct pv_hash_bucket {
>> + struct qspinlock *lock;
>> + struct pv_node *node;
>> +};
>> +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
>> +#define HB_RESERVED ((struct qspinlock *)1)
> This is unused.
You are right, I will remove that.
>> +
>> +static struct pv_hash_bucket *pv_lock_hash;
>> +static unsigned int pv_lock_hash_bits __read_mostly;
> sta...
2015 Apr 07 · 0 · [PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...6 to fully utilize a 4k page.
+ */
+#define LFSR_MIN_BITS 8
+#define LFSR_MAX_BITS (2 + NR_CPUS_BITS)
+#if LFSR_MAX_BITS < LFSR_MIN_BITS
+#undef LFSR_MAX_BITS
+#define LFSR_MAX_BITS LFSR_MIN_BITS
+#endif
+
+struct pv_hash_bucket {
+ struct qspinlock *lock;
+ struct pv_node *node;
+};
+#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+#define HB_RESERVED ((struct qspinlock *)1)
+
+static struct pv_hash_bucket *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+#include <linux/hash.h>
+#include <linux/lfsr.h>
+#include <linux/bootmem.h>
+
+/...
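The <linux/lfsr.h> include is the probing strategy in this version: a linear-feedback shift register steps through every non-zero value of its width in pseudo-random order, which also explains the "if (!hash) hash = 1" guard seen in the other thread, since an LFSR never reaches zero from a non-zero state. A user-space sketch with an 8-bit register (the taps here are a textbook choice, not the kernel's table):

#include <stdint.h>
#include <stdio.h>

/* One Galois LFSR step; taps 0xB8 (x^8 + x^6 + x^5 + x^4 + 1) give a
 * maximal sequence visiting every value in [1, 255] exactly once. */
static uint8_t lfsr_step(uint8_t v)
{
	return (v >> 1) ^ ((v & 1) ? 0xB8 : 0);
}

int main(void)
{
	uint8_t v = 1;
	unsigned int period = 0;

	do {
		v = lfsr_step(v);
		period++;
	} while (v != 1);

	printf("period = %u\n", period);	/* 255: all non-zero states */
	return 0;
}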
2015 Apr 09 · 0 · [PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
On Thu, Apr 09, 2015 at 08:13:27PM +0200, Peter Zijlstra wrote:
> On Mon, Apr 06, 2015 at 10:55:44PM -0400, Waiman Long wrote:
> > +#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
> > +static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
> > +{
> > + unsigned long init_hash, hash = hash_ptr(lock, pv_lock_hash_bits);
> > + struct pv_hash_bucket *hb, *end;
> > +
> ...
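The quoted pv_hash() body is cut off; the idea under review can be sketched as follows (pv_hash_sketch, the GCC __atomic builtins, and the fixed table parameters are illustrative stand-ins, not the kernel code): compare-and-swap an empty ->lock slot in the hashed cacheline to claim it, then publish the node.

#include <stddef.h>

struct qspinlock;	/* opaque in this sketch */
struct pv_node;

struct pv_hash_bucket {
	struct qspinlock *lock;
	struct pv_node *node;
};

#define PV_HB_PER_LINE	4	/* assumption: 64-byte lines, 16-byte buckets */
#define PV_HASH_SIZE	(1 << 6)

static struct pv_hash_bucket pv_lock_hash[PV_HASH_SIZE];

/*
 * Hedged sketch of the insertion step: cmpxchg an empty ->lock slot in
 * the hashed cacheline to claim it, then publish the waiting node.
 * The real code rehashes to another cacheline when the line is full;
 * hash is assumed already reduced to [0, PV_HASH_SIZE).
 */
static struct qspinlock **pv_hash_sketch(struct qspinlock *lock,
					 struct pv_node *node,
					 unsigned long hash)
{
	struct pv_hash_bucket *hb, *end;

	hb = &pv_lock_hash[hash & ~(unsigned long)(PV_HB_PER_LINE - 1)];
	for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
		struct qspinlock *empty = NULL;

		if (__atomic_compare_exchange_n(&hb->lock, &empty, lock,
						0, __ATOMIC_ACQ_REL,
						__ATOMIC_RELAXED)) {
			__atomic_store_n(&hb->node, node, __ATOMIC_RELEASE);
			return &hb->lock;
		}
	}
	return NULL;
}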
2015 Apr 02 · 3 · [PATCH 8/9] qspinlock: Generic paravirt support
On Thu, Apr 02, 2015 at 12:28:30PM -0400, Waiman Long wrote:
> On 04/01/2015 05:03 PM, Peter Zijlstra wrote:
> >On Wed, Apr 01, 2015 at 03:58:58PM -0400, Waiman Long wrote:
> >>On 04/01/2015 02:48 PM, Peter Zijlstra wrote:
> >>I am sorry that I don't quite get what you mean here. My point is that in
> >>the hashing step, a cpu will need to scan an empty
2015 Apr 02 · 0 · [PATCH 8/9] qspinlock: Generic paravirt support
...#define PV_LOCK_HASH_BITS (2 + NR_CPUS_BITS)
+
+#if PV_LOCK_HASH_BITS < 6
+#undef PV_LOCK_HASH_BITS
+#define PV_LOCK_HASH_BITS 6
+#endif
+
+#define PV_LOCK_HASH_SIZE (1 << PV_LOCK_HASH_BITS)
+
+static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;
+
+#define PV_HB_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+
+static inline u32 hash_align(u32 hash)
+{
+ return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+#define for_each_hash_bucket(hb, off, hash) \
+ for (hash = hash_align(hash), off = 0, hb = &__pv_lock_hash[hash + off];\
+ off < PV_LOCK_...
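The macro is truncated above; its evident job is to start at the cacheline-aligned hash and visit every bucket in the table. A hedged completion under that reading (the power-of-two wrap step is an assumption):

/*
 * Hedged completion of the truncated macro: start at the cacheline-
 * aligned hash and walk all PV_LOCK_HASH_SIZE buckets, wrapping with a
 * power-of-two mask; off doubles as the loop bound.
 */
#define for_each_hash_bucket(hb, off, hash)				\
	for (hash = hash_align(hash), off = 0, hb = &__pv_lock_hash[hash + off];\
	     off < PV_LOCK_HASH_SIZE;					\
	     off++, hb = &__pv_lock_hash[(hash + off) & (PV_LOCK_HASH_SIZE - 1)])

A caller would then read, e.g., for_each_hash_bucket(hb, off, hash) { if (claim(hb)) break; }.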
2015 Mar 18 · 2 · [PATCH 8/9] qspinlock: Generic paravirt support
On 03/16/2015 09:16 AM, Peter Zijlstra wrote:
> Implement simple paravirt support for the qspinlock.
>
> Provide a separate (second) version of the spin_lock_slowpath for
> paravirt along with a special unlock path.
>
> The second slowpath is generated by adding a few pv hooks to the
> normal slowpath, but where those will compile away for the native
> case, they expand
2015 Apr 01 · 0 · [PATCH 8/9] qspinlock: Generic paravirt support
...v_hash_find(struct qspinlock *lock)
> +{
> + u64 hash = hash_ptr(lock, PV_LOCK_HASH_BITS);
> + struct pv_hash_bucket *hb, *end;
> + int cpu = -1;
> +
> + if (!hash)
> + hash = 1;
> +
> + hb = &__pv_lock_hash[hash_align(hash)];
> + for (;;) {
> + for (end = hb + PV_HB_PER_LINE; hb < end; hb++) {
> + struct qspinlock *l = READ_ONCE(hb->lock);
> +
> + /*
> + * If we hit an unused bucket, there is no match.
> + */
> + if (!l)
> + goto done;
After more careful reading, I think the assumption that the presence of
an unused bucket m...
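To make that objection concrete: a toy single-threaded model (integer ids standing in for qspinlock pointers; names hypothetical) where an earlier bucket is emptied by an unlock while a later one still holds a live entry, so a lookup that stops at the first empty bucket misses a match:

#include <stdio.h>

#define NBUCKETS 4

static int bucket[NBUCKETS];	/* 0 = empty, else a lock id */

static int find_stop_at_empty(int lock)
{
	for (int i = 0; i < NBUCKETS; i++) {
		if (!bucket[i])
			return -1;	/* flawed: empty bucket == no match */
		if (bucket[i] == lock)
			return i;
	}
	return -1;
}

int main(void)
{
	bucket[0] = 1;		/* CPU0 hashes lock 1 */
	bucket[1] = 2;		/* CPU1 hashes lock 2 */
	bucket[0] = 0;		/* CPU0 unlocks: bucket 0 emptied */

	/* Lock 2 is still hashed, but the early-exit search misses it. */
	printf("find(2) = %d\n", find_stop_at_empty(2));	/* prints -1 */
	return 0;
}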
2015 Apr 07 · 18 · [PATCH v15 00/15] qspinlock: a 4-byte queue spinlock with PV support
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve upon the PV
qspinlock code by dynamically allocating the hash table as well
as some other performance optimization.
- Simplified the Xen PV qspinlock code as suggested by David Vrabel
<david.vrabel at citrix.com>.
- Add benchmarking data for 3.19 kernel to compare the performance
of a spinlock heavy test
2015 Apr 07 · 0 · [PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...th this state, the queue head CPU will always be kicked even
+ * if it is not halted to avoid potential racing condition.
+ */
enum vcpu_state {
vcpu_running = 0,
vcpu_halted,
+ vcpu_hashed
};
struct pv_node {
@@ -97,7 +104,13 @@ static inline u32 hash_align(u32 hash)
return hash & ~(PV_HB_PER_LINE - 1);
}
-static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
+/*
+ * Set up an entry in the lock hash table
+ * This is not inlined to reduce size of generated code as it is included
+ * twice and is used only in the slowest path of handling CPU halting.
+ */
+static...
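For orientation, an annotated copy of the enum this hunk introduces; the transition notes are inferences from the quoted comment, not patch text:

/*
 * Transitions (inferred): running -> halted when a queued vCPU parks;
 * halted -> hashed once its lock is entered in the hash table, after
 * which the unlocker must kick it even if it already raced back to
 * running, to avoid losing a wakeup.
 */
enum vcpu_state {
	vcpu_running = 0,	/* vCPU is runnable */
	vcpu_halted,		/* parked, waiting in the MCS queue */
	vcpu_hashed,		/* parked at queue head; hash entry exists,
				 * so unlock must always kick */
};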