Displaying 20 results from an estimated 28 matches for "pv_wait_nod".
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
.../qspinlock.c
index 33b3f54..b9ba83b 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -239,8 +239,8 @@ static __always_inline void set_locked(struct qspinlock *lock)
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_scan_next(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_head(struct qspinlock *lock,
struc...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...s_inline void set_locked(s
WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define...
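The excerpt above is cut off, but the pattern it shows is the key point of the patch: the slow path is written once against generic pv_* names, and the native (non-paravirt) build aliases those names to empty __always_inline stubs plus a constant-false pv_enabled() gate, so the compiler drops every PV branch. A minimal stand-alone sketch of that pattern, using made-up names (slow_path, pv_hook) rather than the kernel's:

#include <stdio.h>

struct node { int locked; };

/* Native build: the PV hook is an empty inline and the gate is constant false. */
static inline void __pv_hook(struct node *n) { (void)n; }

#define pv_enabled() 0
#define pv_hook      __pv_hook

static void slow_path(struct node *n)
{
	if (pv_enabled())
		printf("paravirt-specific handling would run here\n");

	pv_hook(n);	/* expands to the empty stub and compiles away */
	n->locked = 1;	/* the generic, native work */
}

int main(void)
{
	struct node n = { 0 };

	slow_path(&n);
	printf("locked = %d\n", n.locked);
	return 0;
}

A paravirt build points the same pv_* names at real implementations before the slow path is compiled a second time, which is why the stubs cost nothing in the native case.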
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...ked, _Q_LOCKED_VAL);
> }
>
> +
> +/*
> + * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
> + * all the PV callbacks.
> + */
> +
> +static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
> +
> +static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
> +
> +#define pv_enabled() false
> +
> +#define pv_init_node __pv_init_node
> +#defin...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...ed(struct qspinlock *lock)
WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv...
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...ed(struct qspinlock *lock)
WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv...
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
...---
2 files changed, 101 insertions(+), 31 deletions(-)
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -246,10 +246,10 @@ static __always_inline void set_locked(s
*/
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(u32 old, struct mcs_spinlock *node) { }
static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
+static __always_inline void __pv_...
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...s_inline void set_locked(s
WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv...
2015 Apr 09
2
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
On Mon, Apr 06, 2015 at 10:55:48PM -0400, Waiman Long wrote:
> @@ -219,24 +236,30 @@ static void pv_wait_node(struct mcs_spinlock *node)
> }
>
> /*
> + * Called after setting next->locked = 1 & lock acquired.
> + * Check if the CPU has been halted. If so, set the _Q_SLOW_VAL flag
> + * and put an entry into the lock hash table to be woken up at unlock time.
> */
>...
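For readers skimming the thread, here is a much-simplified user-space model of the mechanism that comment describes: the lock holder notices that the next waiter's vCPU halted, records it in a hash table keyed by the lock, and marks the lock with _Q_SLOW_VAL so the eventual unlock knows to kick that vCPU. The single-slot "hash table", the state values, and the exact function shape below are illustrative only, not the code under review:

#include <stdio.h>

enum vcpu_state { VCPU_RUNNING, VCPU_HALTED, VCPU_HASHED };

#define _Q_LOCKED_VAL	1
#define _Q_SLOW_VAL	3	/* "unlock must take the slow path" marker */

struct qspinlock { int locked; };
struct pv_node   { enum vcpu_state state; };

static struct pv_node *hash_slot;	/* one-entry stand-in for the lock hash table */

static void pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	(void)lock;
	hash_slot = node;
}

/* Lock holder, after acquiring the lock and setting next->locked: if the next
 * waiter's vCPU was halted, hash it and mark the lock for a slow-path unlock. */
static void pv_scan_next(struct qspinlock *lock, struct pv_node *next)
{
	if (next->state != VCPU_HALTED)
		return;			/* waiter is still spinning, nothing to do */

	next->state = VCPU_HASHED;
	pv_hash(lock, next);
	lock->locked = _Q_SLOW_VAL;	/* tell unlock to look in the hash table */
}

/* Unlock: a plain store unless the slow-path marker was set. */
static void unlock(struct qspinlock *lock)
{
	if (lock->locked == _Q_SLOW_VAL && hash_slot) {
		hash_slot->state = VCPU_RUNNING;	/* "kick" the halted vCPU */
		hash_slot = NULL;
	}
	lock->locked = 0;
}

int main(void)
{
	struct qspinlock lock = { .locked = _Q_LOCKED_VAL };
	struct pv_node waiter = { .state = VCPU_HALTED };

	pv_scan_next(&lock, &waiter);
	unlock(&lock);
	printf("waiter state after unlock: %d (0 = VCPU_RUNNING)\n", waiter.state);
	return 0;
}

Deferring the kick to unlock time, as the patch subject says, means the halted vCPU is only made runnable once the lock is actually free.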
2015 Apr 24
16
[PATCH v16 00/14] qspinlock: a 4-byte queue spinlock with PV support
v15->v16:
- Remove the lfsr patch and use linear probing as lfsr is not really
necessary in most cases.
- Move the paravirt PV_CALLEE_SAVE_REGS_THUNK code to an asm header.
- Add a patch to collect PV qspinlock statistics which also
supersedes the PV lock hash debug patch.
- Add PV qspinlock performance numbers.
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve
2015 Apr 07
18
[PATCH v15 00/15] qspinlock: a 4-byte queue spinlock with PV support
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve upon the PV
qspinlock code by dynamically allocating the hash table as well
as some other performance optimization.
- Simplified the Xen PV qspinlock code as suggested by David Vrabel
<david.vrabel at citrix.com>.
- Add benchmarking data for 3.19 kernel to compare the performance
of a spinlock heavy test
2015 Mar 16
19
[PATCH 0/9] qspinlock stuff -v15
Hi Waiman,
As promised; here is the paravirt stuff I did during the trip to BOS last week.
All the !paravirt patches are more or less the same as before (the only real
change is the copyright lines in the first patch).
The paravirt stuff is 'simple' and KVM only -- the Xen code was a little more
convoluted and I've no real way to test that but it should be straightforward to
make work.
2015 Apr 08
2
[PATCH v15 16/16] unfair qspinlock: a queue based unfair lock
...{ return false; }
+static __always_inline bool __unfair_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node,
+ u32 tail)
+ { return false; }
+
#define pv_enabled() false
+#define unfair_enabled() false
#define pv_init_node __pv_init_node
#define pv_wait_node __pv_wait_node
#define pv_scan_next __pv_scan_next
-
#define pv_wait_head __pv_wait_head
+#define unfair_init_node __unfair_init_node
+#define unfair_wait_node __unfair_wait_node
+#define unfair_wait_head __unfair_wait_head
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queue_spin_lock_slowpa...
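The unfair-lock stubs in this excerpt differ from the PV ones in that they return a value ("{ return false; }"), so the generic slow path can keep a single call site of the form "if (unfair_wait_head(...)) return;" that folds away when the feature is compiled out via the constant-false unfair_enabled() gate. A small stand-alone sketch of that shape, with illustrative names rather than the patch's:

#include <stdbool.h>
#include <stdio.h>

struct lock { int locked; };

/* Feature compiled out: the stub returns a constant false, so the branch below
 * is dead code and the compiler removes it. */
static inline bool __unfair_wait_head_stub(struct lock *l) { (void)l; return false; }

#define unfair_enabled()	false
#define unfair_wait_head	__unfair_wait_head_stub

static void slow_path(struct lock *l)
{
	/* If the unfair variant already took the lock, skip the generic path. */
	if (unfair_enabled() && unfair_wait_head(l))
		return;

	l->locked = 1;		/* generic acquisition */
}

int main(void)
{
	struct lock l = { 0 };

	slow_path(&l);
	printf("locked = %d\n", l.locked);
	return 0;
}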