Displaying 20 results from an estimated 60 matches for "pv_kick_node".
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...+++ b/kernel/locking/qspinlock.c
@@ -239,8 +239,8 @@ static __always_inline void set_locked(struct qspinlock *lock)
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_scan_next(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_head(struct qspinlock *lock,
struct mcs_spinlock *node) { }
@@ -248,7 +248,7 @@ static __always_inline void...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define pv_kick_node __pv_kick_node
+
+#define pv_wait_head __pv_wait_head
+
+#ifd...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define pv_kick_node __pv_kick_node
+
+#define pv_wait_head __pv_wait_head
+
+#ifd...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define pv_kick_node __pv_kick_node
+
+#define...
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define pv_kick_node __pv_kick_node
+#define pv...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...code for queue_spin_unlock_slowpath(); provide NOPs for
> + * all the PV callbacks.
> + */
> +
> +static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
> +
> +static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
> +
> +#define pv_enabled() false
> +
> +#define pv_init_node __pv_init_node
> +#define pv_wait_node __pv_wait_node
> +#define pv_kick_node __pv_kick_node
> +...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...code for queue_spin_unlock_slowpath(); provide NOPs for
> + * all the PV callbacks.
> + */
> +
> +static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
> +
> +static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
> +
> +#define pv_enabled() false
> +
> +#define pv_init_node __pv_init_node
> +#define pv_wait_node __pv_wait_node
> +#define pv_kick_node __pv_kick_node
> +...
2015 Apr 09
2
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...spinlock *node)
> }
>
> /*
> + * Called after setting next->locked = 1 & lock acquired.
> + * Check if the CPU has been halted. If so, set the _Q_SLOW_VAL flag
> + * and put an entry into the lock hash table to be woken up at unlock time.
> */
> -static void pv_kick_node(struct mcs_spinlock *node)
> +static void pv_scan_next(struct qspinlock *lock, struct mcs_spinlock *node)
I'm not too sure about that name change..
> {
> struct pv_node *pn = (struct pv_node *)node;
> + struct __qspinlock *l = (void *)lock;
>
> /*
> + * Transition...
2015 Apr 09
2
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...spinlock *node)
> }
>
> /*
> + * Called after setting next->locked = 1 & lock acquired.
> + * Check if the CPU has been halted. If so, set the _Q_SLOW_VAL flag
> + * and put an entry into the lock hash table to be woken up at unlock time.
> */
> -static void pv_kick_node(struct mcs_spinlock *node)
> +static void pv_scan_next(struct qspinlock *lock, struct mcs_spinlock *node)
I'm not too sure about that name change..
> {
> struct pv_node *pn = (struct pv_node *)node;
> + struct __qspinlock *l = (void *)lock;
>
> /*
> + * Transition...
2014 Jun 16
4
[PATCH 10/11] qspinlock: Paravirt support
...number.
> +
> + for (;;) {
> + count = SPIN_THRESHOLD;
> +
> + do {
> + if (smp_load_acquire(&node->locked))
> + return;
> +
> + cpu_relax();
> + } while (--count);
> +
> + pv_wait(&node->locked, 1);
> + }
> +}
> +
> +void __pv_kick_node(struct mcs_spinlock *node)
> +{
> + struct pv_node *pn = (struct pv_node *)node;
> +
> + pv_kick(pn->cpu);
> +}
> +
> +void __pv_wait_head(struct qspinlock *lock)
> +{
> + unsigned int count;
> + struct pv_node *pn;
> + int val, old, new;
> +
> + for (;;) {...
2014 Jun 16
4
[PATCH 10/11] qspinlock: Paravirt support
...number.
> +
> + for (;;) {
> + count = SPIN_THRESHOLD;
> +
> + do {
> + if (smp_load_acquire(&node->locked))
> + return;
> +
> + cpu_relax();
> + } while (--count);
> +
> + pv_wait(&node->locked, 1);
> + }
> +}
> +
> +void __pv_kick_node(struct mcs_spinlock *node)
> +{
> + struct pv_node *pn = (struct pv_node *)node;
> +
> + pv_kick(pn->cpu);
> +}
> +
> +void __pv_wait_head(struct qspinlock *lock)
> +{
> + unsigned int count;
> + struct pv_node *pn;
> + int val, old, new;
> +
> + for (;;) {...
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define pv_kick_node __pv_kick_node
+#define pv...
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#define pv_kick_node __pv_kick_node
+#define pv...
2014 Mar 12
0
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...path() function.
+ */
+
+/**
+ * pv_get_prev - get previous queue node pointer
+ * @pv : pointer to struct pv_qvars to be set
+ * Return: the previous queue node pointer
+ */
+static __always_inline struct qnode *pv_get_prev(struct pv_qvars *pv)
+{
+ return ACCESS_ONCE(pv->prev);
+}
+
+/**
+ * pv_kick_node - kick up the CPU of the given node
+ * @pv : pointer to struct pv_qvars of the node to be kicked
+ */
+static __always_inline void pv_kick_node(struct pv_qvars *pv)
+{
+ if (pv->cpustate != PV_CPU_HALTED)
+ return;
+ ACCESS_ONCE(pv->cpustate) = PV_CPU_KICKED;
+ __queue_kick_cpu(pv->mycp...
2014 Jun 15
0
[PATCH 10/11] qspinlock: Paravirt support
...__always_inline void pv_init_node(struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE1(pv_lock_ops.init_node, node);
+}
+
+static __always_inline void pv_link_and_wait_node(u32 old, struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE2(pv_lock_ops.link_and_wait_node, old, node);
+}
+
+static __always_inline void pv_kick_node(struct mcs_spinlock *node)
+{
+ PVOP_VCALLEE1(pv_lock_ops.kick_node, node);
+}
+
+static __always_inline void pv_wait_head(struct qspinlock *lock)
+{
+ PVOP_VCALLEE1(pv_lock_ops.wait_head, lock);
+}
+
+static __always_inline void pv_queue_unlock(struct qspinlock *lock)
+{
+ PVOP_VCALLEE1(pv_lock_op...
2014 May 30
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...increase halting chance of heavily contended locks to favor lightly
+ * contended locks (queue depth of 1 or less).
+ *
+ * There are 2 places where races can happen:
+ * 1) Halting of the queue head CPU (in pv_head_spin_check) and the CPU
+ * kicking by the lock holder in the unlock path (in pv_kick_node).
+ * 2) Halting of the queue node CPU (in pv_queue_spin_check) and the
+ * the status check by the previous queue head (in pv_halt_check).
+ * See the comments on those functions to see how the races are being
+ * addressed.
+ */
+
+/*
+ * Spin threshold for queue spinlock
+ */
+#define QSPIN...
2015 Apr 24
16
[PATCH v16 00/14] qspinlock: a 4-byte queue spinlock with PV support
v15->v16:
- Remove the lfsr patch and use linear probing as lfsr is not really
necessary in most cases.
- Move the paravirt PV_CALLEE_SAVE_REGS_THUNK code to an asm header.
- Add a patch to collect PV qspinlock statistics which also
supersedes the PV lock hash debug patch.
- Add PV qspinlock performance numbers.
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve
2015 Apr 24
16
[PATCH v16 00/14] qspinlock: a 4-byte queue spinlock with PV support
v15->v16:
- Remove the lfsr patch and use linear probing as lfsr is not really
necessary in most cases.
- Move the paravirt PV_CALLEE_SAVE_REGS_THUNK code to an asm header.
- Add a patch to collect PV qspinlock statistics which also
supersedes the PV lock hash debug patch.
- Add PV qspinlock performance numbers.
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve
2014 Mar 13
1
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...&qlock->lock, _QSPINLOCK_LOCKED, 0)
== _QSPINLOCK_LOCKED))
return;
else
queue_spin_unlock_slowpath(lock);
} else {
__queue_spin_unlock(lock);
}
> // pv_kick_node:
> if (pv->cpustate != PV_CPU_HALTED)
> return;
> ACCESS_ONCE(pv->cpustate) = PV_CPU_KICKED;
> __queue_kick_cpu(pv->mycpu, PV_KICK_QUEUE_HEAD);
>
> Waiter -------------------------------------------
>
> // pv_head_spin_check
>...
2014 Mar 13
1
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...&qlock->lock, _QSPINLOCK_LOCKED, 0)
== _QSPINLOCK_LOCKED))
return;
else
queue_spin_unlock_slowpath(lock);
} else {
__queue_spin_unlock(lock);
}
> // pv_kick_node:
> if (pv->cpustate != PV_CPU_HALTED)
> return;
> ACCESS_ONCE(pv->cpustate) = PV_CPU_KICKED;
> __queue_kick_cpu(pv->mycpu, PV_KICK_QUEUE_HEAD);
>
> Waiter -------------------------------------------
>
> // pv_head_spin_check
>...