Displaying 20 results from an estimated 78 matches for "pv_wait".
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote:
> >>+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
> >>+{
> >>+ struct __qspinlock *l = (void *)lock;
> >>+ struct qspinlock **lp = NULL;
> >>+ struct pv_node *pn = (struct pv_node *)node;
> >>+ int slow_set = false;
> >>+ int loop;
> >...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...l);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+ pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+ pv_lock_op.unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val)
+{
+ pv_lock_op.wait(ptr, val, -1);
+}
+
+static inline void pv_kick(int cpu)
+{
+ pv_lock_op.kick(cpu);
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt_types.h b/arch/powerpc/include/asm/qspinlock_paravirt_types.h
new file mode 100644
index 0000000..e1fdeb0
-...
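The wrappers in this hunk all dispatch through a pv_lock_op table whose definition sits in the qspinlock_paravirt_types.h hunk that the excerpt truncates. Purely as a reading aid, here is an assumed shape for that table; the struct name, field names, and the meaning of the third wait() argument (which pv_wait() above passes as -1) are guesses, not confirmed by the excerpt:

#include <stdint.h>

typedef uint8_t u8;
typedef uint32_t u32;

struct qspinlock;			/* opaque here */

/* hypothetical dispatch table matching the calls made by the wrappers above */
struct pv_lock_ops {
	void (*lock)(struct qspinlock *lock, u32 val);
	void (*unlock)(struct qspinlock *lock);
	void (*wait)(u8 *ptr, u8 val, int cpu);	/* assumption: last arg is a cpu hint, -1 if unknown */
	void (*kick)(int cpu);
};

extern struct pv_lock_ops pv_lock_op;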
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
.../qspinlock.c
index 33b3f54..b9ba83b 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -239,8 +239,8 @@ static __always_inline void set_locked(struct qspinlock *lock)
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_scan_next(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_head(struct qspinlock *lock,
s...
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...H
> +#error "do not include this file"
> +#endif
> +
> +/*
> + * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
> + * of spinning them.
> + *
> + * This relies on the architecture to provide two paravirt hypercalls:
> + *
> + * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
> + * pv_kick(cpu) -- wakes a suspended vcpu
> + *
> + * Using these we implement __pv_queue_spin_lock_slowpath() and
> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
> + * native_queue_...
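As a reading aid for the comment quoted above, a minimal userspace model of the pv_wait()/pv_kick() handshake follows. It is not the kernel slowpath: pv_wait()/pv_kick() are stubbed here (the real ones are hypercalls), and the names vcpu_state, lock_free, waiter() and waker() are invented for the illustration. The point is the ordering: the waiter publishes a halted state and re-checks the condition before sleeping, and the waker makes the condition true before deciding whether a kick is needed.

#include <stdatomic.h>

#define VCPU_RUNNING	0
#define VCPU_HALTED	1

static _Atomic unsigned char vcpu_state = VCPU_RUNNING;
static _Atomic int lock_free;			/* the condition being waited on */

static void pv_wait(_Atomic unsigned char *ptr, unsigned char val)
{
	while (atomic_load(ptr) == val)		/* stand-in: the real hypercall suspends the vcpu */
		;
}

static void pv_kick(int cpu)
{
	(void)cpu;				/* stand-in: the real hypercall wakes the suspended vcpu */
}

void waiter(void)
{
	for (;;) {
		if (atomic_load(&lock_free))
			break;
		atomic_store(&vcpu_state, VCPU_HALTED);
		if (atomic_load(&lock_free))	/* re-check after publishing HALTED to close the race */
			break;
		pv_wait(&vcpu_state, VCPU_HALTED);
	}
	atomic_store(&vcpu_state, VCPU_RUNNING);
}

void waker(int waiter_cpu)
{
	atomic_store(&lock_free, 1);		/* make the condition true first */
	if (atomic_exchange(&vcpu_state, VCPU_RUNNING) == VCPU_HALTED)
		pv_kick(waiter_cpu);		/* only kick if the waiter may have gone to sleep */
}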
2020 Jul 08
1
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...>
> Add a new PARAVIRT_QSPINLOCKS_LITE config option that allows
> architectures to use the PV qspinlock code without the need to use or
> implement a pv_kick() function, thus eliminating the atomic unlock
> overhead. The non-atomic queued_spin_unlock() can be used instead.
> The pv_wait() function will still be needed, but it can be a dummy
> function.
>
> With that option set, the hybrid PV queued/unfair locking code should
> still be able to make it performant enough in a paravirtualized
How is this supposed to work? If there is no kick, you have no control
over wh...
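The "atomic unlock overhead" discussed above comes from the PV unlock needing a cmpxchg so it cannot miss a waiter that has marked the lock word as "kick me", whereas the native unlock is a plain store. A reduced sketch of the contrast; the constants follow the upstream layout, but these functions are simplified stand-ins, not the real implementations:

#include <stdatomic.h>

#define _Q_LOCKED_VAL	1U
#define _Q_SLOW_VAL	3U	/* "holder must kick me", set by a waiter before it suspends */

struct qspinlock { _Atomic unsigned char locked; };

/* Native unlock: no atomic read-modify-write, just a release store. */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}

/* PV unlock with kicking: must cmpxchg, otherwise a waiter that changed the
 * byte to _Q_SLOW_VAL while suspending could be overwritten and never woken. */
static inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	unsigned char expected = _Q_LOCKED_VAL;

	if (!atomic_compare_exchange_strong(&lock->locked, &expected, 0)) {
		/* expected is now _Q_SLOW_VAL: take the slow path and pv_kick() the waiter */
	}
}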
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...s_inline void set_locked(s
WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#define pv_wait_node __pv_wait_node
+#def...
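The hunk above wires empty inline stubs to the pv_* names so the native build pays nothing for the PV hooks. A generic, self-contained illustration of that pattern follows; lock_slowpath() and struct node are invented for the example and this is not the kernel code:

#include <stdio.h>

struct node { int count; };

/* Empty inline hooks: the native build compiles these away entirely. */
static inline void __pv_init_node(struct node *node) { (void)node; }
static inline void __pv_wait_node(struct node *node) { (void)node; }

#define pv_enabled()	0		/* false in the native build */
#define pv_init_node	__pv_init_node
#define pv_wait_node	__pv_wait_node

static void lock_slowpath(struct node *node)
{
	if (pv_enabled())
		printf("a paravirt build would branch here\n");

	pv_init_node(node);		/* no code emitted in the native build */
	pv_wait_node(node);
}

int main(void)
{
	struct node n = { 0 };

	lock_slowpath(&n);
	return 0;
}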
2016 Jun 02
9
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
...x it.
sorry for not even doing a test on a big-endian machine before!!!
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch cover letter title has changed, as only pseries may need to use pv-qspinlock, not all of powerpc.
1) __pv_wait will not return until *ptr != val, following a tip from Waiman.
2) support lock holder searching by storing the cpu number in a hash table (implemented as an array).
This is because lock stealing was hit too often, up to 10%~20% of all successful lock() calls, and this helps avoid
vcpu slice bouncing.
change from v2:
__...
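Item 2 of the change log above, the cpu-number hash table used to find the lock holder, could look roughly like the following. This is only a sketch of the idea as described; the table size, hash function, and function names are invented, and the real series may handle collisions and concurrency differently.

#include <stdint.h>

#define HOLDER_TAB_SIZE	512		/* assumption: a power of two */

struct holder_ent {
	uintptr_t lock;			/* address of the qspinlock */
	int cpu;			/* cpu currently holding it */
};

static struct holder_ent holder_tab[HOLDER_TAB_SIZE];

static unsigned int holder_hash(uintptr_t lock)
{
	return (unsigned int)(lock >> 4) & (HOLDER_TAB_SIZE - 1);
}

/* Called by the lock holder once it owns the lock. */
static void record_holder(uintptr_t lock, int cpu)
{
	holder_tab[holder_hash(lock)] = (struct holder_ent){ .lock = lock, .cpu = cpu };
}

/* Called by a waiter before yielding, so the yield can be directed at the holder. */
static int lookup_holder(uintptr_t lock)
{
	struct holder_ent *e = &holder_tab[holder_hash(lock)];

	return e->lock == lock ? e->cpu : -1;	/* -1: holder unknown, yield undirected */
}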
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...this file"
>> +#endif
>> +
>> +/*
>> + * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
>> + * of spinning them.
>> + *
>> + * This relies on the architecture to provide two paravirt hypercalls:
>> + *
>> + * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
>> + * pv_kick(cpu) -- wakes a suspended vcpu
>> + *
>> + * Using these we implement __pv_queue_spin_lock_slowpath() and
>> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and
>>...
2020 Jul 08
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...might actually
>> be able to change that to also support directed yield. Though I'm
>> not sure if this is actually the cause of the slowdown yet.
>
> Regarding the paravirt lock, I have taken a further look into the
> current PPC spinlock code. There is an equivalent of pv_wait() but no
> pv_kick(). Maybe PPC doesn't really need that.
So powerpc has two types of wait, either undirected "all processors" or
directed to a specific processor which has been preempted by the
hypervisor.
The simple spinlock code does a directed wait, because it knows the CP...
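To make the distinction above concrete, a trivial sketch follows. The yield_to_any()/yield_to_cpu() helpers stand in for whatever hypervisor interface powerpc uses and are not real hcall names: the simple spinlock can wait directed because its lock word identifies the holder, while pv_wait() only knows "this byte still has this value" and so can only wait undirected unless extra state, such as the holder-cpu table sketched earlier, supplies a target.

/* Stand-ins for the hypervisor yield interfaces; not real hcalls. */
static void yield_to_any(void)
{
	/* undirected: give up this vcpu's timeslice to any runnable vcpu */
}

static void yield_to_cpu(int cpu)
{
	(void)cpu;	/* directed: donate cycles to the (preempted) holder vcpu */
}

/* Simple spinlock path: the lock word encodes the holder, so wait directed. */
static void simple_spin_wait(int holder_cpu)
{
	yield_to_cpu(holder_cpu);
}

/* pv_wait(ptr, val) path: only the byte value is known, so wait undirected. */
static void pv_style_wait(void)
{
	yield_to_any();
}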
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
...queued_spin_unlock() may write value to a wrong address. now fix it.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch cover letter title has changed, as only pseries may need to use pv-qspinlock, not all of powerpc.
1) __pv_wait will not return until *ptr != val, following a tip from Waiman.
2) support lock holder searching by storing the cpu number in a hash table (implemented as an array).
This is because lock stealing was hit too often, up to 10%~20% of all successful lock() calls, and this helps avoid
vcpu slice bouncing.
change from v2:
__...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...ked, _Q_LOCKED_VAL);
> }
>
> +
> +/*
> + * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
> + * all the PV callbacks.
> + */
> +
> +static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
> +static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
> +
> +static __always_inline void __pv_wait_head(struct qspinlock *lock) { }
> +
> +#define pv_enabled() false
> +
> +#define pv_init_node __pv_init_node
> +#d...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...ed(struct qspinlock *lock)
WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}
+
+/*
+ * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+ struct mcs_spinlock *node) { }
+
+#define pv_enabled() false
+
+#define pv_init_node __pv_init_node
+#defin...