Displaying 20 results from an estimated 96 matches for "arch_spin_lock_flag".
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...locked(struct qspinlock *lock)
> +{
> + smp_mb();
> + return atomic_read(&lock->val);
> +}
> +
> +#include <asm-generic/qspinlock.h>
> +
> +/* we need to override these as ppc has io_sync stuff */
> +#undef arch_spin_trylock
> +#undef arch_spin_lock
> +#undef arch_spin_lock_flags
> +#undef arch_spin_unlock
> +#define arch_spin_trylock arch_spin_trylock
> +#define arch_spin_lock arch_spin_lock
> +#define arch_spin_lock_flags arch_spin_lock_flags
> +#define arch_spin_unlock arch_spin_unlock
> +
> +static inline int arch_spin_trylock(arch_spinlock_t *lock...
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...byte(lock), 0);
+}
+
+static inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ smp_mb();
+ return atomic_read(&lock->val);
+}
+
+#include <asm-generic/qspinlock.h>
+
+/* we need to override these as ppc has io_sync stuff */
+#undef arch_spin_trylock
+#undef arch_spin_lock
+#undef arch_spin_lock_flags
+#undef arch_spin_unlock
+#define arch_spin_trylock arch_spin_trylock
+#define arch_spin_lock arch_spin_lock
+#define arch_spin_lock_flags arch_spin_lock_flags
+#define arch_spin_unlock arch_spin_unlock
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ CLEAR_IO_SYNC;
+ return queu...
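The snippet cuts off inside arch_spin_trylock; the override pattern the comment
describes is simply to wrap the generic queued_spin_* helpers in powerpc's
IO-sync bookkeeping. A minimal sketch of that shape, assuming the existing ppc
CLEAR_IO_SYNC/SYNC_IO macros (an illustration, not the patch text):

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;				/* forget any pending IO-sync state */
	return queued_spin_trylock(lock);	/* generic fast path */
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	queued_spin_lock(lock);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;				/* order prior MMIO before the release */
	queued_spin_unlock(lock);
}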
2020 Jul 06
0
[PATCH v3 3/6] powerpc: move spinlock implementation to simple_spinlock
...inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ while (1) {
+ if (likely(__arch_spin_trylock(lock) == 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ } while (unlikely(lock->slock != 0));
+ HMT_medium();
+ }
+}
+
+static inline
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+{
+ unsigned long flags_dis;
+
+ while (1) {
+ if (likely(__arch_spin_trylock(lock) == 0))
+ break;
+ local_save_flags(flags_dis);
+ local_irq_restore(flags);
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ }...
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fair-lock patchset. The patches apply and build cleanly; they are
based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed
single-threaded and can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure it
protects.
v7 -> v8:
add one patch to drop a function call
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...k, 0, _QSPINLOCK_LOCKED) == 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+extern struct static_key paravirt_unfairlocks_enabled;
+
+/**
+ * arch_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ */
+static inline void arch_spin_lock(struct qspinlock *lock)
+{
+ if (static_key_false(&paravirt_unfairlocks_enabled)) {
+ queue_spi...
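The snippet cuts off inside the static-key branch; the dispatch the comment
describes, and which later revisions quoted below show in full, is roughly:

static inline void arch_spin_lock(struct qspinlock *lock)
{
	if (static_key_false(&paravirt_unfairlocks_enabled))
		queue_spin_lock_unfair(lock);	/* PV guest: byte lock that allows stealing */
	else
		queue_spin_lock(lock);		/* otherwise the fair queued lock */
}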
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...k, 0, _QSPINLOCK_LOCKED) == 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+extern struct static_key paravirt_unfairlocks_enabled;
+
+/**
+ * arch_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ */
+static inline void arch_spin_lock(struct qspinlock *lock)
+{
+ if (static_key_false(&paravirt_unfairlocks_enabled))
+ queue_spin_...
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and
Waiman (thank you), and trims off a couple of RFC and unrelated
patches.
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...s lock stealing.
+ */
+ queue_spin_lock_slowpath(lock, -1);
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key virt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+/**
+ * arch_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ */
+static inline void arch_spin_lock(struct qspinlock *lock)
+{
+ if (static_key_false(&virt_unfairlocks_enabled))
+ queue_spin_lock_unfair(lock);
+ else
+ queue_spin_lock(lock);
+}
+
+/**...
2020 Jul 24
8
[PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and more performance
results.
It turns out I may have been measuring the worst load point for the paravirt
case; looking across a range of loads, it's clear that queued spinlocks are
better overall even on PV, doubly so given the generally much improved
worst-case latencies.
I have defaulted it to N even though
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and
makes it the default for the Book3S-64 subarch.
This effort starts with the generic code so it's very simple but
still very performant. There are optimisations that can be made to
slowpaths, but I think it's better to attack those incrementally
if/when we find things, and try to add the improvements to generic
code as
2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...ck stealing.
+ */
+ queue_spin_lock_slowpath(lock, -1);
+}
+
+/*
+ * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
+ * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
+ * is true.
+ */
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_lock_flags
+
+/**
+ * arch_spin_lock - acquire a queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ */
+static inline void arch_spin_lock(struct qspinlock *lock)
+{
+ if (static_key_false(&paravirt_unfairlocks_enabled))
+ queue_spin_lock_unfair(lock);
+ else
+ queue_spin_lock(lock);
+}
+
+...
2016 Dec 06
6
[PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fair-lock patchset. The patches apply and build cleanly; they are
based on linux-next.
qspinlock avoids the waiter-starvation issue. It has about the same speed
single-threaded and can be much faster in high-contention situations,
especially when the spinlock is embedded within the data structure it
protects.
v8 -> v9:
move qspinlock config entry to
2014 Feb 26
2
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...urn 0;
> +}
> +
> +/*
> + * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
> + * jump to the unfair versions if the static key paravirt_unfairlocks_enabled
> + * is true.
> + */
> +#undef arch_spin_lock
> +#undef arch_spin_trylock
> +#undef arch_spin_lock_flags
> +
> +extern struct static_key paravirt_unfairlocks_enabled;
> +
> +/**
> + * arch_spin_lock - acquire a queue spinlock
> + * @lock: Pointer to queue spinlock structure
> + */
> +static inline void arch_spin_lock(struct qspinlock *lock)
> +{
> + if (static_key_false(...
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...+}
Why is this needed?
> +/*
> + * Redefine arch_spin_lock and arch_spin_trylock as inline functions that will
> + * jump to the unfair versions if the static key virt_unfairlocks_enabled
> + * is true.
> + */
> +#undef arch_spin_lock
> +#undef arch_spin_trylock
> +#undef arch_spin_lock_flags
> +
> +/**
> + * arch_spin_lock - acquire a queue spinlock
> + * @lock: Pointer to queue spinlock structure
> + */
> +static inline void arch_spin_lock(struct qspinlock *lock)
> +{
> + if (static_key_false(&virt_unfairlocks_enabled))
> + queue_spin_lock_unfair(lock)...
2020 Jul 06
13
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued spinlocks and rwlocks
powerpc/pseries: implement paravirt