search for: __pv_queued_spin_unlock

Displaying 13 results from an estimated 66 matches for "__pv_queued_spin_unlock".

2016 May 26
2
[PATCH v3 5/6] pv-qspinlock: use cmpxchg_release in __pv_queued_spin_unlock
...d, 1 insertion(+), 1 deletion(-)
>
> diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
> index a5b1248..2bbffe4 100644
> --- a/kernel/locking/qspinlock_paravirt.h
> +++ b/kernel/locking/qspinlock_paravirt.h
> @@ -614,7 +614,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
>  	 * unhash. Otherwise it would be possible to have multiple @lock
>  	 * entries, which would be BAD.
>  	 */
> -	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
> +	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
>  	if (likely(loc...
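The patch above relaxes the unlock fast path from a full-barrier cmpxchg() to a release-ordered one. As a rough illustration of the same idea outside the kernel, here is a minimal C11 sketch; qlock_t, Q_LOCKED_VAL, and try_fast_unlock() are made-up names, not the kernel's:

#include <stdatomic.h>

/*
 * Minimal C11 sketch of the idea in the patch above. The unlock fast
 * path only needs to publish the critical section's stores, so a
 * release-ordered CAS suffices; the plain cmpxchg() it replaces
 * implies a full barrier (costly on e.g. powerpc).
 */
typedef struct { _Atomic unsigned char locked; } qlock_t;

#define Q_LOCKED_VAL 1  /* stands in for _Q_LOCKED_VAL */

static int try_fast_unlock(qlock_t *l)
{
	unsigned char expected = Q_LOCKED_VAL;

	/* Succeeds (returns nonzero) only if no waiter changed 'locked'. */
	return atomic_compare_exchange_strong_explicit(
		&l->locked, &expected, 0,
		memory_order_release,    /* success: release the critical section */
		memory_order_relaxed);   /* failure: the slow path handles ordering */
}
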
2016 May 17
0
[PATCH v2 5/6] pv-qspinlock: use cmpxchg_release in __pv_queued_spin_unlock
...ravirt.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index a5b1248..2bbffe4 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -614,7 +614,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
 	if (likely(locked == _Q_LOCKED_VAL))
 		retu...
2016 May 25
0
[PATCH v3 5/6] pv-qspinlock: use cmpxchg_release in __pv_queued_spin_unlock
...ravirt.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index a5b1248..2bbffe4 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -614,7 +614,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
 	if (likely(locked == _Q_LOCKED_VAL))
 		retu...
2016 May 26
0
[PATCH v3 5/6] pv-qspinlock: use cmpxchg_release in __pv_queued_spin_unlock
...-)
> >
> > diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
> > index a5b1248..2bbffe4 100644
> > --- a/kernel/locking/qspinlock_paravirt.h
> > +++ b/kernel/locking/qspinlock_paravirt.h
> > @@ -614,7 +614,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
> >  	 * unhash. Otherwise it would be possible to have multiple @lock
> >  	 * entries, which would be BAD.
> >  	 */
> > -	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
> > +	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL,...
2020 Jul 05
1
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...32 val)
> +{
> +	if (!is_shared_processor())
> +		native_queued_spin_lock_slowpath(lock, val);
> +	else
> +		__pv_queued_spin_lock_slowpath(lock, val);
> +}

In a previous mail, I said that:

  You may need to match the use of __pv_queued_spin_lock_slowpath() with
  the corresponding __pv_queued_spin_unlock(), e.g.

  #define queued_spin_unlock queued_spin_unlock
  static inline void queued_spin_unlock(struct qspinlock *lock)
  {
  	if (!is_shared_processor())
  		smp_store_release(&lock->locked, 0);
  	else
  		__pv_queued_spin_unlock(lock);
  }

Otherwise, pv_kick() w...
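To make the (truncated) point about pv_kick() concrete: in the paravirt protocol the unlock path is what wakes a waiter that pv_wait() has halted, and only __pv_queued_spin_unlock() reaches that wakeup. A simplified sketch of its shape, adapted from kernel/locking/qspinlock_paravirt.h (details elided, not the verbatim source):

__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked;

	/* Fast path: no waiter is sleeping, the byte still holds _Q_LOCKED_VAL. */
	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	/*
	 * A sleeping waiter replaced the byte with _Q_SLOW_VAL; only this
	 * slow path looks it up and pv_kick()s its halted vCPU. A bare
	 * smp_store_release(&lock->locked, 0) would skip the kick and leave
	 * that vCPU asleep -- hence the pairing requirement above.
	 */
	__pv_queued_spin_unlock_slowpath(lock, locked);
}
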
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...+#include <asm/paravirt.h>

 #define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_processor())
+		native_queued_spin_lock_slowpath(lock, val);
+	else
+		__pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unloc...
2017 Feb 10
2
[PATCH v2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
..."pop %rdx;" > +"pop %rdi;" > +FRAME_END > +"ret;" > +".popsection"); > + > +#endif > + > /* > * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present. > */ That should work for now. I have done something similar for __pv_queued_spin_unlock. However, this has the problem of creating a dependency on the exact layout of the steal_time structure. Maybe the constant 16 can be passed in as a parameter offsetof(struct kvm_steal_time, preempted) to the asm call. Cheers, Longman
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qpsinlock instead of spinlock
...Symbol
# ........  ...............  ................  ..............................
#
    9.87%  sched-messaging  [kernel.vmlinux]  [k] __pv_queued_spin_lock_slowpath
    3.66%  sched-messaging  [kernel.vmlinux]  [k] __pv_queued_spin_unlock
    3.37%  sched-messaging  [kernel.vmlinux]  [k] __slab_free
    3.06%  sched-messaging  [kernel.vmlinux]  [k] unix_stream_read_generic

Pan Xinhui (6):
  qspinlock: powerpc support qspinlock
  powerpc: pseries/Kconfig: qspinlock build config
  powerpc: lib/locks.c:...
2016 Oct 28
1
[Xen-devel] [PATCH v6 10/11] x86, xen: support vcpu preempted check
...> -

Spurious change.

>  /*
>   * Our init of PV spinlocks is split in two init functions due to us
>   * using paravirt patching and jump labels patching and having to do
> @@ -137,6 +136,8 @@ void __init xen_init_spinlocks(void)
>  	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
>  	pv_lock_ops.wait = xen_qlock_wait;
>  	pv_lock_ops.kick = xen_qlock_kick;
> +
> +	pv_lock_ops.vcpu_is_preempted = xen_vcpu_stolen;
> }
>
> --
> 2.4.11
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...&per_cpu(steal_time, cpu);
+
+	return !!src->preempted;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -620,6 +616,12 @@ void __init kvm_spinlock_init(void)
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = kvm_wait;
 	pv_lock_ops.kick = kvm_kick_cpu;
+	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted);
+
+	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+		pv_lock_ops.vcpu_is_preempted =
+			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+	}
 }

static...
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qpsinlock as the default spinlock implemention
...inlcok_____ |
| compiling takes | 22m | 22m |

Pan Xinhui (6):
  qspinlock: powerpc support qspinlock
  powerpc: pseries/Kconfig: Add qspinlock build config
  powerpc: lib/locks.c: Add cpu yield/wake helper function
  pv-qspinlock: powerpc support pv-qspinlock
  pv-qspinlock: use cmpxchg_release in __pv_queued_spin_unlock
  powerpc: pseries: Add pv-qspinlock build config/make

 arch/powerpc/include/asm/qspinlock.h                | 39 +++++++++++++++++++
 arch/powerpc/include/asm/qspinlock_paravirt.h       | 38 ++++++++++++++++++
 .../powerpc/include/asm/qspinlock_paravirt_types.h  | 13 +++++++
 arch/powerpc/include/asm...
2020 Jul 09
4
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...--- /dev/null
> +++ b/arch/powerpc/include/asm/qspinlock_paravirt.h
> @@ -0,0 +1,7 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +#ifndef __ASM_QSPINLOCK_PARAVIRT_H
> +#define __ASM_QSPINLOCK_PARAVIRT_H

_ASM_POWERPC_QSPINLOCK_PARAVIRT_H please.

> +
> +EXPORT_SYMBOL(__pv_queued_spin_unlock);

Why's that in a header? Should that (eventually) go with the generic
implementation?

> diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
> index 24c18362e5ea..756e727b383f 100644
> --- a/arch/powerpc/platforms/pseries/Kconfig
> +++ b/ar...
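Background for the reviewer's question: EXPORT_SYMBOL() conventionally sits in a .c file directly after the definition it exports, not in a header that every includer pulls in. A hypothetical sketch of the suggested shape (the file name and the elided body are illustrative):

/* qspinlock_paravirt.c -- illustrative placement only */
#include <linux/module.h>
#include <linux/spinlock.h>

void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	/* ... slow-path unlock elided ... */
}
/*
 * Exactly one export, next to the definition, regardless of how many
 * translation units include the corresponding header.
 */
EXPORT_SYMBOL(__pv_queued_spin_unlock);
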
2016 May 17
0
[PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
...H
+
+#include <asm/qspinlock_paravirt_types.h>
+
+extern void pv_lock_init(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+	CLEAR_IO_SYNC;
+	pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	SYNC_IO;
+	pv_lock_op.unlock(lock);
+}
+
+static inline void pv_wait(u8 *...