search for: __pv_queued_spin_lock_slowpath

Displaying 20 results from an estimated 36 matches for "__pv_queued_spin_lock_slowpath".

2020 Jul 05
1
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...de <asm-generic/qspinlock_types.h>
> +#include <asm/paravirt.h>
>
> #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
>
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +
> +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> +{
> +	if (!is_shared_processor())
> +		native_queued_spin_lock_slowpath(lock, val);
> +	else
> +		__pv_queued_spin_lock_slowpath(lock, val);
> +...
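The excerpt above shows the core trick of the pseries paravirt qspinlock: the slowpath is chosen at runtime, so dedicated-processor partitions keep the native MCS-queue slowpath while shared-processor LPARs take the paravirt one, which can yield the waiting vCPU back to the hypervisor. A minimal user-space sketch of that dispatch shape (the stub bodies and the shared_processor flag are illustrative stand-ins, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct qspinlock { unsigned int val; };

    /* Illustrative stubs for the two real slowpath implementations. */
    static void native_slowpath(struct qspinlock *lock, unsigned int val)
    {
        printf("native: spin in the MCS queue (val=%u)\n", val);
    }

    static void pv_slowpath(struct qspinlock *lock, unsigned int val)
    {
        printf("paravirt: queue, then yield to the hypervisor (val=%u)\n", val);
    }

    /* Stand-in for is_shared_processor(), which the kernel derives
     * from firmware information at boot. */
    static bool shared_processor = true;

    static inline void slowpath(struct qspinlock *lock, unsigned int val)
    {
        if (!shared_processor)
            native_slowpath(lock, val);
        else
            pv_slowpath(lock, val);
    }

    int main(void)
    {
        struct qspinlock lock = { .val = 1 };
        slowpath(&lock, lock.val);
        return 0;
    }

Because the condition is fixed at boot, the branch is perfectly predictable; the kernel version is __always_inline so the fastpath cost is a single test.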
2016 May 17
0
[PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
...+#endif
+
+#ifndef _ASM_QSPINLOCK_PARAVIRT_H
+#define _ASM_QSPINLOCK_PARAVIRT_H
+
+#include <asm/qspinlock_paravirt_types.h>
+
+extern void pv_lock_init(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+	CLEAR_IO_SYNC;
+	pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	SY...
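Unlike the 2020 series above, which branches at the call site on is_shared_processor(), this 2016 approach dispatches through a table of function pointers (pv_lock_op) filled in once at boot; the CLEAR_IO_SYNC/SYNC_IO macros around the calls keep powerpc's spinlock MMIO-ordering convention intact. A sketch of the boot-time selection, assuming a two-member ops struct inferred from the excerpt (only the pv_lock_op.lock/.unlock calls appear in the posting; the struct name, layout, and init function here are guesses):

    #include <stdbool.h>

    struct qspinlock;

    /* Inferred ops table shape. */
    struct pv_lock_op_table {
        void (*lock)(struct qspinlock *lock, unsigned int val);
        void (*unlock)(struct qspinlock *lock);
    };

    static void native_lock(struct qspinlock *l, unsigned int v) { /* spin */ }
    static void native_unlock(struct qspinlock *l)               { /* store-release */ }
    static void pv_lock(struct qspinlock *l, unsigned int v)     { /* queue + yield */ }
    static void pv_unlock(struct qspinlock *l)                   { /* kick waiter */ }

    static struct pv_lock_op_table pv_lock_op;

    /* Boot-time selection: shared-processor guests get the PV variants. */
    static void pv_lock_init_sketch(bool shared_processor)
    {
        pv_lock_op.lock   = shared_processor ? pv_lock   : native_lock;
        pv_lock_op.unlock = shared_processor ? pv_unlock : native_unlock;
    }

The indirection costs an extra load on every lock/unlock, which is part of why the later series preferred the inlined branch.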
2020 Jul 02
0
[PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...ine _ASM_POWERPC_QSPINLOCK_H
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>

 #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */

+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_processor())
+		native_queued_spin_lock_slowpath(lock, val);
+	else
+		__pv_queued_spin_lock_slowpath(lock, val);
+}
+#else
+extern void queued_spin_lock_s...
2020 Jul 03
0
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...ine _ASM_POWERPC_QSPINLOCK_H
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>

 #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */

+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_processor())
+		native_queued_spin_lock_slowpath(lock, val);
+	else
+		__pv_queued_spin_lock_slowpath(lock, val);
+}
+#else
+extern void queued_spin_lock_s...
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...ine _ASM_POWERPC_QSPINLOCK_H
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>

 #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */

+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (!is_shared_processor())
+		native_queued_spin_lock_slowpath(lock, val);
+	else
+		__pv_queued_spin_lock_...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...+#endif
+
+#ifndef _ASM_QSPINLOCK_PARAVIRT_H
+#define _ASM_QSPINLOCK_PARAVIRT_H
+
+#include <asm/qspinlock_paravirt_types.h>
+
+extern void pv_lock_init(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+	pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_lock_op.unlock(l...
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qpsinlock instead of spinlock
...'
# Event count (approx.): 1250040606393
#
# Overhead  Command          Shared Object     Symbol
# ........  ...............  ................  ......................................
#
     9.87%  sched-messaging  [kernel.vmlinux]  [k] __pv_queued_spin_lock_slowpath
     3.66%  sched-messaging  [kernel.vmlinux]  [k] __pv_queued_spin_unlock
     3.37%  sched-messaging  [kernel.vmlinux]  [k] __slab_free
     3.06%  sched-messaging  [kernel.vmlinux]  [k] unix_stream_read_generic

Pan Xinhui (6):
  qspinlock: powerpc supp...
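The sched-messaging tasks in the profile come from the scheduler messaging (hackbench-style) benchmark; a perf invocation along these lines (illustrative — the exact options used in the posting are not shown) reproduces this kind of per-symbol overhead report:

    # sample all CPUs while running the scheduler messaging benchmark
    perf record -a -- perf bench sched messaging -g 20 -l 1000
    # summarize overhead per symbol, as excerpted above
    perf report --stdio

With the PV slowpath and unlock together at ~13.5% of samples, the lock paths dominate this contended workload, which is the motivation for the series.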
2018 Aug 10
0
[PATCH 04/10] x86/paravirt: use a single ops structure
...+	pv_ops.pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
@@ -749,13 +749,15 @@ void __init kvm_spinlock_init(void)
 		return;
 	__pv_init_lock_hash();
-	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_lock_ops.wait = kvm_wait;
-	pv_lock_ops.kick = kvm_kick_cpu;
+	pv_ops.pv_lock_ops.queued_spin_lock_slowpath =
+		__pv_queued_spin_lock_slowpath;
+	pv_ops.pv_lock_ops.queued_spin_unlock =
+		PV_CALLEE_SAVE(__pv_queued_...
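The mechanical change in this patch is collapsing the separate per-subsystem globals (pv_lock_ops, pv_mmu_ops, and so on) into one nested pv_ops structure, so every assignment gains a pv_ops. prefix. A reduced sketch of the before/after shape (member list trimmed to what the excerpt shows; the wait/kick signatures are assumptions):

    struct qspinlock;

    /* Before: one global structure per subsystem. */
    struct pv_lock_ops {
        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, unsigned int val);
        void (*queued_spin_unlock)(struct qspinlock *lock);
        void (*wait)(unsigned char *ptr, unsigned char val); /* signature assumed */
        void (*kick)(int cpu);                               /* signature assumed */
    };

    /* After: a single aggregate, so callers write
     * pv_ops.pv_lock_ops.queued_spin_lock_slowpath = ...; */
    struct pv_ops {
        struct pv_lock_ops pv_lock_ops;
        /* struct pv_mmu_ops pv_mmu_ops; ... and so on */
    };

    struct pv_ops pv_ops; /* one definition replaces several globals */

Grouping the ops this way gives paravirt patching a single structure to index into instead of several independently exported symbols.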
2020 Apr 14
1
[PATCH v2] Fix: buffer overflow during hvc_alloc().
...39] general protection fault: 0000 [#1] SMP NOPTI
[ 83.547422] CPU: 5 PID: 3225 Comm: modprobe Kdump: loaded Tainted: G W --------- - - 4.18.0-167.el8.x86_64 #1
[ 83.549191] Hardware name: Red Hat KVM, BIOS 1.12.0-5.scrmod+el8.2.0+5159+d8aa4d83 04/01/2014
[ 83.550544] RIP: 0010:__pv_queued_spin_lock_slowpath+0x19a/0x2a0
[ 83.551504] Code: c4 c1 ea 12 41 be 01 00 00 00 4c 8d 6d 14 41 83 e4 03 8d 42 ff 49 c1 e4 05 48 98 49 81 c4 40 a5 02 00 4c 03 24 c5 60 48 34 91 <49> 89 2c 24 b8 00 80 00 00 eb 15 84 c0 75 0a 41 0f b6 54 24 14 84
[ 83.554449] RSP: 0018:ffffb51a0323fdb0 EFLAGS: 00010202
[ 83....
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and Waiman (thank you), and trims off a couple of RFC and unrelated patches.

Thanks,
Nick

Nicholas Piggin (6):
  powerpc/powernv: must include hvcall.h to get PAPR defines
  powerpc/pseries: move some PAPR paravirt functions to their own file
  powerpc: move spinlock implementation to simple_spinlock
  powerpc/64s: implement queued
2017 Nov 01
2
[PATCH] x86/paravirt: Add kernel parameter to choose paravirt lock type
...kvm.c
@@ -646,6 +646,10 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
+	if ((pv_spinlock_type == locktype_queued) ||
+	    (pv_spinlock_type == locktype_unfair))
+		return;
+
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 041096b..ca35cd3 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -115,11 +115,48 @@ unsigned paravirt_patch_jmp(void *i...
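The patch gates kvm_spinlock_init() on a command-line-selected lock type: if the administrator forced native queued or unfair locks, the PV hash table and the pv_lock_ops overrides are skipped entirely. A sketch of the selection plumbing, assuming an enum and parser shaped like the patch's (only locktype_queued and locktype_unfair are visible in the excerpt; the other names and the parser are guesses):

    #include <string.h>

    enum pv_spinlock_type {
        locktype_default,   /* assumed: auto-detect */
        locktype_queued,    /* native qspinlock, no PV hooks */
        locktype_unfair,    /* simple unfair lock, no PV hooks */
        locktype_paravirt,  /* assumed: force PV qspinlock */
    };

    static enum pv_spinlock_type pv_spinlock_type = locktype_default;

    /* Hypothetical early-param hook; the real patch would register
     * something like this with the kernel's early_param() machinery. */
    static int parse_pvlock_type(const char *arg)
    {
        if (strcmp(arg, "queued") == 0)
            pv_spinlock_type = locktype_queued;
        else if (strcmp(arg, "unfair") == 0)
            pv_spinlock_type = locktype_unfair;
        else if (strcmp(arg, "paravirt") == 0)
            pv_spinlock_type = locktype_paravirt;
        else
            return -1;
        return 0;
    }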
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...+#endif
+
+#ifndef _ASM_QSPINLOCK_PARAVIRT_H
+#define _ASM_QSPINLOCK_PARAVIRT_H
+
+#include <asm/qspinlock_paravirt_types.h>
+
+extern void pv_lock_init(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+	pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_lock_op.unlock(l...
2016 Dec 06
6
[PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All, this is the fairlock patchset. You can apply the patches and build successfully; they are based on linux-next. qspinlock avoids the waiter-starvation issue: it is about the same speed in the single-threaded case and can be much faster under high contention, especially when the spinlock is embedded within the data structure it protects.

v8 -> v9:
	move qspinlock config entry to
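The starvation claim rests on FIFO queueing: each contender appends itself to a queue and spins only on its own node, so lock handoff follows arrival order. A compact user-space MCS-lock sketch in C11 atomics shows the mechanism (illustrative only; the kernel's qspinlock compresses the same idea into a single 32-bit word):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;             /* true while this waiter must wait */
    };

    void mcs_lock(_Atomic(struct mcs_node *) *tail, struct mcs_node *me)
    {
        atomic_store(&me->next, NULL);
        atomic_store(&me->locked, true);
        /* Swing the tail to ourselves; whoever was there is our predecessor. */
        struct mcs_node *prev = atomic_exchange(tail, me);
        if (prev) {
            atomic_store(&prev->next, me);
            while (atomic_load(&me->locked))
                ;                       /* cpu_relax() in kernel code */
        }
    }

    void mcs_unlock(_Atomic(struct mcs_node *) *tail, struct mcs_node *me)
    {
        struct mcs_node *succ = atomic_load(&me->next);
        if (!succ) {
            /* No visible successor: try to mark the queue empty. */
            struct mcs_node *expected = me;
            if (atomic_compare_exchange_strong(tail, &expected, NULL))
                return;
            /* A successor is mid-enqueue; wait for its link to appear. */
            while (!(succ = atomic_load(&me->next)))
                ;
        }
        atomic_store(&succ->locked, false);     /* FIFO handoff */
    }

Because each waiter spins on its own cache line and is released strictly in arrival order, no waiter can be overtaken indefinitely, unlike a test-and-set spinlock.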
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and makes it the default for the Book3S-64 subarch. This effort starts with the generic code so it's very simple but still very performant. There are optimisations that can be made to slowpaths, but I think it's better to attack those incrementally if/when we find things, and try to add the improvements to generic code as
2017 Nov 01
0
[PATCH] x86/paravirt: Add kernel parameter to choose paravirt lock type
...inlock_init(void)
>  	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
>  		return;
>
> +	if ((pv_spinlock_type == locktype_queued) ||
> +	    (pv_spinlock_type == locktype_unfair))
> +		return;
> +
>  	__pv_init_lock_hash();
>  	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
>  	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index 041096b..ca35cd3 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -115,11 +115,48 @@ unsig...