search for: native_queued_spin_lock_slowpath

Displaying 14 results from an estimated 56 matches for "native_queued_spin_lock_slowpath".

2020 Jul 05
1
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...h > @@ -3,9 +3,36 @@ > #define _ASM_POWERPC_QSPINLOCK_H > > #include <asm-generic/qspinlock_types.h> > +#include <asm/paravirt.h> > > #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ > > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); > +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); > + > +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) > +{ > + if (!is_shared_processor()) > + native_queued_spin_lock_slowpat...
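
Reconstructed from the truncated hunk above (the same function appears in full in the 2020 Jul 02 and Jul 03 postings further down), the dispatch this patch adds is:

    /* Dedicated-CPU partitions take the native slowpath; shared-processor
     * LPARs take the paravirt slowpath, which can yield the vCPU instead
     * of spinning against a preempted lock holder.  is_shared_processor()
     * comes from the newly included asm/paravirt.h. */
    static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
    {
    	if (!is_shared_processor())
    		native_queued_spin_lock_slowpath(lock, val);
    	else
    		__pv_queued_spin_lock_slowpath(lock, val);
    }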
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function the bare metal case can be optimized by patching the call away completely. A kernel running as a guest can decide whether to use paravirtualized spinlocks, the current fallback to the unfair test-and-set scheme, or to mimic the bare metal behavior. Juergen Gross (4): paravirt: add generic _paravirt_false() function paravirt: switch
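
A minimal sketch of the idea this cover letter describes. Only _paravirt_false() is named in the series; the ops struct below is hypothetical, for illustration:

    /* Sketch, not the verbatim patches: virt_spin_lock becomes a
     * patchable pv-ops callback.  Bare metal keeps a false-returning
     * stub whose call site can be patched away entirely; a guest may
     * install the unfair test-and-set fallback instead. */
    struct qspinlock;

    bool _paravirt_false(void)
    {
    	return false;
    }

    struct pv_lock_ops_sketch {		/* hypothetical layout */
    	bool (*virt_spin_lock)(struct qspinlock *lock);
    };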
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...k, because fair locks have + * horrible lock 'holder' preemption issues. + */ + + do { + while (atomic_read(&lock->val) != 0) + cpu_relax(); + } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); + + return true; +} + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu) { return pv_vcpu_is_preempted(cpu); } + +void native_pv_lock_init(void) __init; #else static inline void queued_spin_unlock(struct qspinlock *lock) {...
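
The fallback loop is shown almost in full above; completed, it reads as follows (the enclosing function name is cut off in the snippet, so native_virt_spin_lock is an assumption):

    /* Unfair test-and-set acquisition for guests: fair queued locks
     * suffer badly when the lock holder's vCPU is preempted. */
    static inline bool native_virt_spin_lock(struct qspinlock *lock)
    {
    	do {
    		while (atomic_read(&lock->val) != 0)
    			cpu_relax();	/* wait until the lock looks free */
    	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

    	return true;
    }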
2016 May 17
0
[PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
...m/qspinlock_paravirt.h @@ -0,0 +1,38 @@ +#ifndef CONFIG_PARAVIRT_SPINLOCKS +#error "do not include this file" +#endif + +#ifndef _ASM_QSPINLOCK_PARAVIRT_H +#define _ASM_QSPINLOCK_PARAVIRT_H + +#include <asm/qspinlock_paravirt_types.h> + +extern void pv_lock_init(void); +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_init_lock_hash(void); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val) +{ + CLEAR...
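
The snippet truncates pv_queued_spin_lock() at "CLEAR...", so its first statement is not recoverable here; under that caveat, a minimal forwarding body would be:

    /* Hedged sketch: whatever the "CLEAR..." statement was is not
     * reproduced; the essential action is forwarding to the paravirt
     * slowpath declared above. */
    static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
    {
    	__pv_queued_spin_lock_slowpath(lock, val);
    }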
2020 Jul 02
0
[PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...sm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -3,9 +3,36 @@ #define _ASM_POWERPC_QSPINLOCK_H #include <asm-generic/qspinlock_types.h> +#include <asm/paravirt.h> #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ +#ifdef CONFIG_PARAVIRT_SPINLOCKS +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + if (!is_shared_processor()) + native_queued_spin_lock_slowpath(lock, val); + else + __pv_q...
2020 Jul 03
0
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...sm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -3,9 +3,36 @@ #define _ASM_POWERPC_QSPINLOCK_H #include <asm-generic/qspinlock_types.h> +#include <asm/paravirt.h> #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ +#ifdef CONFIG_PARAVIRT_SPINLOCKS +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + if (!is_shared_processor()) + native_queued_spin_lock_slowpath(lock, val); + else + __pv_q...
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...sm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -3,9 +3,47 @@ #define _ASM_POWERPC_QSPINLOCK_H #include <asm-generic/qspinlock_types.h> +#include <asm/paravirt.h> #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ +#ifdef CONFIG_PARAVIRT_SPINLOCKS +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + if (!is_shared_processor()) +...
2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...ernel/locking/qspinlock.c index b9515fcc9b29..ebcc6f5d99d5 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -287,10 +287,14 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, #ifdef CONFIG_PARAVIRT_SPINLOCKS #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath +#define queued_spin_lock_slowpath_queue native_queued_spin_lock_slowpath_queue #endif #endif /* _GEN_PV_LOCK_SLOWPATH */ +void queued_spin_lock_slowpath_queue(struct qspinlock *lock); +static void __queued_spin_lock_slowpath_queue(struct qspinlock *lock); + /** * queued_spin_lock_slowpath...
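
For context on the #define block this hunk extends: kernel/locking/qspinlock.c builds its slowpath twice via a self-include, sketched below in simplified form (the real file carries more #undef/#define pairs):

    /* Pass 1: under CONFIG_PARAVIRT_SPINLOCKS the entry points are
     * renamed native_*.  The file then re-includes itself with
     * _GEN_PV_LOCK_SLOWPATH set to emit the __pv_* variants. */
    #ifdef CONFIG_PARAVIRT_SPINLOCKS
    #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
    #define queued_spin_lock_slowpath_queue	native_queued_spin_lock_slowpath_queue
    #endif

    /* ... slowpath implementation compiles here ... */

    #if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
    #define _GEN_PV_LOCK_SLOWPATH
    #include "qspinlock.c"	/* second pass: generates the __pv_* versions */
    #endif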
2017 Sep 05
3
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...e (atomic_read(&lock->val) != 0) >>> + cpu_relax(); >>> + } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); >>> + >>> + return true; >>> +} >>> + >>> #ifdef CONFIG_PARAVIRT_SPINLOCKS >>> extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); >>> extern void __pv_init_lock_hash(void); >>> @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu) >>> { >>> return pv_vcpu_is_preempted(cpu); >>> } >>> + >>> +void native_pv_lock_in...
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...' preemption issues. > + */ > + > + do { > + while (atomic_read(&lock->val) != 0) > + cpu_relax(); > + } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); > + > + return true; > +} > + > #ifdef CONFIG_PARAVIRT_SPINLOCKS > extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); > extern void __pv_init_lock_hash(void); > @@ -38,33 +57,32 @@ static inline bool vcpu_is_preempted(long cpu) > { > return pv_vcpu_is_preempted(cpu); > } > + > +void native_pv_lock_init(void) __init; > #else > static inline void qu...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...m/qspinlock_paravirt.h @@ -0,0 +1,36 @@ +#ifndef CONFIG_PARAVIRT_SPINLOCKS +#error "do not include this file" +#endif + +#ifndef _ASM_QSPINLOCK_PARAVIRT_H +#define _ASM_QSPINLOCK_PARAVIRT_H + +#include <asm/qspinlock_paravirt_types.h> + +extern void pv_lock_init(void); +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_init_lock_hash(void); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val) +{ + pv_lo...
2017 Sep 05
1
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...tion issues. > + */ > + > + do { > + while (atomic_read(&lock->val) != 0) > + cpu_relax(); > + } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); > + > + return true; > +} #endif > + > #ifdef CONFIG_PARAVIRT_SPINLOCKS > extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); > extern void __pv_init_lock_hash(void); > #ifdef CONFIG_PARAVIRT > #define virt_spin_lock virt_spin_lock > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > static inline bool virt_spin_lock(struct qspinlock *lock) > { > + return pv_virt_spin_lock(lo...
2020 Apr 08
0
[RFC PATCH 00/26] Runtime paravirt patching
...t I saw on the guest while testing this is > indicative of the problem: > > [ 1136.461522] watchdog: BUG: soft lockup - CPU#8 stuck for 22s! [lock_torture_wr:12865] > [ 1136.461542] CPU: 8 PID: 12865 Comm: lock_torture_wr Tainted: G W L 5.4.0-rc7+ #77 > [ 1136.461546] RIP: 0010:native_queued_spin_lock_slowpath+0x15/0x220 > > (Caused by an oversubscribed host but using mismatched native pv_lock_ops > on the guest.) And this illustrates what? The fact that you used a misconfigured setup. > This series addresses the problem by doing paravirt switching at > runtime. You're not addressing...
2016 Nov 02
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...rch/x86/kernel/paravirt-spinlocks.c @@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void) __raw_callee_save___native_queued_spin_unlock; } +static bool native_vcpu_is_preempted(int cpu) +{ + return 0; +} + struct pv_lock_ops pv_lock_ops = { #ifdef CONFIG_SMP .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock), .wait = paravirt_nop, .kick = paravirt_nop, + .vcpu_is_preempted = native_vcpu_is_preempted, #endif /* SMP */ }; EXPORT_SYMBOL(pv_lock_ops); -- 2.4.11
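
An illustrative caller, modelled on the kernel's optimistic-spin loops but not part of this patch, shows why the hook pays off (spin_on_owner and owner_cpu are hypothetical names):

    /* Stop spinning once the owner's vCPU is preempted: on an
     * oversubscribed host the owner may not run again for a long
     * time, so further busy-waiting is wasted work. */
    static bool spin_on_owner(struct task_struct *owner, int owner_cpu)
    {
    	while (READ_ONCE(owner->on_cpu)) {
    		if (need_resched() || vcpu_is_preempted(owner_cpu))
    			return false;	/* bail out and block instead */
    		cpu_relax();
    	}
    	return true;
    }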