search for: config_paravirt_unfair_locks

Displaying 20 results from an estimated 43 matches for "config_paravirt_unfair_locks".

2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...UEST diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index e4a4f5d..19af937 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -5,6 +5,10 @@ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS +extern struct static_key paravirt_unfairlocks_enabled; +#endif + #define queue_spin_unlock queue_spin_unlock /** * queue_spin_unlock - release a queue spinlock @@ -26,4 +30,79 @@ static inline void queue_spin_unlock(struct qspinlock *lock) #include <asm-generic/qspinlock.h> +union a...
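The hunk above is cut off, but it declares the static key that gates the unfair path throughout the series. A minimal sketch of the usual static-key pattern it relies on follows; the function name and branch bodies are illustrative, not the patch's code:

#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
extern struct static_key paravirt_unfairlocks_enabled;

static __always_inline void example_lock_path(struct qspinlock *lock)
{
	/*
	 * static_key_false() compiles down to a patchable branch, so a
	 * bare-metal kernel pays essentially nothing for this check; the
	 * guest-detection initcalls flip the key once at boot.
	 */
	if (static_key_false(&paravirt_unfairlocks_enabled)) {
		/* unfair, lock-stealing acquisition goes here */
	} else {
		/* regular fair queue-spinlock acquisition goes here */
	}
}
#endif /* CONFIG_PARAVIRT_UNFAIR_LOCKS */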
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...nclude/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 98db42e..c278aed 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -56,4 +56,78 @@ static inline void queue_spin_unlock(struct qspinlock *lock) #include <asm-generic/qspinlock.h> +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS +/** + * queue_spin_lock_unfair - acquire a queue spinlock unfairly + * @lock: Pointer to queue spinlock structure + */ +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock) +{ + union arch_qspinlock *qlock = (union arch_qspinlock *)lock; + + if (likely(cmpxchg(&qlock->...
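The excerpt stops inside the cmpxchg. The fast path it documents is: try to grab the lock byte with a single atomic compare-and-exchange, regardless of who is already queued, and fall back to the slowpath only on failure. A sketch along those lines; the lock-byte field and constant names are borrowed from the v10 excerpt further down and may differ in this version, and the slowpath call is illustrative:

static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/*
	 * Unfair: grab the lock byte with one atomic op, ahead of any
	 * queued waiters.  Field/constant names (->locked, _Q_LOCKED_VAL)
	 * are taken from the v10 excerpt below, not this hunk.
	 */
	if (likely(cmpxchg(&qlock->locked, 0, _Q_LOCKED_VAL) == 0))
		return;
	/* Contended: fall back to the queueing slowpath (call illustrative). */
	queue_spin_lock_slowpath(lock, 0);
}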
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...nclude/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 7f3129c..0e6740a 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -51,4 +51,76 @@ static inline void queue_spin_unlock(struct qspinlock *lock) #include <asm-generic/qspinlock.h> +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS +/** + * queue_spin_lock_unfair - acquire a queue spinlock unfairly + * @lock: Pointer to queue spinlock structure + */ +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock) +{ + union arch_qspinlock *qlock = (union arch_qspinlock *)lock; + + if (likely(cmpxchg(&qlock->...
2014 May 07
0
[PATCH v10 12/19] unfair qspinlock: Variable frequency lock stealing mechanism
...+++++- 1 files changed, 146 insertions(+), 1 deletions(-) diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index a14241e..06dd486 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -63,6 +63,11 @@ */ struct qnode { struct mcs_spinlock mcs; +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS + int lsteal_mask; /* Lock stealing frequency mask */ + u32 prev_tail; /* Tail code of previous node */ + struct qnode *qprev; /* Previous queue node addr */ +#endif }; #define qhead mcs.locked /* The queue head flag */ @@ -215,6 +220,139 @@ xchg_tail(struct qspinlock *lock, u32 tail, u32...
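The hunk stops right after the new qnode fields, so the stealing loop itself is not visible here. One plausible use of a frequency mask like lsteal_mask, sketched as a guess at the shape rather than the patch's code: a queued waiter attempts the unfair steal only on every (lsteal_mask + 1)-th spin, so stealing can be throttled per node. The qhead flag is the mcs.locked alias defined in the hunk above.

static inline bool unfair_spin_and_steal(struct qspinlock *lock,
					 struct qnode *node)
{
	struct __qspinlock *l = (void *)lock;
	u32 cnt = 0;

	/* Spin until we become queue head, stealing only occasionally. */
	while (!ACCESS_ONCE(node->qhead)) {
		if (!(++cnt & node->lsteal_mask) &&
		    cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0)
			return true;	/* lock stolen */
		cpu_relax();
	}
	return false;	/* became queue head; continue on the fair path */
}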
2014 Feb 26
2
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...e/asm/qspinlock.h > index 98db42e..c278aed 100644 > --- a/arch/x86/include/asm/qspinlock.h > +++ b/arch/x86/include/asm/qspinlock.h > @@ -56,4 +56,78 @@ static inline void queue_spin_unlock(struct qspinlock *lock) > > #include <asm-generic/qspinlock.h> > > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/** > + * queue_spin_lock_unfair - acquire a queue spinlock unfairly > + * @lock: Pointer to queue spinlock structure > + */ > +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock) > +{ > + union arch_qspinlock *qlock = (union arch_qspinlock *)lock; ...
2014 Feb 26
2
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...e/asm/qspinlock.h > index 98db42e..c278aed 100644 > --- a/arch/x86/include/asm/qspinlock.h > +++ b/arch/x86/include/asm/qspinlock.h > @@ -56,4 +56,78 @@ static inline void queue_spin_unlock(struct qspinlock *lock) > > #include <asm-generic/qspinlock.h> > > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/** > + * queue_spin_lock_unfair - acquire a queue spinlock unfairly > + * @lock: Pointer to queue spinlock structure > + */ > +static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock) > +{ > + union arch_qspinlock *qlock = (union arch_qspinlock *)lock; ...
2014 Feb 26
1
[PATCH RFC v5 5/8] pvqspinlock, x86: Enable unfair queue spinlock in a KVM guest
...l/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; I think you also need to check for !kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)? Oth...
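Putting the truncated hunk together with the reviewer's remark, the KVM init hook would look roughly like this. The KVM_FEATURE_PV_UNHALT check is the reviewer's suggestion rather than the posted code, the printk text is a placeholder, and the early_initcall() registration is assumed by analogy with the kvm_spinlock_init_jump() line visible in the same hunk:

#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
/*
 * Enable unfair locks when running as a para-virtualized KVM guest.
 */
static __init int kvm_unfair_locks_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	/* Reviewer's suggestion: also require the PV unhalt feature. */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_unfairlocks_enabled);
	printk(KERN_INFO "KVM: unfair spinlocks enabled\n"); /* message text assumed */
	return 0;
}
early_initcall(kvm_unfair_locks_init_jump);
#endif /* CONFIG_PARAVIRT_UNFAIR_LOCKS */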
2014 May 08
1
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...b/kernel/locking/qspinlock.c > index 9e7659e..10e87e1 100644 > --- a/kernel/locking/qspinlock.c > +++ b/kernel/locking/qspinlock.c > @@ -227,6 +227,14 @@ static __always_inline int get_qlock(struct qspinlock *lock) > { > struct __qspinlock *l = (void *)lock; > > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > + if (static_key_false(&paravirt_unfairlocks_enabled)) > + /* > + * Need to use atomic operation to get the lock when > + * lock stealing can happen. > + */ > + return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0; That's missing {}. > +#endif > b...
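For reference, this is the shape of get_qlock() the review is asking for, with braces added around the commented return. Everything after the #endif is truncated in the excerpt, so the trailing return below is only a placeholder for the fair-path byte store:

static __always_inline int get_qlock(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
	if (static_key_false(&paravirt_unfairlocks_enabled)) {
		/*
		 * Need to use an atomic operation to get the lock when
		 * lock stealing can happen.
		 */
		return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
	}
#endif
	/*
	 * Fair path: cut off in the excerpt above; this return stands in
	 * for the uncontended byte store in the patch.
	 */
	return 1;
}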
2014 Feb 26
1
[PATCH RFC v5 5/8] pvqspinlock, x86: Enable unfair queue spinlock in a KVM guest
...l/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; I think you also need to check for !kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)? Oth...
2014 May 08
1
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...b/kernel/locking/qspinlock.c > index 9e7659e..10e87e1 100644 > --- a/kernel/locking/qspinlock.c > +++ b/kernel/locking/qspinlock.c > @@ -227,6 +227,14 @@ static __always_inline int get_qlock(struct qspinlock *lock) > { > struct __qspinlock *l = (void *)lock; > > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > + if (static_key_false(&paravirt_unfairlocks_enabled)) > + /* > + * Need to use atomic operation to get the lock when > + * lock stealing can happen. > + */ > + return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0; That's missing {}. > +#endif > b...
2014 Feb 27
1
[PATCH RFC v5 5/8] pvqspinlock, x86: Enable unfair queue spinlock in a KVM guest
...nel/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; > + > + static_key_slow_inc(&paravirt_unfairlocks_enabled); > + printk(K...
2014 Feb 27
1
[PATCH RFC v5 5/8] pvqspinlock, x86: Enable unfair queue spinlock in a KVM guest
...nel/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; > + > + static_key_slow_inc(&paravirt_unfairlocks_enabled); > + printk(K...
2014 Feb 27
1
[PATCH RFC v5 5/8] pvqspinlock, x86: Enable unfair queue spinlock in a KVM guest
...l/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; > + kvm_kick_cpu_type() in patch 8 assumes that host has support for kick hyperca...
2014 Feb 27
1
[PATCH RFC v5 5/8] pvqspinlock, x86: Enable unfair queue spinlock in a KVM guest
...l/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; > + kvm_kick_cpu_type() in patch 8 assumes that host has support for kick hyperca...
2014 Mar 19
1
[PATCH v7 07/11] pvqspinlock, x86: Allow unfair queue spinlock in a XEN guest
...eletions(-) > > diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c > index 0982233..66bb6f5 100644 > --- a/arch/x86/xen/setup.c > +++ b/arch/x86/xen/setup.c > @@ -625,3 +625,22 @@ void __init xen_arch_setup(void) > numa_off = 1; > #endif > } > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a Xen guest > + */ > +static __init int xen_unfair_locks_init_jump(void) > +{ > + /* > + * Disable unfair lock if not running in a PV domain > + */ > + if (!xen_pv_domain()) > + return 0; I would just make this 'xen_...
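The Xen hook mirrors the KVM one: bail out unless the guest-type predicate holds, then flip the static key. The tail of the function is not shown in this excerpt, so the static_key_slow_inc()/early_initcall() lines below are assumed by analogy with the KVM hook, and the reviewer's alternative predicate is cut off above and not reproduced:

#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
/*
 * Enable unfair locks when running as a Xen PV guest.
 */
static __init int xen_unfair_locks_init_jump(void)
{
	/* Do nothing unless we are a PV domain. */
	if (!xen_pv_domain())
		return 0;

	/* Assumed tail, by analogy with kvm_unfair_locks_init_jump(). */
	static_key_slow_inc(&paravirt_unfairlocks_enabled);
	return 0;
}
early_initcall(xen_unfair_locks_init_jump);
#endif /* CONFIG_PARAVIRT_UNFAIR_LOCKS */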
2014 Mar 19
1
[PATCH v7 07/11] pvqspinlock, x86: Allow unfair queue spinlock in a XEN guest
...eletions(-) > > diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c > index 0982233..66bb6f5 100644 > --- a/arch/x86/xen/setup.c > +++ b/arch/x86/xen/setup.c > @@ -625,3 +625,22 @@ void __init xen_arch_setup(void) > numa_off = 1; > #endif > } > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a Xen guest > + */ > +static __init int xen_unfair_locks_init_jump(void) > +{ > + /* > + * Disable unfair lock if not running in a PV domain > + */ > + if (!xen_pv_domain()) > + return 0; I would just make this 'xen_...
2014 Mar 20
3
[PATCH v7 06/11] pvqspinlock, x86: Allow unfair queue spinlock in a KVM guest
...nel/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; > + > + static_key_slow_inc(&paravirt_unfairlocks_enabled); > + printk(K...
2014 Mar 20
3
[PATCH v7 06/11] pvqspinlock, x86: Allow unfair queue spinlock in a KVM guest
...nel/kvm.c > index 713f1b3..a489140 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -826,3 +826,20 @@ static __init int kvm_spinlock_init_jump(void) > early_initcall(kvm_spinlock_init_jump); > > #endif /* CONFIG_PARAVIRT_SPINLOCKS */ > + > +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS > +/* > + * Enable unfair lock if running in a real para-virtualized environment > + */ > +static __init int kvm_unfair_locks_init_jump(void) > +{ > + if (!kvm_para_available()) > + return 0; > + > + static_key_slow_inc(&paravirt_unfairlocks_enabled); > + printk(K...
2014 Mar 19
15
[PATCH v7 00/11] qspinlock: a 4-byte queue spinlock with PV support
v6->v7: - Remove an atomic operation from the 2-task contending code - Shorten the names of some macros - Make the queue waiter attempt to steal the lock when unfair lock is enabled. - Remove lock holder kick from the PV code and fix a race condition - Run the unfair lock & PV code on overcommitted KVM guests to collect performance data. v5->v6: - Change the optimized
2014 Mar 19
15
[PATCH v7 00/11] qspinlock: a 4-byte queue spinlock with PV support
v6->v7: - Remove an atomic operation from the 2-task contending code - Shorten the names of some macros - Make the queue waiter attempt to steal the lock when unfair lock is enabled. - Remove lock holder kick from the PV code and fix a race condition - Run the unfair lock & PV code on overcommitted KVM guests to collect performance data. v5->v6: - Change the optimized