Juergen Gross
2017-Sep-06 15:29 UTC
[PATCH v2 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() guarded by a static key, the bare metal case can
be optimized by patching the call away completely. A kernel running as
a guest can decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare metal behavior.
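For readers unfamiliar with static keys, here is a minimal sketch of
the jump label pattern this series builds on (my_feature_key and the
helpers around it are illustrative names, not part of the patches):

#include <linux/jump_label.h>
#include <asm/cpufeature.h>

/* The key defaults to true; every test site compiles to a patchable jump. */
DEFINE_STATIC_KEY_TRUE(my_feature_key);

static bool my_feature_enabled(void)
{
        /* Folds to straight-line code once the key is disabled. */
        if (static_branch_likely(&my_feature_key))
                return true;
        return false;
}

void __init my_feature_init(void)
{
        /* Flipping the key once at boot rewrites all test sites. */
        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                static_branch_disable(&my_feature_key);
}

Patch 1 applies this pattern to the new virt_spin_lock_key.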
V2:
- use static key instead of making virt_spin_lock() a pvops function
Juergen Gross (2):
paravirt/locks: use new static key for controlling call of
virt_spin_lock()
paravirt,xen: correct xen_nopvspin case
arch/x86/include/asm/qspinlock.h     | 11 +++++++++++
arch/x86/kernel/paravirt-spinlocks.c |  6 ++++++
arch/x86/kernel/smpboot.c            |  2 ++
arch/x86/xen/spinlock.c              |  2 ++
kernel/locking/qspinlock.c           |  4 ++++
5 files changed, 25 insertions(+)
--
2.12.3
Juergen Gross
2017-Sep-06 15:29 UTC
[PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
There are cases where a guest tries to switch spinlocks to bare metal
behavior (e.g. by setting the "xen_nopvspin" boot parameter). Today
this has the downside of falling back to the unfair test-and-set
scheme for qspinlocks, because virt_spin_lock() detects the
virtualized environment.

Add a static key controlling whether virt_spin_lock() should be
called or not. When running on bare metal, set the new key to false.
Signed-off-by: Juergen Gross <jgross at suse.com>
---
arch/x86/include/asm/qspinlock.h     | 11 +++++++++++
arch/x86/kernel/paravirt-spinlocks.c |  6 ++++++
arch/x86/kernel/smpboot.c            |  2 ++
kernel/locking/qspinlock.c           |  4 ++++
4 files changed, 23 insertions(+)
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 48a706f641f2..fc39389f196b 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H
+#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
@@ -46,9 +47,15 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
#endif
#ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
+ if (!static_branch_likely(&virt_spin_lock_key))
+ return false;
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
@@ -65,6 +72,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
return true;
}
+#else
+static inline void native_pv_lock_init(void)
+{
+}
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 8f2d1c9d43a8..2fc65ddea40d 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -42,3 +42,9 @@ struct pv_lock_ops pv_lock_ops = {
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
+
+void __init native_pv_lock_init(void)
+{
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ static_branch_disable(&virt_spin_lock_key);
+}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 54b9e89d4d6b..21500d3ba359 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,6 +77,7 @@
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
+#include <asm/qspinlock.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -1381,6 +1382,7 @@ void __init native_smp_prepare_boot_cpu(void)
/* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask);
cpu_set_state_online(me);
+ native_pv_lock_init();
}
void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 294294c71ba4..838d235b87ef 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -76,6 +76,10 @@
#define MAX_NODES 4
#endif
+#ifdef CONFIG_PARAVIRT
+DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+#endif
+
/*
* Per-CPU queue node structures; we can never have more than 4 nested
* contexts: task, softirq, hardirq, nmi.
--
2.12.3
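For context, the unfair test-and-set fallback referred to in the
commit message is the loop already present in virt_spin_lock() (it
sits in the elided context of the first hunk above); roughly, as in
the upstream code of this period:

        do {
                /* Busy-wait until the lock word appears free... */
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        /* ...then race all other spinners for it: no queue, no fairness. */
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;

A waiter can thus be overtaken arbitrarily often, which is why a guest
wanting bare metal behavior needs a way to skip this path entirely.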
Juergen Gross
2017-Sep-06 15:29 UTC
[PATCH v2 2/2] paravirt,xen: correct xen_nopvspin case
With the boot parameter "xen_nopvspin" specified a Xen guest should not
make use of paravirt spinlocks, but should behave as if running on bare
metal. This is currently not the case, however, as the qspinlock code
will fall back to a test-and-set scheme when it detects a hypervisor.

In order to avoid this, disable the virt_spin_lock_key.
Signed-off-by: Juergen Gross <jgross at suse.com>
---
arch/x86/xen/spinlock.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 25a7c4302ce7..e8ab80ad7a6f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <asm/paravirt.h>
+#include <asm/qspinlock.h>
#include <xen/interface/xen.h>
#include <xen/events.h>
@@ -129,6 +130,7 @@ void __init xen_init_spinlocks(void)
if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
+ static_branch_disable(&virt_spin_lock_key);
return;
}
printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
--
2.12.3
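For reference, the xen_pvspin flag tested above is cleared by the boot
parameter handler that already exists in arch/x86/xen/spinlock.c;
approximately:

static bool xen_pvspin = true;

static __init int xen_parse_nopvspin(char *arg)
{
        xen_pvspin = false;
        return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);

So with "xen_nopvspin" on the guest's command line, xen_init_spinlocks()
now also disables virt_spin_lock_key, and the guest uses the regular
qspinlock slow path instead of the test-and-set fallback.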
Waiman Long
2017-Sep-06 15:49 UTC
[PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
On 09/06/2017 11:29 AM, Juergen Gross wrote:
> There are cases where a guest tries to switch spinlocks to bare metal
> behavior (e.g. by setting the "xen_nopvspin" boot parameter). Today
> this has the downside of falling back to the unfair test-and-set
> scheme for qspinlocks, because virt_spin_lock() detects the
> virtualized environment.
>
> Add a static key controlling whether virt_spin_lock() should be
> called or not. When running on bare metal, set the new key to false.
>
> Signed-off-by: Juergen Gross <jgross at suse.com>
> [patch diff snipped; identical to the patch quoted above]

Acked-by: Waiman Long <longman at redhat.com>
Waiman Long
[PATCH v2 2/2] paravirt,xen: correct xen_nopvspin case
On 09/06/2017 11:29 AM, Juergen Gross wrote:
> With the boot parameter "xen_nopvspin" specified a Xen guest should not
> make use of paravirt spinlocks, but should behave as if running on bare
> metal. This is currently not the case, however, as the qspinlock code
> will fall back to a test-and-set scheme when it detects a hypervisor.
>
> In order to avoid this, disable the virt_spin_lock_key.
>
> Signed-off-by: Juergen Gross <jgross at suse.com>
> [patch diff snipped; identical to the patch quoted above]

Acked-by: Waiman Long <longman at redhat.com>