Pan Xinhui
2016-Apr-28 10:55 UTC
[PATCH resend] powerpc: enable qspinlock and its virtualization support
From: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>

This patch aims to enable qspinlock on PPC. And on pseries platform, it also support
paravirt qspinlock.

Signed-off-by: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/qspinlock.h               | 37 +++++++++++++++
 arch/powerpc/include/asm/qspinlock_paravirt.h      | 36 +++++++++++++++
 .../powerpc/include/asm/qspinlock_paravirt_types.h | 13 ++++++
 arch/powerpc/include/asm/spinlock.h                | 31 ++++++++-----
 arch/powerpc/include/asm/spinlock_types.h          |  4 ++
 arch/powerpc/kernel/paravirt.c                     | 52 ++++++++++++++++++++++
 arch/powerpc/lib/locks.c                           | 32 +++++++++++++
 arch/powerpc/platforms/pseries/setup.c             |  5 +++
 8 files changed, 198 insertions(+), 12 deletions(-)
 create mode 100644 arch/powerpc/include/asm/qspinlock.h
 create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h
 create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt_types.h
 create mode 100644 arch/powerpc/kernel/paravirt.c

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
new file mode 100644
index 0000000..d4e4dc3
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#define SPIN_THRESHOLD (1 << 15)
+#define queued_spin_unlock queued_spin_unlock
+
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	/* no load/store can be across the unlock()*/
+	smp_store_release((u8 *)lock, 0);
+}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+#include <asm/qspinlock_paravirt.h>
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_queued_spin_unlock(lock);
+}
+#else
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+#endif
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_POWERPC_QSPINLOCK_H */
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h
new file mode 100644
index 0000000..86e81c3
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,36 @@
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+#error "do not include this file"
+#endif
+
+#ifndef _ASM_QSPINLOCK_PARAVIRT_H
+#define _ASM_QSPINLOCK_PARAVIRT_H
+
+#include <asm/qspinlock_paravirt_types.h>
+
+extern void pv_lock_init(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+	pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_lock_op.unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val)
+{
+	pv_lock_op.wait(ptr, val, -1);
+}
+
+static inline void pv_kick(int cpu)
+{
+	pv_lock_op.kick(cpu);
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt_types.h b/arch/powerpc/include/asm/qspinlock_paravirt_types.h
new file mode 100644
index 0000000..e1fdeb0
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt_types.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_QSPINLOCK_PARAVIRT_TYPES_H
+#define _ASM_QSPINLOCK_PARAVIRT_TYPES_H
+
+struct pv_lock_ops {
+	void (*lock)(struct qspinlock *lock, u32 val);
+	void (*unlock)(struct qspinlock *lock);
+	void (*wait)(u8 *ptr, u8 val, int cpu);
+	void (*kick)(int cpu);
+};
+
+extern struct pv_lock_ops pv_lock_op;
+
+#endif
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 523673d..3b65372 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -52,6 +52,24 @@
 #define SYNC_IO
 #endif
 
+#if defined(CONFIG_PPC_SPLPAR)
+/* We only yield to the hypervisor if we are in shared processor mode */
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __spin_yield_cpu(int cpu);
+extern void __spin_wake_cpu(int cpu);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x)		barrier()
+#define __spin_yield_cpu(x)	barrier()
+#define __spin_wake_cpu(x)	barrier()
+#define __rw_yield(x)		barrier()
+#define SHARED_PROCESSOR	0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
 	return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * held.  Conveniently, we have a word in the paca that holds this
  * value.
  */
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x)		barrier()
-#define __rw_yield(x)		barrier()
-#define SHARED_PROCESSOR	0
-#endif
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
@@ -169,6 +175,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif
 
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 2351adc..bd7144e 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -5,11 +5,15 @@
 # error "please don't include this file directly"
 #endif
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
 typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#endif
 
 typedef struct {
 	volatile signed int lock;
diff --git a/arch/powerpc/kernel/paravirt.c b/arch/powerpc/kernel/paravirt.c
new file mode 100644
index 0000000..355c9fb
--- /dev/null
+++ b/arch/powerpc/kernel/paravirt.c
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+
+static void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+
+static void __native_wait(u8 *ptr, u8 val, int cpu)
+{
+}
+
+static void __native_kick(int cpu)
+{
+}
+
+static void __pv_wait(u8 *ptr, u8 val, int cpu)
+{
+	HMT_low();
+	__spin_yield_cpu(cpu);
+	HMT_medium();
+}
+
+static void __pv_kick(int cpu)
+{
+	__spin_wake_cpu(cpu);
+}
+
+struct pv_lock_ops pv_lock_op = {
+	.lock = native_queued_spin_lock_slowpath,
+	.unlock = __native_queued_spin_unlock,
+	.wait = __native_wait,
+	.kick = __native_kick,
+};
+EXPORT_SYMBOL(pv_lock_op);
+
+void __init pv_lock_init(void)
+{
+	if (SHARED_PROCESSOR) {
+		__pv_init_lock_hash();
+		pv_lock_op.lock = __pv_queued_spin_lock_slowpath;
+		pv_lock_op.unlock = __pv_queued_spin_unlock;
+		pv_lock_op.wait = __pv_wait;
+		pv_lock_op.kick = __pv_kick;
+	}
+}
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..6e9d3bb 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,35 @@
 #include <asm/hvcall.h>
 #include <asm/smp.h>
 
+void __spin_yield_cpu(int cpu)
+{
+	unsigned int holder_cpu = cpu, yield_count;
+
+	if (cpu == -1) {
+		plpar_hcall_norets(H_CEDE);
+		return;
+	}
+	BUG_ON(holder_cpu >= nr_cpu_ids);
+	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+	if ((yield_count & 1) == 0)
+		return;		/* virtual cpu is currently running */
+	rmb();
+	plpar_hcall_norets(H_CONFER,
+		get_hard_smp_processor_id(holder_cpu), yield_count);
+}
+EXPORT_SYMBOL_GPL(__spin_yield_cpu);
+
+void __spin_wake_cpu(int cpu)
+{
+	unsigned int holder_cpu = cpu;
+
+	BUG_ON(holder_cpu >= nr_cpu_ids);
+	plpar_hcall_norets(H_PROD,
+		get_hard_smp_processor_id(holder_cpu));
+}
+EXPORT_SYMBOL_GPL(__spin_wake_cpu);
+
+#ifndef CONFIG_QUEUED_SPINLOCKS
 void __spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
@@ -42,6 +71,7 @@ void __spin_yield(arch_spinlock_t *lock)
 		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 EXPORT_SYMBOL_GPL(__spin_yield);
+#endif
 
 /*
  * Waiting for a read lock or a write lock on a rwlock...
@@ -69,6 +99,7 @@ void __rw_yield(arch_rwlock_t *rw)
 }
 #endif
 
+#ifndef CONFIG_QUEUED_SPINLOCKS
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	smp_mb();
@@ -84,3 +115,4 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(arch_spin_unlock_wait);
+#endif
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6e944fc..c9f056e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -547,6 +547,11 @@ static void __init pSeries_setup_arch(void)
 				"%ld\n", rc);
 		}
 	}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	pv_lock_init();
+#endif
+
 }
 
 static int __init pSeries_init_panel(void)
-- 
2.4.3
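For readers following the call path: the <asm-generic/qspinlock.h> include at the bottom of the new asm/qspinlock.h supplies the lock fastpath, and only a contended acquisition falls through to the queued_spin_lock_slowpath() wrapper defined above (which, under CONFIG_PARAVIRT_SPINLOCKS, indirects through pv_lock_op.lock). A simplified sketch of that fastpath shape, not the exact generic header:

	static __always_inline void queued_spin_lock(struct qspinlock *lock)
	{
		u32 val;

		/* Uncontended case: take the lock with a single acquire cmpxchg. */
		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;

		/* Contended case: enter the arch-supplied (possibly paravirt) slowpath. */
		queued_spin_lock_slowpath(lock, val);
	}

At boot, pv_lock_init() then decides whether that slowpath stays native or is switched to __pv_queued_spin_lock_slowpath() together with the H_CONFER/H_PROD based wait and kick hooks.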
Waiman Long
2016-Apr-28 21:07 UTC
[PATCH resend] powerpc: enable qspinlock and its virtualization support
On 04/28/2016 06:55 AM, Pan Xinhui wrote:
> From: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>
>
> This patch aims to enable qspinlock on PPC. And on pseries platform, it also support
> paravirt qspinlock.
>
> Signed-off-by: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/qspinlock.h               | 37 +++++++++++++++
>  arch/powerpc/include/asm/qspinlock_paravirt.h      | 36 +++++++++++++++
>  .../powerpc/include/asm/qspinlock_paravirt_types.h | 13 ++++++
>  arch/powerpc/include/asm/spinlock.h                | 31 ++++++++-----
>  arch/powerpc/include/asm/spinlock_types.h          |  4 ++
>  arch/powerpc/kernel/paravirt.c                     | 52 ++++++++++++++++++++++
>  arch/powerpc/lib/locks.c                           | 32 +++++++++++++
>  arch/powerpc/platforms/pseries/setup.c             |  5 +++
>  8 files changed, 198 insertions(+), 12 deletions(-)
>  create mode 100644 arch/powerpc/include/asm/qspinlock.h
>  create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h
>  create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt_types.h
>  create mode 100644 arch/powerpc/kernel/paravirt.c
>
>

This is just an enablement patch. You will also need a patch to activate qspinlock for, at least, some PPC configs. Right?

It also has a dependency on the pv_wait() patch that I sent out to extend the parameter list. Some performance data on how a PPC system will perform with and without qspinlock would also make helpful data points.

Cheers,
Longman
Pan Xinhui
2016-Apr-29 02:34 UTC
[PATCH resend] powerpc: enable qspinlock and its virtualization support
On 2016年04月29日 05:07, Waiman Long wrote:
> On 04/28/2016 06:55 AM, Pan Xinhui wrote:
>> From: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>
>>
>> This patch aims to enable qspinlock on PPC. And on pseries platform, it also support
>> paravirt qspinlock.
>>
>> Signed-off-by: Pan Xinhui <xinhui.pan at linux.vnet.ibm.com>
>> ---
>>  arch/powerpc/include/asm/qspinlock.h               | 37 +++++++++++++++
>>  arch/powerpc/include/asm/qspinlock_paravirt.h      | 36 +++++++++++++++
>>  .../powerpc/include/asm/qspinlock_paravirt_types.h | 13 ++++++
>>  arch/powerpc/include/asm/spinlock.h                | 31 ++++++++-----
>>  arch/powerpc/include/asm/spinlock_types.h          |  4 ++
>>  arch/powerpc/kernel/paravirt.c                     | 52 ++++++++++++++++++++++
>>  arch/powerpc/lib/locks.c                           | 32 +++++++++++++
>>  arch/powerpc/platforms/pseries/setup.c             |  5 +++
>>  8 files changed, 198 insertions(+), 12 deletions(-)
>>  create mode 100644 arch/powerpc/include/asm/qspinlock.h
>>  create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h
>>  create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt_types.h
>>  create mode 100644 arch/powerpc/kernel/paravirt.c
>>
>>
>
> This is just an enablement patch. You will also need a patch to activate qspinlock for, at least, some PPC configs. Right?
>
Yep, I want to enable the Kconfig and Makefile bits in a final patch. It looks like this:

diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2da380f..ae7c2f1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_P7_NAP)	+= idle_power7.o
 procfs-y			:= proc_powerpc.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
+obj-$(CONFIG_PARAVIRT_SPINLOCKS) += paravirt.o
 rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI)	:= rtas_pci.o
 obj-$(CONFIG_PPC_RTAS)		+= rtas.o rtas-rtc.o $(rtaspci-y-y)
 obj-$(CONFIG_PPC_RTAS_DAEMON)	+= rtasd.o
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index bec90fb..46632e4 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -21,6 +21,7 @@ config PPC_PSERIES
 	select HOTPLUG_CPU if SMP
 	select ARCH_RANDOM
 	select PPC_DOORBELL
+	select ARCH_USE_QUEUED_SPINLOCKS
 	default y
 
 config PPC_SPLPAR
@@ -127,3 +128,11 @@ config HV_PERF_CTRS
 	  systems. 24x7 is available on Power 8 systems.
 
 	  If unsure, select Y.
+
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtialization support for qspinlock"
+	depends on PPC_SPLPAR && QUEUED_SPINLOCKS
+	default y
+	help
+	  If platform supports virtualization, for example PowerVM, this option
+	  can let guest have a better performace.
-- 
2.4.3

> It also has a dependency on the pv_wait() patch that I sent out to extend the parameter list. Some performance data on how a PPC system will perform with and without qspinlock would also make helpful data points.
>
For now, pv_wait() on ppc is defined as

	static inline void pv_wait(u8 *ptr, u8 val)

My plan is to wait until your patch goes into the kernel tree first, and then send out another patch that extends the parameter list.

Yes, I need to include some performance data in the patch description.

thanks
xinhui

> Cheers,
> Longman
>
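To make the parameter-list plan above concrete: once the generic slowpath passes the extra argument down, the ppc pv_wait() wrapper could forward it to the existing three-argument pv_lock_op.wait hook instead of hard-coding -1. This is a hypothetical sketch only; the name and meaning of the third parameter are assumptions pending the pv_wait() interface change:

	/* Hypothetical: lockcpu would be supplied by the extended generic slowpath. */
	static inline void pv_wait(u8 *ptr, u8 val, int lockcpu)
	{
		/*
		 * Confer cycles to the holder's vCPU when it is known;
		 * __spin_yield_cpu() already falls back to H_CEDE for -1.
		 */
		pv_lock_op.wait(ptr, val, lockcpu);
	}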