Waiman Long
2017-Feb-08 18:00 UTC
[PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
It was found that when running a fio sequential write test with an XFS
ramdisk on a 2-socket x86-64 system, the %CPU times as reported by perf
were as follows:

 71.27%  0.28%  fio  [k] down_write
 70.99%  0.01%  fio  [k] call_rwsem_down_write_failed
 69.43%  1.18%  fio  [k] rwsem_down_write_failed
 65.51% 54.57%  fio  [k] osq_lock
  9.72%  7.99%  fio  [k] __raw_callee_save___kvm_vcpu_is_preempted
  4.16%  4.16%  fio  [k] __kvm_vcpu_is_preempted

So making vcpu_is_preempted() a callee-save function has a pretty high
cost associated with it. As vcpu_is_preempted() is called within the
spinlock, mutex and rwsem slowpaths, there isn't much to gain by making
it callee-save. So it is now changed to a normal function call instead.

With this patch applied, the aggregate bandwidth of the fio sequential
write test increased slightly from 2563.3MB/s to 2588.1MB/s (about 1%).

Signed-off-by: Waiman Long <longman at redhat.com>
---
 arch/x86/include/asm/paravirt.h       | 2 +-
 arch/x86/include/asm/paravirt_types.h | 2 +-
 arch/x86/kernel/kvm.c                 | 7 ++-----
 arch/x86/kernel/paravirt-spinlocks.c  | 6 ++----
 arch/x86/xen/spinlock.c               | 4 +---
 5 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 864f57b..2515885 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -676,7 +676,7 @@ static __always_inline void pv_kick(int cpu)
 
 static __always_inline bool pv_vcpu_is_preempted(int cpu)
 {
-	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+	return PVOP_CALL1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
 }
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index bb2de45..88dc852 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -309,7 +309,7 @@ struct pv_lock_ops {
 	void (*wait)(u8 *ptr, u8 val);
 	void (*kick)(int cpu);
 
-	struct paravirt_callee_save vcpu_is_preempted;
+	bool (*vcpu_is_preempted)(int cpu);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 099fcba..eb3753d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -595,7 +595,6 @@ __visible bool __kvm_vcpu_is_preempted(int cpu)
 
 	return !!src->preempted;
 }
-PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
 
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -614,10 +613,8 @@ void __init kvm_spinlock_init(void)
 	pv_lock_ops.wait = kvm_wait;
 	pv_lock_ops.kick = kvm_kick_cpu;
 
-	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_lock_ops.vcpu_is_preempted =
-			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
-	}
+	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+		pv_lock_ops.vcpu_is_preempted = __kvm_vcpu_is_preempted;
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 6259327..da050bc 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -24,12 +24,10 @@ __visible bool __native_vcpu_is_preempted(int cpu)
 {
 	return false;
 }
-PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_lock_ops.vcpu_is_preempted.func ==
-		__raw_callee_save___native_vcpu_is_preempted;
+	return pv_lock_ops.vcpu_is_preempted == __native_vcpu_is_preempted;
 }
 
 struct pv_lock_ops pv_lock_ops = {
@@ -38,7 +36,7 @@ struct pv_lock_ops pv_lock_ops = {
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
-	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+	.vcpu_is_preempted = __native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 25a7c43..c85bb8f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -114,8 +114,6 @@ void xen_uninit_lock_cpu(int cpu)
 	per_cpu(irq_name, cpu) = NULL;
 }
 
-PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
-
 /*
  * Our init of PV spinlocks is split in two init functions due to us
  * using paravirt patching and jump labels patching and having to do
@@ -138,7 +136,7 @@ void __init xen_init_spinlocks(void)
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = xen_qlock_wait;
 	pv_lock_ops.kick = xen_qlock_kick;
-	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+	pv_lock_ops.vcpu_is_preempted = xen_vcpu_stolen;
 }
 
 static __init int xen_parse_nopvspin(char *arg)
-- 
1.8.3.1
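To make the shape of the change concrete, below is a minimal user-space C
sketch of the dispatch scheme the patch switches to: vcpu_is_preempted
becomes a plain function pointer in the ops structure, installed at init
time and invoked through an ordinary indirect call, rather than through
the register-preserving thunk that PV_CALLEE_SAVE_REGS_THUNK() generates.
All names here are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative analogue of struct pv_lock_ops after the patch:
 * vcpu_is_preempted is a plain function pointer rather than a
 * struct paravirt_callee_save wrapper. */
struct demo_lock_ops {
	bool (*vcpu_is_preempted)(int cpu);
};

/* Bare-metal default: a CPU is never preempted by a hypervisor. */
static bool native_vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;
}

/* Hypothetical guest implementation: pretend the hypervisor has
 * marked odd-numbered vCPUs as preempted. */
static bool guest_vcpu_is_preempted(int cpu)
{
	return cpu & 1;
}

static struct demo_lock_ops ops = {
	.vcpu_is_preempted = native_vcpu_is_preempted,
};

int main(void)
{
	bool have_steal_time = true;	/* stand-in for the KVM feature bit */

	/* Mirrors kvm_spinlock_init() overriding the native default. */
	if (have_steal_time)
		ops.vcpu_is_preempted = guest_vcpu_is_preempted;

	/* An ordinary indirect call, roughly what PVOP_CALL1 boils down
	 * to once the callee-save wrapper is gone. */
	printf("cpu 0 preempted: %d\n", ops.vcpu_is_preempted(0));
	printf("cpu 1 preempted: %d\n", ops.vcpu_is_preempted(1));
	return 0;
}

The cost the commit message measures comes from the extra thunk frame
(visible in the perf output as __raw_callee_save___kvm_vcpu_is_preempted)
that a callee-save call inserts between caller and function; with a plain
pointer the caller pays only the normal C calling convention.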
Waiman Long
2017-Feb-08 18:00 UTC
[PATCH 2/2] locking/mutex, rwsem: Reduce vcpu_is_preempted() calling frequency
As the vcpu_is_preempted() call is pretty costly compared with other
checks within mutex_spin_on_owner() and rwsem_spin_on_owner(), it is
now called at a reduced frequency of once every 256 iterations.

Signed-off-by: Waiman Long <longman at redhat.com>
---
 kernel/locking/mutex.c      | 5 ++++-
 kernel/locking/rwsem-xadd.c | 6 ++++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ad2d9e2..2ece0c4 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -423,6 +423,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		  struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
 	bool ret = true;
+	int loop = 0;
 
 	rcu_read_lock();
 	while (__mutex_owner(lock) == owner) {
@@ -436,9 +437,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 
 		/*
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
+		 * As vcpu_is_preempted is more costly to use, it is called at
+		 * a reduced frequency (once every 256 iterations).
 		 */
 		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		    (!(++loop & 0xff) && vcpu_is_preempted(task_cpu(owner)))) {
 			ret = false;
 			break;
 		}
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2ad8d8d..7a884a6 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -351,6 +351,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner = READ_ONCE(sem->owner);
+	int loop = 0;
 
 	if (!rwsem_owner_is_writer(owner))
 		goto out;
@@ -367,10 +368,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 
 		/*
 		 * abort spinning when need_resched or owner is not running or
-		 * owner's cpu is preempted.
+		 * owner's cpu is preempted. The preemption check is done at
+		 * a lower frequency because of its high cost.
 		 */
 		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		    (!(++loop & 0xff) && vcpu_is_preempted(task_cpu(owner)))) {
 			rcu_read_unlock();
 			return false;
 		}
-- 
1.8.3.1
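For reference, here is a minimal self-contained C sketch of the
rate-limiting pattern used in both hunks: the loop counter is incremented
on every iteration, and the expensive predicate is evaluated only when
the low 8 bits wrap to zero, i.e. once every 256 iterations. The
predicates below are hypothetical stand-ins for the cheap owner and
need_resched() checks and for vcpu_is_preempted(), used only to show the
counting trick:

#include <stdbool.h>
#include <stdio.h>

static int expensive_calls;

/* Hypothetical cheap exit condition (stands in for the owner and
 * need_resched() checks, which run on every iteration). */
static bool cheap_check(int i)
{
	return i >= 100000;
}

/* Hypothetical expensive condition (stands in for vcpu_is_preempted). */
static bool expensive_check(void)
{
	expensive_calls++;
	return false;
}

static void spin(void)
{
	int loop = 0;

	for (int i = 0; ; i++) {
		if (cheap_check(i))
			break;
		/* (++loop & 0xff) is zero once every 256 increments, so
		 * expensive_check() runs at 1/256 the loop frequency. */
		if (!(++loop & 0xff) && expensive_check())
			break;
	}
}

int main(void)
{
	spin();
	/* Expect about 100000 / 256 ~= 390 calls instead of 100000. */
	printf("expensive_check() called %d times\n", expensive_calls);
	return 0;
}

The mask-and-test form costs one increment and one AND in the common
case and avoids a divide; the trade-off is that preemption of the lock
holder can go unnoticed for up to 255 iterations.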
Peter Zijlstra
2017-Feb-08 19:05 UTC
[PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
On Wed, Feb 08, 2017 at 01:00:24PM -0500, Waiman Long wrote:
> It was found that when running a fio sequential write test with an XFS
> ramdisk on a 2-socket x86-64 system, the %CPU times as reported by perf
> were as follows:
>
>  71.27%  0.28%  fio  [k] down_write
>  70.99%  0.01%  fio  [k] call_rwsem_down_write_failed
>  69.43%  1.18%  fio  [k] rwsem_down_write_failed
>  65.51% 54.57%  fio  [k] osq_lock
>   9.72%  7.99%  fio  [k] __raw_callee_save___kvm_vcpu_is_preempted
>   4.16%  4.16%  fio  [k] __kvm_vcpu_is_preempted
>
> So making vcpu_is_preempted() a callee-save function has a pretty high
> cost associated with it. As vcpu_is_preempted() is called within the
> spinlock, mutex and rwsem slowpaths, there isn't much to gain by making
> it callee-save. So it is now changed to a normal function call instead.
>

Numbers for bare metal too please.
Peter Zijlstra
2017-Feb-08 19:05 UTC
[PATCH 2/2] locking/mutex,rwsem: Reduce vcpu_is_preempted() calling frequency
On Wed, Feb 08, 2017 at 01:00:25PM -0500, Waiman Long wrote:
> As the vcpu_is_preempted() call is pretty costly compared with other
> checks within mutex_spin_on_owner() and rwsem_spin_on_owner(), it is
> now called at a reduced frequency of once every 256 iterations.

That's just disgusting.
Waiman Long
2017-Feb-08 20:17 UTC
[PATCH 1/2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
On 02/08/2017 02:05 PM, Peter Zijlstra wrote:
> On Wed, Feb 08, 2017 at 01:00:24PM -0500, Waiman Long wrote:
>> It was found that when running a fio sequential write test with an XFS
>> ramdisk on a 2-socket x86-64 system, the %CPU times as reported by perf
>> were as follows:
>>
>>  71.27%  0.28%  fio  [k] down_write
>>  70.99%  0.01%  fio  [k] call_rwsem_down_write_failed
>>  69.43%  1.18%  fio  [k] rwsem_down_write_failed
>>  65.51% 54.57%  fio  [k] osq_lock
>>   9.72%  7.99%  fio  [k] __raw_callee_save___kvm_vcpu_is_preempted
>>   4.16%  4.16%  fio  [k] __kvm_vcpu_is_preempted
>>
>> So making vcpu_is_preempted() a callee-save function has a pretty high
>> cost associated with it. As vcpu_is_preempted() is called within the
>> spinlock, mutex and rwsem slowpaths, there isn't much to gain by making
>> it callee-save. So it is now changed to a normal function call instead.
>>
> Numbers for bare metal too please.

I will run the test on bare metal, but I doubt there will be a
noticeable difference.

Cheers,
Longman