search for: __ticket_clear_slowpath

Displaying 4 results from an estimated 5 matches for "__ticket_clear_slowpath".

2015 Apr 30
0
[PATCH 3/6] x86: introduce new pvops function clear_slowpath
...avirt.h index 8957810..318f077 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -724,6 +724,13 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
 	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
+static __always_inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
+						    __ticket_t head)
+{
+	PVOP_VCALL2(pv_lock_ops.clear_slowpath, lock, head);
+}
+
+void pv_lock_activate(void);
 #endif
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f7b0b5c..3432713 100644
--...
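For readers not fluent in pvops: PVOP_VCALL2 ultimately dispatches through a per-field entry in the pv_lock_ops structure, which is fixed up at boot. Below is a minimal stand-alone C analogue of that indirection, assuming a plain function-pointer table with a no-op native backend; it is a sketch of the mechanism, not the kernel's actual macro and runtime-patching machinery, and every name not present in the hunk above is illustrative.

#include <stdint.h>

typedef uint16_t __ticket_t;

typedef struct arch_spinlock {
	union {
		uint32_t head_tail;
		struct { __ticket_t head, tail; } tickets;
	};
} arch_spinlock_t;

struct pv_lock_ops {
	void (*clear_slowpath)(arch_spinlock_t *lock, __ticket_t head);
};

/* Native backend: the slowpath flag is never set on bare metal, so
 * clearing it can be a no-op (assumed; the series' point is that the
 * bare-metal case should cost as little as possible). */
static void native_clear_slowpath(arch_spinlock_t *lock, __ticket_t head)
{
	(void)lock;
	(void)head;
}

static struct pv_lock_ops pv_lock_ops = {
	.clear_slowpath = native_clear_slowpath,
};

/* Stand-in for PVOP_VCALL2(pv_lock_ops.clear_slowpath, lock, head);
 * the real macro emits a patchable call site, not a load plus call. */
static inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
					   __ticket_t head)
{
	pv_lock_ops.clear_slowpath(lock, head);
}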
2015 Apr 30
0
[PATCH 2/6] x86: move decision about clearing slowpath flag into arch_spin_lock()
.../arch/x86/include/asm/spinlock.h
@@ -66,20 +66,18 @@ static inline int __tickets_equal(__ticket_t one, __ticket_t two)
 	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
 }
 
-static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
-						     __ticket_t head)
+static inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
+					   __ticket_t head)
 {
-	if (head & TICKET_SLOWPATH_FLAG) {
-		arch_spinlock_t old, new;
+	arch_spinlock_t old, new;
 
-		old.tickets.head = head;
-		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
-		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
-		ne...
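The snippet is cut off mid-function by the search index. For context, here is a stand-alone reconstruction of the pre-patch helper being renamed, based on the x86 ticket-spinlock code of that kernel generation; kernel macros are replaced with portable stand-ins so the logic can be read (and compiled) in isolation, and the constants and the cmpxchg stand-in should be treated as assumptions.

#include <stdint.h>

typedef uint16_t __ticket_t;

/* Assumed CONFIG_PARAVIRT_SPINLOCKS layout: tickets advance in steps
 * of 2 so that bit 0 of head can carry the slowpath flag. */
#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)
#define TICKET_LOCK_INC		((__ticket_t)2)

typedef struct arch_spinlock {
	union {
		uint32_t head_tail;
		struct { __ticket_t head, tail; } tickets;
	};
} arch_spinlock_t;

static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
						     __ticket_t head)
{
	if (head & TICKET_SLOWPATH_FLAG) {
		arch_spinlock_t old, new;

		old.tickets.head = head;
		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;

		/* Clearing is only valid while no further waiter is
		 * queued, i.e. tail is exactly one increment ahead. */
		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
		new.tickets.tail = old.tickets.tail;

		/* GCC builtin standing in for the kernel's cmpxchg();
		 * losing the race to a new waiter is harmless. */
		__sync_val_compare_and_swap(&lock->head_tail,
					    old.head_tail, new.head_tail);
	}
}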
2015 Apr 30
12
[PATCH 0/6] x86: reduce paravirtualized spinlock overhead
Paravirtualized spinlocks produce some overhead even if the kernel is running on bare metal. The main reason is the more complex locking and unlocking functions. Unlocking especially is no longer just one instruction; it has become so complex that it is no longer inlined. This patch series addresses the issue by adding two more pvops functions to reduce the size of the inlined spinlock functions. When...
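To make that concrete: with pv ticketlocks enabled at build time, unlocking cannot simply bump the head ticket; it must fetch the old head, test the slowpath flag, and possibly kick a vCPU that went to sleep in the hypervisor. The pre-series unlock path looked roughly like the kernel-style sketch below (helper names such as xadd(), __ticket_unlock_kick() and UNLOCK_LOCK_PREFIX are recalled from that era's sources; treat the details as best-effort, not the literal code).

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		__ticket_t head;

		/* Advance the head ticket and fetch its old value. */
		head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

		if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
			/* A waiter blocked in the hypervisor: wake it. */
			head &= ~TICKET_SLOWPATH_FLAG;
			__ticket_unlock_kick(lock, head + TICKET_LOCK_INC);
		}
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC,
		      UNLOCK_LOCK_PREFIX);
}

The xadd plus the flag test are what push this past the inlining threshold; the series' answer is to hide the whole sequence behind a single patchable pvops call.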
2015 Apr 30
0
[PATCH 4/6] x86: introduce new pvops function spin_unlock
...+++++++++++
 7 files changed, 104 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 318f077..2f39129 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -730,6 +730,11 @@ static __always_inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
 	PVOP_VCALL2(pv_lock_ops.clear_slowpath, lock, head);
 }
 
+static __always_inline void __ticket_unlock(arch_spinlock_t *lock)
+{
+	PVOP_VCALL1_LOCK(pv_lock_ops.unlock, lock);
+}
+
 void pv_lock_activate(void);
 #endif
 
@@ -843,6 +848,7 @@ static inline notrace unsigned lon...
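PVOP_VCALL1_LOCK appears to be a lock-ops variant of the PVOP_VCALL macros introduced by this series. On bare metal the unlock op can then collapse back to a single increment of the head ticket; a plausible native backend, sketched under that assumption (not the patch's literal code):

/* Assumed native backend for pv_lock_ops.unlock: release the lock by
 * advancing the head ticket, with no slowpath bookkeeping at all. */
static void native_ticket_unlock(arch_spinlock_t *lock)
{
	__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}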