search for: __ticket_t

Displaying 12 unique results from an estimated 144 matches for "__ticket_t".

2014 Jun 28
2
[RFC PATCH v2] Implement Batched (group) ticket lock
...le unsigned long *)&lock->tickets.tail);
 }

+static int __ticket_lock_get_batch_mask(void)
+{
+        if (static_key_false(&paravirt_ticketlocks_enabled))
+                return TICKET_BATCH_MASK;
+        else
+                return TICKET_BATCH_MASK_NATIVE;
+}
+
+static void __ticket_lock_batch_spin(arch_spinlock_t *lock, __ticket_t ticket)
+{
+        if (static_key_false(&paravirt_ticketlocks_enabled)) {
+                register struct __raw_tickets inc, new;
+
+                inc.head = ACCESS_ONCE(lock->tickets.head);
+                barrier();
+                for (;;) {
+                        if (!(inc.head & TICKET_LOCK_LOCK_INC)) {
+                                new.head = inc.head | TICKET_LOCK_LOCK_INC;
+...
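The excerpt cuts off mid-function, but the batching idea it shows is that only waiters whose ticket falls in the current "group" contend for the lock word at once, which bounds unfairness between VCPUs. A minimal userspace sketch of that grouping test follows; the batch size of four and the in_current_batch() helper are illustrative assumptions, not the patch's actual TICKET_BATCH_MASK layout:

#include <stdint.h>
#include <stdio.h>

#define TICKET_BATCH      4u                     /* waiters allowed to contend at once */
#define TICKET_BATCH_MASK (~(TICKET_BATCH - 1u)) /* selects a ticket's group */

typedef uint16_t __ticket_t;

/* A waiter may contend only if its ticket is in the same batch as the
 * current head; later tickets keep spinning on their own and stay off
 * the contended cache line until their batch comes up. */
static int in_current_batch(__ticket_t head, __ticket_t ticket)
{
        return ((head ^ ticket) & TICKET_BATCH_MASK) == 0;
}

int main(void)
{
        /* With head == 5, tickets 4..7 form the active batch of four. */
        printf("%d\n", in_current_batch(5, 6)); /* 1: may contend now */
        printf("%d\n", in_current_batch(5, 9)); /* 0: waits for the next batch */
        return 0;
}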
2015 Feb 13
3
[PATCH V4] x86 spinlock: Fix memory corruption on completing completions
...ck->tickets.tail);
+        set_bit(0, (volatile unsigned long *)&lock->tickets.head);
 }

 #else /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 }
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */

+static inline int __tickets_equal(__ticket_t one, __ticket_t two)
+{
+        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+                                                     __ticket_t head)
+{
+        if (head & TICKET_SLOWPATH_FLAG) {
+                arch_spinlock_t old, new;
+
+                old.tickets.head = head;
+                new.ticke...
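__tickets_equal() is small enough to exercise standalone. A self-contained restatement, assuming the slowpath flag occupies bit 0 because tickets advance in steps of TICKET_LOCK_INC == 2 under CONFIG_PARAVIRT_SPINLOCKS:

#include <assert.h>
#include <stdint.h>

typedef uint8_t __ticket_t;

#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)

static int __tickets_equal(__ticket_t one, __ticket_t two)
{
        /* XOR exposes the differing bits; masking off the flag bit makes
         * two tickets compare equal even if only one carries the flag. */
        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

int main(void)
{
        assert(__tickets_equal(4, 4));                        /* identical */
        assert(__tickets_equal(4, 4 | TICKET_SLOWPATH_FLAG)); /* flag ignored */
        assert(!__tickets_equal(4, 6));                       /* really different */
        return 0;
}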
2014 May 28
7
[RFC] Implement Batched (group) ticket lock
...new.head_tail |= TICKET_LOCK_HEAD_INC;

         /* cmpxchg is a full barrier, so nothing can move before it */
         return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
@@ -123,7 +137,7 @@ static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
         BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

         /* Perform the unlock on the "before" copy */
-        old.tickets.head += TICKET_LOCK_INC;
+        old.tickets.head += TICKET_LOCK_HEAD_INC;

         /* Clear the slowpath flag */
         new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
@@ -150,14 +164...
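The unlock slowpath above works on head and tail through one combined head_tail word, with the slowpath flag kept in the tail half. A standalone sketch of that flag-clearing step; the field widths, sample values, and HEAD_INC of 2 are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  __ticket_t;
typedef uint16_t __ticketpair_t; /* head in the low half, tail in the high half */

#define TICKET_SHIFT         (sizeof(__ticket_t) * 8)
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
#define TICKET_LOCK_HEAD_INC 2

int main(void)
{
        /* tail = 8 with the slowpath flag set, head = 4 */
        __ticketpair_t head_tail =
                ((__ticketpair_t)(8 | TICKET_SLOWPATH_FLAG) << TICKET_SHIFT) | 4;

        /* Perform the unlock on the "before" copy: advance head... */
        head_tail += TICKET_LOCK_HEAD_INC;

        /* ...then clear the flag in the tail half, mirroring
         * new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT); */
        head_tail &= (__ticketpair_t)~((__ticketpair_t)TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

        printf("head=%u tail=%u\n",
               (unsigned)(head_tail & 0xff), (unsigned)(head_tail >> TICKET_SHIFT));
        return 0; /* prints head=6 tail=8 */
}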
2015 Feb 15
7
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...s.tail);
+        set_bit(0, (volatile unsigned long *)&lock->tickets.head);
+        barrier();
 }

 #else /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +61,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 }
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */

+static inline int __tickets_equal(__ticket_t one, __ticket_t two)
+{
+        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+                                                     __ticket_t head)
+{
+        if (head & TICKET_SLOWPATH_FLAG) {
+                arch_spinlock_t old, new;
+
+                old.tickets.head = head;
+                new.ticke...
2015 Feb 15
0
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...ck->tickets.tail);
+        set_bit(0, (volatile unsigned long *)&lock->tickets.head);
 }

 #else /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 }
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */

+static inline int __tickets_equal(__ticket_t one, __ticket_t two)
+{
+        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+                                                     __ticket_t head)
+{
+        if (head & TICKET_SLOWPATH_FLAG) {
+                arch_spinlock_t old, new;
+
+                old.tickets.head = head;
+                new.ticke...
2015 Apr 30
0
[PATCH 3/6] x86: introduce new pvops function clear_slowpath
...de/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -724,6 +724,13 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
         PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }

+static __always_inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
+                                                    __ticket_t head)
+{
+        PVOP_VCALL2(pv_lock_ops.clear_slowpath, lock, head);
+}
+
+void pv_lock_activate(void);
 #endif

 #ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f7b0b5c..3432713 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++...
2015 Apr 30
0
[PATCH 4/6] x86: introduce new pvops function spin_unlock
...pes.h b/arch/x86/include/asm/paravirt_types.h
index 3432713..a26af74 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -337,6 +337,7 @@ struct pv_lock_ops {
         struct paravirt_callee_save lock_spinning;
         void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
         void (*clear_slowpath)(arch_spinlock_t *lock, __ticket_t head);
+        void (*unlock)(arch_spinlock_t *lock);
 };

 /* This contains all the paravirt structures: we get a convenient
@@ -398,6 +399,7 @@ extern struct pv_lock_ops pv_lock_ops;
 unsigned paravirt_patch_nop(void);
 unsigned parav...
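Patches 3 and 4 of this series both extend pv_lock_ops, a struct of function pointers that boot-time code aims at either native or virtualized implementations. A rough userspace model of that dispatch; the PVOP_VCALL call-site patching is reduced to a plain pointer call, and the native_* stubs are hypothetical:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t __ticket_t;
typedef struct arch_spinlock {
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

struct pv_lock_ops {
        void (*clear_slowpath)(arch_spinlock_t *lock, __ticket_t head);
        void (*unlock)(arch_spinlock_t *lock);
};

static void native_clear_slowpath(arch_spinlock_t *lock, __ticket_t head)
{
        (void)lock; (void)head; /* bare metal: no slowpath flag in use */
}

static void native_unlock(arch_spinlock_t *lock)
{
        lock->tickets.head++;   /* the cheap add the series wants kept inlined */
}

static struct pv_lock_ops pv_lock_ops = {
        .clear_slowpath = native_clear_slowpath,
        .unlock         = native_unlock,
};

int main(void)
{
        arch_spinlock_t lock = { { 0, 1 } }; /* one ticket handed out */

        pv_lock_ops.unlock(&lock);           /* stand-in for a PVOP_VCALL */
        pv_lock_ops.clear_slowpath(&lock, lock.tickets.head);
        printf("head=%u tail=%u\n",
               (unsigned)lock.tickets.head, (unsigned)lock.tickets.tail);
        return 0;
}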
2015 Feb 09
2
[PATCH V2] x86 spinlock: Fix memory corruption on completing completions
...arch/x86/include/asm/spinlock.h
@@ -49,6 +49,23 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
         set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
 }

+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+        arch_spinlock_t old, new;
+        __ticket_t diff;
+
+        old.tickets = READ_ONCE(lock->tickets);
+        diff = (old.tickets.tail & ~TICKET_SLOWPATH_FLAG) - old.tickets.head;
+
+        /* try to clear slowpath flag when there are no contenders */
+        if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) &&
+            (diff == TICKET_LOCK_INC)) {
+                new =...
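The key test here is diff == TICKET_LOCK_INC: with tickets advancing in steps of 2 and the flag in bit 0, a difference of exactly one increment means the lock holder owns the only outstanding ticket, so nobody is queued and the flag can be dropped. The same arithmetic on plain values:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t __ticket_t;

#define TICKET_LOCK_INC      2
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)

static int can_clear_slowpath(__ticket_t head, __ticket_t tail)
{
        __ticket_t diff = (tail & ~TICKET_SLOWPATH_FLAG) - head;

        /* try to clear slowpath flag when there are no contenders */
        return (tail & TICKET_SLOWPATH_FLAG) && diff == TICKET_LOCK_INC;
}

int main(void)
{
        /* head 4, tail 6|flag: only the holder is outstanding -> clear. */
        printf("%d\n", can_clear_slowpath(4, 6 | TICKET_SLOWPATH_FLAG)); /* 1 */
        /* head 4, tail 8|flag: a waiter is queued -> keep the flag. */
        printf("%d\n", can_clear_slowpath(4, 8 | TICKET_SLOWPATH_FLAG)); /* 0 */
        return 0;
}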
2015 Feb 16
1
[Xen-devel] [PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...
> -        u8 old = ACCESS_ONCE(zero_stats);
> +        u8 old = READ_ONCE(zero_stats);
>          if (unlikely(old)) {
>                  ret = cmpxchg(&zero_stats, old, 0);
>                  /* This ensures only one fellow resets the stat */
> @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
>          struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
>          int cpu = smp_processor_id();
>          u64 start;
> +        __ticket_t head;
>          unsigned long flags;
>
>          /* If kicker interrupts not initialized yet, just spin */
> @@ -159,11 +160,15 @@ __visible void xen...
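The ACCESS_ONCE -> READ_ONCE churn in this hunk is mechanical: both force a single, untorn load, but ACCESS_ONCE's volatile cast of the lvalue itself was unreliable for non-scalar types on some GCC versions, which is why the kernel moved to READ_ONCE. A simplified userspace approximation of the two forms for a scalar:

#include <stdint.h>

/* old style: cast the lvalue itself to volatile */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* new style, specialized here to one scalar width: load through a
 * volatile-qualified pointer */
static inline uint8_t read_once_u8(const uint8_t *p)
{
        return *(const volatile uint8_t *)p;
}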
2015 Apr 30
12
[PATCH 0/6] x86: reduce paravirtualized spinlock overhead
Paravirtualized spinlocks produce some overhead even if the kernel is running on bare metal. The main reason is the more complex locking and unlocking functions; unlocking in particular is no longer just one instruction, but so complex that it is no longer inlined. This patch series addresses this issue by adding two more pvops functions to reduce the size of the inlined spinlock functions. When
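For contrast, the "one instruction" native unlock the cover letter refers to, as a userspace sketch using C11 atomics (the kernel uses its own primitives and increment size):

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
        _Atomic uint8_t head; /* next ticket allowed to hold the lock */
        uint8_t tail;         /* next ticket to hand out */
} tiny_ticket_lock;

static inline void tiny_unlock(tiny_ticket_lock *l)
{
        /* Bumping head releases the lock; on x86 this compiles down to
         * essentially a single increment. */
        atomic_fetch_add_explicit(&l->head, 1, memory_order_release);
}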
2014 May 29
0
[RFC] Implement Batched (group) ticket lock
....h
> @@ -3,15 +3,16 @@
>
>  #include <linux/types.h>
>
> +#define TICKET_LOCK_INC_SHIFT 1
> +#define __TICKET_LOCK_TAIL_INC (1<<TICKET_LOCK_INC_SHIFT)
> +
>  #ifdef CONFIG_PARAVIRT_SPINLOCKS
> -#define __TICKET_LOCK_INC 2
>  #define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
>  #else
> -#define __TICKET_LOCK_INC 1
>  #define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
>  #endif
>
> -#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
> +#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_TAIL_INC))
>  typedef u8 __ticket_t;
>  typedef u16 __ticke...
2014 May 28
0
[RFC] Implement Batched (group) ticket lock
...
> @@ -3,15 +3,16 @@
>
>  #include <linux/types.h>
>
> +#define TICKET_LOCK_INC_SHIFT 1
> +#define __TICKET_LOCK_TAIL_INC (1<<TICKET_LOCK_INC_SHIFT)
> +
>  #ifdef CONFIG_PARAVIRT_SPINLOCKS
> -#define __TICKET_LOCK_INC 2
>  #define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
>  #else
> -#define __TICKET_LOCK_INC 1
>  #define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
>  #endif

For the !CONFIG_PARAVIRT case, TICKET_LOCK_INC_SHIFT used to be 0; now you are making it 1. Probably not an issue, since even people who compile with 128 < CONFIG_NR_CPUS <= 256...
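The #if being questioned picks the ticket width from CONFIG_NR_CPUS: every CPU consumes __TICKET_LOCK_TAIL_INC ticket values, so a u8 tail wraps after 256 / __TICKET_LOCK_TAIL_INC CPUs. A standalone restatement, with 64 standing in for CONFIG_NR_CPUS and stdint types for the kernel's u8/u16:

#include <stdint.h>

#define TICKET_LOCK_INC_SHIFT  1
#define __TICKET_LOCK_TAIL_INC (1 << TICKET_LOCK_INC_SHIFT)

#if 64 < (256 / __TICKET_LOCK_TAIL_INC) /* 64 CPUs < 128 slots: a byte suffices */
typedef uint8_t  __ticket_t;
typedef uint16_t __ticketpair_t;
#else
typedef uint16_t __ticket_t;
typedef uint32_t __ticketpair_t;
#endif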