search for: head_tail

Displaying 20 results from an estimated 111 matches for "head_tail".

2015 Feb 09
2
[PATCH V2] x86 spinlock: Fix memory corruption on completing completions
...& ~TICKET_SLOWPATH_FLAG) - old.tickets.head; + + /* try to clear slowpath flag when there are no contenders */ + if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) && + (diff == TICKET_LOCK_INC)) { + new = old; + new.tickets.tail &= ~TICKET_SLOWPATH_FLAG; + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} + #else /* !CONFIG_PARAVIRT_SPINLOCKS */ static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock, __ticket_t ticket) @@ -59,11 +76,15 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock, { } +static inline void __ti...
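The fragment above shows only part of the V2 flag-clearing logic. A minimal user-space sketch of the same idea follows; the types, constants and union layout (ticket_t, TICKET_LOCK_INC, the flag in bit 0 of tail) are stand-ins, not the kernel's definitions, and GCC's __sync_val_compare_and_swap stands in for the kernel's cmpxchg():

#include <stdint.h>
#include <stdio.h>

/* Stand-in ticket-lock layout: head/tail advance in steps of 2 so bit 0 of
 * tail is free for the slowpath flag, as in the snippet above. */
typedef uint16_t ticket_t;
#define TICKET_LOCK_INC      2
#define TICKET_SLOWPATH_FLAG 1u

typedef union {
    uint32_t head_tail;                     /* both halves, for one wide cmpxchg */
    struct { ticket_t head, tail; } tickets;
} ticketlock_t;

/* Clear the slowpath flag, but only if this CPU is the sole holder. */
static void try_clear_slowpath(ticketlock_t *lock)
{
    ticketlock_t old, new;

    old.head_tail = lock->head_tail;        /* one racy snapshot; cmpxchg validates it */
    ticket_t diff = (old.tickets.tail & ~TICKET_SLOWPATH_FLAG) - old.tickets.head;

    /* Only our own ticket is outstanding, so nobody else can be in the slowpath. */
    if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) && diff == TICKET_LOCK_INC) {
        new = old;
        new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
        /* If a waiter queued up meanwhile, head_tail changed and this is a no-op. */
        __sync_val_compare_and_swap(&lock->head_tail, old.head_tail, new.head_tail);
    }
}

int main(void)
{
    ticketlock_t lock = { .tickets = { .head = 4, .tail = 6 | TICKET_SLOWPATH_FLAG } };
    try_clear_slowpath(&lock);
    printf("tail after clear attempt: %u\n", lock.tickets.tail);   /* 6: flag cleared */
    return 0;
}

A failed compare-and-swap here is harmless: the flag simply stays set and gets handled on the slowpath.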
2015 Feb 10
4
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...0/2015 06:23 AM, Linus Torvalds wrote: > >> add_smp(&lock->tickets.head, TICKET_LOCK_INC); >> if (READ_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG) .. >> >> into something like >> >> val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC << TICKET_SHIFT); >> if (unlikely(val & TICKET_SLOWPATH_FLAG)) ... >> >> would be the right thing to do. Somebody should just check that I got >> that shift right, and that the tail is in the high bytes (head really >> needs to be hi...
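For reference, a compilable sketch of the single-xadd unlock being proposed in the quote, assuming head sits in the low halfword (so the increment needs no shift) and the slowpath flag in bit 0 of tail; the types, the constants, and the __sync_fetch_and_add builtin are stand-ins for the kernel's xadd():

#include <stdint.h>
#include <stdio.h>

/* Stand-in layout: head declared first, so on little-endian x86 it occupies
 * the low halfword of head_tail and tail the high halfword. */
typedef uint16_t ticket_t;
#define TICKET_SHIFT         16
#define TICKET_LOCK_INC      2
#define TICKET_SLOWPATH_FLAG 1u

typedef union {
    uint32_t head_tail;
    struct { ticket_t head, tail; } tickets;
} ticketlock_t;

/* Release the lock and read the old tail in one atomic operation, so the
 * slowpath-flag test never touches the lock word after it has been released. */
static void ticket_unlock(ticketlock_t *lock)
{
    /* Caveat from the thread: a wraparound of head would carry into tail,
     * which a real implementation has to guard against. */
    uint32_t old = __sync_fetch_and_add(&lock->head_tail, TICKET_LOCK_INC);

    if ((old >> TICKET_SHIFT) & TICKET_SLOWPATH_FLAG)
        printf("slowpath: would kick the next waiter here\n");
}

int main(void)
{
    ticketlock_t lock = { .tickets = { .head = 2, .tail = 4 | TICKET_SLOWPATH_FLAG } };
    ticket_unlock(&lock);                       /* flag was set: prints the kick line */
    printf("head is now %u\n", lock.tickets.head);
    return 0;
}

Doing the add and the flag read as one operation removes the window in which the lock could be released, handed to the next owner, and possibly freed before the old owner reads the flag.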
2015 Feb 08
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
....tickets.head; > + > + /* try to clear slowpath flag when there are no contenders */ > + if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) && > + (diff == TICKET_LOCK_INC)) { > + new = old; > + new.tickets.tail &= ~TICKET_SLOWPATH_FLAG; > + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); > + } > +} > + > #else /* !CONFIG_PARAVIRT_SPINLOCKS */ > static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock, > __ticket_t ticket) > @@ -59,6 +76,10 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock...
2015 Feb 06
10
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...& ~TICKET_SLOWPATH_FLAG) - old.tickets.head; + + /* try to clear slowpath flag when there are no contenders */ + if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) && + (diff == TICKET_LOCK_INC)) { + new = old; + new.tickets.tail &= ~TICKET_SLOWPATH_FLAG; + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} + #else /* !CONFIG_PARAVIRT_SPINLOCKS */ static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock, __ticket_t ticket) @@ -59,6 +76,10 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock, { } +static inline void __tic...
2015 Feb 12
8
[PATCH V3] x86 spinlock: Fix memory corruption on completing completions
...ch_spinlock_t old, new; + + old.tickets.head = head; + new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; + old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; + new.tickets.tail = old.tickets.tail; + + /* try to clear slowpath flag when there are no contenders */ + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - return lock.tickets.head == lock.tickets.tail; + return __tickets_equal(lock.tickets.head, lock.tickets.tail); } /* @@ -87,18 +107,22 @@ static __always_inline void arch_spin...
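The V3 fragment is easier to follow when spelled out in one piece. A simplified user-space rendition of the check-and-clear helper and the flag-blind __tickets_equal() comparison it pairs with; the types and constants, the placement of the flag in bit 0 of head, and the __sync builtin are assumptions standing in for the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in layout; tickets advance in steps of 2, and the slowpath flag is
 * assumed to live in bit 0 of head, as the snippet above suggests. */
typedef uint16_t ticket_t;
#define TICKET_LOCK_INC      2
#define TICKET_SLOWPATH_FLAG 1u

typedef union {
    uint32_t head_tail;
    struct { ticket_t head, tail; } tickets;
} ticketlock_t;

/* Compare two tickets while ignoring the slowpath flag bit. */
static int tickets_equal(ticket_t one, ticket_t two)
{
    return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

/* Clear the flag only when the just-read head shows no other waiters:
 * the expected tail is exactly (clean head + one ticket), i.e. only ours. */
static void check_and_clear_slowpath(ticketlock_t *lock, ticket_t head)
{
    if (head & TICKET_SLOWPATH_FLAG) {
        ticketlock_t old, new;

        old.tickets.head = head;
        new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
        old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
        new.tickets.tail = old.tickets.tail;

        /* Fails harmlessly if another CPU queued up since head was read. */
        __sync_val_compare_and_swap(&lock->head_tail, old.head_tail, new.head_tail);
    }
}

int main(void)
{
    /* Holder's head carries the flag; tail shows just the holder's own ticket. */
    ticketlock_t lock = { .tickets = { .head = 4 | TICKET_SLOWPATH_FLAG, .tail = 6 } };
    check_and_clear_slowpath(&lock, lock.tickets.head);
    printf("after clear: head=%u tail=%u\n", lock.tickets.head, lock.tickets.tail);

    /* The flag-blind comparison treats 4 and 4|FLAG as the same ticket. */
    printf("tickets_equal(4, 4|FLAG) = %d\n",
           tickets_equal(4, 4 | TICKET_SLOWPATH_FLAG));
    return 0;
}

The flag-blind comparison is what the snippet's arch_spin_value_unlocked() change is about: head and tail must compare equal even when the flag bit is still set.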
2015 Feb 11
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...lds wrote: >> >>> add_smp(&lock->tickets.head, TICKET_LOCK_INC); >>> if (READ_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG) .. >>> >>> into something like >>> >>> val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC << TICKET_SHIFT); >>> if (unlikely(val & TICKET_SLOWPATH_FLAG)) ... >>> >>> would be the right thing to do. Somebody should just check that I got >>> that shift right, and that the tail is in the high bytes (head really ...
2015 Feb 15
0
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...ch_spinlock_t old, new; + + old.tickets.head = head; + new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; + old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; + new.tickets.tail = old.tickets.tail; + + /* try to clear slowpath flag when there are no contenders */ + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - return lock.tickets.head == lock.tickets.tail; + return __tickets_equal(lock.tickets.head, lock.tickets.tail); } /* @@ -87,18 +107,21 @@ static __always_inline void arch_spin...
2015 Feb 15
7
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...ch_spinlock_t old, new; + + old.tickets.head = head; + new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; + old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; + new.tickets.tail = old.tickets.tail; + + /* try to clear slowpath flag when there are no contenders */ + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - return lock.tickets.head == lock.tickets.tail; + return __tickets_equal(lock.tickets.head, lock.tickets.tail); } /* @@ -87,18 +108,21 @@ static __always_inline void arch_spin...
2015 Feb 10
4
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...ersion (which hopefully fixes the lockup problem that Sasha reported) together with changing that add_smp(&lock->tickets.head, TICKET_LOCK_INC); if (READ_ONCE(lock->tickets.tail) & TICKET_SLOWPATH_FLAG) .. into something like val = xadd((&lock->ticket.head_tail, TICKET_LOCK_INC << TICKET_SHIFT); if (unlikely(val & TICKET_SLOWPATH_FLAG)) ... would be the right thing to do. Somebody should just check that I got that shift right, and that the tail is in the high bytes (head really needs to be high to work, if it's in the low byte(s) th...
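The quote asks someone to double-check the shift and which halfword sits in the high bytes. A small stand-alone check along those lines, for an assumed layout with head declared first (low halfword on little-endian x86); it only demonstrates where a shifted add lands and where a carry out of the low halfword ends up:

#include <stdint.h>
#include <stdio.h>

/* Assumed two-halfword layout; field order and widths are stand-ins. */
typedef uint16_t ticket_t;
#define TICKET_SHIFT    16
#define TICKET_LOCK_INC 2

typedef union {
    uint32_t head_tail;
    struct { ticket_t head, tail; } tickets;
} ticketlock_t;

int main(void)
{
    ticketlock_t l = { .head_tail = 0 };

    /* Which half does an add of (INC << TICKET_SHIFT) actually bump? */
    l.head_tail += (uint32_t)TICKET_LOCK_INC << TICKET_SHIFT;
    printf("after shifted add: head=%u tail=%u\n", l.tickets.head, l.tickets.tail);

    /* Where does a carry out of the low halfword go? */
    l.head_tail = 0xfffe;                   /* low halfword about to wrap */
    l.head_tail += TICKET_LOCK_INC;
    printf("after carry: head=%u tail=%u\n", l.tickets.head, l.tickets.tail);
    return 0;
}

On a little-endian build with this field order the shifted add lands in tail, and the low-halfword increment carries into tail when head wraps, which appears to be the overflow concern behind the truncated "if it's in the low byte(s)" remark.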
2015 Feb 09
3
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...he > slowpath case. > Yes, I ll move that call up. >> } >> >> static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) >> @@ -115,47 +139,21 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) >> return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; >> } >> >> -static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock, >> - arch_spinlock_t old) >> -{ >> - arch_spinlock_t new; >> - >> - BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS...
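The trylock line quoted in this message is the tail end of a larger function. A cut-down user-space sketch of a trylock built on one wide compare-and-swap over head_tail, with stand-in types and constants and without the slowpath-flag handling of the real routine:

#include <stdint.h>
#include <stdio.h>

/* Stand-in layout: head in the low halfword, tail in the high halfword
 * on a little-endian build. */
typedef uint16_t ticket_t;
#define TICKET_SHIFT    16
#define TICKET_LOCK_INC 2

typedef union {
    uint32_t head_tail;
    struct { ticket_t head, tail; } tickets;
} ticketlock_t;

/* Succeed only if the lock looked free (head == tail) and nothing changed
 * between the read and the compare-and-swap. */
static int ticket_trylock(ticketlock_t *lock)
{
    ticketlock_t old, new;

    old.head_tail = lock->head_tail;
    if (old.tickets.head != old.tickets.tail)
        return 0;                               /* held or contended */

    /* Take the next ticket: bump tail in the high halfword of the wide word. */
    new.head_tail = old.head_tail + ((uint32_t)TICKET_LOCK_INC << TICKET_SHIFT);

    return __sync_val_compare_and_swap(&lock->head_tail,
                                       old.head_tail, new.head_tail) == old.head_tail;
}

int main(void)
{
    ticketlock_t lock = { .head_tail = 0 };
    printf("first trylock:  %d\n", ticket_trylock(&lock));  /* 1: acquired */
    printf("second trylock: %d\n", ticket_trylock(&lock));  /* 0: head != tail now */
    return 0;
}

The single compare-and-swap both re-checks that the lock is still free and claims the next ticket, so a racing locker just makes the trylock return 0 instead of corrupting the queue.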