Displaying 20 results from an estimated 91 matches for "__ticket_unlock_kick".
2015 Feb 06
10
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...tickets.tail &= ~TICKET_SLOWPATH_FLAG;
+ cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+ }
+}
+
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
__ticket_t ticket)
@@ -59,6 +76,10 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
{
}
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+}
+
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -84,7 +105,7 @@ static __always_inline void arch_spin_lock(arch...
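The snippet above is the tail of the new check-and-clear helper: the slowpath flag sits in tickets.tail and is dropped with one cmpxchg over the combined head_tail word. Below is a standalone sketch of that idea for readers without the full patch; the union layout, the TICKET_* constants and the "no other waiters" guard are assumptions for illustration — the snippet itself only shows the clearing cmpxchg.
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_LOCK_INC       ((__ticket_t)2)   /* assumed: tickets advance by 2 */
#define TICKET_SLOWPATH_FLAG  ((__ticket_t)1)   /* assumed: flag kept in the tail LSB */

typedef union {
        uint32_t head_tail;                     /* whole lock word, for a one-shot cmpxchg */
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

/*
 * Called by the new lock holder: if the slowpath flag is set but nobody
 * else is queued (tail is exactly one increment ahead of head), drop the
 * flag with a single cmpxchg over the combined word.  Losing the race is
 * harmless - it only means another waiter arrived and the flag must stay.
 */
static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.head_tail = __atomic_load_n(&lock->head_tail, __ATOMIC_RELAXED);
        if ((old.tickets.tail & TICKET_SLOWPATH_FLAG) &&
            (__ticket_t)(old.tickets.tail & ~TICKET_SLOWPATH_FLAG) ==
            (__ticket_t)(old.tickets.head + TICKET_LOCK_INC)) {
                new = old;
                new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
                __atomic_compare_exchange_n(&lock->head_tail, &old.head_tail,
                                            new.head_tail, 0,
                                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        }
}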
2015 Feb 08
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...G;
> + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
> + }
> +}
> +
> #else /* !CONFIG_PARAVIRT_SPINLOCKS */
> static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
> __ticket_t ticket)
> @@ -59,6 +76,10 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
> {
> }
>
> +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
> +{
> +}
> +
> #endif /* CONFIG_PARAVIRT_SPINLOCKS */
>
> static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
> @@ -84,7 +1...
2015 Feb 06
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
.../* add_smp() is a full mb() */
>
> - if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
> - __ticket_unlock_slowpath(lock, prev);
> + if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) {
> + BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
> + __ticket_unlock_kick(lock, prev_head);
Can we modify it slightly to avoid potentially accessing invalid memory:
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 5315887..cd22d73 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -144,13 +144,13 @@...
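The hazard being pointed out: the unlock path above bumps head with add_smp() and only afterwards reads lock->tickets.tail, but once head is bumped the next owner may run, complete, and free the structure containing the lock, so that second read can hit freed memory. The truncated diff is not reconstructed here; instead, a hedged sketch of the direction the thread ultimately took (slowpath flag kept in head, as in the Feb 11 and V5 results below), where the releasing fetch-and-add already returns everything the unlock still needs and nothing is read from the lock afterwards:
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_LOCK_INC       ((__ticket_t)2)
#define TICKET_SLOWPATH_FLAG  ((__ticket_t)1)   /* assumed to live in head here */

typedef struct {
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

/* pv hook stand-in; real backends use 'lock' only as a lookup key */
static void __ticket_unlock_kick(arch_spinlock_t *lock, __ticket_t next)
{
        (void)lock; (void)next;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /*
         * The fetch-and-add both releases the lock and hands back the old
         * head, so the flag test below works on a local copy: even if the
         * new owner frees the lock right away, *lock is never read again.
         */
        __ticket_t head = __atomic_fetch_add(&lock->tickets.head,
                                             TICKET_LOCK_INC, __ATOMIC_RELEASE);

        if (head & TICKET_SLOWPATH_FLAG) {
                head &= ~TICKET_SLOWPATH_FLAG;
                __ticket_unlock_kick(lock, head + TICKET_LOCK_INC);
        }
}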
2015 Feb 11
1
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...sed. now either or both of head,tail
> lsb bit may be set after unlock.
Sorry, I can't understand... could you spell it out?
If TICKET_SLOWPATH_FLAG lives in .head, arch_spin_unlock() could simply do
head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
if (head & TICKET_SLOWPATH_FLAG)
__ticket_unlock_kick(head);
so it can't overflow to .tail?
But probably I missed your concern.
And if we do this, it probably makes sense to add something like
bool tickets_equal(__ticket_t one, __ticket_t two)
{
return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}
and change kvm_lock_spinning() to use tic...
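For the truncated suggestion at the end: once the flag can appear in head, every place that compares a ticket against head has to mask it off, which is what the proposed tickets_equal() is for. A small sketch of a waiter loop in the kvm_lock_spinning() spirit using it; the loop, the halt callback and all names except __tickets_equal/TICKET_SLOWPATH_FLAG are assumptions, not the real kvm code:
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)

/* compare two tickets while ignoring the slowpath bit in either of them */
static inline int __tickets_equal(__ticket_t one, __ticket_t two)
{
        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

/*
 * Waiter side: before (and after) halting, re-check whether the lock
 * already became ours.  'head' may carry the flag bit, so a plain '=='
 * against our ticket could spuriously fail; the masked compare cannot.
 */
static void wait_for_ticket(volatile __ticket_t *head, __ticket_t want,
                            void (*halt_this_vcpu)(void))
{
        for (;;) {
                if (__tickets_equal(*head, want))
                        return;                 /* our turn, take the lock */
                halt_this_vcpu();               /* woken by __ticket_unlock_kick() */
        }
}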
2015 Feb 09
2
[PATCH V2] x86 spinlock: Fix memory corruption on completing completions
...ickets.tail &= ~TICKET_SLOWPATH_FLAG;
+ cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+ }
+}
+
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
__ticket_t ticket)
@@ -59,11 +76,15 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
{
}
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock)
+{
+}
+
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
- return lock.tickets.head == lock.tickets.tail;
+ return lock....
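The last hunk is cut off, but the reason arch_spin_value_unlocked() has to change is that a plain head == tail comparison now reports "locked" whenever only the slowpath bit differs. A hedged guess at the shape of the replacement, reusing the masked comparison that the V5 result below spells out as __tickets_equal():
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)

typedef struct {
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

static inline int __tickets_equal(__ticket_t one, __ticket_t two)
{
        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

/* unlocked means head and tail agree once the slowpath bit is ignored */
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return __tickets_equal(lock.tickets.head, lock.tickets.tail);
}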
2012 Mar 21
15
[PATCH RFC V6 0/11] Paravirtualized ticketlocks
...spinlock for SPIN_THRESHOLD
iterations, then call out to the __ticket_lock_spinning() pvop,
which allows a backend to block the vCPU rather than spinning. This
pvop can set the lock into "slowpath state".
- When releasing a lock, if it is in "slowpath state", then call
__ticket_unlock_kick() to kick the next vCPU in line awake. If the
lock is no longer in contention, it also clears the slowpath flag.
The "slowpath state" is stored in the LSB of the lock tail
ticket. This has the effect of reducing the max number of CPUs by
half (so, a "small ticket"...
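To make the two hooks in that summary concrete — spin for SPIN_THRESHOLD iterations, then hand the vCPU to the __ticket_lock_spinning() pvop and retry when kicked — here is a toy lock path. The layout, the SPIN_THRESHOLD value, the stubs and the omission of the slowpath-flag masking are all simplifications, not the real arch_spin_lock():
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_LOCK_INC  ((__ticket_t)2)
#define SPIN_THRESHOLD   (1 << 15)               /* assumed spin budget */

typedef union {
        uint32_t head_tail;
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

/* pvop stub: a KVM/Xen backend would block this vCPU until it is kicked */
static void __ticket_lock_spinning(arch_spinlock_t *lock, __ticket_t ticket)
{
        (void)lock; (void)ticket;
}

static inline void cpu_relax(void)
{
        __builtin_ia32_pause();                  /* rep; nop */
}

static void arch_spin_lock_sketch(arch_spinlock_t *lock)
{
        /* take a ticket: the old tail is our place in the queue */
        __ticket_t ticket = __atomic_fetch_add(&lock->tickets.tail,
                                               TICKET_LOCK_INC, __ATOMIC_ACQ_REL);

        for (;;) {
                /* fast path: spin a bounded number of times on head */
                for (unsigned int count = SPIN_THRESHOLD; count; count--) {
                        if (__atomic_load_n(&lock->tickets.head,
                                            __ATOMIC_ACQUIRE) == ticket)
                                return;          /* our turn */
                        cpu_relax();
                }
                /* budget exhausted: let the hypervisor halt this vCPU */
                __ticket_lock_spinning(lock, ticket);
        }
}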
2012 Apr 19
13
[PATCH RFC V7 0/12] Paravirtualized ticketlocks
...spinlock for SPIN_THRESHOLD
iterations, then call out to the __ticket_lock_spinning() pvop,
which allows a backend to block the vCPU rather than spinning. This
pvop can set the lock into "slowpath state".
- When releasing a lock, if it is in "slowpath state", then call
__ticket_unlock_kick() to kick the next vCPU in line awake. If the
lock is no longer in contention, it also clears the slowpath flag.
The "slowpath state" is stored in the LSB of the lock tail
ticket. This has the effect of reducing the max number of CPUs by
half (so, a "small ticket"...
2015 Apr 30
0
[PATCH 3/6] x86: introduce new pvops function clear_slowpath
...| 2 ++
6 files changed, 38 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810..318f077 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -724,6 +724,13 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}
+static __always_inline void __ticket_clear_slowpath(arch_spinlock_t *lock,
+ __ticket_t head)
+{
+ PVOP_VCALL2(pv_lock_ops.clear_slowpath, lock, head);
+}
+
+void pv_lock_activate(void);
#endif
#ifd...
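PVOP_VCALL2() above expands to an indirect (and usually runtime-patched) call through pv_lock_ops. A rough plain-C model of what that dispatch amounts to: an ops table that each backend fills in, here with a hypothetical handler that merely drops the flag bit from head. Both the struct layout and the handler body are assumptions for illustration, not the kernel's paravirt machinery:
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)

typedef struct {
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

/* model of pv_lock_ops: each hypervisor backend installs its own handlers */
struct pv_lock_ops_model {
        void (*lock_spinning)(arch_spinlock_t *lock, __ticket_t ticket);
        void (*unlock_kick)(arch_spinlock_t *lock, __ticket_t ticket);
        void (*clear_slowpath)(arch_spinlock_t *lock, __ticket_t head);
};

/* hypothetical handler: atomically drop the flag from head, if still there */
static void model_clear_slowpath(arch_spinlock_t *lock, __ticket_t head)
{
        __ticket_t expected = head;

        __atomic_compare_exchange_n(&lock->tickets.head, &expected,
                                    (__ticket_t)(head & ~TICKET_SLOWPATH_FLAG),
                                    0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static struct pv_lock_ops_model pv_lock_ops = {
        .clear_slowpath = model_clear_slowpath,
};

/* the wrapper in the hunk above boils down to an indirect call like this */
static inline void __ticket_clear_slowpath(arch_spinlock_t *lock, __ticket_t head)
{
        pv_lock_ops.clear_slowpath(lock, head);
}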
2015 Feb 15
0
[PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...*key);
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
- set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+ set_bit(0, (volatile unsigned long *)&lock->tickets.head);
}
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline int __tickets_equal(__ticket_t one, __ticket_t two)
+{
+ return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+ __ticket_t head)...
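The V5 helper is cut off mid-signature. Compared with the earlier sketch under the first result, two things have changed: the flag now lives in head (the set_bit hunk above), and the caller is the lock holder, which lets the helper construct the only head_tail value that may legally match — tail exactly one increment past the flag-less head, i.e. no other waiters — instead of re-reading the lock. A hedged sketch of that shape, with the same assumed layout as before:
#include <stdint.h>

typedef uint16_t __ticket_t;
#define TICKET_LOCK_INC       ((__ticket_t)2)
#define TICKET_SLOWPATH_FLAG  ((__ticket_t)1)   /* now kept in head, per the hunk above */

typedef union {
        uint32_t head_tail;
        struct { __ticket_t head, tail; } tickets;
} arch_spinlock_t;

/*
 * 'head' is the value the holder observed, with the flag set.  Build the
 * exact word the lock must contain if nobody else is queued and try to
 * swap the flag away in one shot; if a waiter has shown up, tail differs,
 * the cmpxchg fails and the flag correctly stays set.
 */
static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
                                                     __ticket_t head)
{
        arch_spinlock_t old, new;

        old.tickets.head = head;
        new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
        old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
        new.tickets.tail = old.tickets.tail;

        __atomic_compare_exchange_n(&lock->head_tail, &old.head_tail,
                                    new.head_tail, 0,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}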
2013 Aug 06
16
[PATCH V12 0/14] Paravirtualized ticket spinlocks
...spinlock for SPIN_THRESHOLD
iterations, then call out to the __ticket_lock_spinning() pvop,
which allows a backend to block the vCPU rather than spinning. This
pvop can set the lock into "slowpath state".
- When releasing a lock, if it is in "slowpath state", then call
__ticket_unlock_kick() to kick the next vCPU in line awake. If the
lock is no longer in contention, it also clears the slowpath flag.
The "slowpath state" is stored in the LSB of the lock tail
ticket. This has the effect of reducing the max number of CPUs by
half (so, a "small ticket"...