Displaying results from an estimated 160 matches for "static_key_false".
2014 Mar 13
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> +static inline void arch_spin_lock(struct qspinlock *lock)
> +{
> +	if (static_key_false(&paravirt_unfairlocks_enabled))
> +		queue_spin_lock_unfair(lock);
> +	else
> +		queue_spin_lock(lock);
> +}
So I would have expected something like:
	if (static_key_false(&paravirt_spinlock)) {
		while (!queue_spin_trylock(lock))
			cpu_relax();
		return;
	}
At the top of qu...
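Note: the reply is cut off above, but the suggestion hinges on what queue_spin_trylock() does. A rough sketch following the series' naming (the exact body is not quoted in this thread, so treat it as an assumption):

    static __always_inline int queue_spin_trylock(struct qspinlock *lock)
    {
    	/* Take the lock only if it is completely free right now. */
    	if (!atomic_read(&lock->val) &&
    	    atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0)
    		return 1;
    	return 0;
    }

Because every spinner races on the cmpxchg with no queue ordering, looping on queue_spin_trylock() already gives unfair behaviour, which is the point of the suggestion: no separate queue_spin_lock_unfair() entry would be needed.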
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...#undef arch_spin_lock
> +#undef arch_spin_trylock
> +#undef arch_spin_lock_flags
> +
> +/**
> + * arch_spin_lock - acquire a queue spinlock
> + * @lock: Pointer to queue spinlock structure
> + */
> +static inline void arch_spin_lock(struct qspinlock *lock)
> +{
> +	if (static_key_false(&virt_unfairlocks_enabled))
> +		queue_spin_lock_unfair(lock);
> +	else
> +		queue_spin_lock(lock);
> +}
> +
> +/**
> + * arch_spin_trylock - try to acquire the queue spinlock
> + * @lock : Pointer to queue spinlock structure
> + * Return: 1 if lock acquired, 0 if fai...
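queue_spin_lock_unfair() itself falls outside the quoted window. Based on the cmpxchg discussion in the v10 thread below, a plausible sketch (the slowpath fallback and its signature are assumptions):

    static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
    {
    	struct __qspinlock *l = (void *)lock;

    	/*
    	 * Lock stealing is allowed here, so the lock byte must be
    	 * taken with an atomic operation, not a plain store.
    	 */
    	if (likely(cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0))
    		return;
    	queue_spin_lock_slowpath(lock, 0);
    }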
2014 May 08
1
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...9e7659e..10e87e1 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -227,6 +227,14 @@ static __always_inline int get_qlock(struct qspinlock *lock)
> {
> struct __qspinlock *l = (void *)lock;
>
> +#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
> +	if (static_key_false(&paravirt_unfairlocks_enabled))
> +		/*
> +		 * Need to use atomic operation to get the lock when
> +		 * lock stealing can happen.
> +		 */
> +		return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
That's missing {}.
> +#endif
> barrier();
> ACCESS_ONCE...
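With the braces the reviewer asks for, the hunk would read:

    #ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
    	if (static_key_false(&paravirt_unfairlocks_enabled)) {
    		/*
    		 * Need to use atomic operation to get the lock when
    		 * lock stealing can happen.
    		 */
    		return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
    	}
    #endif

(A single statement needs no braces in C, but kernel style wants them when the body spans multiple lines, as the block comment makes it do here.)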
2014 Jun 12
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...rch_spin_trylock
>> +#undef arch_spin_lock_flags
>> +
>> +/**
>> + * arch_spin_lock - acquire a queue spinlock
>> + * @lock: Pointer to queue spinlock structure
>> + */
>> +static inline void arch_spin_lock(struct qspinlock *lock)
>> +{
>> +	if (static_key_false(&virt_unfairlocks_enabled))
>> +		queue_spin_lock_unfair(lock);
>> +	else
>> +		queue_spin_lock(lock);
>> +}
>> +
>> +/**
>> + * arch_spin_trylock - try to acquire the queue spinlock
>> + * @lock : Pointer to queue spinlock structure
>> + *...
2014 Mar 14
4
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On Thu, Mar 13, 2014 at 04:05:19PM -0400, Waiman Long wrote:
> On 03/13/2014 11:15 AM, Peter Zijlstra wrote:
> >On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> >>+static inline void arch_spin_lock(struct qspinlock *lock)
> >>+{
> >>+	if (static_key_false(&paravirt_unfairlocks_enabled))
> >>+		queue_spin_lock_unfair(lock);
> >>+	else
> >>+		queue_spin_lock(lock);
> >>+}
> >So I would have expected something like:
> >
> >	if (static_key_false(&paravirt_spinlock)) {
> >		while (!queue...
2014 Mar 13
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On 03/13/2014 11:15 AM, Peter Zijlstra wrote:
> On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
>> +static inline void arch_spin_lock(struct qspinlock *lock)
>> +{
>> +	if (static_key_false(&paravirt_unfairlocks_enabled))
>> +		queue_spin_lock_unfair(lock);
>> +	else
>> +		queue_spin_lock(lock);
>> +}
> So I would have expected something like:
>
>	if (static_key_false(&paravirt_spinlock)) {
>		while (!queue_spin_trylock(lock))
>			cpu_...
2014 Jun 15
0
[PATCH 09/11] pvqspinlock, x86: Rename paravirt_ticketlocks_enabled
...+++ b/arch/x86/include/asm/spinlock.h
@@ -39,7 +39,7 @@
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)
-extern struct static_key paravirt_ticketlocks_enabled;
+extern struct static_key paravirt_spinlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
#ifdef CONFIG_QUEUE_SPINLOCK
@@ -150,7 +150,7 @@ static inline void __ticket_unlock_slowp
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
if (TICKET_SLOWPATH_FLAG &&
- static_key_false(&paravirt_ticketlocks_enabled)) {
+ stat...
2015 Jan 20
0
[PATCH v14 08/11] qspinlock, x86: Rename paravirt_ticketlocks_enabled
...+++ b/arch/x86/include/asm/spinlock.h
@@ -39,7 +39,7 @@
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)
-extern struct static_key paravirt_ticketlocks_enabled;
+extern struct static_key paravirt_spinlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
#ifdef CONFIG_QUEUE_SPINLOCK
@@ -150,7 +150,7 @@ static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
if (TICKET_SLOWPATH_FLAG &&
- static_key_false(&paravirt_ticketlo...
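Both rename patches touch only the key's name; the jump-label pattern underneath is unchanged. A minimal sketch of that pattern (the enable site shown is an assumption, not part of either patch):

    struct static_key paravirt_spinlocks_enabled = STATIC_KEY_INIT_FALSE;

    /* Compiled as a patched no-op branch until the key is enabled: */
    if (static_key_false(&paravirt_spinlocks_enabled))
    	do_pv_slowpath();	/* hypothetical slow-path hook */

    /* e.g. during paravirt init, to flip all such branches on: */
    static_key_slow_inc(&paravirt_spinlocks_enabled);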
2015 Feb 06
0
[PATCH] x86 spinlock: Fix memory corruption on completing completions
On 02/06/2015 09:49 AM, Raghavendra K T wrote:
> static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
> {
> if (TICKET_SLOWPATH_FLAG &&
> - static_key_false(&paravirt_ticketlocks_enabled)) {
> - arch_spinlock_t prev;
> + static_key_false(&paravirt_ticketlocks_enabled)) {
> + __ticket_t prev_head;
>
> - prev = *lock;
> + prev_head = lock->tickets.head;
> add_smp(&lock->tickets.head, TICKET_LOCK_INC);
>...
2014 Mar 17
2
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...:19PM -0400, Waiman Long wrote:
> >>On 03/13/2014 11:15 AM, Peter Zijlstra wrote:
> >>>On Wed, Mar 12, 2014 at 02:54:52PM -0400, Waiman Long wrote:
> >>>>+static inline void arch_spin_lock(struct qspinlock *lock)
> >>>>+{
> >>>>+	if (static_key_false(&paravirt_unfairlocks_enabled))
> >>>>+		queue_spin_lock_unfair(lock);
> >>>>+	else
> >>>>+		queue_spin_lock(lock);
> >>>>+}
> >>>So I would have expected something like:
> >>>
> >>> if (static_key_f...
2015 Feb 06
10
[PATCH] x86 spinlock: Fix memory corruption on completing completions
...ad_tail) != old.head_tail) {
- /*
- * Lock still has someone queued for it, so wake up an
- * appropriate waiter.
- */
- __ticket_unlock_kick(lock, old.tickets.head);
- }
-}
-
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
if (TICKET_SLOWPATH_FLAG &&
- static_key_false(&paravirt_ticketlocks_enabled)) {
- arch_spinlock_t prev;
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ __ticket_t prev_head;
- prev = *lock;
+ prev_head = lock->tickets.head;
add_smp(&lock->tickets.head, TICKET_LOCK_INC);
/* add_smp() is a full mb() */
-...
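Piecing the quoted hunks together, the reworked unlock reads roughly like this (a sketch: the diff is cut off after the add_smp() comment, so the slowpath-flag test and the kick call are reconstructed from the surrounding pre-patch code, not from the patch itself):

    static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
    {
    	if (TICKET_SLOWPATH_FLAG &&
    	    static_key_false(&paravirt_ticketlocks_enabled)) {
    		__ticket_t prev_head;

    		/* Sample just the ticket head, not the whole lock word. */
    		prev_head = lock->tickets.head;
    		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
    		/* add_smp() is a full mb() */

    		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
    			__ticket_unlock_kick(lock, prev_head);
    	} else
    		__add(&lock->tickets.head, TICKET_LOCK_INC,
    		      UNLOCK_LOCK_PREFIX);
    }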
2014 May 30
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...struct pv_qnode
+ * @mcs: pointer to struct mcs_spinlock
+ * @cpu: current CPU number
+ */
+static inline void pv_init_vars(struct mcs_spinlock *node, int cpu)
+{
+	struct pv_qnode *pv = (struct pv_qnode *)node;
+
+	BUILD_BUG_ON(sizeof(struct pv_qnode) > 5*sizeof(struct mcs_spinlock));
+
+	if (!static_key_false(&paravirt_spinlocks_enabled))
+		return;
+
+	pv->cpustate = PV_CPU_ACTIVE;
+	pv->prev = NULL;
+	pv->mayhalt = false;
+	pv->mycpu = cpu;
+}
+
+/**
+ * pv_head_spin_check - perform para-virtualization checks for queue head
+ * @mcs : pointer to the mcs_spinlock structure
+ *...
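pv_init_vars() fills in a struct pv_qnode overlaid on the MCS node, so the layout is roughly as follows (a sketch: only the field names and the size bound from the BUILD_BUG_ON come from the quoted code; field order and exact types are assumptions):

    struct pv_qnode {
    	struct mcs_spinlock	mcs;		/* must come first: node is cast to pv_qnode */
    	int			cpustate;	/* PV_CPU_ACTIVE, ... */
    	struct pv_qnode		*prev;		/* previous node in the wait queue */
    	bool			mayhalt;	/* this CPU may halt while waiting */
    	int			mycpu;		/* owning CPU number */
    };	/* must fit: sizeof <= 5 * sizeof(struct mcs_spinlock) */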