search for: queue_spin_unlock_slowpath

Displaying 12 results from an estimated 54 matches for "queue_spin_unlock_slowpath".

2014 Jun 12
2
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...(); > } > > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > +/* > + * The lock byte can have a value of _Q_LOCKED_SLOWPATH to indicate > + * that it needs to go through the slowpath to do the unlocking. > + */ > +#define _Q_LOCKED_SLOWPATH (_Q_LOCKED_VAL | 2) > + > +extern void queue_spin_unlock_slowpath(struct qspinlock *lock); > + > +static inline void queue_spin_unlock(struct qspinlock *lock) > +{ > + barrier(); > + if (static_key_false(&paravirt_spinlocks_enabled)) { > + /* > + * Need to atomically clear the lock byte to avoid racing with > + * queue head waiter...
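The unlock in this excerpt is the core PV trick: a plain byte store no longer suffices, because a halted waiter may have changed the lock byte from _Q_LOCKED_VAL to _Q_LOCKED_SLOWPATH. A minimal sketch of that split, keeping the names from the quoted patch; the u8 cast assumes the lock byte is the least-significant byte, and native_queue_spin_unlock() is an assumed name for the plain-store variant:

#define _Q_LOCKED_VAL           1
#define _Q_LOCKED_SLOWPATH      (_Q_LOCKED_VAL | 2)     /* byte value 3 */

static inline void queue_spin_unlock(struct qspinlock *lock)
{
        barrier();
        if (static_key_false(&paravirt_spinlocks_enabled)) {
                /*
                 * cmpxchg() releases the lock and simultaneously tells us
                 * whether a halted waiter flagged the slowpath while we
                 * held it; if so, that waiter must be kicked awake.
                 */
                if (cmpxchg((u8 *)lock, _Q_LOCKED_VAL, 0) != _Q_LOCKED_VAL)
                        queue_spin_unlock_slowpath(lock);
                return;
        }
        native_queue_spin_unlock(lock);         /* plain byte store */
}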
2014 Mar 13
3
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...715/ 3762/ +1% > 6 4942/ 7609/+54% 4504/ 4558/ +2% > 7 6304/ 9570/+52% 5292/ 5351/ +1% > 8 7736/11323/+46% 6037/ 6097/ +1% Do you have measurements from tests when VCPUs are overcommitted? > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > +/** > + * queue_spin_unlock_slowpath - kick up the CPU of the queue head > + * @lock : Pointer to queue spinlock structure > + * > + * The lock is released after finding the queue head to avoid racing > + * condition between the queue head and the lock holder. > + */ > +void queue_spin_unlock_slowpath(struct qspinloc...
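The kdoc in this hit states the design rule: find the queue head before releasing the lock, otherwise the release races with a new holder rebuilding the queue. The snippet cuts off before the body; one plausible shape, reusing __queue_spin_unlock() and __queue_kick_cpu() seen in other hits from this series, with pv_find_qhead() and the embedded pv field as assumptions:

void queue_spin_unlock_slowpath(struct qspinlock *lock)
{
        struct qnode *head;

        /*
         * Locate the queue head before touching the lock word; releasing
         * first would let a new lock holder race with the queue walk.
         */
        head = pv_find_qhead(lock);             /* assumed helper */
        __queue_spin_unlock(lock);              /* clear the lock byte */
        __queue_kick_cpu(head->pv.mycpu, PV_KICK_QUEUE_HEAD);
}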
2014 Mar 12
0
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...ue node pointer + * @pv : pointer to struct pv_qvars to be set + * @prev: pointer to the previous node + */ +static __always_inline void pv_set_prev(struct pv_qvars *pv, struct qnode *prev) +{ + ACCESS_ONCE(pv->prev) = prev; +} + +/* + * The following inlined functions are being used by the + * queue_spin_unlock_slowpath() function. + */ + +/** + * pv_get_prev - get previous queue node pointer + * @pv : pointer to struct pv_qvars to be set + * Return: the previous queue node pointer + */ +static __always_inline struct qnode *pv_get_prev(struct pv_qvars *pv) +{ + return ACCESS_ONCE(pv->prev); +} + +/** + * pv_k...
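These accessors exist so the unlocker can chase prev pointers that another CPU publishes concurrently: ACCESS_ONCE() forces a single, untorn load or store. ACCESS_ONCE() was later replaced in mainline by READ_ONCE()/WRITE_ONCE(); the same pair restated in that form, behavior unchanged:

static __always_inline void pv_set_prev(struct pv_qvars *pv, struct qnode *prev)
{
        WRITE_ONCE(pv->prev, prev);     /* single untorn store */
}

static __always_inline struct qnode *pv_get_prev(struct pv_qvars *pv)
{
        return READ_ONCE(pv->prev);     /* single untorn load */
}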
2014 May 30
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...s_spinlock *mcs, struct mcs_spinlock *prev) +{ + struct pv_qnode *pv = (struct pv_qnode *)mcs; + + ACCESS_ONCE(pv->prev) = (struct pv_qnode *)prev; + /* + * Make sure the prev field is set up before others + */ + smp_wmb(); +} + +/* + * The following inlined functions are being used by the + * queue_spin_unlock_slowpath() function. + */ + +/** + * pv_tail_to_qhead - get queue head pv_qnode from tail code + * @tail : pointer to queue tail code + * @Return: mcs_spinlock pointer of queue head + * + * This function should only be called by the current lock holder so that + * the queue head won't be changed. + */...
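The smp_wmb() in this version orders the prev store against the node's later stores, but only helps a reader that supplies the pairing barrier. A sketch of that pairing; the cpustate store is borrowed from a sibling hit below as an assumed "later store", and the consumer side is illustrative, not code from the patch:

static void publisher(struct pv_qnode *pv, struct pv_qnode *prev)
{
        ACCESS_ONCE(pv->prev) = prev;
        smp_wmb();                      /* order prev before later stores */
        ACCESS_ONCE(pv->cpustate) = PV_CPU_HALTED;      /* assumed */
}

static struct pv_qnode *consumer(struct pv_qnode *pv)
{
        if (ACCESS_ONCE(pv->cpustate) != PV_CPU_HALTED)
                return NULL;
        smp_rmb();                      /* pairs with the smp_wmb() above */
        return ACCESS_ONCE(pv->prev);   /* guaranteed to be visible */
}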
2014 Jun 12
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...def CONFIG_PARAVIRT_SPINLOCKS >> +/* >> + * The lock byte can have a value of _Q_LOCKED_SLOWPATH to indicate >> + * that it needs to go through the slowpath to do the unlocking. >> + */ >> +#define _Q_LOCKED_SLOWPATH (_Q_LOCKED_VAL | 2) >> + >> +extern void queue_spin_unlock_slowpath(struct qspinlock *lock); >> + >> +static inline void queue_spin_unlock(struct qspinlock *lock) >> +{ >> + barrier(); >> + if (static_key_false(&paravirt_spinlocks_enabled)) { >> + /* >> + * Need to atomically clear the lock byte to avoid racing with...
2014 Nov 03
0
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...> + pv_queue_spin_lock_slowpath(lock, val); > + else > + queue_spin_lock_slowpath(lock, val); > +} No, this is just vile.. _that_ is what we have PV ops for. And at that point it's the same function it was before the PV stuff, so that whole inline thing is then gone. > +extern void queue_spin_unlock_slowpath(struct qspinlock *lock); > + > /** > * queue_spin_unlock - release a queue spinlock > * @lock : Pointer to queue spinlock structure > * > * An effective smp_store_release() on the least-significant byte. > + * > + * Inlining of the unlock function is disabled when C...
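The objection here is to the open-coded runtime branch: the paravirt framework instead patches the call site at boot, so bare metal pays essentially nothing. The shape this eventually took in mainline x86 looked roughly like the following (field and macro names recalled from the 4.2-era pv_lock_ops code, so treat them as approximate, not verbatim):

struct pv_lock_ops {
        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
        struct paravirt_callee_save queued_spin_unlock;
        void (*wait)(u8 *ptr, u8 val);  /* halt this vCPU while *ptr == val */
        void (*kick)(int cpu);          /* wake a halted vCPU */
};

/* One patched indirect call instead of an if (); natives get the bare
 * slowpath, guests get the PV variant. */
static __always_inline void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}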
2014 Oct 29
1
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...inline struct mcs_spinlock *pv_get_qhead(struct qspinlock *lock) +{ + struct pv_qnode *pn = pv_decode_tail(atomic_read(&lock->val)); + + while (pn->head == PV_INVALID_HEAD) + cpu_relax(); + + if (WARN_ON_ONCE(!pn->head->locked)) + return NULL; + + return pn->head; +} + +/** + * queue_spin_unlock_slowpath - kick up the CPU of the queue head + * @lock : Pointer to queue spinlock structure + * + * The lock is released after finding the queue head to avoid racing + * condition between the queue head and the lock holder. + */ +void queue_spin_unlock_slowpath(struct qspinlock *lock) +{ + struct mcs_spinl...
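The snippet truncates inside the function; a plausible completion consistent with the pv_get_qhead() helper shown just above it. pv_kick(), the mycpu field, and __queue_spin_unlock() are stand-ins for whatever this revision actually used:

void queue_spin_unlock_slowpath(struct qspinlock *lock)
{
        struct mcs_spinlock *head = pv_get_qhead(lock); /* may spin briefly */

        __queue_spin_unlock(lock);      /* head is known; safe to release */
        if (head)
                pv_kick(((struct pv_qnode *)head)->mycpu);      /* wake its vCPU */
}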
2014 Oct 16
2
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
...RO_FENCE */ #define queue_spin_unlock queue_spin_unlock +#ifdef CONFIG_PARAVIRT_SPINLOCKS +/* + * The lock byte can have a value of _Q_LOCKED_SLOWPATH to indicate + * that it needs to go through the slowpath to do the unlocking. + */ +#define _Q_LOCKED_SLOWPATH (_Q_LOCKED_VAL | 2) + +extern void queue_spin_unlock_slowpath(struct qspinlock *lock); + /** * queue_spin_unlock - release a queue spinlock * @lock : Pointer to queue spinlock structure * * An effective smp_store_release() on the least-significant byte. + * + * Inlining of the unlock function is disabled when CONFIG_PARAVIRT_SPINLOCKS + * is defined....
2014 Mar 13
1
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...* queue head waiter trying to set _QSPINLOCK_LOCKED_SLOWPATH. */ if (likely(cmpxchg(&qlock->lock, _QSPINLOCK_LOCKED, 0) == _QSPINLOCK_LOCKED)) return; else queue_spin_unlock_slowpath(lock); } else { __queue_spin_unlock(lock); } > // pv_kick_node: > if (pv->cpustate != PV_CPU_HALTED) > return; > ACCESS_ONCE(pv->cpustate) = PV_CPU_KICKED; > __queue_kick_cpu(pv->mycpu, PV_KICK_QUEUE_HEAD); > ...
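The kick protocol is fully visible in this hit; restated whole for readability. Sketch only: __queue_kick_cpu() is the hypervisor hook (a KVM kick hypercall in this series), and the function wrapper is ours:

static void pv_kick_node(struct pv_qvars *pv)
{
        if (pv->cpustate != PV_CPU_HALTED)
                return;         /* waiter still spinning: no hypercall needed */
        ACCESS_ONCE(pv->cpustate) = PV_CPU_KICKED;
        __queue_kick_cpu(pv->mycpu, PV_KICK_QUEUE_HEAD);
}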
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...tatic DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]); /* * We must be able to distinguish between no-tail and the tail at 0:0, @@ -230,6 +241,32 @@ static __always_inline void set_locked(s WRITE_ONCE(l->locked, _Q_LOCKED_VAL); } + +/* + * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for + * all the PV callbacks. + */ + +static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } +static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { } +static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { } + +static __always_in...
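The point of the empty __pv_*() stubs is to compile the slowpath twice from one source file: once with the hooks inlining away to nothing (native), once with real PV hooks substituted. A paraphrase of the trick as it landed in mainline kernel/locking/qspinlock.c (identifiers from memory, abridged):

/* Pass 1 (native): the NOP hooks vanish under the optimizer. */
static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
#define pv_init_node    __pv_init_node
/* ... queue_spin_lock_slowpath() body calls pv_init_node() etc. ... */

/* Pass 2 (PV): re-include this very file with real hooks in place. */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH
#undef  pv_init_node
#define pv_init_node                    pv_init_node_real /* from the PV header */
#define queue_spin_lock_slowpath        __pv_queue_spin_lock_slowpath
#include "qspinlock.c"
#endif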
2015 Apr 08
2
[PATCH v15 16/16] unfair qspinlock: a queue based unfair lock
...Unfair lock (mutually exclusive to PV) also uses the second cacheline. */ +#define MAX_NODES 8 static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]); /* @@ -234,7 +234,7 @@ static __always_inline void set_locked(struct qspinlock *lock) /* * Generate the native code for queue_spin_unlock_slowpath(); provide NOPs for - * all the PV callbacks. + * all the PV and unfair callbacks. */ static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } @@ -244,19 +244,36 @@ static __always_inline void __pv_scan_next(struct qspinlock *lock, static __always_inline void __pv_wait_head(s...
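"Unfair" here means a queued waiter may still grab the lock out of turn, which helps when the vCPU that is next in line is preempted. A minimal sketch of the steal attempt such a wait loop could make; the static key name and the policy are illustrative, not taken from the patch:

static inline bool unfair_trylock(struct qspinlock *lock)
{
        if (!static_key_false(&virt_unfair_lock_key))
                return false;           /* fair mode: never steal */

        /* Steal only if the lock is momentarily free. */
        return !atomic_read(&lock->val) &&
               atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0;
}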
2014 Oct 24
3
[PATCH v12 09/11] pvqspinlock, x86: Add para-virtualization support
On 10/24/2014 04:47 AM, Peter Zijlstra wrote: > On Thu, Oct 16, 2014 at 02:10:38PM -0400, Waiman Long wrote: >> +static inline void pv_init_node(struct mcs_spinlock *node) >> +{ >> + struct pv_qnode *pn = (struct pv_qnode *)node; >> + >> + BUILD_BUG_ON(sizeof(struct pv_qnode) > 5*sizeof(struct mcs_spinlock)); >> + >> + if (!pv_enabled()) >> +