search for: pv_node

Displaying 13 results from an estimated 35 matches for "pv_node".

2014 Jun 16
4
[PATCH 10/11] qspinlock: Paravirt support
On 06/15/2014 08:47 AM, Peter Zijlstra wrote: > > > > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > + > +/* > + * Write a comment about how all this works... > + */ > + > +#define _Q_LOCKED_SLOW (2U << _Q_LOCKED_OFFSET) > + > +struct pv_node { > + struct mcs_spinlock mcs; > + struct mcs_spinlock __offset[3]; > + int cpu, head; > +}; I am wondering why you need the separate cpu and head variables. I thought one would be enough here. The wait code puts the cpu number in head, then the kick_cpu code kicks the one in cpu, which i...
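
The excerpt above defines pv_node by padding an mcs_spinlock with three reserved slots so the extra PV fields do not spill into a neighbouring per-CPU node. A minimal userspace sketch of that size constraint, with a simplified mcs_spinlock standing in for the kernel's; the 5-slot bound mirrors the BUILD_BUG_ON visible in the 2014 Jun 15 excerpt further down:

    #include <assert.h>

    struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked;
        int count;   /* nesting count, as in kernel/locking/mcs_spinlock.h */
    };

    struct pv_node {
        struct mcs_spinlock mcs;         /* the slot actually queued on */
        struct mcs_spinlock __offset[3]; /* reserve the next three slots */
        int cpu, head;                   /* PV fields live past the padding */
    };

    int main(void)
    {
        /* a pv_node must stay within five mcs_spinlock-sized slots */
        assert(sizeof(struct pv_node) <= 5 * sizeof(struct mcs_spinlock));
        return 0;
    }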
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
...- pv_wait_head(lock); + pv_wait_head(lock, node); while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK) cpu_relax(); --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -29,8 +29,14 @@ struct pv_node { int cpu; u8 state; + struct pv_node *head; }; +static inline struct pv_node *pv_decode_tail(u32 tail) +{ + return (struct pv_node *)decode_tail(tail); +} + /* * Initialize the PV part of the mcs_spinlock node....
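
pv_decode_tail() in this excerpt is just a cast over the native decode_tail(), which unpacks a (cpu, idx) pair from the tail word. A minimal userspace model of that packing; the field widths and the cpu+1 bias are assumptions drawn from qspinlock's tail layout, not constants copied from this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define TAIL_IDX_OFFSET 0
    #define TAIL_IDX_BITS   2    /* 4 nesting contexts per CPU */
    #define TAIL_IDX_MASK   (((1u << TAIL_IDX_BITS) - 1) << TAIL_IDX_OFFSET)
    #define TAIL_CPU_OFFSET (TAIL_IDX_OFFSET + TAIL_IDX_BITS)

    /* cpu is stored biased by +1 so that tail == 0 means "no tail" */
    static uint32_t encode_tail(int cpu, int idx)
    {
        return ((uint32_t)(cpu + 1) << TAIL_CPU_OFFSET) |
               ((uint32_t)idx << TAIL_IDX_OFFSET);
    }

    static void decode_tail(uint32_t tail, int *cpu, int *idx)
    {
        *cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
        *idx = (int)((tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET);
    }

    int main(void)
    {
        int cpu, idx;
        decode_tail(encode_tail(5, 2), &cpu, &idx);
        printf("cpu=%d idx=%d\n", cpu, idx);   /* prints cpu=5 idx=2 */
        return 0;
    }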
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...Using these we implement __pv_queue_spin_lock_slowpath() and + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and + * native_queue_spin_unlock(). + */ + +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET) + +enum vcpu_state { + vcpu_running = 0, + vcpu_halted, +}; + +struct pv_node { + struct mcs_spinlock mcs; + struct mcs_spinlock __res[3]; + + int cpu; + u8 state; +}; + +/* + * Hash table using open addressing with an LFSR probe sequence. + * + * Since we should not be holding locks from NMI context (very rare indeed) the + * max load factor is 0.75, which is around the...
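
A self-contained sketch of the "open addressing with an LFSR probe sequence" scheme this excerpt describes; the table size, the hash of the lock address, and the 16-bit LFSR taps 0xB400 are illustrative choices, not the patch's constants:

    #include <stdint.h>

    #define HB_BITS 8
    #define HB_SIZE (1u << HB_BITS)

    struct entry {
        const void *lock;   /* key: lock address, NULL = free slot */
        int         cpu;    /* value: the waiting vcpu */
    };

    static struct entry table[HB_SIZE];

    /* 16-bit maximal-length Galois LFSR: visits every non-zero state,
     * so the probe sequence cannot cycle back early. */
    static uint32_t lfsr_step(uint32_t x)
    {
        return (x >> 1) ^ (-(x & 1u) & 0xB400u);
    }

    static uint32_t hash_lock(const void *lock)
    {
        uint32_t h = (uint32_t)((uintptr_t)lock >> 4) & (HB_SIZE - 1);
        return h ? h : 1;   /* 0 is the LFSR's fixed point; avoid it as a seed */
    }

    /* With the load factor capped (0.75 in the patch comment), a free
     * slot turns up after very few probes in practice. */
    static int hb_insert(const void *lock, int cpu)
    {
        uint32_t h = hash_lock(lock);
        for (unsigned int probes = 0; probes < HB_SIZE; probes++) {
            struct entry *e = &table[h & (HB_SIZE - 1)];
            if (!e->lock) {
                e->cpu = cpu;
                e->lock = lock;
                return 0;
            }
            h = lfsr_step(h);
        }
        return -1;
    }

    /* Lookup replays the same probe sequence from the same seed. */
    static int hb_lookup(const void *lock)
    {
        uint32_t h = hash_lock(lock);
        for (unsigned int probes = 0; probes < HB_SIZE; probes++) {
            struct entry *e = &table[h & (HB_SIZE - 1)];
            if (e->lock == lock)
                return e->cpu;
            h = lfsr_step(h);
        }
        return -1;
    }

    int main(void)
    {
        int lock;
        hb_insert(&lock, 3);
        return hb_lookup(&lock) == 3 ? 0 : 1;
    }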
2015 Apr 24
0
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...Using these we implement __pv_queue_spin_lock_slowpath() and + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and + * native_queue_spin_unlock(). + */ + +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET) + +enum vcpu_state { + vcpu_running = 0, + vcpu_halted, +}; + +struct pv_node { + struct mcs_spinlock mcs; + struct mcs_spinlock __res[3]; + + int cpu; + u8 state; +}; + +/* + * Lock and MCS node addresses hash table for fast lookup + * + * Hashing is done on a per-cacheline basis to minimize the need to access + * more than one cacheline. + * + * Dynamically allocate a...
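
The per-cacheline hashing described here makes a bucket a whole cacheline of entries and scans that line before advancing, so a collision costs at most one extra cacheline fetch. A sketch under assumed sizes (64-byte lines, 256 of them; the patch itself sizes its table from the number of possible CPUs):

    #include <stdint.h>
    #include <stddef.h>

    struct hash_entry {
        const void *lock;   /* NULL marks a free slot */
        void       *node;
    };

    #define CACHELINE   64
    #define HE_PER_LINE (CACHELINE / sizeof(struct hash_entry))
    #define NR_LINES    256u   /* must be a power of two */

    static struct hash_entry hashtab[NR_LINES * HE_PER_LINE];

    static struct hash_entry *hash_insert(const void *lock, void *node)
    {
        /* hash to a cacheline, not to an individual entry */
        size_t line = (size_t)((((uintptr_t)lock >> 6) * 2654435761u)
                               & (NR_LINES - 1));

        for (size_t tries = 0; tries < NR_LINES; tries++) {
            struct hash_entry *he = &hashtab[line * HE_PER_LINE];

            /* scan every entry in this line before touching another one */
            for (size_t i = 0; i < HE_PER_LINE; i++) {
                if (!he[i].lock) {
                    he[i].node = node;
                    he[i].lock = lock;   /* publish the key last */
                    return &he[i];
                }
            }
            line = (line + 1) & (NR_LINES - 1);   /* step to the next line */
        }
        return NULL;   /* table full */
    }

    int main(void)
    {
        int lock, node;
        return hash_insert(&lock, &node) ? 0 : 1;
    }

In the kernel the node pointer is published before the lock pointer with ordered stores; the plain stores above are only adequate for this single-threaded sketch.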
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and > + * native_queue_spin_unlock(). > + */ > + > +#define _Q_SLOW_VAL (2U<< _Q_LOCKED_OFFSET) > + > +enum vcpu_state { > + vcpu_running = 0, > + vcpu_halted, > +}; > + > +struct pv_node { > + struct mcs_spinlock mcs; > + struct mcs_spinlock __res[3]; > + > + int cpu; > + u8 state; > +}; > + > +/* > + * Initialize the PV part of the mcs_spinlock node. > + */ > +static void pv_init_node(struct mcs_spinlock *node) > +{ > + struct pv_node *pn...
2015 May 04
1
[PATCH v16 08/14] pvqspinlock: Implement simple paravirt support for the qspinlock
...Using these we implement __pv_queue_spin_lock_slowpath() and + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and + * native_queue_spin_unlock(). + */ + +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET) + +enum vcpu_state { + vcpu_running = 0, + vcpu_halted, +}; + +struct pv_node { + struct mcs_spinlock mcs; + struct mcs_spinlock __res[3]; + + int cpu; + u8 state; +}; + +/* + * Lock and MCS node addresses hash table for fast lookup + * + * Hashing is done on a per-cacheline basis to minimize the need to access + * more than one cacheline. + * + * Dynamically allocate a...
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...> + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and > + * native_queue_spin_unlock(). > + */ > + > +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET) > + > +enum vcpu_state { > + vcpu_running = 0, > + vcpu_halted, > +}; > + > +struct pv_node { > + struct mcs_spinlock mcs; > + struct mcs_spinlock __res[3]; > + > + int cpu; > + u8 state; > +}; > + > +/* > + * Hash table using open addressing with an LFSR probe sequence. > + * > + * Since we should not be holding locks from NMI context (very rare indee...
2014 Jun 15
0
[PATCH 10/11] qspinlock: Paravirt support
...-tail and the tail at 0:0, @@ -218,6 +238,156 @@ static __always_inline void set_locked(s ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL; } +#ifdef CONFIG_PARAVIRT_SPINLOCKS + +/* + * Write a comment about how all this works... + */ + +#define _Q_LOCKED_SLOW (2U << _Q_LOCKED_OFFSET) + +struct pv_node { + struct mcs_spinlock mcs; + struct mcs_spinlock __offset[3]; + int cpu, head; +}; + +#define INVALID_HEAD -1 +#define NO_HEAD nr_cpu_ids + +void __pv_init_node(struct mcs_spinlock *node) +{ + struct pv_node *pn = (struct pv_node *)node; + + BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(stru...
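
The excerpt truncates inside __pv_init_node(). Below is a hedged userspace model of the shape of that initialization, not a reconstruction of the missing text: the assignments are a guess based on the INVALID_HEAD definition visible above, and this_cpu_id() is a stub standing in for the kernel's smp_processor_id():

    struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked;
        int count;
    };

    struct pv_node {
        struct mcs_spinlock mcs;
        struct mcs_spinlock __offset[3];
        int cpu, head;
    };

    #define INVALID_HEAD -1   /* as defined in the excerpt */

    static int this_cpu_id(void) { return 0; }   /* stub, not a kernel API */

    static void pv_init_node_model(struct mcs_spinlock *node)
    {
        struct pv_node *pn = (struct pv_node *)node;

        pn->cpu  = this_cpu_id();
        pn->head = INVALID_HEAD;   /* queue head not discovered yet */
    }

    int main(void)
    {
        struct pv_node n;
        pv_init_node_model(&n.mcs);
        return n.head == INVALID_HEAD ? 0 : 1;
    }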
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...Using these we implement __pv_queue_spin_lock_slowpath() and + * __pv_queue_spin_unlock() to replace native_queue_spin_lock_slowpath() and + * native_queue_spin_unlock(). + */ + +#define _Q_SLOW_VAL (2U << _Q_LOCKED_OFFSET) + +enum vcpu_state { + vcpu_running = 0, + vcpu_halted, +}; + +struct pv_node { + struct mcs_spinlock mcs; + struct mcs_spinlock __res[3]; + + int cpu; + u8 state; +}; + +/* + * Initialize the PV part of the mcs_spinlock node. + */ +static void pv_init_node(struct mcs_spinlock *node) +{ + struct pv_node *pn = (struct pv_node *)node; + + BUILD_BUG_ON(sizeof(struct pv_node...
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...e native_queue_spin_lock_slowpath() and >> + * native_queue_spin_unlock(). >> + */ >> + >> +#define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET) >> + >> +enum vcpu_state { >> + vcpu_running = 0, >> + vcpu_halted, >> +}; >> + >> +struct pv_node { >> + struct mcs_spinlock mcs; >> + struct mcs_spinlock __res[3]; >> + >> + int cpu; >> + u8 state; >> +}; >> + >> +/* >> + * Hash table using open addressing with an LFSR probe sequence. >> + * >> + * Since we should not be hol...
2015 Apr 09
2
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...> + * and put an entry into the lock hash table to be woken up at unlock time. > */ > -static void pv_kick_node(struct mcs_spinlock *node) > +static void pv_scan_next(struct qspinlock *lock, struct mcs_spinlock *node) I'm not too sure about that name change.. > { > struct pv_node *pn = (struct pv_node *)node; > + struct __qspinlock *l = (void *)lock; > > /* > + * Transition CPU state: halted => hashed > + * Quit if the transition failed. > */ > + if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted) > + return; > +...
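
The cmpxchg() in this hunk is the whole synchronization story: only the caller that wins the halted => hashed transition continues. A userspace model with C11 atomics; everything except the vcpu state values is a stand-in:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum vcpu_state { vcpu_running = 0, vcpu_halted, vcpu_hashed };

    /* Returns true if this caller won the transition and must hash the
     * lock; a racing transition elsewhere makes the compare-exchange
     * fail and the caller quits, as in pv_scan_next() above. */
    static bool try_halted_to_hashed(_Atomic int *state)
    {
        int expected = vcpu_halted;
        return atomic_compare_exchange_strong(state, &expected, vcpu_hashed);
    }

    int main(void)
    {
        _Atomic int state = vcpu_halted;

        printf("%d\n", try_halted_to_hashed(&state));  /* 1: won the race */
        printf("%d\n", try_halted_to_hashed(&state));  /* 0: already hashed */
        return 0;
    }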
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...on + * the new queue head to indicate that _Q_SLOW_VAL is set and hash entry + * filled. With this state, the queue head CPU will always be kicked even + * if it is not halted, to avoid a potential race condition. + */ enum vcpu_state { vcpu_running = 0, vcpu_halted, + vcpu_hashed }; struct pv_node { @@ -97,7 +104,13 @@ static inline u32 hash_align(u32 hash) return hash & ~(PV_HB_PER_LINE - 1); } -static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) +/* + * Set up an entry in the lock hash table + * This is not inlined to reduce the size of the generated code, as it...
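
hash_align() above rounds a hash down to the first slot of its bucket line, so every scan starts on a line boundary. A standalone check; PV_HB_PER_LINE's value here is illustrative, though it must be a power of two for the mask to work:

    #include <stdio.h>

    #define PV_HB_PER_LINE 4u   /* hash buckets per cacheline (illustrative) */

    static unsigned int hash_align(unsigned int hash)
    {
        return hash & ~(PV_HB_PER_LINE - 1);
    }

    int main(void)
    {
        printf("%u %u %u\n", hash_align(5), hash_align(8), hash_align(11));
        /* prints 4 8 8: hashes collapse to the start of their 4-slot line */
        return 0;
    }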
2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
On Thu, Apr 09, 2015 at 05:41:44PM -0400, Waiman Long wrote: > >>+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node) > >>+{ > >>+ struct __qspinlock *l = (void *)lock; > >>+ struct qspinlock **lp = NULL; > >>+ struct pv_node *pn = (struct pv_node *)node; > >>+ int slow_set = false; > >>+ int loop; > >>+ > >>+ for (;;) { > >>+ for (loop = SPIN_THRESHOLD; loop; loop--) { > >>+ if (!READ_ONCE(l->locked)) > >>+ return; > >>+ > >>+ c...
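
The loop shape visible in this excerpt is spin-then-block: burn SPIN_THRESHOLD iterations on the lock byte before giving the pcpu back. A minimal userspace model; pv_wait_stub() stands in for the kernel's pv_wait() hypercall and the threshold value is illustrative:

    #include <stdatomic.h>

    #define SPIN_THRESHOLD (1 << 15)

    static void pv_wait_stub(_Atomic unsigned char *ptr, unsigned char val)
    {
        /* Kernel: a hypercall that halts the vcpu until kicked;
         * modelled as a plain re-check loop here. */
        while (atomic_load_explicit(ptr, memory_order_acquire) == val)
            ;
    }

    static void wait_head_model(_Atomic unsigned char *locked)
    {
        for (;;) {
            /* Bounded spinning first: cheap when the holder is running. */
            for (int loop = SPIN_THRESHOLD; loop; loop--) {
                if (!atomic_load_explicit(locked, memory_order_acquire))
                    return;
            }
            /* Budget exhausted: the lock holder's vcpu is probably
             * preempted, so stop burning the pcpu and wait to be kicked. */
            pv_wait_stub(locked, 1);
        }
    }

    int main(void)
    {
        _Atomic unsigned char locked = 0;   /* already unlocked */
        wait_head_model(&locked);           /* returns immediately */
        return 0;
    }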