search for: queue_spin_unlock

Displaying results from an estimated 96 matches for "queue_spin_unlock".

2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
Implement the paravirt qspinlock for x86-kvm. We use the regular paravirt call patching to switch between: native_queue_spin_lock_slowpath() __pv_queue_spin_lock_slowpath() native_queue_spin_unlock() __pv_queue_spin_unlock() We use a callee saved call for the unlock function which reduces the i-cache footprint and allows 'inlining' of SPIN_UNLOCK functions again. We further optimize the unlock path by patching the direct call with a "movb $0,%arg1" if we are indeed using...
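To make the switch concrete, here is a minimal sketch in plain C (illustrative only; the kernel patches the call site in place rather than calling through a pointer, and the real qspinlock is a 4-byte word, not a lone byte):

#include <stdatomic.h>

/* Simplified lock: just the lock byte. */
struct qspinlock { _Atomic unsigned char locked; };

/* Native unlock: the single byte store that the "movb $0,%arg1"
 * patching inlines at the call site. */
static void native_queue_spin_unlock(struct qspinlock *lock)
{
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}

/* PV unlock: release the lock, then kick the next waiter's halted
 * vCPU (the hypercall is elided in this sketch). */
static void __pv_queue_spin_unlock(struct qspinlock *lock)
{
	native_queue_spin_unlock(lock);
	/* ... kick hypercall would go here ... */
}

/* Function-pointer stand-in for paravirt call patching. */
static void (*pv_unlock)(struct qspinlock *) = native_queue_spin_unlock;

At boot a KVM guest would point pv_unlock at __pv_queue_spin_unlock; on bare metal it stays native, and the call-site patching then shrinks the unlock back to a single movb instruction.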
2014 Jun 12
2
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...0, Waiman Long wrote: > @@ -19,13 +19,46 @@ extern struct static_key virt_unfairlocks_enabled; > * that clearing the lock bit is done ASAP without artificial delay > * due to compiler optimization. > */ > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > +static __always_inline void __queue_spin_unlock(struct qspinlock *lock) > +#else > static inline void queue_spin_unlock(struct qspinlock *lock) > +#endif > { > barrier(); > ACCESS_ONCE(*(u8 *)lock) = 0; > barrier(); > } > > +#ifdef CONFIG_PARAVIRT_SPINLOCKS > +/* > + * The lock byte can have a value...
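For readability, the hunk quoted above reassembles to the following (the same code as in the excerpt, with comments added):

#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* PV build: the raw byte-store unlock gets an internal name so that a
 * paravirt-aware queue_spin_unlock() wrapper can be layered on top. */
static __always_inline void __queue_spin_unlock(struct qspinlock *lock)
#else
static inline void queue_spin_unlock(struct qspinlock *lock)
#endif
{
	barrier();			/* compiler barrier before the store */
	ACCESS_ONCE(*(u8 *)lock) = 0;	/* clear the lock byte (LSB on x86) */
	barrier();			/* ... and after it */
}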
2014 Jun 15
0
[PATCH 10/11] qspinlock: Paravirt support
...86/include/asm/qspinlock.h +++ linux-2.6/arch/x86/include/asm/qspinlock.h @@ -3,24 +3,45 @@ #include <asm/cpufeature.h> #include <asm-generic/qspinlock_types.h> +#include <asm/paravirt.h> #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) -#define queue_spin_unlock queue_spin_unlock /** * queue_spin_unlock - release a queue spinlock * @lock : Pointer to queue spinlock structure * * An effective smp_store_release() on the least-significant byte. */ -static inline void queue_spin_unlock(struct qspinlock *lock) +static inline void native_queue_unlock(...
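The "effective smp_store_release() on the least-significant byte" can be written out in portable C11 terms; a standalone sketch, assuming little-endian x86 so that the lock byte really is the LSB:

#include <stdatomic.h>

struct qspinlock {
	union {
		_Atomic unsigned int  val;	/* whole 4-byte lock word */
		_Atomic unsigned char locked;	/* its least-significant byte */
	};
};

static inline void native_queue_unlock(struct qspinlock *lock)
{
	/* Release semantics: every critical-section access is ordered
	 * before the lock byte is seen as zero. On x86 this compiles
	 * down to a plain byte store, hence "effective". */
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}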
2014 Feb 26
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
...inlock_types.h> + +#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) + +#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS + +/* + * x86-64 specific queue spinlock union structure + */ +union arch_qspinlock { + struct qspinlock slock; + u8 lock; /* Lock bit */ +}; + +#define queue_spin_unlock queue_spin_unlock +/** + * queue_spin_unlock - release a queue spinlock + * @lock : Pointer to queue spinlock structure + * + * No special memory barrier other than a compiler one is needed for the + * x86 architecture. A compiler barrier is added at the end to make sure + * that clearing the l...
2014 Mar 19
0
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
...inlock_types.h> + +#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) + +#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS + +/* + * x86-64 specific queue spinlock union structure + */ +union arch_qspinlock { + struct qspinlock slock; + u8 lock; /* Lock bit */ +}; + +#define queue_spin_unlock queue_spin_unlock +/** + * queue_spin_unlock - release a queue spinlock + * @lock : Pointer to queue spinlock structure + * + * No special memory barrier other than a compiler one is needed for the + * x86 architecture. A compiler barrier is added at the end to make sure + * that clearing the l...
2014 Mar 19
1
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
...& !defined(CONFIG_X86_PPRO_FENCE) > + > +#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS > + > +/* > + * x86-64 specific queue spinlock union structure > + */ > +union arch_qspinlock { > + struct qspinlock slock; > + u8 lock; /* Lock bit */ > +}; > + > +#define queue_spin_unlock queue_spin_unlock > +/** > + * queue_spin_unlock - release a queue spinlock > + * @lock : Pointer to queue spinlock structure > + * > + * No special memory barrier other than a compiler one is needed for the > + * x86 architecture. A compiler barrier is added at the end to make su...
2014 Jun 12
0
[PATCH v11 14/16] pvqspinlock: Add qspinlock para-virtualization support
...>> @@ -19,13 +19,46 @@ extern struct static_key virt_unfairlocks_enabled; >> * that clearing the lock bit is done ASAP without artificial delay >> * due to compiler optimization. >> */ >> +#ifdef CONFIG_PARAVIRT_SPINLOCKS >> +static __always_inline void __queue_spin_unlock(struct qspinlock *lock) >> +#else >> static inline void queue_spin_unlock(struct qspinlock *lock) >> +#endif >> { >> barrier(); >> ACCESS_ONCE(*(u8 *)lock) = 0; >> barrier(); >> } >> >> +#ifdef CONFIG_PARAVIRT_SPINLOCKS >...
2014 Jun 16
4
[PATCH 01/11] qspinlock: A simple generic 4-byte queue spinlock
...= _Q_LOCKED_VAL; > + if (val) Could you add a comment here, like this: /* * N.B. Initially 'val' will have some value (as we are called * after the _Q_LOCKED_VAL could not be set by queue_spin_lock). * But on subsequent iterations, either the lock holder will * decrement the val (queue_spin_unlock - to zero) and we * needn't record our status in the queue as we have set the * Q_LOCKED_VAL (new) and are the lock holder. Or we are next * in line and need to record our 'next' (aka, smp_processor_id() | idx) * position. */ > + new = tail | (val & _Q_LOCKED_MASK);...
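For context, the loop the requested comment would sit in looks like this (reconstructed from the fragments quoted above; _Q_LOCKED_VAL, _Q_LOCKED_MASK and tail are the patch's names, the surrounding skeleton is an approximation):

	for (;;) {
		new = _Q_LOCKED_VAL;
		if (val)	/* lock still held or queue non-empty: */
			new = tail | (val & _Q_LOCKED_MASK);

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;		/* our update won the race */

		val = old;		/* the word changed under us: retry */
	}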
2015 Mar 16
19
[PATCH 0/9] qspinlock stuff -v15
Hi Waiman, As promised, here is the paravirt stuff I did during the trip to BOS last week. All the !paravirt patches are more or less the same as before (the only real change is the copyright lines in the first patch). The paravirt stuff is 'simple' and KVM only -- the Xen code was a little more convoluted and I've no real way to test that, but it should be straightforward to make work.
2014 Mar 02
1
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
...> + * are added to the queue. In this case, we need to > + * notify the next one to be the head of the queue. > + */ > + goto notify_next; > + } > + /* > + * Accidentally steal the lock, release the lock and > + * let the queue head get it. > + */ > + queue_spin_unlock(lock); > + } else > + prev_qcode &= ~_QSPINLOCK_LOCKED; /* Clear the lock bit */ You know, actually I started this email because I thought that "goto notify_next" is wrong; I misread the patch as if this "goto" could happen even if prev_qcode != 0. So feel free to ign...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...f5d..448de8b 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -5,6 +5,10 @@ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) +#ifdef CONFIG_VIRT_UNFAIR_LOCKS +extern struct static_key virt_unfairlocks_enabled; +#endif + #define queue_spin_unlock queue_spin_unlock /** * queue_spin_unlock - release a queue spinlock @@ -26,4 +30,79 @@ static inline void queue_spin_unlock(struct qspinlock *lock) #include <asm-generic/qspinlock.h> +union arch_qspinlock { + atomic_t val; + u8 locked; +}; + +#ifdef CONFIG_VIRT_UNFAIR_LOCKS +/** + *...
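The tail of this hunk (cut off by the archive) adds the unfair-lock helpers themselves. Purely as an illustration of the idea, not the elided patch body, an unfair trylock over the locked byte is a simple test-and-set:

static __always_inline int queue_spin_trylock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/* xchg() is a full barrier on x86; reading back 0 means we
	 * took the lock, regardless of who was queued first. */
	return !xchg(&qlock->locked, 1);
}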
2014 Oct 29
1
[PATCH v13 09/11] pvqspinlock, x86: Add para-virtualization support
...inline struct mcs_spinlock *pv_get_qhead(struct qspinlock *lock) +{ + struct pv_qnode *pn = pv_decode_tail(atomic_read(&lock->val)); + + while (pn->head == PV_INVALID_HEAD) + cpu_relax(); + + if (WARN_ON_ONCE(!pn->head->locked)) + return NULL; + + return pn->head; +} + +/** + * queue_spin_unlock_slowpath - kick the CPU of the queue head + * @lock : Pointer to queue spinlock structure + * + * The lock is released only after finding the queue head, to avoid a race + * between the queue head and the lock holder. + */ +void queue_spin_unlock_slowpath(struct qspinlock *lock) +{ + struct...
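Putting the visible pieces together, the slow path has roughly this shape (a sketch only; native_spin_unlock() and pv_kick_cpu() are hypothetical stand-ins for the actual release and kick primitives):

void queue_spin_unlock_slowpath(struct qspinlock *lock)
{
	/* Spins until the queue head has been published. */
	struct mcs_spinlock *head = pv_get_qhead(lock);

	/* Release only after the head is known, so the holder cannot
	 * race with a head that is still being recorded. */
	native_spin_unlock(lock);
	if (head)
		pv_kick_cpu(head);	/* wake the head's halted vCPU */
}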