search for: qcode

Displaying 20 results from an estimated 29 matches for "qcode".

2014 Feb 26
0
[PATCH v5 3/8] qspinlock, x86: Add x86 specific optimization for 2 contending tasks
...index 44cefee..98db42e 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -7,12 +7,30 @@ #define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS +#define smp_u8_store_release(p, v) \ +do { \ + barrier(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +/* + * As the qcode will be accessed as a 16-bit word, no offset is needed + */ +#define _QCODE_VAL_OFFSET 0 + /* * x86-64 specific queue spinlock union structure + * Besides the slock and lock fields, the other fields are only + * valid with less than 16K CPUs. */ union arch_qspinlock { struct qspinlock slock...
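The hunk is truncated above, but the union it introduces can be sketched in user space (the field layout follows the fuller v6 excerpt at the bottom of this page; the harness is hypothetical and assumes little-endian x86):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the x86 arch_qspinlock view: the 32-bit lock word
     * overlays a byte-sized lock field and a 16-bit qcode, so each can
     * be stored independently -- which is what smp_u8_store_release()
     * (a compiler barrier plus a plain byte store) relies on. */
    union arch_qspinlock_sketch {
            uint32_t qlcode;                /* complete lock word */
            struct {
                    uint8_t  lock;          /* lock byte */
                    uint8_t  reserved;
                    uint16_t qcode;         /* queue code, 16-bit aligned */
            };
    };

    int main(void)
    {
            union arch_qspinlock_sketch l = { .qlcode = 0 };

            l.lock  = 1;    /* take the lock with a plain byte store */
            l.qcode = 42;   /* publish a queue code without touching it */
            printf("qlcode = 0x%08x\n", l.qlcode);  /* 0x002a0001 */
            return 0;
    }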
2014 Feb 27
14
[PATCH v5 0/8] qspinlock: a 4-byte queue spinlock with PV support
v4->v5: - Move the optimized 2-task contending code to the generic file to enable more architectures to use it without code duplication. - Address some of the style-related comments by PeterZ. - Allow the use of unfair queue spinlock in a real para-virtualized execution environment. - Add para-virtualization support to the qspinlock code by ensuring that the lock holder and queue
2014 Feb 26
0
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
...US < 16K, the bits assignment are: + * Bit 0 : Set if locked + * Bits 1-7 : Not used + * Bits 8-15: Reserved for architecture specific optimization + * Bits 16-31: Queue code + */ +typedef struct qspinlock { + atomic_t qlcode; /* Lock + queue code */ +} arch_spinlock_t; + +#define _QCODE_OFFSET 8 +#define _QSPINLOCK_LOCKED 1U +#define _QSPINLOCK_LOCK_MASK 0xff + +#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index d2b32ac..f185584 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -223,3 +223,10 @@ endif config...
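A hedged sketch of helpers built on these constants for the generic case; when NR_CPUS < 16K, bits 8-15 stay reserved for the architecture as the excerpt's comment says, and the helper names here are mine, not the patch's:

    #include <stdint.h>

    #define _QCODE_OFFSET           8
    #define _QSPINLOCK_LOCKED       1U
    #define _QSPINLOCK_LOCK_MASK    0xff

    /* Hypothetical helpers showing how the constants split the word:
     * the low byte carries the lock bit, the bits at and above
     * _QCODE_OFFSET carry the queue code. */
    static inline uint32_t qlcode_locked(uint32_t qlcode)
    {
            return qlcode & _QSPINLOCK_LOCK_MASK;   /* nonzero if held */
    }

    static inline uint32_t qlcode_qcode(uint32_t qlcode)
    {
            return qlcode >> _QCODE_OFFSET;         /* waiter queue code */
    }

    static inline uint32_t qlcode_make(uint32_t qcode, int locked)
    {
            return (qcode << _QCODE_OFFSET) |
                   (locked ? _QSPINLOCK_LOCKED : 0);
    }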
2014 Apr 02
0
[PATCH v8 01/10] qspinlock: A generic 4-byte queue spinlock implementation
...h> +#endif + +/* + * The queue spinlock data structure - a 32-bit word + * + * The bits assignment are: + * Bit 0 : Set if locked + * Bits 1-7 : Not used + * Bits 8-31: Queue code + */ +typedef struct qspinlock { + atomic_t qlcode; /* Lock + queue code */ +} arch_spinlock_t; + +#define _QCODE_OFFSET 8 +#define _QLOCK_LOCKED 1U +#define _QLOCK_LOCK_MASK 0xff + +#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index d2b32ac..f185584 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -223,3 +223,10 @@ endif config MUTEX_S...
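With the v8 layout, the uncontended fastpath is a single compare-and-swap on the whole word. A user-space approximation using GCC atomics rather than the kernel's atomic_t API:

    #include <stdint.h>

    #define _QLOCK_LOCKED   1U

    /* Sketch of the uncontended fastpath: if the whole 32-bit word is 0
     * (unlocked, no queued waiters), one cmpxchg acquires the lock;
     * otherwise the caller falls through to the slowpath. */
    static inline int queue_spin_trylock_sketch(uint32_t *qlcode)
    {
            uint32_t expected = 0;

            return __atomic_compare_exchange_n(qlcode, &expected,
                                               _QLOCK_LOCKED, 0,
                                               __ATOMIC_ACQUIRE,
                                               __ATOMIC_RELAXED);
    }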
2014 Mar 02
1
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
On 02/26, Waiman Long wrote: > > +void queue_spin_lock_slowpath(struct qspinlock *lock, int qsval) > +{ > + unsigned int cpu_nr, qn_idx; > + struct qnode *node, *next; > + u32 prev_qcode, my_qcode; > + > + /* > + * Get the queue node > + */ > + cpu_nr = smp_processor_id(); > + node = get_qnode(&qn_idx); > + > + /* > + * It should never happen that all the queue nodes are being used. > + */ > + BUG_ON(!node); > + > + /* > + * Set...
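The get_qnode()/BUG_ON(!node) pair quoted here relies on each CPU keeping a small fixed pool of queue nodes, one per nesting context, so allocation can never fail. A hypothetical illustration of that pool:

    #include <stdint.h>

    /* Hypothetical per-CPU qnode pool: one node per nesting level
     * (e.g. task, softirq, hardirq, NMI), so get_qnode() can always
     * hand out a free node and BUG_ON(!node) should never fire. */
    #define MAX_QNODES      4

    struct qnode {
            uint32_t wait;          /* predecessor clears this to hand off */
            struct qnode *next;     /* successor in the wait queue */
    };

    struct qnode_set {
            struct qnode nodes[MAX_QNODES];
            int used;               /* nesting levels in use on this CPU */
    };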
2014 Mar 12
17
[PATCH v6 00/11] qspinlock: a 4-byte queue spinlock with PV support
v5->v6: - Change the optimized 2-task contending code to make it fairer at the expense of a bit of performance. - Add a patch to support unfair queue spinlock for Xen. - Modify the PV qspinlock code to follow what was done in the PV ticketlock. - Add performance data for the unfair lock as well as the PV support code. v4->v5: - Move the optimized 2-task contending code to the
2014 Mar 19
15
[PATCH v7 00/11] qspinlock: a 4-byte queue spinlock with PV support
v6->v7: - Remove an atomic operation from the 2-task contending code - Shorten the names of some macros - Make the queue waiter to attempt to steal lock when unfair lock is enabled. - Remove lock holder kick from the PV code and fix a race condition - Run the unfair lock & PV code on overcommitted KVM guests to collect performance data. v5->v6: - Change the optimized
2014 Feb 26
0
[PATCH RFC v5 7/8] pvqspinlock, x86: Add qspinlock para-virtualization support
...-0,0 +1,176 @@ +#ifndef _ASM_X86_PVQSPINLOCK_H +#define _ASM_X86_PVQSPINLOCK_H + +/* + * Queue Spinlock Para-Virtualization Support + * + * +------+ +-----+ nxtcpu_p1 +----+ + * | Lock | |Queue|----------->|Next| + * |Holder|<-----------|Head |<-----------|Node| + * +------+ prev_qcode +-----+ prev_qcode +----+ + * + * As long as the current lock holder passes through the slowpath, the queue + * head CPU will have its CPU number stored in prev_qcode. The situation is + * the same for the node next to the queue head. + * + * The next node, while setting up the next pointer in the...
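The diagram depends on every party being able to name its predecessor's CPU through prev_qcode, so a halted virtual CPU can later be kicked. One plausible packing of CPU number and per-CPU node index into a nonzero qcode; the patch's exact encoding may differ:

    #include <stdint.h>

    /* Hypothetical qcode packing: bias the CPU number by 1 so that a
     * qcode of 0 still means "no waiter", and keep the per-CPU qnode
     * index in the low two bits. The patch's exact packing may differ. */
    static inline uint32_t encode_qcode(uint32_t cpu_nr, uint32_t qn_idx)
    {
            return ((cpu_nr + 1) << 2) | qn_idx;
    }

    static inline uint32_t qcode_to_cpu(uint32_t qcode)
    {
            return (qcode >> 2) - 1;        /* undo the +1 bias */
    }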
2014 Feb 26
22
[PATCH v5 0/8] qspinlock: a 4-byte queue spinlock with PV support
v4->v5: - Move the optimized 2-task contending code to the generic file to enable more architectures to use it without code duplication. - Address some of the style-related comments by PeterZ. - Allow the use of unfair queue spinlock in a real para-virtualized execution environment. - Add para-virtualization support to the qspinlock code by ensuring that the lock holder and queue
2014 Apr 01
10
[PATCH v8 00/10] qspinlock: a 4-byte queue spinlock with PV support
v7->v8: - Remove one unneeded atomic operation from the slowpath, thus improving performance. - Simplify some of the codes and add more comments. - Test for X86_FEATURE_HYPERVISOR CPU feature bit to enable/disable unfair lock. - Reduce unfair lock slowpath lock stealing frequency depending on its distance from the queue head. - Add performance data for IvyBridge-EX CPU.
2014 Mar 12
0
[PATCH RFC v6 09/11] pvqspinlock, x86: Add qspinlock para-virtualization support
...+1,232 @@ +#ifndef _ASM_X86_PVQSPINLOCK_H +#define _ASM_X86_PVQSPINLOCK_H + +/* + * Queue Spinlock Para-Virtualization (PV) Support + * + * +------+ +-----+ nxtcpu_p1 +----+ + * | Lock | |Queue|----------->|Next| + * |Holder|<-----------|Head |<-----------|Node| + * +------+ prev_qcode +-----+ prev_qcode +----+ + * + * As long as the current lock holder passes through the slowpath, the queue + * head CPU will have its CPU number stored in prev_qcode. The situation is + * the same for the node next to the queue head. + * + * The next node, while setting up the next pointer in the...
2014 Mar 12
0
[PATCH v6 04/11] qspinlock: Optimized code path for 2 contending tasks
...h/x86/include/asm/qspinlock.h index acbe155..7f3129c 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -21,9 +21,10 @@ union arch_qspinlock { struct qspinlock slock; struct { u8 lock; /* Lock bit */ - u8 reserved; + u8 wait; /* Waiting bit */ u16 qcode; /* Queue code */ }; + u16 lock_wait; /* Lock and wait bits */ u32 qlcode; /* Complete lock word */ }; diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 52d3580..0030fad 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -112,6 +112,8 @@ st...
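The new wait byte lets a second contender spin close to the lock instead of joining the queue: lock and wait share a 16-bit lock_wait word, so the waiter can claim the lock and clear its wait flag in one atomic step. A rough user-space sketch of that idea, assuming little-endian x86 as the patch does (not the patch's exact code):

    #include <stdint.h>

    union lock_wait_sketch {
            uint16_t lock_wait;     /* lock and wait bits together */
            struct {
                    uint8_t lock;   /* bit 0: locked */
                    uint8_t wait;   /* set while one CPU waits nearby */
            };
    };

    /* Second contender: announce the wait bit, then spin until the
     * holder releases, claiming the lock and clearing the wait bit in
     * a single 16-bit cmpxchg (0x0100 = wait set/lock free on
     * little-endian; 0x0001 = lock set/wait clear). */
    static void spin_as_second_contender(union lock_wait_sketch *l)
    {
            __atomic_store_n(&l->wait, 1, __ATOMIC_RELAXED);
            for (;;) {
                    uint16_t expected = 1U << 8;
                    if (__atomic_compare_exchange_n(&l->lock_wait, &expected,
                                                    1, 0, __ATOMIC_ACQUIRE,
                                                    __ATOMIC_RELAXED))
                            return;
            }
    }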