search for: config_x86_oostor

Displaying results from an estimated 52 matches for "config_x86_oostor".

2014 Jun 15
0
[PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors
...3 +++
 3 files changed, 24 insertions(+)
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
@@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
 #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
+#define virt_queue_spin_lock virt_queue_spin_lock
+
+static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+...
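The excerpt cuts off inside virt_queue_spin_lock(); as a sketch (reconstructed from the visible declaration and the patch's stated intent, not quoted from it), the hypervisor fallback amounts to a plain test-and-set loop:

static inline bool virt_queue_spin_lock(struct qspinlock *lock)
{
	/* Only take the unfair test-and-set path when running virtualized. */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	/* Test-and-set: spin until we claim the whole lock word ourselves. */
	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
		cpu_relax();

	return true;
}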
2014 Feb 26
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
.../include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
new file mode 100644
index 0000000..44cefee
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
+
+/*
+ * x86-64 specific queue spinlock union structure
+ */
+union arch_qspinlock {
+	struct qspinlock slock;
+	u8 lock;	/* Lock bit */
+};
+
+#define queue_spin_unlock queue_spin_unlock
+/**
+ * queue_s...
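The unlock routine is truncated at "queue_s..."; a plausible completion (a hedged sketch consistent with the union above, not the verbatim patch) releases the lock with a single byte store:

static inline void queue_spin_unlock(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/*
	 * On x86 stores are not reordered against earlier loads or stores,
	 * so a compiler barrier around a plain byte store is enough to act
	 * as a release on the least-significant byte.
	 */
	barrier();
	ACCESS_ONCE(qlock->lock) = 0;
	barrier();
}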
2014 Feb 27
0
[PATCH v5 2/8] qspinlock, x86: Enable x86-64 to use queue spinlock
.../include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
new file mode 100644
index 0000000..44cefee
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
+
+/*
+ * x86-64 specific queue spinlock union structure
+ */
+union arch_qspinlock {
+	struct qspinlock slock;
+	u8 lock;	/* Lock bit */
+};
+
+#define queue_spin_unlock queue_spin_unlock
+/**
+ * queue_s...
2014 Mar 19
0
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
.../include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
new file mode 100644
index 0000000..44cefee
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
+
+/*
+ * x86-64 specific queue spinlock union structure
+ */
+union arch_qspinlock {
+	struct qspinlock slock;
+	u8 lock;	/* Lock bit */
+};
+
+#define queue_spin_unlock queue_spin_unlock
+/**
+ * queue_s...
2016 Jan 12
1
[PATCH 3/4] x86,asm: Re-work smp_store_mb()
On Tue, Jan 12, 2016 at 09:20:06AM -0800, Linus Torvalds wrote:
> On Tue, Jan 12, 2016 at 5:57 AM, Michael S. Tsirkin <mst at redhat.com> wrote:
> > #ifdef xchgrz
> > /* same as xchg but poking at gcc red zone */
> > #define barrier() do { int ret; asm volatile ("xchgl %0, -4(%%" SP ");": "=r"(ret) :: "memory", "cc"); }
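For reference, the shape this rework converged on (an assumption based on the thread, not the quoted excerpt) is to fold the store and the full barrier into one xchg, which carries an implicit lock prefix on x86:

/* xchg is a full barrier on x86, so the store and mb() collapse into one op. */
#define smp_store_mb(var, value) \
	do { (void)xchg(&(var), (value)); } while (0)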
2014 Mar 19
1
[PATCH v7 02/11] qspinlock, x86: Enable x86-64 to use queue spinlock
...ck.h
> new file mode 100644
> index 0000000..44cefee
> --- /dev/null
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -0,0 +1,41 @@
> +#ifndef _ASM_X86_QSPINLOCK_H
> +#define _ASM_X86_QSPINLOCK_H
> +
> +#include <asm-generic/qspinlock_types.h>
> +
> +#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
> +
> +#define _ARCH_SUPPORTS_ATOMIC_8_16_BITS_OPS
> +
> +/*
> + * x86-64 specific queue spinlock union structure
> + */
> +union arch_qspinlock {
> +	struct qspinlock slock;
> +	u8 lock;	/* Lock bit */
> +};
> +
> +...
2014 May 30
0
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -585,6 +585,17 @@ config PARAVIRT_SPINLOCKS
 	  If you are unsure how to answer this question, answer Y.
+config VIRT_UNFAIR_LOCKS
+	bool "Enable unfair locks in a virtual guest"
+	depends on SMP && QUEUE_SPINLOCK
+	depends on !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE
+	---help---
+	  This changes the kernel to use unfair locks in a virtual
+	  guest. This will help performance in most cases. However,
+	  there is a possibility of lock starvation on a heavily
+	  contended lock especially in a large guest with many
+	  virtual...
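The fast path this option enables is essentially a byte-sized test-and-set in front of the queueing slow path; a hedged sketch (names follow the series' conventions, the body is reconstructed, not quoted):

static __always_inline void queue_spin_lock_unfair(struct qspinlock *lock)
{
	union arch_qspinlock *qlock = (union arch_qspinlock *)lock;

	/* Unfair: whichever CPU sees the lock byte clear first wins,
	 * regardless of how long other CPUs have been queued. */
	if (likely(cmpxchg(&qlock->lock, 0, _Q_LOCKED_VAL) == 0))
		return;
	queue_spin_lock_slowpath(lock, 0);
}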
2016 Jan 12
7
[PATCH v2 0/3] x86: faster mb()+other barrier.h tweaks
mb() typically uses mfence on modern x86, but a micro-benchmark shows that it's 2 to 3 times slower than lock; addl $0,(%%e/rsp) that we use on older CPUs. So let's use the locked variant everywhere - helps keep the code simple as well. While I was at it, I found some inconsistencies in comments in arch/x86/include/asm/barrier.h I hope I'm not splitting this up too much - the reason
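The "2 to 3 times slower" figure is easy to check from userspace; a hypothetical stand-alone micro-benchmark (not from this series), x86-64 with GCC or Clang:

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>	/* __rdtsc() */

#define ITERS 10000000UL

int main(void)
{
	uint64_t t0, t1, t2;
	unsigned long i;

	t0 = __rdtsc();
	for (i = 0; i < ITERS; i++)		/* full fence instruction */
		asm volatile("mfence" ::: "memory");
	t1 = __rdtsc();
	for (i = 0; i < ITERS; i++)		/* locked RMW on the stack */
		asm volatile("lock; addl $0,(%%rsp)" ::: "memory", "cc");
	t2 = __rdtsc();

	printf("mfence:     %.1f cycles/iter\n", (double)(t1 - t0) / ITERS);
	printf("lock; addl: %.1f cycles/iter\n", (double)(t2 - t1) / ITERS);
	return 0;
}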
2014 Jun 15
28
[PATCH 00/11] qspinlock with paravirt support
Since Waiman seems incapable of doing simple things; here's my take on the paravirt crap. The first few patches are taken from Waiman's latest series, but the virt support is completely new. Its primary aim is to not mess up the native code. I've not stress tested it, but the virt and paravirt (kvm) cases boot on simple smp guests. I've not done Xen, but the patch should be
2014 May 07
0
[PATCH v10 10/19] qspinlock, x86: Allow unfair spinlock in a virtual guest
...h/x86/Kconfig
@@ -585,6 +585,17 @@ config PARAVIRT_SPINLOCKS
 	  If you are unsure how to answer this question, answer Y.
+config PARAVIRT_UNFAIR_LOCKS
+	bool "Enable unfair locks in a para-virtualized guest"
+	depends on PARAVIRT && SMP && QUEUE_SPINLOCK
+	depends on !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE
+	---help---
+	  This changes the kernel to use unfair locks in a
+	  para-virtualized guest. This will help performance in most
+	  cases. However, there is a possibility of lock starvation
+	  on a heavily contended lock especially in a large guest
+	  with many...
2014 Jun 15
0
[PATCH 10/11] qspinlock: Paravirt support
...================================================
--- linux-2.6.orig/arch/x86/include/asm/qspinlock.h
+++ linux-2.6/arch/x86/include/asm/qspinlock.h
@@ -3,24 +3,45 @@
 #include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-#define queue_spin_unlock queue_spin_unlock
 /**
  * queue_spin_unlock - release a queue spinlock
  * @lock : Pointer to queue spinlock structure
  *
  * An effective smp_store_release() on the least-significant byte.
  */
-static inline void queue_sp...
2016 Jan 12
0
[PATCH v2 2/3] x86: drop a comment left over from X86_OOSTORE
The comment about wmb being non-nop is a left over from before commit 09df7c4c8097 ("x86: Remove CONFIG_X86_OOSTORE"). It makes no sense now: if you have an SMP system with out of order stores, making wmb not a nop will not help. Additionally, wmb is not a nop even for regular intel CPUs because of weird use-cases e.g. dealing with WC memory. Drop this comment. Signed-off-by: Michael S. Tsirkin <mst...
2016 Jan 28
0
[PATCH v5 2/5] x86: drop a comment left over from X86_OOSTORE
The comment about wmb being non-nop to deal with non-intel CPUs is a left over from before commit 09df7c4c8097 ("x86: Remove CONFIG_X86_OOSTORE"). It makes no sense now: in particular, wmb is not a nop even for regular intel CPUs because of weird use-cases e.g. dealing with WC memory. Drop this comment. Signed-off-by: Michael S. Tsirkin <mst at redhat.com> --- arch/x86/include/asm/barrier.h | 4 ---- 1 file changed, 4 delet...
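Both versions of this patch rest on the same fact: after the removal of X86_OOSTORE, wmb() on x86 remains a real instruction because of write-combining memory. Roughly, the barrier.h definitions of that era look like this (a sketch from memory, not taken from the patch itself):

/* arch/x86/include/asm/barrier.h, post-X86_OOSTORE (approximate) */
#ifdef CONFIG_X86_32
#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif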
2014 Feb 26
0
[PATCH RFC v5 4/8] pvqspinlock, x86: Allow unfair spinlock in a real PV environment
...h/x86/Kconfig
@@ -645,6 +645,17 @@ config PARAVIRT_SPINLOCKS
 	  If you are unsure how to answer this question, answer Y.
+config PARAVIRT_UNFAIR_LOCKS
+	bool "Enable unfair locks in a para-virtualized guest"
+	depends on PARAVIRT && SMP && QUEUE_SPINLOCK
+	depends on !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE
+	---help---
+	  This changes the kernel to use unfair locks in a real
+	  para-virtualized guest system. This will help performance
+	  in most cases. However, there is a possibility of lock
+	  starvation on a heavily contended lock especially in a
+	  large gue...
2014 Mar 12
0
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
...h/x86/Kconfig
@@ -629,6 +629,17 @@ config PARAVIRT_SPINLOCKS
 	  If you are unsure how to answer this question, answer Y.
+config PARAVIRT_UNFAIR_LOCKS
+	bool "Enable unfair locks in a para-virtualized guest"
+	depends on PARAVIRT && SMP && QUEUE_SPINLOCK
+	depends on !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE
+	---help---
+	  This changes the kernel to use unfair locks in a
+	  para-virtualized guest. This will help performance in most
+	  cases. However, there is a possibility of lock starvation
+	  on a heavily contended lock especially in a large guest
+	  with many...
2016 Jan 28
10
[PATCH v5 0/5] x86: faster smp_mb()+documentation tweaks
mb() typically uses mfence on modern x86, but a micro-benchmark shows that it's 2 to 3 times slower than lock; addl that we use on older CPUs. So we really should use the locked variant everywhere, except that intel manual says that clflush is only ordered by mfence, so we can't. Note: some callers of clflush seems to assume sfence will order it, so there could be existing bugs around
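The clflush caveat is the reason mb() cannot simply become the locked variant everywhere; an illustrative helper (not from the series) showing the pairing the manual requires:

static inline void flush_cache_line(void *addr)
{
	/* clflush is only guaranteed to be ordered by mfence, so a locked
	 * RMW would not be enough to fence the flush itself. */
	asm volatile("clflush %0" : "+m" (*(volatile char *)addr));
	asm volatile("mfence" ::: "memory");
}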