Displaying 20 results from an estimated 48 matches for "config_queued_spinlocks".
2015 May 11 (0 replies): locking/pvqspinlock, x86: undefined CONFIG_QUEUED_SPINLOCKS
Hi Peter,
Your commit f233f7f1581e ("locking/pvqspinlock, x86: Implement the
paravirt qspinlock call patching") is in today's linux-next tree
(i.e., next-20150511). The two #ifdef blocks listed below cannot be
compiled in their current state, since CONFIG_QUEUED_SPINLOCKS does not
exist:
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
[...]
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
It seems to be just a typo, since QUEUED_SPINLOCK (i.e., without the
trailing S) is what is defined in kernel/Kconf...
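For context on why the misspelled guard fails silently rather than producing an error: the preprocessor treats an undefined macro inside defined() as false, so the guarded block simply vanishes. A minimal standalone sketch (hypothetical file and function names, not the kernel code):

/* kconfig_typo_sketch.c - hypothetical illustration. Kconfig symbols
 * become CONFIG_* macros at build time; a guard misspelled relative to
 * the generated macro never matches, and no diagnostic is raised. */
#include <stdio.h>

/* Pretend the build system generated this (note: no trailing S): */
#define CONFIG_QUEUED_SPINLOCK 1

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
/* Never compiled: the second macro is misspelled vs. what Kconfig made. */
static void pv_lock_init(void) { puts("pv qspinlock patching enabled"); }
#endif

int main(void)
{
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
	pv_lock_init();
#else
	puts("guarded block silently compiled out");
#endif
	return 0;
}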
2016 Jun 03 (2 replies): [PATCH v5 1/6] qspinlock: powerpc support qspinlock
...a-
> >lppaca_ptr))
> +extern void __spin_yield(arch_spinlock_t *lock);
> +extern void __rw_yield(arch_rwlock_t *lock);
> +#else /* SPLPAR */
> +#define __spin_yield(x) barrier()
> +#define __rw_yield(x) barrier()
> +#define SHARED_PROCESSOR 0
> +#endif
> +
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> +#include <asm/qspinlock.h>
> +#else
> static __always_inline int arch_spin_value_unlocked(arch_spinlock_t
> lock)
> {
> return lock.slock == 0;
> @@ -106,18 +120,6 @@ static inline int
> arch_spin_trylock(arch_spinlock_t *lock)
> * held. Conveniently, we h...
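For readers unfamiliar with the helpers in this hunk, a hedged sketch of how they are typically consumed: powerpc's classic spin loop yields its time slice to the hypervisor on shared-processor LPARs instead of busy-waiting. The stand-in types and loop below are illustrative only, not the kernel's exact code:

/* spin_yield_sketch.c - userspace stand-ins for the names in the hunk.
 * In the kernel, __spin_yield() confers the time slice to the lock
 * holder's CPU via a hypervisor call on shared-processor LPARs. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_uint slock; } arch_spinlock_t;

#define SHARED_PROCESSOR 1	/* pretend: shared-processor LPAR */
static void __spin_yield(arch_spinlock_t *lock) { (void)lock; }

static bool arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int expected = 0;
	return atomic_compare_exchange_strong(&lock->slock, &expected, 1);
}

/* The pattern the hunk enables: spin, but yield between polls when the
 * physical CPU is shared with other partitions. */
static void arch_spin_lock_sketch(arch_spinlock_t *lock)
{
	while (!arch_spin_trylock(lock)) {
		do {
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (atomic_load(&lock->slock) != 0);
	}
}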
2016 Jun 02 (0 replies): [PATCH v5 1/6] qspinlock: powerpc support qspinlock
...fine SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +120,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* va...
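For contrast with the lock.slock == 0 test kept in the #else branch, a hedged sketch of the representation that <asm/qspinlock.h> switches to: the generic queued spinlock packs the locked byte, pending bit, and waiter-queue tail into a single 32-bit word, so "unlocked" is still just an all-zero value. The layout comment below follows the generic code of that era and is illustrative, not powerpc-specific:

/* qspinlock_value_sketch.c - simplified model of the generic qspinlock
 * word. bits 0-7: locked byte; bit 8: pending; bits 16-31: tail
 * (encodes the CPU and queue-node index of the last waiter). */
#include <stdatomic.h>

typedef struct { atomic_uint val; } qspinlock_sketch_t;

static inline int queued_spin_value_unlocked_sketch(qspinlock_sketch_t lock)
{
	/* All-zero word: no owner, no pending bit, no queued waiters. */
	return atomic_load_explicit(&lock.val, memory_order_relaxed) == 0;
}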
2016 Dec 06 (1 reply): [PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...paca_ptr))
> +extern void __spin_yield(arch_spinlock_t *lock);
> +extern void __rw_yield(arch_rwlock_t *lock);
> +#else /* SPLPAR */
> +#define __spin_yield(x) barrier()
> +#define __rw_yield(x) barrier()
> +#define SHARED_PROCESSOR 0
> +#endif
> +
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> +#include <asm/qspinlock.h>
> +#else
> +
> +#define arch_spin_relax(lock) __spin_yield(lock)
> +
> static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
> {
> return lock.slock == 0;
> @@ -114,18 +131,6 @@ static inline int arch_spin_trylo...
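The new arch_spin_relax(lock) mapping to __spin_yield(lock) in this v8 hunk matters because generic lock-waiting loops call arch_spin_relax() between polls, so this single #define routes every retry through the hypervisor yield. A self-contained, hedged sketch of that call pattern (stand-in types are hypothetical):

/* relax_sketch.c - illustrative only. With the hunk's #define, each
 * retry donates the time slice instead of just pausing the CPU. */
#include <stdatomic.h>

typedef struct { atomic_uint slock; } arch_spinlock_t;

static void __spin_yield(arch_spinlock_t *lock) { (void)lock; }
#define arch_spin_relax(lock) __spin_yield(lock)	/* as in the hunk */

static void wait_for_unlock_sketch(arch_spinlock_t *lock)
{
	while (atomic_load(&lock->slock) != 0)
		arch_spin_relax(lock);
}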
2016 Jun 03 (0 replies): [PATCH v5 1/6] qspinlock: powerpc support qspinlock
...id __spin_yield(arch_spinlock_t *lock);
> > +extern void __rw_yield(arch_rwlock_t *lock);
> > +#else /* SPLPAR */
> > +#define __spin_yield(x) barrier()
> > +#define __rw_yield(x) barrier()
> > +#define SHARED_PROCESSOR 0
> > +#endif
> > +
> > +#ifdef CONFIG_QUEUED_SPINLOCKS
> > +#include <asm/qspinlock.h>
> > +#else
> > static __always_inline int
> > arch_spin_value_unlocked(arch_spinlock_t
> > lock)
> > {
> > return lock.slock == 0;
> > @@ -106,18 +120,6 @@ static inline int
> > arch_spin_trylock(arch_sp...
2016 Dec 05 (0 replies): [PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...OCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
+
+#define arch_spin_relax(lock) __spin_yield(lock)
+
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -114,18 +131,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Convenie...
2016 Apr 28 (0 replies): [PATCH] powerpc: enable qspinlock and its virtualization support
...u);
+extern void __spin_wake_cpu(int cpu);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __spin_yield_cpu(x) barrier()
+#define __spin_wake_cpu(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* va...
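Unlike the earlier hunks, this one also adds a wait/wake pair, __spin_yield_cpu() and __spin_wake_cpu(), which is what a paravirtualized qspinlock needs: a queued waiter parks its virtual CPU in the hypervisor, and the unlocker kicks exactly that CPU awake. A hedged userspace stand-in for the handshake (bodies are hypothetical; the real helpers use hypervisor calls, and real code re-checks the lock before parking to avoid lost wakeups):

/* pv_wait_wake_sketch.c - models only the park/kick handshake. */
#include <stdatomic.h>
#include <sched.h>

#define NR_CPUS_SKETCH 64
static atomic_int cpu_parked[NR_CPUS_SKETCH];

/* Waiter side: park this vCPU until someone kicks it. */
static void __spin_yield_cpu_sketch(int cpu)
{
	atomic_store(&cpu_parked[cpu], 1);
	while (atomic_load(&cpu_parked[cpu]))
		sched_yield();	/* stand-in for ceding the vCPU */
}

/* Unlocker side: kick the parked vCPU of the next queued waiter. */
static void __spin_wake_cpu_sketch(int cpu)
{
	atomic_store(&cpu_parked[cpu], 0);	/* stand-in for a prod call */
}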
2016 Jun 02 (8 replies): [PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
changes from v4:
BUG FIX, thanks to Boqun for reporting this issue:
struct __qspinlock has a different layout on big-endian machines, so
native_queued_spin_unlock() may write the value to the wrong address. Now fixed.
changes from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock;
no other patches changed.
and the patch
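The big-endian bug named in this changelog is easy to see with a layout sketch: the locked byte occupies the low 8 bits of the 32-bit lock word, which is byte offset 0 on little-endian but byte offset 3 on big-endian, so an endian-blind byte store clears the wrong byte. A simplified, hedged model (not the exact kernel struct __qspinlock):

/* qsl_endian_sketch.c - run on both endiannesses to see the buggy
 * variant diverge: the field store is always right, the offset-0 byte
 * store is right only on little-endian. */
#include <stdint.h>
#include <stdio.h>

union qsl_sketch {
	uint32_t val;			/* locked byte = low 8 bits of val */
	struct {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		uint8_t reserved[3];
		uint8_t locked;		/* low bits sit at offset 3 on BE */
#else
		uint8_t locked;		/* ...but at offset 0 on LE */
		uint8_t reserved[3];
#endif
	};
};

int main(void)
{
	union qsl_sketch l = { .val = 1u };	/* locked */
	l.locked = 0;				/* endian-aware: correct */
	printf("field store:    val=%u\n", (unsigned)l.val);

	l.val = 1u;
	*(uint8_t *)&l = 0;			/* endian-blind: buggy on BE */
	printf("offset-0 store: val=%u\n", (unsigned)l.val);
	return 0;
}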
2016 Jun 03 (3 replies): [PATCH v5 1/6] qspinlock: powerpc support qspinlock
...nlock_t *lock);
>>> +extern void __rw_yield(arch_rwlock_t *lock);
>>> +#else /* SPLPAR */
>>> +#define __spin_yield(x) barrier()
>>> +#define __rw_yield(x) barrier()
>>> +#define SHARED_PROCESSOR 0
>>> +#endif
>>> +
>>> +#ifdef CONFIG_QUEUED_SPINLOCKS
>>> +#include <asm/qspinlock.h>
>>> +#else
>>> static __always_inline int
>>> arch_spin_value_unlocked(arch_spinlock_t
>>> lock)
>>> {
>>> return lock.slock == 0;
>>> @@ -106,18 +120,6 @@ static inline int
>>...
2016 Apr 28 (2 replies): [PATCH resend] powerpc: enable qspinlock and its virtualization support
...u);
+extern void __spin_wake_cpu(int cpu);
+extern void __rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __spin_yield_cpu(x) barrier()
+#define __spin_wake_cpu(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* va...
2016 May 17 (6 replies): [PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
changes from v1:
split the single patch into 6 patches;
some minor code changes.
Benchmark test results are below.
Ran 3 tests on a pSeries IBM,8408-E8E with 32 CPUs, 64GB memory:
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |