search for: local_paca

Displaying 20 results from an estimated 34 matches for "local_paca".

2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -52,6 +52,20 @@ > ?#define SYNC_IO > ?#endif > ? > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor > mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- > >lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR */ > +#define __spin_yield(x) barrier() > +#define __rw_yield(x) barrier() > +#define SHARED_PROCESSOR 0 > +#endif > + > +#ifdef C...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -52,6 +52,20 @@ > ?#define SYNC_IO > ?#endif > ? > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor > mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- > >lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR */ > +#define __spin_yield(x) barrier() > +#define __rw_yield(x) barrier() > +#define SHARED_PROCESSOR 0 > +#endif > + > +#ifdef C...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...d..4359ee6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,20 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier() +#define __rw_yield(x) barrier() +#define SHARED_PROCESSOR 0 +#endif + +#ifdef CONFIG_QUEUED_SPINLOCKS +#include <asm/qspinlock....
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...d..4359ee6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,20 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier() +#define __rw_yield(x) barrier() +#define SHARED_PROCESSOR 0 +#endif + +#ifdef CONFIG_QUEUED_SPINLOCKS +#include <asm/qspinlock....
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...rch/powerpc/include/asm/spinlock.h > @@ -60,6 +60,23 @@ static inline bool vcpu_is_preempted(int cpu) > } > #endif > > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR */ > +#define __spin_yield(x) barrier() > +#define __rw_yield(x) barrier() > +#define SHARED_PROCESSOR 0 > +#endif > + > +...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...rch/powerpc/include/asm/spinlock.h > @@ -60,6 +60,23 @@ static inline bool vcpu_is_preempted(int cpu) > } > #endif > > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR */ > +#define __spin_yield(x) barrier() > +#define __rw_yield(x) barrier() > +#define SHARED_PROCESSOR 0 > +#endif > + > +...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...nclude/asm/spinlock.h > > @@ -52,6 +52,20 @@ > > ?#define SYNC_IO > > ?#endif > > ? > > +#if defined(CONFIG_PPC_SPLPAR) > > +/* We only yield to the hypervisor if we are in shared processor > > mode */ > > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- > > > > > > lppaca_ptr)) > > +extern void __spin_yield(arch_spinlock_t *lock); > > +extern void __rw_yield(arch_rwlock_t *lock); > > +#else /* SPLPAR */ > > +#define __spin_yield(x) barrier() > > +#define __rw_yield(x) barrier() > > +#define S...
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...erpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -60,6 +60,23 @@ static inline bool vcpu_is_preempted(int cpu) } #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier() +#define __rw_yield(x) barrier() +#define SHARED_PROCESSOR 0 +#endif + +#ifdef CONFIG_QUEUED_SPINLOCKS +#include <...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com> change from v4: BUG FIX. thanks to boqun for reporting this issue. struct __qspinlock has a different layout on big-endian machines. native_queued_spin_unlock() may write a value to a wrong address. now fixed. change from v3: a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock no other patch changed. and the patch
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com> change from v4: BUG FIX. thanks to boqun for reporting this issue. struct __qspinlock has a different layout on big-endian machines. native_queued_spin_unlock() may write a value to a wrong address. now fixed. change from v3: a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock no other patch changed. and the patch
2016 Dec 06
1
[PATCH v8 3/6] powerpc: lib/locks.c: Add cpu yield/wake helper function
...00644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -64,9 +64,13 @@ static inline bool vcpu_is_preempted(int cpu) > /* We only yield to the hypervisor if we are in shared processor mode */ > #define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > extern void __spin_yield(arch_spinlock_t *lock); > +extern void __spin_yield_cpu(int cpu, int confer); > +extern void __spin_wake_cpu(int cpu); > extern void __rw_yield(arch_rwlock_t *lock); > #else /* SPLPAR */ > #define __spin_yield(x) barrier() &gt...
2016 Dec 06
1
[PATCH v8 3/6] powerpc: lib/locks.c: Add cpu yield/wake helper function
...00644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -64,9 +64,13 @@ static inline bool vcpu_is_preempted(int cpu) > /* We only yield to the hypervisor if we are in shared processor mode */ > #define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > extern void __spin_yield(arch_spinlock_t *lock); > +extern void __spin_yield_cpu(int cpu, int confer); > +extern void __spin_wake_cpu(int cpu); > extern void __rw_yield(arch_rwlock_t *lock); > #else /* SPLPAR */ > #define __spin_yield(x) barrier() &gt...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...d..3b65372 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __spin_yield_cpu(int cpu); +extern void __spin_wake_cpu(int cpu); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier() +#define __spin_yield_cpu(x) barrier() +#define __sp...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...d..3b65372 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __spin_yield_cpu(int cpu); +extern void __spin_wake_cpu(int cpu); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier() +#define __spin_yield_cpu(x) barrier() +#define __sp...
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1: separate into 6 patches from one patch; some minor code changes. benchmark test results are below. run 3 tests on pseries IBM,8408-E8E with 32 cpus, 64GB memory perf bench futex hash perf bench futex lock-pi perf record -advRT || perf bench sched messaging -g 1000 || perf report summary: _____test________________spinlock______________pv-qspinlock_____ |futex hash | 556370 ops |
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1: separate into 6 patches from one patch; some minor code changes. benchmark test results are below. run 3 tests on pseries IBM,8408-E8E with 32 cpus, 64GB memory perf bench futex hash perf bench futex lock-pi perf record -advRT || perf bench sched messaging -g 1000 || perf report summary: _____test________________spinlock______________pv-qspinlock_____ |futex hash | 556370 ops |
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All, this is the fairlock patchset. You can apply them and build successfully. patches are based on linux-next qspinlock can avoid waiter starved issue. It has about the same speed in single-thread and it can be much faster in high contention situations especially when the spinlock is embedded within the data structure to be protected. v7 -> v8: add one patch to drop a function call
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All, this is the fairlock patchset. You can apply them and build successfully. patches are based on linux-next qspinlock can avoid waiter starved issue. It has about the same speed in single-thread and it can be much faster in high contention situations especially when the spinlock is embedded within the data structure to be protected. v7 -> v8: add one patch to drop a function call
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...t;>> @@ -52,6 +52,20 @@ >>> #define SYNC_IO >>> #endif >>> >>> +#if defined(CONFIG_PPC_SPLPAR) >>> +/* We only yield to the hypervisor if we are in shared processor >>> mode */ >>> +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- >>>> >>>> lppaca_ptr)) >>> +extern void __spin_yield(arch_spinlock_t *lock); >>> +extern void __rw_yield(arch_rwlock_t *lock); >>> +#else /* SPLPAR */ >>> +#define __spin_yield(x) barrier() >>> +#define __rw_yield(x) barrier() &g...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...t;>> @@ -52,6 +52,20 @@ >>> #define SYNC_IO >>> #endif >>> >>> +#if defined(CONFIG_PPC_SPLPAR) >>> +/* We only yield to the hypervisor if we are in shared processor >>> mode */ >>> +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- >>>> >>>> lppaca_ptr)) >>> +extern void __spin_yield(arch_spinlock_t *lock); >>> +extern void __rw_yield(arch_rwlock_t *lock); >>> +#else /* SPLPAR */ >>> +#define __spin_yield(x) barrier() >>> +#define __rw_yield(x) barrier() &g...