Displaying 19 results from an estimated 19 matches for "f7deebd".
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...#else
>  typedef struct {
>  	volatile unsigned int slock;
>  } arch_spinlock_t;
>
>  #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
> +#endif
>
>  typedef struct {
>  	volatile signed int lock;
> diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
> index f7deebd..a9ebd71 100644
> --- a/arch/powerpc/lib/locks.c
> +++ b/arch/powerpc/lib/locks.c
> @@ -23,6 +23,7 @@
>  #include <asm/hvcall.h>
>  #include <asm/smp.h>
>
> +#ifndef CONFIG_QUEUED_SPINLOCKS
>  void __spin_yield(arch_spinlock_t *lock)
>  {
>  	unsigned int...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...#else
>  typedef struct {
>  	volatile unsigned int slock;
>  } arch_spinlock_t;
>
>  #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
> +#endif
>
>  typedef struct {
>  	volatile signed int lock;
> diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
> index f7deebd..a9ebd71 100644
> --- a/arch/powerpc/lib/locks.c
> +++ b/arch/powerpc/lib/locks.c
> @@ -23,6 +23,7 @@
>  #include <asm/hvcall.h>
>  #include <asm/smp.h>
>
> +#ifndef CONFIG_QUEUED_SPINLOCKS
>  void __spin_yield(arch_spinlock_t *lock)
>  {
>  	unsigned int...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...LOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#endif
typedef struct {
volatile signed int lock;
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..a9ebd71 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,7 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+#ifndef CONFIG_QUEUED_SPINLOCKS
void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -42,6 +43...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...LOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#endif
typedef struct {
volatile signed int lock;
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..a9ebd71 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,7 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+#ifndef CONFIG_QUEUED_SPINLOCKS
void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -42,6 +43...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...unsigned int slock;
> >  } arch_spinlock_t;
> >
> >  #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
> > +#endif
> >
> >  typedef struct {
> >  	volatile signed int lock;
> > diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
> > index f7deebd..a9ebd71 100644
> > --- a/arch/powerpc/lib/locks.c
> > +++ b/arch/powerpc/lib/locks.c
> > @@ -23,6 +23,7 @@
> >  #include <asm/hvcall.h>
> >  #include <asm/smp.h>
> >
> > +#ifndef CONFIG_QUEUED_SPINLOCKS
> >  void __spin_yield(arch_spinl...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...)
+{
+ if (SHARED_PROCESSOR) {
+ __pv_init_lock_hash();
+ pv_lock_op.lock = __pv_queued_spin_lock_slowpath;
+ pv_lock_op.unlock = __pv_queued_spin_unlock;
+ pv_lock_op.wait = __pv_wait;
+ pv_lock_op.kick = __pv_kick;
+ }
+}
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..6e9d3bb 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,35 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+void __spin_yield_cpu(int cpu)
+{
+ unsigned int holder_cpu = cpu, yield_count;
+
+ if (cpu == -1) {
+ plpar_hcall_norets(H_CEDE);
+ retur...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...)
+{
+ if (SHARED_PROCESSOR) {
+ __pv_init_lock_hash();
+ pv_lock_op.lock = __pv_queued_spin_lock_slowpath;
+ pv_lock_op.unlock = __pv_queued_spin_unlock;
+ pv_lock_op.wait = __pv_wait;
+ pv_lock_op.kick = __pv_kick;
+ }
+}
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..6e9d3bb 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,35 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+void __spin_yield_cpu(int cpu)
+{
+ unsigned int holder_cpu = cpu, yield_count;
+
+ if (cpu == -1) {
+ plpar_hcall_norets(H_CEDE);
+ retur...
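The excerpt above installs paravirt lock hooks only when running on a shared-processor LPAR: the hash is initialized and the slowpath, unlock, wait, and kick callbacks are pointed at the pv variants. A minimal user-space sketch of that boot-time dispatch idea (all names here are illustrative stand-ins, not the kernel symbols) could be:

/*
 * Hedged sketch of a callback table that is switched to paravirt-aware
 * implementations only when a "shared processor" (virtualized) case is
 * detected at init time.  Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct lock_ops {
    void (*wait)(int *ptr, int val);   /* block until *ptr may have changed */
    void (*kick)(int cpu);             /* wake a waiter on the given cpu */
};

static void native_wait(int *ptr, int val) { (void)ptr; (void)val; /* just spin */ }
static void native_kick(int cpu) { (void)cpu; /* nothing to do natively */ }

static void pv_wait(int *ptr, int val)
{
    /* only sleep if the lock word still holds the expected value */
    if (*ptr == val)
        printf("pv_wait: would cede this vcpu to the hypervisor\n");
}

static void pv_kick(int cpu)
{
    printf("pv_kick: would prod vcpu %d via a hypervisor call\n", cpu);
}

static struct lock_ops lock_ops = { native_wait, native_kick };

static void lock_ops_init(bool shared_processor)
{
    /* mirrors the SHARED_PROCESSOR check in the hunk above */
    if (shared_processor) {
        lock_ops.wait = pv_wait;
        lock_ops.kick = pv_kick;
    }
}

int main(void)
{
    int lockword = 1;
    lock_ops_init(true);
    lock_ops.wait(&lockword, 1);
    lock_ops.kick(3);
    return 0;
}

A real implementation keys the switch off SHARED_PROCESSOR at boot, as the hunk above does; here the flag is simply passed in by hand.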
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...>>> } arch_spinlock_t;
>>>
>>> #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
>>> +#endif
>>>
>>> typedef struct {
>>> volatile signed int lock;
>>> diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
>>> index f7deebd..a9ebd71 100644
>>> --- a/arch/powerpc/lib/locks.c
>>> +++ b/arch/powerpc/lib/locks.c
>>> @@ -23,6 +23,7 @@
>>> #include <asm/hvcall.h>
>>> #include <asm/smp.h>
>>>
>>> +#ifndef CONFIG_QUEUED_SPINLOCKS
>>> voi...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...>>> } arch_spinlock_t;
>>>
>>> #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
>>> +#endif
>>>
>>> typedef struct {
>>> volatile signed int lock;
>>> diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
>>> index f7deebd..a9ebd71 100644
>>> --- a/arch/powerpc/lib/locks.c
>>> +++ b/arch/powerpc/lib/locks.c
>>> @@ -23,6 +23,7 @@
>>> #include <asm/hvcall.h>
>>> #include <asm/smp.h>
>>>
>>> +#ifndef CONFIG_QUEUED_SPINLOCKS
>>> voi...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
Changes from v4:
Bug fix, thanks to Boqun for reporting this issue:
struct __qspinlock has a different layout on big-endian machines, so
native_queued_spin_unlock() may write the value to the wrong address. Now fixed.
Changes from v3:
A big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock;
no other patches changed.
and the patch
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
Changes from v4:
Bug fix, thanks to Boqun for reporting this issue:
struct __qspinlock has a different layout on big-endian machines, so
native_queued_spin_unlock() may write the value to the wrong address. Now fixed.
Changes from v3:
A big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock;
no other patches changed.
and the patch
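The bug described in the changelog above is an endianness issue: the byte that holds the locked flag sits at a different offset inside the 32-bit lock word on big-endian and little-endian machines, so an unlock that always stores to byte 0 is only correct on little-endian. A small, hedged illustration of that layout difference (not the kernel's struct __qspinlock definition) is:

/*
 * Illustrative sketch of the endianness problem described above: if the lock
 * word is viewed as four bytes, the "locked" flag (encoded in the least
 * significant byte) is bytes[0] on little-endian but bytes[3] on big-endian.
 */
#include <stdint.h>
#include <stdio.h>

union lockword {
    uint32_t val;       /* locked flag lives in the low 8 bits of val */
    uint8_t  bytes[4];  /* which index holds it depends on endianness */
};

int main(void)
{
    union lockword l;
    l.val = 1;                          /* "locked" in the least significant byte */

    for (int i = 0; i < 4; i++)
        if (l.bytes[i])
            printf("locked byte is bytes[%d] on this machine\n", i);

    l.bytes[0] = 0;                     /* an unlock that assumes little-endian layout */
    printf("after a byte-0 store, val = %u\n", (unsigned)l.val);
    return 0;
}

On a little-endian machine the final value prints 0; on a big-endian machine it stays 1, i.e. the lock is never released, which is the failure mode the changelog describes.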
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...)
+{
+ if (SHARED_PROCESSOR) {
+ __pv_init_lock_hash();
+ pv_lock_op.lock = __pv_queued_spin_lock_slowpath;
+ pv_lock_op.unlock = __pv_queued_spin_unlock;
+ pv_lock_op.wait = __pv_wait;
+ pv_lock_op.kick = __pv_kick;
+ }
+}
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..6e9d3bb 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,35 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+void __spin_yield_cpu(int cpu)
+{
+ unsigned int holder_cpu = cpu, yield_count;
+
+ if (cpu == -1) {
+ plpar_hcall_norets(H_CEDE);
+ retur...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...)
+{
+ if (SHARED_PROCESSOR) {
+ __pv_init_lock_hash();
+ pv_lock_op.lock = __pv_queued_spin_lock_slowpath;
+ pv_lock_op.unlock = __pv_queued_spin_unlock;
+ pv_lock_op.wait = __pv_wait;
+ pv_lock_op.kick = __pv_kick;
+ }
+}
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebd..6e9d3bb 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,35 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+void __spin_yield_cpu(int cpu)
+{
+ unsigned int holder_cpu = cpu, yield_count;
+
+ if (cpu == -1) {
+ plpar_hcall_norets(H_CEDE);
+ retur...
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
Changes from v1:
separated into 6 patches from one patch
some minor code changes.
Benchmark test results are below.
I ran 3 tests on a pseries IBM,8408-E8E with 32 CPUs and 64GB memory:
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
Changes from v1:
separated into 6 patches from one patch
some minor code changes.
Benchmark test results are below.
I ran 3 tests on a pseries IBM,8408-E8E with 32 CPUs and 64GB memory:
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
Changes from v2:
__spin_yield_cpu() will yield time slices to the LPAR if the target cpu is running.
removed an unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check that *ptr == val.
some commit message changes
Changes from v1:
separated into 6 patches from one patch
some minor code changes.
I ran several tests on a pseries IBM,8408-E8E with 32 CPUs and 64GB memory.
Benchmark test results are below.
2
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
Changes from v2:
__spin_yield_cpu() will yield time slices to the LPAR if the target cpu is running.
removed an unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check that *ptr == val.
some commit message changes
Changes from v1:
separated into 6 patches from one patch
some minor code changes.
I ran several tests on a pseries IBM,8408-E8E with 32 CPUs and 64GB memory.
Benchmark test results are below.
2
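The note above that __pv_wait() will check *ptr == val is the usual guard against a lost wakeup: if the lock word has already changed, the kicker has already run, and going to sleep now could mean waiting for a kick that has already been delivered. A hedged sketch of that check, with a hypothetical cede primitive standing in for the hypervisor call, is:

/*
 * Hedged sketch of the "re-check the value before sleeping" rule mentioned
 * above.  hypothetical_cede() is an illustrative stand-in for a hypervisor
 * "sleep until kicked" primitive; this is not the kernel's __pv_wait().
 */
#include <stdatomic.h>
#include <stdio.h>

static void hypothetical_cede(void)
{
    printf("ceding the vcpu (sleep until kicked)\n");
}

static void pv_wait_sketch(atomic_int *ptr, int val)
{
    /* if the value already changed, the wakeup already happened: don't sleep */
    if (atomic_load(ptr) != val)
        return;
    hypothetical_cede();
}

int main(void)
{
    atomic_int lockword = 1;
    pv_wait_sketch(&lockword, 1);   /* value matches: would sleep */
    atomic_store(&lockword, 0);
    pv_wait_sketch(&lockword, 1);   /* value differs: returns immediately */
    printf("done\n");
    return 0;
}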
2016 Jun 02
9
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
Changes from v4:
Bug fix, thanks to Boqun for reporting this issue:
struct __qspinlock has a different layout on big-endian machines, so
native_queued_spin_unlock() may write the value to the wrong address. Now fixed.
Sorry for not even testing on a big-endian machine before!
Changes from v3:
A big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock;
no other patches changed.
and the patch
2016 Jun 02
9
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
Changes from v4:
Bug fix, thanks to Boqun for reporting this issue:
struct __qspinlock has a different layout on big-endian machines, so
native_queued_spin_unlock() may write the value to the wrong address. Now fixed.
Sorry for not even testing on a big-endian machine before!
Changes from v3:
A big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock;
no other patches changed.
and the patch