2020 Jul 06
0
[PATCH v3 2/6] powerpc/pseries: move some PAPR paravirt functions to their own file
...t *lock)
{
if (is_shared_processor())
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 6440d5943c00..04165b7a163f 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -27,14 +27,14 @@ void splpar_spin_yield(arch_spinlock_t *lock)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+ yield_count = yield_count_of(holder_cpu);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (lock->slock != lock_value)
return...
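To read the hunk in context, here is a sketch of the whole function after this change, reconstructed from the hunk above plus the unchanged lines of arch/powerpc/lib/locks.c; the trailing yield_to_preempted() call is an assumption based on the helpers this series moves into asm/paravirt.h, not a quote of the patch:

void splpar_spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;			/* lock is free, nobody to yield to */
	holder_cpu = lock_value & 0xffff;	/* low 16 bits encode the holder */
	BUG_ON(holder_cpu >= NR_CPUS);

	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;			/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;			/* something has changed */
	yield_to_preempted(holder_cpu, yield_count);	/* H_CONFER under the hood */
}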
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and
makes it the default for the Book3S-64 subarch.
This effort starts with the generic code so it's very simple but
still very performant. There are optimisations that can be made to
slowpaths, but I think it's better to attack those incrementally
if/when we find things, and try to add the improvements to generic
code as
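As a rough illustration of what "an option" means here, a Kconfig sketch; the symbol name, dependency, and default below are assumptions for illustration, not the series' exact text:

config PPC_QUEUED_SPINLOCKS
	bool "Queued spinlocks"
	depends on SMP
	default y if PPC_BOOK3S_64
	help
	  Use the kernel's generic qspinlock/qrwlock implementations
	  instead of the simple powerpc spinlocks and rwlocks.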
2020 Jul 02
0
[PATCH 2/8] powerpc/pseries: use smp_rmb() in H_CONFER spin yield
...b/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -30,7 +30,7 @@ void splpar_spin_yield(arch_spinlock_t *lock)
yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
- rmb();
+ smp_rmb();
if (lock->slock != lock_value)
return; /* something has changed */
plpar_hcall_norets(H_CONFER,
@@ -56,7 +56,7 @@ void splpar_rw_yield(arch_rwlock_t *rw)
yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
- rmb();
+ smp_rmb()...
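The reasoning behind the one-line change: on powerpc, rmb() expands to the heavyweight sync instruction, while smp_rmb() is the cheaper lwsync (and just a compiler barrier on !SMP builds). The ordering here only matters against another CPU updating the yield count and the lock word, so the lighter barrier suffices. An annotated sketch of the pattern (the barrier comments are added here, the rest follows the hunk):

	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	smp_rmb();		/* order the yield_count load before the
				 * re-read of the lock word below */
	if (lock->slock != lock_value)
		return;		/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);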
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
....a9ebd71 100644
> --- a/arch/powerpc/lib/locks.c
> +++ b/arch/powerpc/lib/locks.c
> @@ -23,6 +23,7 @@
> #include <asm/hvcall.h>
> #include <asm/smp.h>
>
> +#ifndef CONFIG_QUEUED_SPINLOCKS
> void __spin_yield(arch_spinlock_t *lock)
> {
> 	unsigned int lock_value, holder_cpu, yield_count;
> @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock)
> 	get_hard_smp_processor_id(holder_cpu), yield_count);
> }
> EXPORT_SYMBOL_GPL(__spin_yield);
> +#endif
>
> /*
>  * Waiting for a read lock or a write lock on a rwlock...
> @@...
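Why the new #ifndef is correct rather than merely convenient: __spin_yield() depends on the lock word encoding the holder's CPU id, which only the simple powerpc spinlock does. A sketch of the two encodings (simplified; the 0x8000 lock token detail is recalled from the paca definition, not from this patch):

/* simple powerpc spinlock, held:
 *	slock = (lock_token << 16) | holder_cpu	(lock_token is 0x8000)
 * so the yield helper can recover the owner: */
static inline unsigned int simple_spin_holder(unsigned int lock_value)
{
	return lock_value & 0xffff;	/* logical cpu id of the holder */
}
/* A generic qspinlock word is (tail | pending | locked) and carries
 * no holder id at all, so under CONFIG_QUEUED_SPINLOCKS there is
 * nothing for __spin_yield() to decode -- hence the guard. */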
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and
Waiman (thank you), and trims off a couple of RFC and unrelated
patches.
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
.../locks.c b/arch/powerpc/lib/locks.c
index f7deebd..a9ebd71 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,7 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+#ifndef CONFIG_QUEUED_SPINLOCKS
void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock)
get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);
+#endif
/*
* Waiting for a read lock or a write lock on a rwlock...
@@ -69,6 +71,7 @@ void __rw_yield(arch_rwlock_t...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
....6574626 100644
> --- a/arch/powerpc/lib/locks.c
> +++ b/arch/powerpc/lib/locks.c
> @@ -23,6 +23,7 @@
> #include <asm/hvcall.h>
> #include <asm/smp.h>
>
> +#ifndef CONFIG_QUEUED_SPINLOCKS
> void __spin_yield(arch_spinlock_t *lock)
> {
> unsigned int lock_value, holder_cpu, yield_count;
> @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock)
> get_hard_smp_processor_id(holder_cpu), yield_count);
> }
> EXPORT_SYMBOL_GPL(__spin_yield);
> +#endif
>
> /*
> * Waiting for a read lock or a write lock on a rwlock...
> @@...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...ocks.c
> > +++ b/arch/powerpc/lib/locks.c
> > @@ -23,6 +23,7 @@
> > #include <asm/hvcall.h>
> > #include <asm/smp.h>
> >
> > +#ifndef CONFIG_QUEUED_SPINLOCKS
> > void __spin_yield(arch_spinlock_t *lock)
> > {
> > 	unsigned int lock_value, holder_cpu, yield_count;
> > @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock)
> > 	get_hard_smp_processor_id(holder_cpu),
> > yield_count);
> > }
> > EXPORT_SYMBOL_GPL(__spin_yield);
> > +#endif
> >
> > /*
> >  * Waiting for a...
2020 Jul 24
8
[PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and more performance
results.
What I've found is that I might have been measuring the worst load point
for the paravirt case; looking at a range of loads, it's clear that
queued spinlocks are better overall even on PV, doubly so when you look
at the generally much improved worst-case latencies.
I have defaulted it to N even though
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
.../locks.c b/arch/powerpc/lib/locks.c
index b7b1237..6574626 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,7 @@
#include <asm/hvcall.h>
#include <asm/smp.h>
+#ifndef CONFIG_QUEUED_SPINLOCKS
void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock)
get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);
+#endif
/*
* Waiting for a read lock or a write lock on a rwlock...
@@ -68,3 +70,60 @@ void __rw_yield(arch_rwlock_t...
2020 Jul 06
13
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
Thanks,
Nick
Nicholas Piggin (6):
powerpc/powernv: must include hvcall.h to get PAPR defines
powerpc/pseries: move some PAPR paravirt functions to their own file
powerpc: move spinlock implementation to simple_spinlock
powerpc/64s: implement queued spinlocks and rwlocks
powerpc/pseries: implement paravirt
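For reference, a minimal sketch of how an arch override can route unlock through __pv_queued_spin_unlock() when running on a shared processor; the placement and the is_shared_processor() test are assumptions based on the rest of the series, not a quote of the v3 patch:

#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);	/* native fast path */
	else
		__pv_queued_spin_unlock(lock);	/* paravirt slowpath, may prod a waiter */
}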
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...ke_cpu(int cpu)
+{
+ unsigned int holder_cpu = cpu;
+
+ BUG_ON(holder_cpu >= nr_cpu_ids);
+ plpar_hcall_norets(H_PROD,
+ get_hard_smp_processor_id(holder_cpu));
+}
+EXPORT_SYMBOL_GPL(__spin_wake_cpu);
+
+#ifndef CONFIG_QUEUED_SPINLOCKS
void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -42,6 +71,7 @@ void __spin_yield(arch_spinlock_t *lock)
get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);
+#endif
/*
* Waiting for a read lock or a write lock on a rwlock...
@@ -69,6 +99,7 @@ void __rw_yield(arch_rwlock_t...
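The snippet truncates the new helper's name and return type; reassembled from the visible lines (the void return type is an assumption), it reads:

void __spin_wake_cpu(int cpu)
{
	unsigned int holder_cpu = cpu;

	BUG_ON(holder_cpu >= nr_cpu_ids);
	/* H_PROD makes a ceded/preempted virtual cpu runnable again:
	 * the counterpart to the H_CONFER yield used elsewhere. */
	plpar_hcall_norets(H_PROD,
		get_hard_smp_processor_id(holder_cpu));
}
EXPORT_SYMBOL_GPL(__spin_wake_cpu);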
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
.../powerpc/lib/locks.c
>>> @@ -23,6 +23,7 @@
>>> #include <asm/hvcall.h>
>>> #include <asm/smp.h>
>>>
>>> +#ifndef CONFIG_QUEUED_SPINLOCKS
>>> void __spin_yield(arch_spinlock_t *lock)
>>> {
>>> unsigned int lock_value, holder_cpu, yield_count;
>>> @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock)
>>> get_hard_smp_processor_id(holder_cpu),
>>> yield_count);
>>> }
>>> EXPORT_SYMBOL_GPL(__spin_yield);
>>> +#endif
>>>
>>> /*
>>> ...
2016 Jun 02
8
[PATCH v5 0/6] powerpc/pseries: use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
Changes from v4:
BUG FIX, thanks to Boqun for reporting this issue:
struct __qspinlock has a different layout on big-endian machines,
so native_queued_spin_unlock() may write the value to the wrong address; now fixed.
Changes from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock;
no other patches changed,
and the patch
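A sketch of the layout issue this changelog describes, based on how struct __qspinlock looked in kernel/locking/qspinlock.c around that time (simplified and recalled, not quoted from the patch):

struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;		/* byte offset 0 */
			u8	pending;
		};
#else
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;		/* byte offset 3 */
		};
#endif
	};
};

Because the locked byte sits at offset 0 on little-endian but offset 3 on big-endian, an unlock that stores 0 at a hardcoded offset clears the wrong byte on big-endian machines, which is the bug v5 fixes.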