search for: get_hard_smp_processor_id

Displaying 20 results from an estimated 40 matches for "get_hard_smp_processor_id".

2020 Jul 05
1
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...rpc/include/asm/paravirt.h > index 7a8546660a63..f2d51f929cf5 100644 > --- a/arch/powerpc/include/asm/paravirt.h > +++ b/arch/powerpc/include/asm/paravirt.h > @@ -29,6 +29,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) > { > plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); > } > + > +static inline void prod_cpu(int cpu) > +{ > + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); > +} > + > +static inline void yield_to_any(void) > +{ > + plpar_hcall_norets(H_CONFER, -1, 0); > +} > #else > static...
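For readability, here are the helpers this hunk adds, laid out as in the diff (a reconstruction of the truncated snippet above, not a verbatim copy of the file):

/* From the hunk above: SPLPAR paravirt helpers in
 * arch/powerpc/include/asm/paravirt.h. H_CONFER donates the caller's
 * time slices to a target vCPU; H_PROD makes a ceded vCPU runnable.
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu),
			   yield_count);
}

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	/* -1 targets no specific vCPU: confer to any preempted vCPU. */
	plpar_hcall_norets(H_CONFER, -1, 0);
}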
2020 Jul 06
0
[PATCH v3 2/6] powerpc/pseries: move some PAPR paravirt functions to their own file
...+ +/* If bit 0 is set, the cpu has been preempted */ +static inline u32 yield_count_of(int cpu) +{ + __be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count); + return be32_to_cpu(yield_count); +} + +static inline void yield_to_preempted(int cpu, u32 yield_count) +{ + plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); +} +#else +static inline bool is_shared_processor(void) +{ + return false; +} + +static inline u32 yield_count_of(int cpu) +{ + return 0; +} + +extern void ___bad_yield_to_preempted(void); +static inline void yield_to_preempted(int cpu, u32 yield_count) +{ + ___bad_yield_to_pree...
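The protocol behind these helpers: PAPR keeps a per-vCPU yield_count in the lppaca that is odd while the vCPU is preempted and even while it runs; passing the sampled count to H_CONFER lets the hypervisor ignore a confer that has gone stale. A hypothetical caller might spin as below (demo_lock, demo_spin, and the owner field are our illustration, not code from the patch):

/* Hypothetical wait loop built on yield_count_of()/yield_to_preempted();
 * all names here are illustrative, not from the patch.
 */
struct demo_lock {
	int owner;			/* cpu id of holder, -1 if free */
};

static void demo_spin(struct demo_lock *lock)
{
	int owner;

	while ((owner = READ_ONCE(lock->owner)) != -1) {
		u32 count = yield_count_of(owner);

		if (count & 1)		/* bit 0 set: holder preempted */
			yield_to_preempted(owner, count);
		else
			cpu_relax();	/* holder running; keep spinning */
	}
}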
2016 Dec 06
1
[PATCH v8 3/6] powerpc: lib/locks.c: Add cpu yield/wake helper function
...+ BUG_ON(holder_cpu >= nr_cpu_ids); > + yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); > + > + /* if cpu is running, confer slices to lpar conditionally*/ > + if ((yield_count & 1) == 0) > + goto yield_to_lpar; > + > + plpar_hcall_norets(H_CONFER, > + get_hard_smp_processor_id(holder_cpu), yield_count); > + return; > + > +yield_to_lpar: > + if (confer) > + plpar_hcall_norets(H_CONFER, -1, 0); > +} > +EXPORT_SYMBOL_GPL(__spin_yield_cpu); > + > +void __spin_wake_cpu(int cpu) > +{ > + unsigned int holder_cpu = cpu; And it's even wrong...
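Stripped of the quote markers, the control flow under review is the following (reconstructed from the hunk; the prologue is cut off by the snippet, so the signature is inferred from the use of `confer`):

void __spin_yield_cpu(int cpu, int confer)	/* signature inferred */
{
	unsigned int holder_cpu = cpu, yield_count;

	if (cpu == -1)
		goto yield_to_lpar;

	BUG_ON(holder_cpu >= nr_cpu_ids);
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);

	/* If the holder vCPU is running (even count), don't target it;
	 * optionally confer to the LPAR as a whole instead. */
	if ((yield_count & 1) == 0)
		goto yield_to_lpar;

	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
	return;

yield_to_lpar:
	if (confer)
		plpar_hcall_norets(H_CONFER, -1, 0);
}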
2016 May 17
0
[PATCH v2 3/6] powerpc: lib/locks.c: cpu yield/wake helper function
...; + + if (cpu == -1) { + plpar_hcall_norets(H_CEDE); + return; + } + BUG_ON(holder_cpu >= nr_cpu_ids); + yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + plpar_hcall_norets(H_CONFER, + get_hard_smp_processor_id(holder_cpu), yield_count); +} +EXPORT_SYMBOL_GPL(__spin_yield_cpu); + +void __spin_wake_cpu(int cpu) +{ + unsigned int holder_cpu = cpu, yield_count; + + BUG_ON(holder_cpu >= nr_cpu_ids); + yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + if ((yield_count & 1) == 0) + return;...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...; + + if (cpu == -1) { + plpar_hcall_norets(H_CEDE); + return; + } + BUG_ON(holder_cpu >= nr_cpu_ids); + yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + plpar_hcall_norets(H_CONFER, + get_hard_smp_processor_id(holder_cpu), yield_count); +} +EXPORT_SYMBOL_GPL(__spin_yield_cpu); + +void __spin_wake_cpu(int cpu) +{ + unsigned int holder_cpu = cpu; + + BUG_ON(holder_cpu >= nr_cpu_ids); + plpar_hcall_norets(H_PROD, + get_hard_smp_processor_id(holder_cpu)); +} +EXPORT_SYMBOL_GPL(__spin_wake_cpu); + +#ifnde...
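The two exports pair up: __spin_yield_cpu() donates slices (H_CEDE when no target is known, H_CONFER when it is), and __spin_wake_cpu() undoes that with H_PROD. A hypothetical caller (names ours, not from the patch) shows the intended shape:

/* Illustrative pairing only; not code from the patch. */
static void demo_park(int holder_cpu)
{
	/* cpu == -1 makes __spin_yield_cpu() cede to the hypervisor. */
	__spin_yield_cpu(holder_cpu);
}

static void demo_unpark(int waiter_cpu)
{
	/* H_PROD via __spin_wake_cpu() makes the waiter runnable again. */
	__spin_wake_cpu(waiter_cpu);
}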
2016 Dec 05
0
[PATCH v8 3/6] powerpc: lib/locks.c: Add cpu yield/wake helper function
...if (cpu == -1) + goto yield_to_lpar; + + BUG_ON(holder_cpu >= nr_cpu_ids); + yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + + /* if cpu is running, confer slices to lpar conditionally*/ + if ((yield_count & 1) == 0) + goto yield_to_lpar; + + plpar_hcall_norets(H_CONFER, + get_hard_smp_processor_id(holder_cpu), yield_count); + return; + +yield_to_lpar: + if (confer) + plpar_hcall_norets(H_CONFER, -1, 0); +} +EXPORT_SYMBOL_GPL(__spin_yield_cpu); + +void __spin_wake_cpu(int cpu) +{ + unsigned int holder_cpu = cpu; + + BUG_ON(holder_cpu >= nr_cpu_ids); + /* + * NOTE: we should always do thi...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...@ > #include <asm/hvcall.h> > #include <asm/smp.h> > > +#ifndef CONFIG_QUEUED_SPINLOCKS > void __spin_yield(arch_spinlock_t *lock) > { > unsigned int lock_value, holder_cpu, yield_count; > @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock) > get_hard_smp_processor_id(holder_cpu), yield_count); > } > EXPORT_SYMBOL_GPL(__spin_yield); > +#endif > > /* > * Waiting for a read lock or a write lock on a rwlock... > @@ -68,3 +70,60 @@ void __rw_yield(arch_rwlock_t *rw) > get_hard_smp_processor_id(holder_cpu), yield_count); > } >...
2020 Jul 02
0
[PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...clude/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index 7a8546660a63..5fae9dfa6fe9 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -29,6 +29,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); } + +static inline void prod_cpu(int cpu) +{ + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); +} + +static inline void yield_to_any(void) +{ + plpar_hcall_norets(H_CONFER, -1, 0); +} #else static inline bool is_shared_processor(void) { @@ -45,6 +55,19 @@ static...
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All, this is the fairlock patchset. You can apply them and build successfully. The patches are based on linux-next. qspinlock can avoid the waiter-starvation issue. It has about the same speed single-threaded and can be much faster under high contention, especially when the spinlock is embedded within the data structure to be protected. v7 -> v8: add one patch to drop a function call
2020 Jul 03
0
[PATCH v2 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...clude/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index 7a8546660a63..f2d51f929cf5 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -29,6 +29,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); } + +static inline void prod_cpu(int cpu) +{ + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); +} + +static inline void yield_to_any(void) +{ + plpar_hcall_norets(H_CONFER, -1, 0); +} #else static inline bool is_shared_processor(void) { @@ -45,6 +55,19 @@ static...
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and Waiman (thank you), and trims off a couple of RFC and unrelated patches. Thanks, Nick Nicholas Piggin (6): powerpc/powernv: must include hvcall.h to get PAPR defines powerpc/pseries: move some PAPR paravirt functions to their own file powerpc: move spinlock implementation to simple_spinlock powerpc/64s: implement queued
2020 Jul 06
0
[PATCH v3 5/6] powerpc/pseries: implement paravirt qspinlocks for SPLPAR
...clude/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index 7a8546660a63..f2d51f929cf5 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -29,6 +29,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); } + +static inline void prod_cpu(int cpu) +{ + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); +} + +static inline void yield_to_any(void) +{ + plpar_hcall_norets(H_CONFER, -1, 0); +} #else static inline bool is_shared_processor(void) { @@ -45,6 +55,19 @@ static...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...; + + if (cpu == -1) { + plpar_hcall_norets(H_CEDE); + return; + } + BUG_ON(holder_cpu >= nr_cpu_ids); + yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + plpar_hcall_norets(H_CONFER, + get_hard_smp_processor_id(holder_cpu), yield_count); +} +EXPORT_SYMBOL_GPL(__spin_yield_cpu); + +void __spin_wake_cpu(int cpu) +{ + unsigned int holder_cpu = cpu; + + BUG_ON(holder_cpu >= nr_cpu_ids); + plpar_hcall_norets(H_PROD, + get_hard_smp_processor_id(holder_cpu)); +} +EXPORT_SYMBOL_GPL(__spin_wake_cpu); + +#ifnde...
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...+ b/arch/powerpc/lib/locks.c @@ -23,6 +23,7 @@ #include <asm/hvcall.h> #include <asm/smp.h> +#ifndef CONFIG_QUEUED_SPINLOCKS void __spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock) get_hard_smp_processor_id(holder_cpu), yield_count); } EXPORT_SYMBOL_GPL(__spin_yield); +#endif /* * Waiting for a read lock or a write lock on a rwlock... @@ -68,3 +70,60 @@ void __rw_yield(arch_rwlock_t *rw) get_hard_smp_processor_id(holder_cpu), yield_count); } #endif + +#ifdef CONFIG_QUEUED_SPINLOCKS +/* + *...
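For context, the function being fenced off by the new #ifndef is the classic byte-lock yield. A sketch of the era's arch/powerpc/lib/locks.c version follows (reconstructed from memory of that code; details may differ slightly):

#ifndef CONFIG_QUEUED_SPINLOCKS
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;	/* lock word encodes holder */
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;			/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;			/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);
#endif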
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...@ > #include <asm/hvcall.h> > #include <asm/smp.h> > > +#ifndef CONFIG_QUEUED_SPINLOCKS > void __spin_yield(arch_spinlock_t *lock) > { > unsigned int lock_value, holder_cpu, yield_count; > @@ -42,6 +43,7 @@ void __spin_yield(arch_spinlock_t *lock) > get_hard_smp_processor_id(holder_cpu), yield_count); > } > EXPORT_SYMBOL_GPL(__spin_yield); > +#endif > > /* > * Waiting for a read lock or a write lock on a rwlock... > @@ -69,6 +71,7 @@ void __rw_yield(arch_rwlock_t *rw) > } > #endif > > +#ifndef CONFIG_QUEUED_SPINLOCKS > vo...