search for: config_ppc_splpar

Displaying 20 results from an estimated 37 matches for "config_ppc_splpar".

2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...--git a/arch/powerpc/include/asm/spinlock.h > b/arch/powerpc/include/asm/spinlock.h > index 523673d..4359ee6 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -52,6 +52,20 @@ > #define SYNC_IO > #endif > > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR *...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...--git a/arch/powerpc/include/asm/spinlock.h > b/arch/powerpc/include/asm/spinlock.h > index 523673d..4359ee6 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -52,6 +52,20 @@ > #define SYNC_IO > #endif > > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR *...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...+#endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..4359ee6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,20 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier()...
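The hunk above only declares the yield hooks; how they get used falls outside the matched excerpt. As a rough sketch, paraphrased from the powerpc simple-spinlock slow path (treat names such as __arch_spin_trylock, HMT_low/HMT_medium and the exact loop shape as approximate rather than the patch's verbatim code):

/*
 * Sketch only, not buildable outside a kernel tree. On a shared-processor
 * LPAR (CONFIG_PPC_SPLPAR) spinning wastes cycles the hypervisor could give
 * to the lock holder, so the slow path yields; on other configurations
 * __spin_yield() is just a compiler barrier and the loop simply spins.
 */
static inline void arch_spin_lock_sketch(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;				/* got the lock */
		do {
			HMT_low();			/* drop SMT thread priority while waiting */
			if (SHARED_PROCESSOR)		/* only yield on shared-processor LPARs */
				__spin_yield(lock);	/* confer our cycles to the holder */
		} while (unlikely(lock->slock != 0));
		HMT_medium();				/* restore priority before retrying */
	}
}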
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...+#endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..4359ee6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,20 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) barrier()...
2020 Jul 06
0
[PATCH v3 2/6] powerpc/pseries: move some PAPR paravirt functions to their own file
...-0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __ASM_PARAVIRT_H +#define __ASM_PARAVIRT_H +#ifdef __KERNEL__ + +#include <linux/jump_label.h> +#include <asm/smp.h> +#ifdef CONFIG_PPC64 +#include <asm/paca.h> +#include <asm/hvcall.h> +#endif + +#ifdef CONFIG_PPC_SPLPAR +DECLARE_STATIC_KEY_FALSE(shared_processor); + +static inline bool is_shared_processor(void) +{ + return static_branch_unlikely(&shared_processor); +} + +/* If bit 0 is set, the cpu has been preempted */ +static inline u32 yield_count_of(int cpu) +{ + __be32 yield_count = READ_ONCE(lppaca_of(cp...
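The excerpt is cut off inside yield_count_of(), but the comment above it already gives the key fact: the lppaca yield count has bit 0 set while the hypervisor has the vCPU preempted. A minimal sketch of how these helpers are typically consumed (the _sketch function name is illustrative; the kernel exposes this kind of test as vcpu_is_preempted()):

/* Sketch, assuming the is_shared_processor()/yield_count_of() helpers shown
 * in the excerpt: bit 0 of the yield count is a cheap "is this vCPU off the
 * physical CPU right now?" test.
 */
static inline bool vcpu_is_preempted_sketch(int cpu)
{
	if (!is_shared_processor())
		return false;			/* dedicated processors are never preempted */
	return yield_count_of(cpu) & 1;		/* odd count => currently preempted */
}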
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...e/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h > index 8c1b913..954099e 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -60,6 +60,23 @@ static inline bool vcpu_is_preempted(int cpu) > } > #endif > > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR */ > +#de...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...e/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h > index 8c1b913..954099e 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -60,6 +60,23 @@ static inline bool vcpu_is_preempted(int cpu) > } > #endif > > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void __rw_yield(arch_rwlock_t *lock); > +#else /* SPLPAR */ > +#de...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...> > b/arch/powerpc/include/asm/spinlock.h > > index 523673d..4359ee6 100644 > > --- a/arch/powerpc/include/asm/spinlock.h > > +++ b/arch/powerpc/include/asm/spinlock.h > > @@ -52,6 +52,20 @@ > > #define SYNC_IO > > #endif > > > > +#if defined(CONFIG_PPC_SPLPAR) > > +/* We only yield to the hypervisor if we are in shared processor mode */ > > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) > > +extern void __spin_yield(arch_spinlock_t *lock); > > +extern void __rw_yiel...
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...CK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 8c1b913..954099e 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -60,6 +60,23 @@ static inline bool vcpu_is_preempted(int cpu) } #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +#define __spin_yield(x) ba...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...truct pv_lock_ops pv_lock_op; + +#endif diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..3b65372 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __spin_yield_cpu(int cpu); +extern void __spin_wake_cpu(int cpu); +extern void __rw_yie...
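Beyond the plain __spin_yield()/__rw_yield() pair, this patch adds per-cpu __spin_yield_cpu() and __spin_wake_cpu() helpers, which are the natural backends for the paravirt qspinlock wait/kick hooks. A rough sketch of that wiring, assuming (as on pSeries) the helpers ultimately map to the H_CONFER and H_PROD hypercalls; the _sketch names are placeholders, not the patch's code:

static void pv_wait_sketch(int holder_cpu)
{
	/* Stop burning our time slice and confer it to the vCPU holding the
	 * lock (H_CONFER underneath on shared-processor LPARs).
	 */
	__spin_yield_cpu(holder_cpu);
}

static void pv_kick_sketch(int waiter_cpu)
{
	/* Wake a waiter that conferred its slice away (H_PROD underneath). */
	__spin_wake_cpu(waiter_cpu);
}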
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...truct pv_lock_ops pv_lock_op; + +#endif diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..3b65372 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __spin_yield_cpu(int cpu); +extern void __spin_wake_cpu(int cpu); +extern void __rw_yie...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...include/asm/spinlock.h >>> index 523673d..4359ee6 100644 >>> --- a/arch/powerpc/include/asm/spinlock.h >>> +++ b/arch/powerpc/include/asm/spinlock.h >>> @@ -52,6 +52,20 @@ >>> #define SYNC_IO >>> #endif >>> >>> +#if defined(CONFIG_PPC_SPLPAR) >>> +/* We only yield to the hypervisor if we are in shared processor mode */ >>> +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) >>> +extern void __spin_yield(arch_spinlock_t *lock); >>> +ext...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...include/asm/spinlock.h >>> index 523673d..4359ee6 100644 >>> --- a/arch/powerpc/include/asm/spinlock.h >>> +++ b/arch/powerpc/include/asm/spinlock.h >>> @@ -52,6 +52,20 @@ >>> #define SYNC_IO >>> #endif >>> >>> +#if defined(CONFIG_PPC_SPLPAR) >>> +/* We only yield to the hypervisor if we are in shared processor mode */ >>> +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) >>> +extern void __spin_yield(arch_spinlock_t *lock); >>> +ext...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com> Changes from v4: bug fix, thanks to Boqun for reporting this issue. struct __qspinlock has a different layout on big-endian machines, so native_queued_spin_unlock() could write the value to the wrong address; this is now fixed. Changes from v3: a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock; no other patch changed. and the patch
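The v4-to-v5 fix described above is an endianness issue: struct __qspinlock overlays byte-sized fields on the 32-bit lock word, and the byte holding the locked flag sits at a different offset on big-endian machines, so a byte store aimed at the little-endian offset clears the wrong field. A standalone illustration of that hazard (ordinary user-space C, not the kernel structure itself):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t lock = 0x00000001;	/* "locked" is the least significant byte */
	unsigned char bytes[4];

	memcpy(bytes, &lock, sizeof(lock));
	/* Little-endian: the set byte is bytes[0]; big-endian: bytes[3].
	 * A byte-granular unlock has to pick its offset accordingly, which is
	 * what the corrected struct __qspinlock overlay accounts for.
	 */
	printf("locked byte lives at offset %d\n", bytes[0] == 1 ? 0 : 3);
	return 0;
}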
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com> Changes from v4: bug fix, thanks to Boqun for reporting this issue. struct __qspinlock has a different layout on big-endian machines, so native_queued_spin_unlock() could write the value to the wrong address; this is now fixed. Changes from v3: a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock; no other patch changed. and the patch
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and makes it the default for the Book3S-64 subarch. This effort starts with the generic code so it's very simple but still very performant. There are optimisations that can be made to slowpaths, but I think it's better to attack those incrementally if/when we find things, and try to add the improvements to generic code as
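"Starts with the generic code" means the arch mostly opts in rather than reimplementing the lock: it selects ARCH_USE_QUEUED_SPINLOCKS (and ARCH_USE_QUEUED_RWLOCKS) in Kconfig and supplies a thin asm/qspinlock.h shim over the asm-generic implementation. A sketch of such a shim; the _Q_PENDING_LOOPS value and the file layout are illustrative, not necessarily what this series merged:

/* arch/powerpc/include/asm/qspinlock.h -- illustrative shim only */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

/* Optional tuning knob honoured by the generic slow path: bounds the spin
 * waiting for a pending->locked hand-over (value borrowed from other arches).
 */
#define _Q_PENDING_LOOPS	(1 << 9)

#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */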
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...truct pv_lock_ops pv_lock_op; + +#endif diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..3b65372 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __spin_yield_cpu(int cpu); +extern void __spin_wake_cpu(int cpu); +extern void __rw_yie...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...truct pv_lock_ops pv_lock_op; + +#endif diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..3b65372 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __spin_yield_cpu(int cpu); +extern void __spin_wake_cpu(int cpu); +extern void __rw_yie...
2020 Jul 06
0
[PATCH v3 3/6] powerpc: move spinlock implementation to simple_spinlock
...visor to give the + * rest of our timeslice to the lock holder. + * + * So that we can tell which virtual processor is holding a lock, + * we put 0x80000000 | smp_processor_id() in the lock when it is + * held. Conveniently, we have a word in the paca that holds this + * value. + */ + +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +void splpar_spin_yield(arch_spinlock_t *lock); +void splpar_rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; +static inline void splpar_rw_yield(arch_rwlock...
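The comment in this excerpt is the key to the directed yield: because the holder writes 0x80000000 | smp_processor_id() into the lock word, a spinning waiter can recover which virtual processor to confer its timeslice to. A standalone illustration of that encoding (plain user-space C; the macro names are made up for the sketch, and the kernel takes the token from a pre-computed paca field rather than building it each time):

#include <stdio.h>
#include <stdint.h>

#define LOCK_TOKEN(cpu)		(0x80000000u | (uint32_t)(cpu))	/* written by the holder */
#define LOCK_HOLDER(token)	((int)((token) & ~0x80000000u))	/* read by a spinning waiter */

int main(void)
{
	uint32_t slock = LOCK_TOKEN(13);	/* CPU 13 takes the lock */

	if (slock != 0)				/* non-zero means "held" */
		printf("held by cpu %d -- yield to that vCPU\n", LOCK_HOLDER(slock));
	return 0;
}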
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and Waiman (thank you), and trims off a couple of RFC and unrelated patches. Thanks, Nick Nicholas Piggin (6): powerpc/powernv: must include hvcall.h to get PAPR defines powerpc/pseries: move some PAPR paravirt functions to their own file powerpc: move spinlock implementation to simple_spinlock powerpc/64s: implement queued