search for: sync_io

Displaying 20 results from an estimated 45 matches for "sync_io".

2016 Jul 06
1
[PATCH v2 2/4] powerpc/spinlock: support vcpu preempted check
... 1 file changed, 18 insertions(+) > > diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h > index 523673d..3ac9fcb 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -52,6 +52,24 @@ >  #define SYNC_IO >  #endif >  > +/* > + * This support kernel to check if one cpu is preempted or not. > + * Then we can fix some lock holder preemption issue. > + */ > +#ifdef CONFIG_PPC_PSERIES > +#define vcpu_is_preempted vcpu_is_preempted > +static inline bool vcpu_is_preempted(int c...
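The hunk above is cut off by the search index. For reference, the powerpc vcpu_is_preempted() that eventually landed upstream has roughly the following shape (a sketch based on the mainline sources, not necessarily this exact patch revision):

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
        /*
         * Only a shared-processor LPAR gets meaningful preemption state
         * from the hypervisor; on bare metal a vCPU is never "preempted".
         */
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return false;

        /* An odd yield_count means the vCPU is currently dispatched out. */
        return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif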
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...ndif /* _ASM_POWERPC_QSPINLOCK_H */ > diff --git a/arch/powerpc/include/asm/spinlock.h > b/arch/powerpc/include/asm/spinlock.h > index 523673d..4359ee6 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spinlock.h > @@ -52,6 +52,20 @@ >  #define SYNC_IO >  #endif >  > +#if defined(CONFIG_PPC_SPLPAR) > +/* We only yield to the hypervisor if we are in shared processor > mode */ > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- > >lppaca_ptr)) > +extern void __spin_yield(arch_spinlock_t *lock); > +extern void...
2016 Jun 28
0
[PATCH v2 2/4] powerpc/spinlock: support vcpu preempted check
.../spinlock.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..3ac9fcb 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +/* + * This support kernel to check if one cpu is preempted or not. + * Then we can fix some lock holder preemption issue. + */ +#ifdef CONFIG_PPC_PSERIES +#define vcpu_is_preempted vcpu_is_preempted +static inline bool vcpu_is_preempted(int cpu) +{ + /* + * pSeries and powerNV can be b...
2016 Jul 21
0
[PATCH v3 2/4] powerpc/spinlock: support vcpu preempted check
.../spinlock.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..3ac9fcb 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,24 @@ #define SYNC_IO #endif +/* + * This support kernel to check if one cpu is preempted or not. + * Then we can fix some lock holder preemption issue. + */ +#ifdef CONFIG_PPC_PSERIES +#define vcpu_is_preempted vcpu_is_preempted +static inline bool vcpu_is_preempted(int cpu) +{ + /* + * pSeries and powerNV can be b...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...<asm-generic/qspinlock.h> + +#endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 523673d..4359ee6 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -52,6 +52,20 @@ #define SYNC_IO #endif +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr)) +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); +#else /* SPL...
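For context, SHARED_PROCESSOR and __spin_yield() are what the existing powerpc spinlock slow path already uses, and the qspinlock conversion has to keep that behaviour. A simplified sketch of that pre-existing loop (paraphrased from the upstream spinlock.h of the period, omitting the I/O-sync handling):

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();              /* drop SMT thread priority while spinning */
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);  /* give our slices to the lock holder's vCPU */
                } while (unlikely(lock->slock != 0));
                HMT_medium();                   /* restore priority before retrying */
        }
}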
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...; > + queued_spin_lock(lock); > +} > + > +static inline > +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) > +{ > + CLEAR_IO_SYNC; > + queued_spin_lock(lock); > +} > + > +static inline void arch_spin_unlock(arch_spinlock_t *lock) > +{ > + SYNC_IO; > + queued_spin_unlock(lock); > +} > +#endif /* _ASM_POWERPC_QSPINLOCK_H */ > diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h > index 8c1b913..954099e 100644 > --- a/arch/powerpc/include/asm/spinlock.h > +++ b/arch/powerpc/include/asm/spi...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...> > diff --git a/arch/powerpc/include/asm/spinlock.h > > b/arch/powerpc/include/asm/spinlock.h > > index 523673d..4359ee6 100644 > > --- a/arch/powerpc/include/asm/spinlock.h > > +++ b/arch/powerpc/include/asm/spinlock.h > > @@ -52,6 +52,20 @@ > >  #define SYNC_IO > >  #endif > >  > > +#if defined(CONFIG_PPC_SPLPAR) > > +/* We only yield to the hypervisor if we are in shared processor > > mode */ > > +#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca- > > > > > > lppaca_ptr)) > > +extern voi...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com> change from v4: BUG FIX. thanks boqun reporting this issue. struct __qspinlock has different layout in big-endian machine. native_queued_spin_unlock() may write value to a wrong address. now fix it. change from v3: a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock no other patch changed. and the patch
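The big-endian bug mentioned in that changelog comes from the byte layout of the qspinlock word. A sketch of the layout question (abridged from the struct __qspinlock definition in kernel/locking/qspinlock.c of that period; the 16-bit views are omitted):

struct __qspinlock {
        union {
                atomic_t val;
#ifdef __LITTLE_ENDIAN
                struct {
                        u8 locked;      /* the locked byte is byte 0 ... */
                        u8 pending;
                };
#else
                struct {
                        u8 reserved[2];
                        u8 pending;
                        u8 locked;      /* ... but byte 3 on big-endian */
                };
#endif
        };
};
/*
 * An unlock that stores 0 at a hard-coded little-endian offset therefore
 * clears the wrong byte on a big-endian machine, which is the v4 -> v5
 * fix described in the changelog above.
 */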
2016 May 17
0
[PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
...th(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val) +{ + CLEAR_IO_SYNC; + pv_lock_op.lock(lock, val); +} + +static inline void pv_queued_spin_unlock(struct qspinlock *lock) +{ + SYNC_IO; + pv_lock_op.unlock(lock); +} + +static inline void pv_wait(u8 *ptr, u8 val, int lockcpu) +{ + pv_lock_op.wait(ptr, val, lockcpu); +} + +static inline void pv_kick(int cpu) +{ + pv_lock_op.kick(cpu); +} + +#endif diff --git a/arch/powerpc/include/asm/qspinlock_paravirt_types.h b/arch/powerpc/inclu...
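Laid out readably, the wrappers in the hunk above are thin trampolines into the patch's pv_lock_op table of paravirt callbacks, keeping powerpc's I/O-sync bookkeeping around lock and unlock:

static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
{
        CLEAR_IO_SYNC;          /* reset the pending-MMIO flag when taking the lock */
        pv_lock_op.lock(lock, val);
}

static inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        SYNC_IO;                /* order any MMIO from the critical section before unlock */
        pv_lock_op.unlock(lock);
}

static inline void pv_wait(u8 *ptr, u8 val, int lockcpu)
{
        pv_lock_op.wait(ptr, val, lockcpu);
}

static inline void pv_kick(int cpu)
{
        pv_lock_op.kick(cpu);
}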
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qpsinlock instead of spinlock
change from v1: separate into 6 patches from one patch some minor code changes. benchmark test results are below. run 3 tests on pseries IBM,8408-E8E with 32cpus, 64GB memory perf bench futex hash perf bench futex lock-pi perf record -advRT || perf bench sched messaging -g 1000 || perf report summary: _____test________________spinlock______________pv-qspinlock_____ |futex hash | 556370 ops |
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qpsinlock as the default spinlock implemention
change from v2: __spin_yield_cpu() will yield slices to lpar if target cpu is running. remove unnecessary rmb() in __spin_yield/wake_cpu. __pv_wait() will check the *ptr == val. some commit message change change from v1: separate into 6 patches from one patch some minor code changes. I do several tests on pseries IBM,8408-E8E with 32cpus, 64GB memory. benchmark test results are below. 2
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...ine void arch_spin_lock(arch_spinlock_t *lock) +{ + CLEAR_IO_SYNC; + queued_spin_lock(lock); +} + +static inline +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +{ + CLEAR_IO_SYNC; + queued_spin_lock(lock); +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + SYNC_IO; + queued_spin_unlock(lock); +} +#endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 8c1b913..954099e 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -60,6 +60,23 @@ static i...
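For readability, the arch-level wrappers shown in the hunk above simply bracket the generic queued-spinlock calls with powerpc's I/O-sync handling:

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        queued_spin_lock(lock);
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        CLEAR_IO_SYNC;
        queued_spin_lock(lock);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        SYNC_IO;                /* emit a sync if the critical section touched MMIO */
        queued_spin_unlock(lock);
}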
2016 Jun 28
11
[PATCH v2 0/4] implement vcpu preempted check
change from v1: a simpler definition of default vcpu_is_preempted skip machine type check on ppc, and add config. remove dedicated macro. add one patch to drop overload of rwsem_spin_on_owner and mutex_spin_on_owner. add more comments thanks boqun and Peter's suggestion. This patch set aims to fix lock holder preemption issues. test-case: perf record -a perf bench sched messaging -g
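The rwsem/mutex part of the series mentioned above makes the optimistic spin-on-owner loops give up when the lock holder's vCPU is preempted. A sketch of the resulting check (paraphrased from the upstream mutex_spin_on_owner() after this series; exact details vary by kernel version):

static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        bool ret = true;

        rcu_read_lock();
        while (lock->owner == owner) {
                barrier();
                /*
                 * Stop spinning if the owner is no longer running, we need
                 * to reschedule, or the owner's vCPU has been preempted by
                 * the hypervisor -- in all three cases the holder cannot
                 * release the lock any time soon.
                 */
                if (!owner->on_cpu || need_resched() ||
                    vcpu_is_preempted(task_cpu(owner))) {
                        ret = false;
                        break;
                }
                cpu_relax();
        }
        rcu_read_unlock();
        return ret;
}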