Displaying 20 results from an estimated 27 matches for "clear_io_sync".
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...ch_spin_unlock
> +#define arch_spin_trylock arch_spin_trylock
> +#define arch_spin_lock arch_spin_lock
> +#define arch_spin_lock_flags arch_spin_lock_flags
> +#define arch_spin_unlock arch_spin_unlock
> +
> +static inline int arch_spin_trylock(arch_spinlock_t *lock)
> +{
> + CLEAR_IO_SYNC;
> + return queued_spin_trylock(lock);
> +}
> +
> +static inline void arch_spin_lock(arch_spinlock_t *lock)
> +{
> + CLEAR_IO_SYNC;
> + queued_spin_lock(lock);
> +}
> +
> +static inline
> +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
> +{...
2016 Dec 06
1
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...ch_spin_unlock
> +#define arch_spin_trylock arch_spin_trylock
> +#define arch_spin_lock arch_spin_lock
> +#define arch_spin_lock_flags arch_spin_lock_flags
> +#define arch_spin_unlock arch_spin_unlock
> +
> +static inline int arch_spin_trylock(arch_spinlock_t *lock)
> +{
> + CLEAR_IO_SYNC;
> + return queued_spin_trylock(lock);
> +}
> +
> +static inline void arch_spin_lock(arch_spinlock_t *lock)
> +{
> + CLEAR_IO_SYNC;
> + queued_spin_lock(lock);
> +}
> +
> +static inline
> +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
> +{...
2016 Dec 05
0
[PATCH v8 1/6] powerpc/qspinlock: powerpc support qspinlock
...+#undef arch_spin_lock_flags
+#undef arch_spin_unlock
+#define arch_spin_trylock arch_spin_trylock
+#define arch_spin_lock arch_spin_lock
+#define arch_spin_lock_flags arch_spin_lock_flags
+#define arch_spin_unlock arch_spin_unlock
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ CLEAR_IO_SYNC;
+ return queued_spin_trylock(lock);
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ CLEAR_IO_SYNC;
+ queued_spin_lock(lock);
+}
+
+static inline
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+{
+ CLEAR_IO_SYNC;
+ queued_spin_lock(lock);
+}
+
+static inl...
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fairlock patchset. You can apply them and build successfully.
patches are based on linux-next
qspinlock can avoid the waiter-starvation issue. It has about the same speed in
single-thread and it can be much faster in high contention situations
especially when the spinlock is embedded within the data structure to be
protected.
v7 -> v8:
add one patch to drop a function call
2016 Dec 05
9
[PATCH v8 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fairlock patchset. You can apply them and build successfully.
patches are based on linux-next
qspinlock can avoid the waiter-starvation issue. It has about the same speed in
single-thread and it can be much faster in high contention situations
especially when the spinlock is embedded within the data structure to be
protected.
v7 -> v8:
add one patch to drop a function call
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...lock_t *lock);
> -extern void __rw_yield(arch_rwlock_t *lock);
> -#else /* SPLPAR */
> -#define __spin_yield(x) barrier()
> -#define __rw_yield(x) barrier()
> -#define SHARED_PROCESSOR 0
> -#endif
> -
> ?static inline void arch_spin_lock(arch_spinlock_t *lock)
> ?{
> ? CLEAR_IO_SYNC;
> @@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t
> *lock);
> ? do { while (arch_spin_is_locked(lock)) cpu_relax(); } while
> (0)
> ?#endif
> ?
> +#endif /* !CONFIG_QUEUED_SPINLOCKS */
> ?/*
> ? * Read-write spinlocks, allowing multiple readers
>...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...lock_t *lock);
> -extern void __rw_yield(arch_rwlock_t *lock);
> -#else /* SPLPAR */
> -#define __spin_yield(x) barrier()
> -#define __rw_yield(x) barrier()
> -#define SHARED_PROCESSOR 0
> -#endif
> -
> ?static inline void arch_spin_lock(arch_spinlock_t *lock)
> ?{
> ? CLEAR_IO_SYNC;
> @@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t
> *lock);
> ? do { while (arch_spin_is_locked(lock)) cpu_relax(); } while
> (0)
> ?#endif
> ?
> +#endif /* !CONFIG_QUEUED_SPINLOCKS */
> ?/*
> ? * Read-write spinlocks, allowing multiple readers
>...
2016 Dec 06
6
[PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fairlock patchset. You can apply them and build successfully.
patches are based on linux-next
qspinlock can avoid the waiter-starvation issue. It has about the same speed in
single-thread and it can be much faster in high contention situations
especially when the spinlock is embedded within the data structure to be
protected.
v8 -> v9:
mv qspinlock config entry to
2016 Dec 06
6
[PATCH v9 0/6] Implement qspinlock/pv-qspinlock on ppc
Hi All,
this is the fairlock patchset. You can apply them and build successfully.
patches are based on linux-next
qspinlock can avoid the waiter-starvation issue. It has about the same speed in
single-thread and it can be much faster in high contention situations
especially when the spinlock is embedded within the data structure to be
protected.
v8 -> v9:
mv qspinlock config entry to
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...t;lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerp...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...t;lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerp...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...rch_rwlock_t *lock);
> > -#else /* SPLPAR */
> > -#define __spin_yield(x) barrier()
> > -#define __rw_yield(x) barrier()
> > -#define SHARED_PROCESSOR 0
> > -#endif
> > -
> > ?static inline void arch_spin_lock(arch_spinlock_t *lock)
> > ?{
> > ? CLEAR_IO_SYNC;
> > @@ -169,6 +171,7 @@ extern void
> > arch_spin_unlock_wait(arch_spinlock_t
> > *lock);
> > ? do { while (arch_spin_is_locked(lock)) cpu_relax(); }
> > while
> > (0)
> > ?#endif
> > ?
> > +#endif /* !CONFIG_QUEUED_SPINLOCKS */
> > ?/*
&...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
change from v4:
BUG FIX. thanks boqun reporting this issue.
struct __qspinlock has a different layout on big-endian machines.
native_queued_spin_unlock() may write the value to a wrong address; now fixed.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
change from v4:
BUG FIX. thanks boqun reporting this issue.
struct __qspinlock has a different layout on big-endian machines.
native_queued_spin_unlock() may write the value to a wrong address; now fixed.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
2016 May 17
0
[PATCH v2 4/6] pv-qspinlock: powerpc support pv-qspinlock
...wpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+ CLEAR_IO_SYNC;
+ pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+ SYNC_IO;
+ pv_lock_op.unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val, int lockcpu)
+{
+ pv_lock_op.wait(ptr, val, lockcpu);
+}
+
+static inline void pv_kick(int cpu)
+{
+ pv_lo...
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1:
separate into 6 patches from one patch
some minor code changes.
benchmark test results are below.
run 3 tests on pseries IBM,8408-E8E with 32cpus, 64GB memory
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1:
separate into 6 patches from one patch
some minor code changes.
benchmark test results are below.
run 3 tests on pseries IBM,8408-E8E with 32cpus, 64GB memory
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
change from v2:
__spin_yield_cpu() will yield slices to lpar if target cpu is running.
remove unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check the *ptr == val.
some commit message change
change from v1:
separate into 6 patches from one patch
some minor code changes.
I do several tests on pseries IBM,8408-E8E with 32cpus, 64GB memory.
benchmark test results are below.
2
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
change from v2:
__spin_yield_cpu() will yield slices to lpar if target cpu is running.
remove unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check the *ptr == val.
some commit message change
change from v1:
separate into 6 patches from one patch
some minor code changes.
I do several tests on pseries IBM,8408-E8E with 32cpus, 64GB memory.
benchmark test results are below.
2
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...t;lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +175,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerp...