Displaying 20 results from an estimated 101 matches for "arch_spin_unlock_wait".
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...ock_t *lock);
> -#else /* SPLPAR */
> -#define __spin_yield(x) barrier()
> -#define __rw_yield(x) barrier()
> -#define SHARED_PROCESSOR 0
> -#endif
> -
>  static inline void arch_spin_lock(arch_spinlock_t *lock)
>  {
>  	CLEAR_IO_SYNC;
> @@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t
> *lock);
>  	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while
> (0)
>  #endif
>  
> +#endif /* !CONFIG_QUEUED_SPINLOCKS */
>  /*
>   * Read-write spinlocks, allowing multiple readers
>   * but only one writer.
> diff --git a/arch/powerpc/inc...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...ock_t *lock);
> -#else /* SPLPAR */
> -#define __spin_yield(x) barrier()
> -#define __rw_yield(x) barrier()
> -#define SHARED_PROCESSOR 0
> -#endif
> -
>  static inline void arch_spin_lock(arch_spinlock_t *lock)
>  {
>  	CLEAR_IO_SYNC;
> @@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t
> *lock);
>  	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while
> (0)
>  #endif
>  
> +#endif /* !CONFIG_QUEUED_SPINLOCKS */
>  /*
>   * Read-write spinlocks, allowing multiple readers
>   * but only one writer.
> diff --git a/arch/powerpc/inc...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...pinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/a...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...pinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +171,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/a...
2013 May 09
4
[PATCH] mini-os: eliminate duplicated definition of spin_unlock_wait
...index 70cf20f..6604e3c 100644
--- a/extras/mini-os/include/spinlock.h
+++ b/extras/mini-os/include/spinlock.h
@@ -30,7 +30,7 @@ typedef struct {
#define spin_is_locked(x) arch_spin_is_locked(x)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+#define spin_unlock_wait(x) arch_spin_unlock_wait(x)
#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \
diff --git a/extras/mini-os/include/x86/arch_spinlock.h b/extras/mini-os/include/x86/arch_spinlock.h
index 4b8faf7..c08b6f1 100644
--- a/extras/mini-os/include/x86/arch_spinlock.h
+++ b/extras/mini-os/include/x86/arch_spinlock.h...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...ne __spin_yield(x) barrier()
> > -#define __rw_yield(x) barrier()
> > -#define SHARED_PROCESSOR 0
> > -#endif
> > -
> > ?static inline void arch_spin_lock(arch_spinlock_t *lock)
> > ?{
> > ? CLEAR_IO_SYNC;
> > @@ -169,6 +171,7 @@ extern void
> > arch_spin_unlock_wait(arch_spinlock_t
> > *lock);
> > ? do { while (arch_spin_is_locked(lock)) cpu_relax(); }
> > while
> > (0)
> > ?#endif
> > ?
> > +#endif /* !CONFIG_QUEUED_SPINLOCKS */
> > ?/*
> > ? * Read-write spinlocks, allowing multiple readers
> > ? *...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...pinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +175,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/a...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...pinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +175,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/a...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...gt;>> -#define __rw_yield(x) barrier()
>>> -#define SHARED_PROCESSOR 0
>>> -#endif
>>> -
>>> static inline void arch_spin_lock(arch_spinlock_t *lock)
>>> {
>>> CLEAR_IO_SYNC;
>>> @@ -169,6 +171,7 @@ extern void
>>> arch_spin_unlock_wait(arch_spinlock_t
>>> *lock);
>>> do { while (arch_spin_is_locked(lock)) cpu_relax(); }
>>> while
>>> (0)
>>> #endif
>>>
>>> +#endif /* !CONFIG_QUEUED_SPINLOCKS */
>>> /*
>>> * Read-write spinlocks, allowing mu...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...gt;>> -#define __rw_yield(x) barrier()
>>> -#define SHARED_PROCESSOR 0
>>> -#endif
>>> -
>>> static inline void arch_spin_lock(arch_spinlock_t *lock)
>>> {
>>> CLEAR_IO_SYNC;
>>> @@ -169,6 +171,7 @@ extern void
>>> arch_spin_unlock_wait(arch_spinlock_t
>>> *lock);
>>> do { while (arch_spin_is_locked(lock)) cpu_relax(); }
>>> while
>>> (0)
>>> #endif
>>>
>>> +#endif /* !CONFIG_QUEUED_SPINLOCKS */
>>> /*
>>> * Read-write spinlocks, allowing mu...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
change from v4:
BUG FIX. thanks boqun reporting this issue.
struct __qspinlock has different layout in big-endian machine.
native_queued_spin_unlock() may write value to a wrong address. now fix it.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
change from v4:
BUG FIX. thanks boqun reporting this issue.
struct __qspinlock has different layout in big-endian machine.
native_queued_spin_unlock() may write value to a wrong address. now fix it.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...pinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +175,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/a...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...pinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
@@ -169,6 +175,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/a...
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1:
separate into 6 patches from one patch
some minor code changes.
benchmark test results are below.
run 3 tests on pseries IBM,8408-E8E with 32cpus, 64GB memory
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 17
6
[PATCH v3 0/6] powerpc use pv-qspinlock instead of spinlock
change from v1:
separate into 6 patches from one patch
some minor code changes.
benchmark test results are below.
run 3 tests on pseries IBM,8408-E8E with 32cpus, 64GB memory
perf bench futex hash
perf bench futex lock-pi
perf record -advRT || perf bench sched messaging -g 1000 || perf report
summary:
_____test________________spinlock______________pv-qspinlock_____
|futex hash | 556370 ops |
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
change from v2:
__spin_yield_cpu() will yield slices to lpar if target cpu is running.
remove unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check the *ptr == val.
some commit message change
change from v1:
separate into 6 patches from one patch
some minor code changes.
I do several tests on pseries IBM,8408-E8E with 32cpus, 64GB memory.
benchmark test results are below.
2
2016 May 25
10
[PATCH v3 0/6] powerpc use pv-qspinlock as the default spinlock implementation
change from v2:
__spin_yield_cpu() will yield slices to lpar if target cpu is running.
remove unnecessary rmb() in __spin_yield/wake_cpu.
__pv_wait() will check the *ptr == val.
some commit message change
change from v1:
separate into 6 patches from one patch
some minor code changes.
I do several tests on pseries IBM,8408-E8E with 32cpus, 64GB memory.
benchmark test results are below.
2
2016 Jun 02
9
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
change from v4:
BUG FIX. thanks boqun reporting this issue.
struct __qspinlock has different layout in big-endian machine.
native_queued_spin_unlock() may write value to a wrong address. now fix it.
sorry for not even doing a test on bigendian machine before!!!
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
2016 Jun 02
9
[PATCH v5 0/6] powerPC/pSeries use pv-qspinlock as the default spinlock implementation
change from v4:
BUG FIX. thanks boqun reporting this issue.
struct __qspinlock has different layout in big-endian machine.
native_queued_spin_unlock() may write value to a wrong address. now fix it.
sorry for not even doing a test on bigendian machine before!!!
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch