search for: __smp_wmb

Displaying 20 results from an estimated 43 matches for "__smp_wmb".

Did you mean: __smp_mb
2016 Jan 04
2
[PATCH v2 22/32] s390: define __smp_xxx
...barrier() > #define dma_rmb() mb() > #define dma_wmb() mb() > -#define smp_mb() mb() > -#define smp_rmb() rmb() > -#define smp_wmb() wmb() > - > -#define smp_store_release(p, v) \ > +#define __smp_mb() mb() > +#define __smp_rmb() rmb() > +#define __smp_wmb() wmb() > +#define smp_mb() __smp_mb() > +#define smp_rmb() __smp_rmb() > +#define smp_wmb() __smp_wmb() Why define the smp_*mb() primitives here? Would not the inclusion of asm-generic/barrier.h do this?
2016 Jan 04
2
[PATCH v2 22/32] s390: define __smp_xxx
...barrier() > #define dma_rmb() mb() > #define dma_wmb() mb() > -#define smp_mb() mb() > -#define smp_rmb() rmb() > -#define smp_wmb() wmb() > - > -#define smp_store_release(p, v) \ > +#define __smp_mb() mb() > +#define __smp_rmb() rmb() > +#define __smp_wmb() wmb() > +#define smp_mb() __smp_mb() > +#define smp_rmb() __smp_rmb() > +#define smp_wmb() __smp_wmb() Why define the smp_*mb() primitives here? Would not the inclusion of asm-generic/barrier.h do this?
2015 Dec 30
0
[PATCH 32/34] xen/io: use __smp_XXX barriers
...ace/io/ring.h index 7dc685b..46dfc65 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -208,12 +208,12 @@ struct __name##_back_ring { \ #define RING_PUSH_REQUESTS(_r) do { \ - wmb(); /* back sees requests /before/ updated producer index */ \ + __smp_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ - wmb(); /* front sees responses /before/ updated producer index */ \ + __smp_wmb(); /* front sees responses...
2015 Dec 30
46
[PATCH 00/34] arch: barrier cleanup + __smp_XXX barriers for virt
This is really trying to cleanup some virt code, as suggested by Peter, who said > You could of course go fix that instead of mutilating things into > sort-of functional state. This work is needed for virtio, so it's probably easiest to merge it through my tree - is this fine by everyone? Arnd, if you agree, could you ack this please? Note to arch maintainers: please don't
2015 Dec 30
46
[PATCH 00/34] arch: barrier cleanup + __smp_XXX barriers for virt
This is really trying to cleanup some virt code, as suggested by Peter, who said > You could of course go fix that instead of mutilating things into > sort-of functional state. This work is needed for virtio, so it's probably easiest to merge it through my tree - is this fine by everyone? Arnd, if you agree, could you ack this please? Note to arch maintainers: please don't
2016 Jan 05
1
[PATCH v2 20/32] metag: define __smp_xxx
...MIC_UNLOCK; > barrier(); > *flushptr = 0; > barrier(); > } > -#define smp_mb() fence() > -#define smp_rmb() fence() > -#define smp_wmb() barrier() > +#define __smp_mb() metag_fence() > +#define __smp_rmb() metag_fence() > +#define __smp_wmb() barrier() > #else > -#define fence() do { } while (0) > -#define smp_mb() barrier() > -#define smp_rmb() barrier() > -#define smp_wmb() barrier() > +#define metag_fence() do { } while (0) > +#define __smp_mb() barrier() > +#define __smp_r...
2016 Jan 05
1
[PATCH v2 20/32] metag: define __smp_xxx
...MIC_UNLOCK; > barrier(); > *flushptr = 0; > barrier(); > } > -#define smp_mb() fence() > -#define smp_rmb() fence() > -#define smp_wmb() barrier() > +#define __smp_mb() metag_fence() > +#define __smp_rmb() metag_fence() > +#define __smp_wmb() barrier() > #else > -#define fence() do { } while (0) > -#define smp_mb() barrier() > -#define smp_rmb() barrier() > -#define smp_wmb() barrier() > +#define metag_fence() do { } while (0) > +#define __smp_mb() barrier() > +#define __smp_r...
2016 Jan 05
3
[PATCH v2 22/32] s390: define __smp_xxx
...> > -#define smp_mb() mb() > > > -#define smp_rmb() rmb() > > > -#define smp_wmb() wmb() > > > - > > > -#define smp_store_release(p, v) \ > > > +#define __smp_mb() mb() > > > +#define __smp_rmb() rmb() > > > +#define __smp_wmb() wmb() > > > +#define smp_mb() __smp_mb() > > > +#define smp_rmb() __smp_rmb() > > > +#define smp_wmb() __smp_wmb() > > > > Why define the smp_*mb() primitives here? Would not the inclusion of > > asm-generic/barrier.h do this? > > No be...
2016 Jan 05
3
[PATCH v2 22/32] s390: define __smp_xxx
...> > -#define smp_mb() mb() > > > -#define smp_rmb() rmb() > > > -#define smp_wmb() wmb() > > > - > > > -#define smp_store_release(p, v) \ > > > +#define __smp_mb() mb() > > > +#define __smp_rmb() rmb() > > > +#define __smp_wmb() wmb() > > > +#define smp_mb() __smp_mb() > > > +#define smp_rmb() __smp_rmb() > > > +#define smp_wmb() __smp_wmb() > > > > Why define the smp_*mb() primitives here? Would not the inclusion of > > asm-generic/barrier.h do this? > > No be...
2016 Jan 05
2
[PATCH v2 22/32] s390: define __smp_xxx
...p_rmb() rmb() > > > > > -#define smp_wmb() wmb() > > > > > - > > > > > -#define smp_store_release(p, v) \ > > > > > +#define __smp_mb() mb() > > > > > +#define __smp_rmb() rmb() > > > > > +#define __smp_wmb() wmb() > > > > > +#define smp_mb() __smp_mb() > > > > > +#define smp_rmb() __smp_rmb() > > > > > +#define smp_wmb() __smp_wmb() > > > > > > > > Why define the smp_*mb() primitives here? Would not the inclusion of > >...
2016 Jan 05
2
[PATCH v2 22/32] s390: define __smp_xxx
...p_rmb() rmb() > > > > > -#define smp_wmb() wmb() > > > > > - > > > > > -#define smp_store_release(p, v) \ > > > > > +#define __smp_mb() mb() > > > > > +#define __smp_rmb() rmb() > > > > > +#define __smp_wmb() wmb() > > > > > +#define smp_mb() __smp_mb() > > > > > +#define smp_rmb() __smp_rmb() > > > > > +#define smp_wmb() __smp_wmb() > > > > > > > > Why define the smp_*mb() primitives here? Would not the inclusion of > >...
2015 Dec 31
0
[PATCH v2 20/32] metag: define __smp_xxx
...nt *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; barrier(); *flushptr = 0; barrier(); } -#define smp_mb() fence() -#define smp_rmb() fence() -#define smp_wmb() barrier() +#define __smp_mb() metag_fence() +#define __smp_rmb() metag_fence() +#define __smp_wmb() barrier() #else -#define fence() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() +#define metag_fence() do { } while (0) +#define __smp_mb() barrier() +#define __smp_rmb() barrier() +#define __smp_wmb(...
2015 Dec 31
0
[PATCH v2 22/32] s390: define __smp_xxx
...asm/barrier.h @@ -26,18 +26,21 @@ #define wmb() barrier() #define dma_rmb() mb() #define dma_wmb() mb() -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -#define smp_store_release(p, v) \ +#define __smp_mb() mb() +#define __smp_rmb() rmb() +#define __smp_wmb() wmb() +#define smp_mb() __smp_mb() +#define smp_rmb() __smp_rmb() +#define smp_wmb() __smp_wmb() + +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(...
2016 Jan 04
0
[PATCH v2 22/32] s390: define __smp_xxx
...t; > #define dma_wmb() mb() > > -#define smp_mb() mb() > > -#define smp_rmb() rmb() > > -#define smp_wmb() wmb() > > - > > -#define smp_store_release(p, v) \ > > +#define __smp_mb() mb() > > +#define __smp_rmb() rmb() > > +#define __smp_wmb() wmb() > > +#define smp_mb() __smp_mb() > > +#define smp_rmb() __smp_rmb() > > +#define smp_wmb() __smp_wmb() > > Why define the smp_*mb() primitives here? Would not the inclusion of > asm-generic/barrier.h do this? No because the generic one is a nop on !SMP,...
2015 Dec 31
54
[PATCH v2 00/34] arch: barrier cleanup + barriers for virt
Changes since v1: - replaced my asm-generic patch with an equivalent patch already in tip - add wrappers with virt_ prefix for better code annotation, as suggested by David Miller - dropped XXX in patch names as this makes vger choke, Cc all relevant mailing lists on all patches (not personal email, as the list becomes too long then) I parked this in vhost tree for now, but the
2015 Dec 31
54
[PATCH v2 00/34] arch: barrier cleanup + barriers for virt
Changes since v1: - replaced my asm-generic patch with an equivalent patch already in tip - add wrappers with virt_ prefix for better code annotation, as suggested by David Miller - dropped XXX in patch names as this makes vger choke, Cc all relevant mailing lists on all patches (not personal email, as the list becomes too long then) I parked this in vhost tree for now, but the
2016 Jan 05
2
[PATCH v2 15/32] powerpc: define __smp_xxx
...MPWMB) : : :"memory") > -#else > -#define smp_lwsync() barrier() > - > -#define smp_mb() barrier() > -#define smp_rmb() barrier() > -#define smp_wmb() barrier() > -#endif /* CONFIG_SMP */ > +#define __smp_mb() mb() > +#define __smp_rmb() __lwsync() > +#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") > > /* > * This is a barrier which prevents following instructions from being > @@ -67,18 +59,18 @@ > #define data_barrier(x) \ > asm volatile("twi 0,%0,0; isync" : : "r" (x) : "...
2016 Jan 10
48
[PATCH v3 00/41] arch: barrier cleanup + barriers for virt
Changes since v2: - extended checkpatch tests for barriers, and added patches teaching it to warn about incorrect usage of barriers (__smp_xxx barriers are for use by asm-generic code only), should help prevent misuse by arch code to address comments by Russell King - patched more instances of xen to use virt_ barriers as suggested by Stefano Stabellini - implemented a 2 byte xchg on sh
2016 Jan 10
48
[PATCH v3 00/41] arch: barrier cleanup + barriers for virt
Changes since v2: - extended checkpatch tests for barriers, and added patches teaching it to warn about incorrect usage of barriers (__smp_xxx barriers are for use by asm-generic code only), should help prevent misuse by arch code to address comments by Russell King - patched more instances of xen to use virt_ barriers as suggested by Stefano Stabellini - implemented a 2 byte xchg on sh
2016 Jan 05
2
[PATCH v2 15/32] powerpc: define __smp_xxx
...MPWMB) : : :"memory") > -#else > -#define smp_lwsync() barrier() > - > -#define smp_mb() barrier() > -#define smp_rmb() barrier() > -#define smp_wmb() barrier() > -#endif /* CONFIG_SMP */ > +#define __smp_mb() mb() > +#define __smp_rmb() __lwsync() > +#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") > > /* > * This is a barrier which prevents following instructions from being > @@ -67,18 +59,18 @@ > #define data_barrier(x) \ > asm volatile("twi 0,%0,0; isync" : : "r" (x) : "...