Search for: smpwmb

Displaying 12 results matching "smpwmb".

2016 Jan 05
2
[PATCH v2 15/32] powerpc: define __smp_xxx
.../asm/barrier.h b/arch/powerpc/include/asm/barrier.h
> index 980ad0c..c0deafc 100644
> --- a/arch/powerpc/include/asm/barrier.h
> +++ b/arch/powerpc/include/asm/barrier.h
> @@ -44,19 +44,11 @@
> #define dma_rmb() __lwsync()
> #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
>
> -#ifdef CONFIG_SMP
> -#define smp_lwsync() __lwsync()
> +#define __smp_lwsync() __lwsync()
>
so __smp_lwsync() is always mapped to lwsync, right?
> -#define smp_mb() mb()
> -#define smp_rmb() __lwsync()
> -#define smp_wmb() __asm__ __volat...
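Context for this result: the hunk removes the CONFIG_SMP-conditional smp_lwsync() and keeps one unconditional __smp_lwsync(); generic code then maps the smp_* names. A simplified sketch of that layering (illustrative only, not the verbatim kernel headers):

    /* arch header: always provide the real instruction as __smp_*
     * (simplified; the real powerpc header picks lwsync vs eieio) */
    #define barrier()       __asm__ __volatile__ ("" : : : "memory")
    #define __smp_lwsync()  __asm__ __volatile__ ("lwsync" : : : "memory")
    #define __smp_wmb()     __smp_lwsync()

    /* generic layer: choose the smp_* mapping once, for every arch */
    #ifdef CONFIG_SMP
    #define smp_lwsync()    __smp_lwsync()
    #define smp_wmb()       __smp_wmb()
    #else
    #define smp_lwsync()    barrier()  /* compiler barrier suffices on UP */
    #define smp_wmb()       barrier()
    #endif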
2016 Jan 05
2
[PATCH v2 15/32] powerpc: define __smp_xxx
...; > index 980ad0c..c0deafc 100644
> > > --- a/arch/powerpc/include/asm/barrier.h
> > > +++ b/arch/powerpc/include/asm/barrier.h
> > > @@ -44,19 +44,11 @@
> > > #define dma_rmb() __lwsync()
> > > #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
> > >
> > > -#ifdef CONFIG_SMP
> > > -#define smp_lwsync() __lwsync()
> > > +#define __smp_lwsync() __lwsync()
> > >
> >
> > so __smp_lwsync() is always mapped to lwsync, right?
>
> Yes.
>
> > &...
2015 Dec 31
0
[PATCH v2 15/32] powerpc: define __smp_xxx
...f --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 980ad0c..c0deafc 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -44,19 +44,11 @@
 #define dma_rmb() __lwsync()
 #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#ifdef CONFIG_SMP
-#define smp_lwsync() __lwsync()
+#define __smp_lwsync() __lwsync()
-#define smp_mb() mb()
-#define smp_rmb() __lwsync()
-#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#else
-#define smp_lwsync() barrier()
-...
2016 Jan 05
0
[PATCH v2 15/32] powerpc: define __smp_xxx
...include/asm/barrier.h
> > index 980ad0c..c0deafc 100644
> > --- a/arch/powerpc/include/asm/barrier.h
> > +++ b/arch/powerpc/include/asm/barrier.h
> > @@ -44,19 +44,11 @@
> > #define dma_rmb() __lwsync()
> > #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
> >
> > -#ifdef CONFIG_SMP
> > -#define smp_lwsync() __lwsync()
> > +#define __smp_lwsync() __lwsync()
> >
>
> so __smp_lwsync() is always mapped to lwsync, right?
Yes.
> > -#define smp_mb() mb()
> > -#define smp_rmb()...
2016 Jan 05
0
[PATCH v2 15/32] powerpc: define __smp_xxx
...eafc 100644
> > > > --- a/arch/powerpc/include/asm/barrier.h
> > > > +++ b/arch/powerpc/include/asm/barrier.h
> > > > @@ -44,19 +44,11 @@
> > > > #define dma_rmb() __lwsync()
> > > > #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
> > > >
> > > > -#ifdef CONFIG_SMP
> > > > -#define smp_lwsync() __lwsync()
> > > > +#define __smp_lwsync() __lwsync()
> > > >
> > >
> > > so __smp_lwsync() is always mapped to lwsync, right...
2016 Jan 12
1
[PATCH v3 01/41] lcoking/barriers, arch: Use smp barriers in smp_store_release()
...__ __volatile__ ("sync" : : : "memory")
>
> -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
> +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
>
> #ifdef __SUBARCH_HAS_LWSYNC
> # define SMPWMB LWSYNC
> diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
> index d68e11e..7ffd0b1 100644
> --- a/arch/s390/include/asm/barrier.h
> +++ b/arch/s390/include/asm/barrier.h
> @@ -36,7 +36,7 @@
> #define smp_mb__before_atomic() smp_mb()
> #defi...
2016 Jan 10
0
[PATCH v3 05/41] powerpc: reuse asm-generic/barrier.h
...4,6 @@
 #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #ifdef __SUBARCH_HAS_LWSYNC
 # define SMPWMB LWSYNC
 #else
@@ -60,9 +58,6 @@
 #define smp_wmb() barrier()
 #endif /* CONFIG_SMP */
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
 /*
  * This is a barrier which prevents following instructions from being
  * started until the valu...
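This patch can drop those definitions because asm-generic/barrier.h supplies a guarded default for every barrier an architecture does not define itself, so an arch override always wins. A condensed sketch of that fallback pattern (not the verbatim generic header):

    /* asm-generic/barrier.h, condensed: each default is #ifndef-guarded,
     * so an arch definition made before including this file takes over */
    #ifndef __smp_mb
    #define __smp_mb()  mb()
    #endif

    #ifdef CONFIG_SMP
    #ifndef smp_mb
    #define smp_mb()    __smp_mb()
    #endif
    #else
    #ifndef smp_mb
    #define smp_mb()    barrier()
    #endif
    #endif

    #ifndef smp_read_barrier_depends
    #define smp_read_barrier_depends()  do { } while (0)
    #endif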
2016 Jan 10
0
[PATCH v3 01/41] lcoking/barriers, arch: Use smp barriers in smp_store_release()
...")
 #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #ifdef __SUBARCH_HAS_LWSYNC
 # define SMPWMB LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e..7ffd0b1 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()...
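The one-line change in this result is the point of the patch: smp_store_mb() promises a store followed by SMP-level ordering, and implementing it with mb() made it stronger (and costlier) than needed when CONFIG_SMP is off. Schematically (a before/after sketch, not the exact tree state):

    /* before: full hardware barrier even on a UP kernel */
    #define smp_store_mb(var, value) \
            do { WRITE_ONCE(var, value); mb(); } while (0)

    /* after: smp_mb() already degrades to barrier() when !CONFIG_SMP,
     * so the macro scales down with the rest of the smp_* family */
    #define smp_store_mb(var, value) \
            do { WRITE_ONCE(var, value); smp_mb(); } while (0)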
2015 Dec 20
2
[Xen-devel] new barrier type for paravirt (was Re: [PATCH] virtio_ring: use smp_store_mb)
On Sun, Dec 20, 2015 at 05:07:19PM +0000, Andrew Cooper wrote:
>
> Very much +1 for fixing this.
>
> Those names would be fine, but they do add yet another set of options in
> an already-complicated area.
>
> An alternative might be to have the regular smp_{w,r,}mb() not revert
> back to nops if CONFIG_PARAVIRT, or perhaps if pvops have detected a
> non-native
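The alternative floated above would keep the plain smp_*() barriers real whenever the kernel might share memory with another agent, instead of adding new names. Roughly (a sketch of that option, which the series ultimately did not take):

    /* keep smp_wmb() a real fence if the kernel may run under a
     * hypervisor, even when built !CONFIG_SMP */
    #if defined(CONFIG_SMP) || defined(CONFIG_PARAVIRT)
    #define smp_wmb()  __smp_wmb()
    #else
    #define smp_wmb()  barrier()
    #endif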
2015 Dec 31
54
[PATCH v2 00/34] arch: barrier cleanup + barriers for virt
Changes since v1:
- replaced my asm-generic patch with an equivalent patch already in tip
- add wrappers with virt_ prefix for better code annotation, as suggested by David Miller
- dropped XXX in patch names as this makes vger choke, Cc all relevant mailing lists on all patches (not personal email, as the list becomes too long then)

I parked this in vhost tree for now, but the
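The virt_ wrappers mentioned in the second bullet exist because a UP guest can still race against the host: its smp_*() barriers collapse to compiler barriers, which is not enough for, e.g., a virtio ring shared with the hypervisor. Per the series' description, the mapping is essentially:

    /* virt_* barriers never collapse to nops: the other side of the
     * ring (the hypervisor) may run on another CPU even if the guest
     * kernel itself is built UP */
    #define virt_mb()   __smp_mb()
    #define virt_rmb()  __smp_rmb()
    #define virt_wmb()  __smp_wmb()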
2016 Jan 10
48
[PATCH v3 00/41] arch: barrier cleanup + barriers for virt
Changes since v2:
- extended checkpatch tests for barriers, and added patches teaching it to warn about incorrect usage of barriers (__smp_xxx barriers are for use by asm-generic code only), should help prevent misuse by arch code, to address comments by Russell King
- patched more instances of xen to use virt_ barriers as suggested by Stefano Stabellini
- implemented a 2 byte xchg on sh
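Per the checkpatch rule in the first bullet, the __smp_xxx forms are reserved for asm-generic; arch and driver code should express intent through the public names. Illustratively:

    __smp_wmb();  /* wrong outside asm-generic: checkpatch (with these
                     patches applied) warns about this */
    smp_wmb();    /* right: ordering against other CPUs in this kernel */
    virt_wmb();   /* right: ordering against the host/hypervisor */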
2015 Dec 30
46
[PATCH 00/34] arch: barrier cleanup + __smp_XXX barriers for virt
This is really trying to cleanup some virt code, as suggested by Peter, who said

> You could of course go fix that instead of mutilating things into
> sort-of functional state.

This work is needed for virtio, so it's probably easiest to merge it through my tree - is this fine by everyone? Arnd, if you agree, could you ack this please?

Note to arch maintainers: please don't