search for: __ptr_ring_consume

Displaying 20 results from an estimated 26 matches for "__ptr_ring_consume".

2017 Dec 05
7
[PATCH] ptr_ring: add barriers
...+ * points to a valid data. */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) r->producer = 0; @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) if (ptr) __ptr_ring_discard_one(r); + /* Make sure anyone accessing data through th...
2017 Dec 05
7
[PATCH] ptr_ring: add barriers
...+ * points to a valid data. */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) r->producer = 0; @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) if (ptr) __ptr_ring_discard_one(r); + /* Make sure anyone accessing data through th...
2017 Mar 22
1
[PATCH net-next 1/8] ptr_ring: introduce batch dequeuing
...+++++++++++++++++++++++++++++++ > 1 file changed, 65 insertions(+) > > diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h > index 6c70444..4771ded 100644 > --- a/include/linux/ptr_ring.h > +++ b/include/linux/ptr_ring.h > @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > return ptr; > } > > +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, > + void **array, int n) > +{ > + void *ptr; > + int i = 0; > + > + while (i < n) { > + ptr = __ptr_ring_consume(r); > + if (!ptr) >...
2017 Mar 22
1
[PATCH net-next 1/8] ptr_ring: introduce batch dequeuing
...+++++++++++++++++++++++++++++++ > 1 file changed, 65 insertions(+) > > diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h > index 6c70444..4771ded 100644 > --- a/include/linux/ptr_ring.h > +++ b/include/linux/ptr_ring.h > @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > return ptr; > } > > +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, > + void **array, int n) > +{ > + void *ptr; > + int i = 0; > + > + while (i < n) { > + ptr = __ptr_ring_consume(r); > + if (!ptr) >...
2017 Dec 06
1
[PATCH] ptr_ring: add barriers
...t __ptr_ring_produce(struct ptr_ring *r, void *ptr) > > { > > if (unlikely(!r->size) || r->queue[r->producer]) > > return -ENOSPC; > > + /* Make sure the pointer we are storing points to a valid data. */ > > + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ > > + smp_wmb(); > > + > > r->queue[r->producer++] = ptr; > > if (unlikely(r->producer >= r->size)) > > r->producer = 0; > > @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > > if (ptr) >...
2017 Dec 06
1
[PATCH] ptr_ring: add barriers
...t __ptr_ring_produce(struct ptr_ring *r, void *ptr) > > { > > if (unlikely(!r->size) || r->queue[r->producer]) > > return -ENOSPC; > > + /* Make sure the pointer we are storing points to a valid data. */ > > + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ > > + smp_wmb(); > > + > > r->queue[r->producer++] = ptr; > > if (unlikely(r->producer >= r->size)) > > r->producer = 0; > > @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > > if (ptr) >...
2017 Mar 21
1
[PATCH net-next 1/8] ptr_ring: introduce batch dequeuing
...++++++++++++++++++++++++++++++++ > 1 file changed, 65 insertions(+) > > diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h > index 6c70444..4771ded 100644 > --- a/include/linux/ptr_ring.h > +++ b/include/linux/ptr_ring.h > @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > return ptr; > } > > +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, > + void **array, int n) > +{ > + void *ptr; > + int i = 0; > + > + while (i < n) { Hm, why not *for*? > + ptr = __ptr_ring_consume(r);...
2017 Mar 21
1
[PATCH net-next 1/8] ptr_ring: introduce batch dequeuing
...++++++++++++++++++++++++++++++++ > 1 file changed, 65 insertions(+) > > diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h > index 6c70444..4771ded 100644 > --- a/include/linux/ptr_ring.h > +++ b/include/linux/ptr_ring.h > @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > return ptr; > } > > +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, > + void **array, int n) > +{ > + void *ptr; > + int i = 0; > + > + while (i < n) { Hm, why not *for*? > + ptr = __ptr_ring_consume(r);...
2017 Dec 06
2
[PATCH] ptr_ring: Add barriers to fix NULL-pointer exception
.../ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -106,6 +106,12 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; + /* + * This barrier is necessary in order to prevent race condition with + * with __ptr_ring_consume(). By this we make sure all the prior + * writes to *ptr elements are updated. + */ + wmb(); r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) @@ -275,6 +281,13 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) if (ptr) __ptr_ring_discard_...
2017 Dec 06
2
[PATCH] ptr_ring: Add barriers to fix NULL-pointer exception
.../ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -106,6 +106,12 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; + /* + * This barrier is necessary in order to prevent race condition with + * with __ptr_ring_consume(). By this we make sure all the prior + * writes to *ptr elements are updated. + */ + wmb(); r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) @@ -275,6 +281,13 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) if (ptr) __ptr_ring_discard_...
2017 Mar 21
0
[PATCH net-next 1/8] ptr_ring: introduce batch dequeuing
...x/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6c70444..4771ded 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i = 0; + + while (i < n) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i++] = ptr; + } + + return i; +} + /* * No...
2017 Dec 06
0
[PATCH] ptr_ring: add barriers
...static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) > { > if (unlikely(!r->size) || r->queue[r->producer]) > return -ENOSPC; > > + /* Make sure the pointer we are storing points to a valid data. */ > + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ > + smp_wmb(); > + > r->queue[r->producer++] = ptr; > if (unlikely(r->producer >= r->size)) > r->producer = 0; > @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > if (ptr) > __ptr_ring_discard_one(r); >...
2017 Dec 06
1
[PATCH] ptr_ring: Add barriers to fix NULL-pointer exception
...inux/ptr_ring.h > @@ -106,6 +106,12 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) > { > if (unlikely(!r->size) || r->queue[r->producer]) > return -ENOSPC; > + /* > + * This barrier is necessary in order to prevent race condition with > + * with __ptr_ring_consume(). By this we make sure all the prior > + * writes to *ptr elements are updated. > + */ > + wmb(); > > r->queue[r->producer++] = ptr; > if (unlikely(r->producer >= r->size)) > @@ -275,6 +281,13 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > if...
2017 Dec 06
1
[PATCH] ptr_ring: Add barriers to fix NULL-pointer exception
...inux/ptr_ring.h > @@ -106,6 +106,12 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) > { > if (unlikely(!r->size) || r->queue[r->producer]) > return -ENOSPC; > + /* > + * This barrier is necessary in order to prevent race condition with > + * with __ptr_ring_consume(). By this we make sure all the prior > + * writes to *ptr elements are updated. > + */ > + wmb(); > > r->queue[r->producer++] = ptr; > if (unlikely(r->producer >= r->size)) > @@ -275,6 +281,13 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > if...
2017 Dec 06
0
[PATCH] ptr_ring: add barriers
...static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) > { > if (unlikely(!r->size) || r->queue[r->producer]) > return -ENOSPC; > > + /* Make sure the pointer we are storing points to a valid data. */ > + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ > + smp_wmb(); > + > r->queue[r->producer++] = ptr; > if (unlikely(r->producer >= r->size)) > r->producer = 0; > @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > if (ptr) > __ptr_ring_discard_one(r); >...
2017 Dec 06
0
[PATCH] ptr_ring: add barriers
...t __ptr_ring_produce(struct ptr_ring *r, void *ptr) > > { > > if (unlikely(!r->size) || r->queue[r->producer]) > > return -ENOSPC; > > + /* Make sure the pointer we are storing points to a valid data. */ > > + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ > > + smp_wmb(); > > + > > r->queue[r->producer++] = ptr; > > if (unlikely(r->producer >= r->size)) > > r->producer = 0; > > @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) > > if (ptr) >...
2017 Mar 21
12
[PATCH net-next 0/8] vhost-net rx batching
Hi all: This series tries to implement rx batching for vhost-net. This is done by batching the dequeuing from the skb_array which was exported by the underlying socket and passing the skb back through msg_control to finish userspace copying. Tests show at most a 19% improvement in rx pps. Please review. Thanks Jason Wang (8): ptr_ring: introduce batch dequeuing skb_array: introduce batch dequeuing
2017 Mar 21
12
[PATCH net-next 0/8] vhost-net rx batching
Hi all: This series tries to implement rx batching for vhost-net. This is done by batching the dequeuing from the skb_array which was exported by the underlying socket and passing the skb back through msg_control to finish userspace copying. Tests show at most a 19% improvement in rx pps. Please review. Thanks Jason Wang (8): ptr_ring: introduce batch dequeuing skb_array: introduce batch dequeuing
2017 Dec 06
0
[PATCH] ptr_ring: Add barriers to fix NULL-pointer exception
..._ring.h > @@ -106,6 +106,12 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) > { > if (unlikely(!r->size) || r->queue[r->producer]) > return -ENOSPC; > + /* > + * This barrier is necessary in order to prevent race condition with > + * with __ptr_ring_consume(). By this we make sure all the prior > + * writes to *ptr elements are updated. > + */ > + wmb(); > > r->queue[r->producer++] = ptr; > if (unlikely(r->producer >= r->size)) > @@ -275,6 +281,13 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r...
2017 Dec 06
0
[PATCH] ptr_ring: Add barriers to fix NULL-pointer exception
On Wed, Dec 06, 2017 at 02:08:54PM +0000, Cherian, George wrote: > > @@ -275,6 +281,13 @@ static inline void *__ptr_ring_consume(struct ptr_ring > *r) > > if (ptr) > > __ptr_ring_discard_one(r); > > > > + /* > > + * This barrier is necessary in order to prevent race condition with > > + * with __ptr_ring_produce(). Make sure all the elements of ptr is > > + * in sync with the earl...