search for: more_used_split

Displaying 20 results from an estimated 41 matches for "more_used_split".

2018 Apr 13
0
[RFC v2] virtio: support packed ring
On Sun, Apr 01, 2018 at 10:12:16PM +0800, Tiwei Bie wrote: > +static inline bool more_used(const struct vring_virtqueue *vq) > +{ > + return vq->packed ? more_used_packed(vq) : more_used_split(vq); > +} > + > +void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, unsigned int *len, > + void **ctx) > +{ > + struct vring_virtqueue *vq = to_vvq(_vq); > + void *ret; > + unsigned int i; > + u16 last_used; > + > + START_USE(vq); > + > + if (unlike...
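The dispatch quoted above picks between two "any completed buffers?" tests: more_used_packed() for the packed ring and more_used_split() for the classic split ring. On the split ring the test is just an index comparison between the driver's last consumed used index and the index the device publishes in the used ring. A minimal standalone model of that check, using toy types rather than the kernel's vring structures (the toy_* names are illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* Simplified model of more_used_split(): the device bumps its used index
 * every time it completes a buffer; the driver has drained the ring once
 * its own last_used_idx catches up. Both are free-running u16 counters. */
struct toy_split_vq {
	uint16_t last_used_idx;   /* last used entry the driver consumed */
	uint16_t device_used_idx; /* snapshot of the device's used->idx */
};

static bool toy_more_used_split(const struct toy_split_vq *vq)
{
	return vq->last_used_idx != vq->device_used_idx;
}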
2023 Mar 07
3
[PATCH 0/3] virtio_ring: Clean up code for virtio ring and pci
This patch series performs a cleanup of the code in virtio_ring and virtio_pci, modifying it to conform with the Linux kernel coding style guidance [1]. The modifications make the code easier to read and understand. This small series does a few short cleanups in the code. Patch-1 Remove the unnecessary zero check on num, which power_of_2 already performs. Patch-2 Avoid using inline for small functions.
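The Patch-1 item above relies on the usual power-of-two helper already rejecting zero, so a separate "num == 0" test in front of it is redundant. A small illustration under that assumption (the helper below mirrors the common definition; it is not the kernel header itself):

#include <stdbool.h>

/* Same shape as the usual is_power_of_2() helper: zero already fails the
 * n != 0 clause, so an extra zero check before calling it buys nothing. */
static bool toy_is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* toy_is_power_of_2(0) -> false, toy_is_power_of_2(1) -> true,
 * toy_is_power_of_2(6) -> false, toy_is_power_of_2(8) -> true */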
2018 Apr 23
2
[RFC v2] virtio: support packed ring
...kfree(desc); > > + vq->desc_state[head].indir_desc = NULL; > > + } else if (ctx) { > > + *ctx = vq->desc_state[head].indir_desc; > > + } > > + > > +out: > > + return vq->desc_state[head].num; > > +} > > + > > +static inline bool more_used_split(const struct vring_virtqueue *vq) > > { > > return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); > > } > > +static inline bool more_used_packed(const struct vring_virtqueue *vq) > > +{ > > + u16 last_used, flags; >...
2023 Mar 15
4
[PATCH v2 0/3] virtio_ring: Clean up code for virtio ring and pci
This patch series performs a cleanup of the code in virtio_ring and virtio_pci, modifying it to conform with the Linux kernel coding style guidance [1]. The modifications make the code easier to read and understand. This small series does a few short cleanups in the code. Patch-1 Allow non-power-of-2 sizes for packed virtqueues. Patch-2 Avoid using inline for small functions. Patch-3 Use const to
2023 Mar 10
4
[PATCH v2 0/3] virtio_ring: Clean up code for virtio ring and pci
This patch series performs a cleanup of the code in virtio_ring and virtio_pci, modifying it to conform with the Linux kernel coding style guidance [1]. The modifications make the code easier to read and understand. This small series does a few short cleanups in the code. Patch-1 Allow non-power-of-2 sizes for virtqueues. Patch-2 Avoid using inline for small functions. Patch-3 Use const to annotate
2018 Apr 23
0
[RFC v2] virtio: support packed ring
...nmap_one_packed(vq, &desc[j]); > + > + kfree(desc); > + vq->desc_state[head].indir_desc = NULL; > + } else if (ctx) { > + *ctx = vq->desc_state[head].indir_desc; > + } > + > +out: > + return vq->desc_state[head].num; > +} > + > +static inline bool more_used_split(const struct vring_virtqueue *vq) > { > return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); > } > > +static inline bool more_used_packed(const struct vring_virtqueue *vq) > +{ > + u16 last_used, flags; > + bool avail, used; >...
2023 Jun 22
1
[PATCH vhost v10 05/10] virtio_ring: split-detach: support return dma info to driver
...d detach_buf_split(struct vring_virtqueue *vq, unsigned int head, > > kfree(indir_desc); > vq->split.desc_state[head].indir_desc = NULL; > - } else if (ctx) { > - *ctx = vq->split.desc_state[head].indir_desc; > } > } > > @@ -812,7 +897,8 @@ static bool more_used_split(const struct vring_virtqueue *vq) > > static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, > unsigned int *len, > - void **ctx) > + void **ctx, > + struct virtqueue_detach_cursor *cursor) > { > struct vring_virtqueue *vq = to_vvq(_vq);...
2018 Apr 24
3
[RFC v2] virtio: support packed ring
...} else if (ctx) { > > > > + *ctx = vq->desc_state[head].indir_desc; > > > > + } > > > > + > > > > +out: > > > > + return vq->desc_state[head].num; > > > > +} > > > > + > > > > +static inline bool more_used_split(const struct vring_virtqueue *vq) > > > > { > > > > return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); > > > > } > > > > +static inline bool more_used_packed(const struct vring_virtqueue *vq) > >...
2018 May 22
0
[RFC v5 2/5] virtio_ring: support creating packed ring
...(struct vring_desc)); + + for (j = 0; j < len / sizeof(struct vring_desc); j++) + vring_unmap_one_split(vq, &indir_desc[j]); + + kfree(indir_desc); + vq->desc_state[head].indir_desc = NULL; + } else if (ctx) { + *ctx = vq->desc_state[head].indir_desc; + } +} + +static inline bool more_used_split(const struct vring_virtqueue *vq) +{ + return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); +} + +static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, + unsigned int *len, + void **ctx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + void...
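The virtqueue_get_buf_ctx_split() excerpt above follows the usual split-ring consume pattern: return early when more_used_split() is false, otherwise read the used-ring element at last_used_idx to learn which descriptor id completed and how many bytes were written, hand back the token the driver stored when the buffer was added, and advance last_used_idx. A rough standalone model of that flow (toy types, no DMA handling or memory barriers, so only the control flow corresponds):

#include <stddef.h>
#include <stdint.h>

struct toy_used_elem { uint32_t id; uint32_t len; };

struct toy_split_ring {
	uint16_t num;                 /* ring size; a power of two in this model */
	uint16_t last_used_idx;       /* driver-side consumed counter */
	uint16_t device_used_idx;     /* what the device has published */
	struct toy_used_elem *used;   /* used ring entries written by the device */
	void **cookies;               /* per-descriptor tokens saved at add time */
};

/* Return the next completed buffer token, or NULL if nothing is pending. */
static void *toy_get_buf_split(struct toy_split_ring *vq, uint32_t *len)
{
	const struct toy_used_elem *e;

	if (vq->last_used_idx == vq->device_used_idx)
		return NULL;                              /* !more_used_split() */

	e = &vq->used[vq->last_used_idx & (vq->num - 1)];
	*len = e->len;
	vq->last_used_idx++;
	return vq->cookies[e->id];
}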
2018 Jul 11
0
[PATCH net-next v2 2/5] virtio_ring: support creating packed ring
...(struct vring_desc)); + + for (j = 0; j < len / sizeof(struct vring_desc); j++) + vring_unmap_one_split(vq, &indir_desc[j]); + + kfree(indir_desc); + vq->desc_state[head].indir_desc = NULL; + } else if (ctx) { + *ctx = vq->desc_state[head].indir_desc; + } +} + +static inline bool more_used_split(const struct vring_virtqueue *vq) +{ + return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); +} + +static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, + unsigned int *len, + void **ctx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + void...
2018 Apr 01
8
[RFC v2] virtio: support packed ring
...< len / sizeof(struct vring_packed_desc); j++) + vring_unmap_one_packed(vq, &desc[j]); + + kfree(desc); + vq->desc_state[head].indir_desc = NULL; + } else if (ctx) { + *ctx = vq->desc_state[head].indir_desc; + } + +out: + return vq->desc_state[head].num; +} + +static inline bool more_used_split(const struct vring_virtqueue *vq) { return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); } +static inline bool more_used_packed(const struct vring_virtqueue *vq) +{ + u16 last_used, flags; + bool avail, used; + + if (vq->vq.num_free == vq->vring_pac...
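The packed ring has no separate used index, which is why the excerpt above starts reading the flags of the descriptor at last_used_idx. In the driver that was eventually merged, the per-descriptor test reduces to: the slot is used when its AVAIL and USED flag bits are equal to each other and match the driver's current used wrap counter. A hedged standalone model of that test (the bit positions follow the virtio 1.1 packed-ring layout; everything else is simplified):

#include <stdbool.h>
#include <stdint.h>

#define TOY_DESC_F_AVAIL (1u << 7)   /* VIRTQ_DESC_F_AVAIL */
#define TOY_DESC_F_USED  (1u << 15)  /* VIRTQ_DESC_F_USED */

/* A packed-ring slot counts as used, from the driver's point of view, once
 * the device has set AVAIL and USED to the same value and that value equals
 * the driver's used wrap counter. */
static bool toy_is_used_desc_packed(uint16_t flags, bool used_wrap_counter)
{
	bool avail = !!(flags & TOY_DESC_F_AVAIL);
	bool used  = !!(flags & TOY_DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}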
2018 Apr 24
0
[RFC v2] virtio: support packed ring
...q->desc_state[head].indir_desc = NULL; >>> + } else if (ctx) { >>> + *ctx = vq->desc_state[head].indir_desc; >>> + } >>> + >>> +out: >>> + return vq->desc_state[head].num; >>> +} >>> + >>> +static inline bool more_used_split(const struct vring_virtqueue *vq) >>> { >>> return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); >>> } >>> +static inline bool more_used_packed(const struct vring_virtqueue *vq) >>> +{ >>> + u16 la...
2018 Apr 24
0
[RFC v2] virtio: support packed ring
...> + *ctx = vq->desc_state[head].indir_desc; > > > > > + } > > > > > + > > > > > +out: > > > > > + return vq->desc_state[head].num; > > > > > +} > > > > > + > > > > > +static inline bool more_used_split(const struct vring_virtqueue *vq) > > > > > { > > > > > return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); > > > > > } > > > > > +static inline bool more_used_packed(const struct vring_virt...
2018 May 29
2
[RFC v5 2/5] virtio_ring: support creating packed ring
.../ sizeof(struct vring_desc); j++) > + vring_unmap_one_split(vq, &indir_desc[j]); > + > + kfree(indir_desc); > + vq->desc_state[head].indir_desc = NULL; > + } else if (ctx) { > + *ctx = vq->desc_state[head].indir_desc; > + } > +} > + > +static inline bool more_used_split(const struct vring_virtqueue *vq) > +{ > + return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); > +} > + > +static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, > + unsigned int *len, > + void **ctx) > +{ > + stru...
2018 Feb 23
0
[PATCH RFC 2/2] virtio_ring: support packed ring
...zeof(struct vring_packed_desc)); + + for (j = 0; j < len / sizeof(struct vring_packed_desc); j++) + vring_unmap_one(vq, &desc[j]); + + kfree(desc); + vq->desc_state[head].indir_desc = NULL; + } else if (ctx) { + *ctx = vq->desc_state[head].indir_desc; + } +} + +static inline bool more_used_split(const struct vring_virtqueue *vq) { return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); } -/** - * virtqueue_get_buf - get the next used buffer - * @vq: the struct virtqueue we're talking about. - * @len: the length written into the buffer - * - * If...
2018 May 22
9
[RFC v5 0/5] virtio: support packed ring
Hello everyone, This RFC implements packed ring support in the virtio driver. Some simple functional tests have been done with Jason's packed ring implementation in vhost (RFC v4): https://lkml.org/lkml/2018/5/16/501 Both ping and netperf worked as expected w/ EVENT_IDX disabled. Ping worked as expected w/ EVENT_IDX enabled, but netperf didn't (a hack has been added in the driver to