Displaying 20 results from an estimated 57 matches for "more_used_packed".
2018 May 16
0
[RFC v4 3/5] virtio_ring: add packed ring support
...for (j = 0; j < len / sizeof(struct vring_packed_desc); j++)
+ vring_unmap_one_packed(vq, &desc[j]);
+
+ kfree(desc);
+ vq->desc_state[id].indir_desc = NULL;
+ } else if (ctx) {
+ *ctx = vq->desc_state[id].indir_desc;
+ }
+
+out:
+ free_id_packed(vq, id);
}
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
- return false;
+ u16 last_used, flags;
+ bool avail, used;
+
+ if (vq->vq.num_free == vq->vring_packed.num)
+ return false;
+
+ last_used = vq->last_used_idx;
+ flags = virtio16_to_cpu(vq->vq.vdev,
+ vq->vring_packed.desc[last_used].flags);
+...
2018 May 29
2
[RFC v5 3/5] virtio_ring: add packed ring support
...truct vring_packed_desc); i++)
> + vring_unmap_desc_packed(vq, &desc[i]);
> +
> + kfree(desc);
> + vq->desc_state_packed[id].indir_desc = NULL;
> + } else if (ctx) {
> + *ctx = vq->desc_state_packed[id].indir_desc;
> + }
> }
>
> static inline bool more_used_packed(const struct vring_virtqueue *vq)
> {
> - return false;
> + u16 last_used, flags;
> + u8 avail, used;
> +
> + last_used = vq->last_used_idx;
> + flags = virtio16_to_cpu(vq->vq.vdev,
> + vq->vring_packed.desc[last_used].flags);
> + avail = !!(flags & VRIN...
2018 May 29
2
[RFC v5 3/5] virtio_ring: add packed ring support
...truct vring_packed_desc); i++)
> + vring_unmap_desc_packed(vq, &desc[i]);
> +
> + kfree(desc);
> + vq->desc_state_packed[id].indir_desc = NULL;
> + } else if (ctx) {
> + *ctx = vq->desc_state_packed[id].indir_desc;
> + }
> }
>
> static inline bool more_used_packed(const struct vring_virtqueue *vq)
> {
> - return false;
> + u16 last_used, flags;
> + u8 avail, used;
> +
> + last_used = vq->last_used_idx;
> + flags = virtio16_to_cpu(vq->vq.vdev,
> + vq->vring_packed.desc[last_used].flags);
> + avail = !!(flags & VRIN...
2018 Jun 07
1
[RFC v6 4/5] virtio_ring: add event idx support in packed ring
...ts first before re-checking
> - * for more used buffers. */
> - virtio_mb(vq->weak_barriers);
> }
>
> + /* We need to update event suppression structure first
> + * before re-checking for more used buffers. */
> + virtio_mb(vq->weak_barriers);
> +
> if (more_used_packed(vq)) {
> END_USE(vq);
> return false;
I think what we need to do is make sure the descriptor used_idx is used?
Otherwise we may stop and restart the qdisc too frequently?
Thanks
> --
2018 May 16
2
[RFC v4 3/5] virtio_ring: add packed ring support
...; + vring_unmap_one_packed(vq, &desc[j]);
> +
> + kfree(desc);
> + vq->desc_state[id].indir_desc = NULL;
> + } else if (ctx) {
> + *ctx = vq->desc_state[id].indir_desc;
> + }
> +
> +out:
> + free_id_packed(vq, id);
> }
>
> static inline bool more_used_packed(const struct vring_virtqueue *vq)
> {
> - return false;
> + u16 last_used, flags;
> + bool avail, used;
> +
> + if (vq->vq.num_free == vq->vring_packed.num)
> + return false;
> +
> + last_used = vq->last_used_idx;
> + flags = virtio16_to_cpu(vq->vq.vdev,...
2018 May 16
2
[RFC v4 3/5] virtio_ring: add packed ring support
...; + vring_unmap_one_packed(vq, &desc[j]);
> +
> + kfree(desc);
> + vq->desc_state[id].indir_desc = NULL;
> + } else if (ctx) {
> + *ctx = vq->desc_state[id].indir_desc;
> + }
> +
> +out:
> + free_id_packed(vq, id);
> }
>
> static inline bool more_used_packed(const struct vring_virtqueue *vq)
> {
> - return false;
> + u16 last_used, flags;
> + bool avail, used;
> +
> + if (vq->vq.num_free == vq->vring_packed.num)
> + return false;
> +
> + last_used = vq->last_used_idx;
> + flags = virtio16_to_cpu(vq->vq.vdev,...
2018 Apr 25
0
[RFC v3 3/5] virtio_ring: add packed ring support
...g_packed.desc[head].len);
+
+ for (j = 0; j < len / sizeof(struct vring_packed_desc); j++)
+ vring_unmap_one_packed(vq, &desc[j]);
+
+ kfree(desc);
+ vq->desc_state[head].indir_desc = NULL;
+ } else if (ctx) {
+ *ctx = vq->desc_state[head].indir_desc;
+ }
}
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
- return false;
+ u16 last_used, flags;
+ bool avail, used;
+
+ if (vq->vq.num_free == vq->vring_packed.num)
+ return false;
+
+ last_used = vq->last_used_idx;
+ flags = virtio16_to_cpu(vq->vq.vdev,
+ vq->vring_packed.desc[last_used].flags);
+...
2018 May 22
0
[RFC v5 3/5] virtio_ring: add packed ring support
..._packed[id].len;
+ for (i = 0; i < len / sizeof(struct vring_packed_desc); i++)
+ vring_unmap_desc_packed(vq, &desc[i]);
+
+ kfree(desc);
+ vq->desc_state_packed[id].indir_desc = NULL;
+ } else if (ctx) {
+ *ctx = vq->desc_state_packed[id].indir_desc;
+ }
}
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
- return false;
+ u16 last_used, flags;
+ u8 avail, used;
+
+ last_used = vq->last_used_idx;
+ flags = virtio16_to_cpu(vq->vq.vdev,
+ vq->vring_packed.desc[last_used].flags);
+ avail = !!(flags & VRING_DESC_F_AVAIL(1));
+ used = !!(flags & VR...
2018 Jul 11
0
[PATCH net-next v2 3/5] virtio_ring: add packed ring support
...{
+ u16 flags;
+ bool avail, used;
+
+ flags = virtio16_to_cpu(vq->vq.vdev,
+ vq->vring_packed.desc[idx].flags);
+ avail = !!(flags & VRING_DESC_F_AVAIL);
+ used = !!(flags & VRING_DESC_F_USED);
+
+ return avail == used && used == used_wrap_counter;
}
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
- return false;
+ return is_used_desc_packed(vq, vq->last_used_idx,
+ vq->used_wrap_counter);
}
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
unsigned int *len,
void **ctx)
{
- return NULL;
+ struct vring_virtqueue...
2018 Sep 07
1
[PATCH net-next v2 3/5] virtio_ring: add packed ring support
..._DESC_F_USED);
> +
> + return avail == used && used == used_wrap_counter;
I think that you don't need to look at the avail flag to detect a used
descriptor. The reason the device writes it is to avoid confusing the
*device* the next time the descriptor wraps.
> }
>
> static inline bool more_used_packed(const struct vring_virtqueue *vq)
> {
> - return false;
> + return is_used_desc_packed(vq, vq->last_used_idx,
> + vq->used_wrap_counter);
> }
>
> static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
> unsigned int *len,
> void **ct...
2018 May 16
8
[RFC v4 0/5] virtio: support packed ring
Hello everyone,
This RFC implements packed ring support in virtio driver.
Some simple functional tests have been done with Jason's
packed ring implementation in vhost:
https://lkml.org/lkml/2018/4/23/12
Both of ping and netperf worked as expected (with EVENT_IDX
disabled).
TODO:
- Refinements (for code and commit log);
- More tests;
- Bug fixes;
RFC v3 -> RFC v4:
- Make ID allocation
2018 Sep 07
1
[PATCH net-next v2 4/5] virtio_ring: add event idx support in packed ring
...T_F_ENABLE;
> vq->vring_packed.driver->flags = cpu_to_virtio16(_vq->vdev,
> vq->event_flags_shadow);
> - /* We need to enable interrupts first before re-checking
> - * for more used buffers. */
> - virtio_mb(vq->weak_barriers);
> }
>
> - if (more_used_packed(vq)) {
> + /* We need to update event suppression structure first
> + * before re-checking for more used buffers. */
> + virtio_mb(vq->weak_barriers);
> +
mb is expensive. We should not do it if we changed nothing.
> + if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
>...
2023 Mar 07
3
[PATCH 0/3] virtio_ring: Clean up code for virtio ring and pci
This patch series performs a clean up of the code in virtio_ring and
virtio_pci, modifying it to conform with the Linux kernel coding style
guidance [1]. The modifications ensure the code is easy to read and
understand. This small series does a few short cleanups in the code.
Patch-1 Remove an unnecessary zero check on num, which is already performed in power_of_2.
Patch-2 Avoid using inline for small functions.
2018 Jun 05
6
[RFC v6 0/5] virtio: support packed ring
Hello everyone,
This RFC implements packed ring support in virtio driver.
Some functional tests have been done with Jason's
packed ring implementation in vhost (RFC v5):
https://lwn.net/Articles/755862/
Both of ping and netperf worked as expected.
TODO:
- Refinements (for code and commit log);
- More tests and bug fixes if any;
- Send the formal patch set;
RFC v5 -> RFC v6:
- Avoid
2018 Jul 11
15
[PATCH net-next v2 0/5] virtio: support packed ring
Hello everyone,
This patch set implements packed ring support in virtio driver.
Some functional tests have been done with Jason's
packed ring implementation in vhost:
https://lkml.org/lkml/2018/7/3/33
Both of ping and netperf worked as expected.
v1 -> v2:
- Use READ_ONCE() to read event off_wrap and flags together (Jason);
- Add comments related to ccw (Jason);
RFC (v6) -> v1:
-
2018 Jul 11
15
[PATCH net-next v2 0/5] virtio: support packed ring
Hello everyone,
This patch set implements packed ring support in virtio driver.
Some functional tests have been done with Jason's
packed ring implementation in vhost:
https://lkml.org/lkml/2018/7/3/33
Both of ping and netperf worked as expected.
v1 -> v2:
- Use READ_ONCE() to read event off_wrap and flags together (Jason);
- Add comments related to ccw (Jason);
RFC (v6) -> v1:
-
2018 Apr 23
2
[RFC v2] virtio: support packed ring
...urn vq->desc_state[head].num;
> > +}
> > +
> > +static inline bool more_used_split(const struct vring_virtqueue *vq)
> > {
> > return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
> > }
> > +static inline bool more_used_packed(const struct vring_virtqueue *vq)
> > +{
> > + u16 last_used, flags;
> > + bool avail, used;
> > +
> > + if (vq->vq.num_free == vq->vring_packed.num)
> > + return false;
> > +
> > + last_used = vq->last_used_idx;
> > + flags = virtio16...
2018 Apr 23
2
[RFC v2] virtio: support packed ring
...urn vq->desc_state[head].num;
> > +}
> > +
> > +static inline bool more_used_split(const struct vring_virtqueue *vq)
> > {
> > return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
> > }
> > +static inline bool more_used_packed(const struct vring_virtqueue *vq)
> > +{
> > + u16 last_used, flags;
> > + bool avail, used;
> > +
> > + if (vq->vq.num_free == vq->vring_packed.num)
> > + return false;
> > +
> > + last_used = vq->last_used_idx;
> > + flags = virtio16...
2018 Apr 13
0
[RFC v2] virtio: support packed ring
On Sun, Apr 01, 2018 at 10:12:16PM +0800, Tiwei Bie wrote:
> +static inline bool more_used(const struct vring_virtqueue *vq)
> +{
> + return vq->packed ? more_used_packed(vq) : more_used_split(vq);
> +}
> +
> +void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, unsigned int *len,
> + void **ctx)
> +{
> + struct vring_virtqueue *vq = to_vvq(_vq);
> + void *ret;
> + unsigned int i;
> + u16 last_used;
> +
> + START_USE(vq);
&g...
2018 Nov 07
2
[PATCH net-next v2 3/5] virtio_ring: add packed ring support
...lags = virtio16_to_cpu(vq->vq.vdev,
> + vq->vring_packed.desc[idx].flags);
> + avail = !!(flags & VRING_DESC_F_AVAIL);
> + used = !!(flags & VRING_DESC_F_USED);
> +
> + return avail == used && used == used_wrap_counter;
> }
>
> static inline bool more_used_packed(const struct vring_virtqueue *vq)
> {
> - return false;
> + return is_used_desc_packed(vq, vq->last_used_idx,
> + vq->used_wrap_counter);
> }
>
> static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
> unsigned int *len,
> void **ct...