search for: vring_unmap_one_split_indirect

Displaying 20 results from an estimated 29 matches for "vring_unmap_one_split_indirect".

2023 Mar 22
1
[PATCH vhost v4 04/11] virtio_ring: split: support premapped
...irtio_ring.c
@@ -70,6 +70,7 @@ struct vring_desc_state_split {
         void *data;                        /* Data for callback. */
         struct vring_desc *indir_desc;     /* Indirect descriptor, if any. */
+        bool dma_map_internal;             /* Do dma map internally. */
 };
 struct vring_desc_state_packed {
@@ -448,7 +449,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 }
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
-                                          unsigned int i)
+                                          unsigned int i, bool dma_map_internal)
 {
         struct vring_desc_extra *extra = vq->split.desc_extra;
         u16 flags;
@@ -465,6 +466,9 @@ static unsigned...
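The hunk above adds the bookkeeping for premapped buffers: vring_desc_state_split records whether the virtio core did the DMA mapping itself, and vring_unmap_one_split() is told the same via the new dma_map_internal argument. Below is a minimal userspace sketch of that idea, not the kernel code; the struct, fake_dma_unmap() and the sizes are illustrative stand-ins.

/* Minimal userspace model of the premapped idea: only unmap buffers that
 * the "core" mapped itself (dma_map_internal == true); buffers premapped
 * by the driver are left alone.  All names here are illustrative. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct desc_state_model {
        void *data;            /* Data for callback. */
        void *indir_desc;      /* Indirect descriptor, if any. */
        bool dma_map_internal; /* Did the core do the DMA map? */
};

/* Stand-in for dma_unmap_single()/dma_unmap_page(). */
static void fake_dma_unmap(uint64_t addr, uint32_t len)
{
        printf("unmapping addr=0x%" PRIx64 " len=%u\n", addr, len);
}

/* Mirrors the reworked unmap path: skip the unmap for premapped buffers. */
static void unmap_one_model(const struct desc_state_model *state,
                            uint64_t addr, uint32_t len)
{
        if (!state->dma_map_internal)
                return; /* the driver owns the mapping, nothing to do */
        fake_dma_unmap(addr, len);
}

int main(void)
{
        struct desc_state_model core_mapped = { .dma_map_internal = true };
        struct desc_state_model premapped   = { .dma_map_internal = false };

        unmap_one_model(&core_mapped, 0x1000, 64); /* prints an unmap */
        unmap_one_model(&premapped, 0x2000, 64);   /* silently skipped */
        return 0;
}

The point of the flag is simply that the unmap path becomes a no-op for buffers the driver mapped itself, which is what lets a driver hand the core addresses it already premapped.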
2023 Mar 21
1
[PATCH vhost v3 04/11] virtio_ring: split: support premapped
...>         struct vring_desc *indir_desc;     /* Indirect descriptor, if any. */
> +         bool map_inter;                    /* Do dma map internally. */

I prefer a full name. E.g. "dma_map_internal". Eschew abbreviation.

> };
>
> struct vring_desc_state_packed {
> @@ -448,7 +449,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> }
>
> static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> -                                          unsigned int i)
> +                                          unsigned int i, bool map_inter)
> {
>         struct vring_desc_extra *extra = vq->split.desc_extra;
>         u16 flags;
> ...
2023 Mar 07
2
[PATCH vhost v1 03/12] virtio_ring: split: introduce virtqueue_add_split_premapped()
...*indir_desc;     /* Indirect descriptor, if any. */
> +         bool dma_map;      /* Addr is mapped by virtio core or not. */

This will stress the cache, can we pack the boolean into indir_desc?

> };
>
> struct vring_desc_state_packed {
> @@ -440,7 +441,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> }
>
> static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> -                                          unsigned int i)
> +                                          unsigned int i, bool dma_map)
> {
>         struct vr...
2023 Mar 07
1
[PATCH vhost v1 03/12] virtio_ring: split: introduce virtqueue_add_split_premapped()
...think we cannot pack this into indir_desc. Because we may save ctx to indir_desc. We can save this info to vring_desc_extra.addr, null means not dma mapped by virtio core.

Thanks.

> > };
> >
> > struct vring_desc_state_packed {
> > @@ -440,7 +441,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> > }
> >
> > static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> > -                                          unsigned int i)
> > +                                          unsigned int i, bool dma_map)
> ...
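The reply above proposes dropping the extra bool and encoding the information in vring_desc_extra.addr instead, where a null value means the buffer was not DMA mapped by the virtio core. A rough userspace sketch of that encoding, assuming 0 can serve as the sentinel; the names and the sentinel choice are illustrative only, not kernel API.

/* Rough sketch of the "reuse desc_extra.addr as the flag" idea: a sentinel
 * address means the core did not map the buffer, so there is nothing to
 * unmap. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_NOT_CORE_MAPPED ((uint64_t)0) /* "null" per the mail above */

struct desc_extra_model {
        uint64_t addr; /* DMA address, or sentinel if driver premapped */
        uint32_t len;
};

static bool core_mapped(const struct desc_extra_model *extra)
{
        return extra->addr != ADDR_NOT_CORE_MAPPED;
}

static void unmap_extra_model(const struct desc_extra_model *extra)
{
        if (!core_mapped(extra))
                return; /* premapped by the driver: skip */
        printf("unmapping addr=0x%llx len=%u\n",
               (unsigned long long)extra->addr, extra->len);
}

int main(void)
{
        struct desc_extra_model mapped  = { .addr = 0x3000, .len = 128 };
        struct desc_extra_model skipped = { .addr = ADDR_NOT_CORE_MAPPED, .len = 128 };

        unmap_extra_model(&mapped);  /* prints an unmap */
        unmap_extra_model(&skipped); /* nothing to do */
        return 0;
}

Compared with the extra bool discussed earlier in the thread, this keeps vring_desc_state_split from growing, at the cost of overloading the meaning of the stored address.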
2023 Feb 20
1
[PATCH vhost 04/10] virtio_ring: split: introduce virtqueue_add_split_premapped()
...dir_desc;     /* Indirect descriptor, if any. */
> > +         bool premapped;
>
> Better with a comment.
>
> Not native speaker, but "dma_addr" might be better?
>
> > };
> >
> > struct vring_desc_state_packed {
> > @@ -440,7 +441,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> > }
> >
> > static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> > -                                          unsigned int i)
> > +                                          unsigned int i, bool premapped)
> ...
2023 May 17
12
[PATCH vhost v9 00/12] virtio core prepares for AF_XDP
## About DMA APIs

Now, virtio may not be able to work with the DMA APIs when the virtio features do not include VIRTIO_F_ACCESS_PLATFORM.

1. I tried to let the DMA APIs return the phy address provided by the virtio device. But the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the phy address from the virtio-net driver as the dma address. But the maintainers of xsk may want to use
2023 Mar 15
2
[PATCH v2 3/3] virtio_ring: Use const to annotate read-only pointer params
...device *vdev)
         return false;
 }
-size_t virtio_max_dma_size(struct virtio_device *vdev)
+size_t virtio_max_dma_size(const struct virtio_device *vdev)
 {
         size_t max_segment_size = SIZE_MAX;
@@ -423,7 +423,7 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
  */
 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
-                                           struct vring_desc *desc)
+                                           const struct vring_desc *desc)
 {
         u16 flags;
@@ -1183,7 +1183,7 @@ static u16 packed_last_used(u16 last_used_idx)
 }
 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
-                                     struct vring_desc...
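This patch is purely about const-correctness: pointer parameters that are only read gain a const qualifier, so the compiler documents and enforces the read-only contract. A tiny standalone illustration of the same pattern, with made-up types rather than the virtio code itself:

struct desc {
        unsigned short flags;
        unsigned long long addr;
};

/* Read-only access: the const qualifier documents intent and lets the
 * compiler reject accidental writes through the pointer. */
static unsigned short desc_flags(const struct desc *d)
{
        /* d->flags = 0;   <-- would now fail to compile */
        return d->flags;
}

int main(void)
{
        struct desc d = { .flags = 2, .addr = 0x1000 };
        return desc_flags(&d) == 2 ? 0 : 1;
}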
2023 Mar 21
1
[PATCH vhost v3 01/11] virtio_ring: split: separate dma codes
...VRING_DESC_F_WRITE,
@@ -679,22 +759,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
         return 0;
 unmap_release:
-        err_idx = i;
-
-        if (indirect)
-                i = 0;
-        else
-                i = head;
-
-        for (n = 0; n < total_sg; n++) {
-                if (i == err_idx)
-                        break;
-                if (indirect) {
-                        vring_unmap_one_split_indirect(vq, &desc[i]);
-                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
-                } else
-                        i = vring_unmap_one_split(vq, i);
-        }
+        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
         if (indirect)
                 kfree(desc);
--
2.32.0.3.g01195cf9f
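The hunk above collapses the open-coded error-path walk into a single virtqueue_unmap_sgs() call. The sketch below shows the shape of that refactor in plain C under simplifying assumptions: an array stands in for the scatterlists/descriptor chain, and every name is illustrative rather than the kernel's.

/* Sketch of the refactor's shape: the error path stops open-coding the
 * descriptor walk and instead calls one helper that undoes every mapping
 * made so far. */
#include <stdio.h>

struct mapping { unsigned long long addr; unsigned int len; int mapped; };

static void unmap_one(struct mapping *m)
{
        if (!m->mapped)
                return;
        printf("unmap addr=0x%llx len=%u\n", m->addr, m->len);
        m->mapped = 0;
}

/* Analogue of the new single call in the error path: undo everything that
 * was mapped before the failure. */
static void unmap_all(struct mapping *maps, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++)
                unmap_one(&maps[i]);
}

static int add_buffers(struct mapping *maps, unsigned int count,
                       unsigned int fail_at)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (i == fail_at)
                        goto unmap_release;
                maps[i].mapped = 1; /* pretend the DMA map succeeded */
        }
        return 0;

unmap_release:
        unmap_all(maps, i); /* one call instead of the old open-coded loop */
        return -1;
}

int main(void)
{
        struct mapping maps[4] = {
                { 0x1000, 64 }, { 0x2000, 64 }, { 0x3000, 64 }, { 0x4000, 64 },
        };
        return add_buffers(maps, 4, 2) == -1 ? 0 : 1;
}

As in the sketch, the benefit is that the cleanup logic lives in one reusable helper instead of being open-coded in the error path.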
2023 Mar 02
1
[PATCH vhost v1 02/12] virtio_ring: split: separate DMA codes
...,30 +742,6 @@ static inline int virtqueue_add_vring_split(struct vring_virtqueue *vq,
                 virtqueue_kick(_vq);
         return 0;
-
-unmap_release:
-        err_idx = i;
-
-        if (indirect)
-                i = 0;
-        else
-                i = head;
-
-        for (n = 0; n < total_sg; n++) {
-                if (i == err_idx)
-                        break;
-                if (indirect) {
-                        vring_unmap_one_split_indirect(vq, &desc[i]);
-                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
-                } else
-                        i = vring_unmap_one_split(vq, i);
-        }
-
-        if (indirect)
-                kfree(desc);
-
-        END_USE(vq);
-        return -ENOMEM;
 }
 static inline int virtqueue_add_split(struct virtqueue *_vq,
@@ -729,6 +767,10 @@ static inline int...
2023 Jul 10
10
[PATCH vhost v11 00/10] virtio core prepares for AF_XDP
## About DMA APIs

Now, virtio may not be able to work with the DMA APIs when the virtio features do not include VIRTIO_F_ACCESS_PLATFORM.

1. I tried to let the DMA APIs return the phy address provided by the virtio device. But the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the phy address from the virtio-net driver as the dma address. But the maintainers of xsk may want to use
2023 Mar 21
11
[PATCH vhost v3 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero copy feature of xsk (XDP socket) needs to be supported by the driver. The performance of zero copy is very good.

ENV: Qemu with vhost.

vhost cpu                    | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|-------------------|------------
xmit by sockperf: 90%        | 100%
2023 Mar 22
11
[PATCH vhost v4 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero copy feature of xsk (XDP socket) needs to be supported by the driver. The performance of zero copy is very good.

ENV: Qemu with vhost.

vhost cpu                    | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|-------------------|------------
xmit by sockperf: 90%        | 100%
2023 Mar 24
11
[PATCH vhost v5 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero copy feature of xsk (XDP socket) needs to be supported by the driver. The performance of zero copy is very good.

ENV: Qemu with vhost.

vhost cpu                    | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|-------------------|------------
xmit by sockperf: 90%        | 100%
2023 Mar 27
11
[PATCH vhost v6 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero copy feature of xsk (XDP socket) needs to be supported by the driver. The performance of zero copy is very good.

ENV: Qemu with vhost.

vhost cpu                    | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|-------------------|------------
xmit by sockperf: 90%        | 100%
2023 Aug 10
12
[PATCH vhost v13 00/12] virtio core prepares for AF_XDP
## About DMA APIs

Now, virtio may not be able to work with the DMA APIs when the virtio features do not include VIRTIO_F_ACCESS_PLATFORM.

1. I tried to let the DMA APIs return the phy address provided by the virtio device. But the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the phy address from the virtio-net driver as the dma address. But the maintainers of xsk may want to use
2023 Aug 10
12
[PATCH vhost v13 00/12] virtio core prepares for AF_XDP
## About DMA APIs

Now, virtio may not be able to work with the DMA APIs when the virtio features do not include VIRTIO_F_ACCESS_PLATFORM.

1. I tried to let the DMA APIs return the phy address provided by the virtio device. But the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the phy address from the virtio-net driver as the dma address. But the maintainers of xsk may want to use
2023 Feb 20
2
[PATCH vhost 01/10] virtio_ring: split: refactor virtqueue_add_split() for premapped
...> -                i = 0;
> -        else
> -                i = head;
> +        START_USE(vq);
>
> -        for (n = 0; n < total_sg; n++) {
> -                if (i == err_idx)
> -                        break;
> -                if (indirect) {
> -                        vring_unmap_one_split_indirect(vq, &desc[i]);
> -                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
> -                } else
> -                        i = vring_unmap_one_split(vq, i);
> -        }
> +        /* check vq state and try to alloc desc for indirect. */
> +        err = virtque...
2023 May 09
12
[PATCH vhost v8 00/12] virtio core prepares for AF_XDP
## About DMA APIs

Now, virtio may not be able to work with the DMA APIs when the virtio features do not include VIRTIO_F_ACCESS_PLATFORM.

1. I tried to let the DMA APIs return the phy address provided by the virtio device. But the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the phy address from the virtio-net driver as the dma address. But the maintainers of xsk may want to use
2023 Apr 25
12
[PATCH vhost v7 00/11] virtio core prepares for AF_XDP
## About DMA APIs

Now, virtio may not be able to work with the DMA APIs when the virtio features do not include VIRTIO_F_ACCESS_PLATFORM.

1. I tried to let the DMA APIs return the phy address provided by the virtio device. But the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the phy address from the virtio-net driver as the dma address. But the maintainers of xsk may want to use
2023 Mar 22
1
[PATCH vhost v4 01/11] virtio_ring: split: separate dma codes
...VRING_DESC_F_WRITE,
@@ -679,22 +759,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
         return 0;
 unmap_release:
-        err_idx = i;
-
-        if (indirect)
-                i = 0;
-        else
-                i = head;
-
-        for (n = 0; n < total_sg; n++) {
-                if (i == err_idx)
-                        break;
-                if (indirect) {
-                        vring_unmap_one_split_indirect(vq, &desc[i]);
-                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
-                } else
-                        i = vring_unmap_one_split(vq, i);
-        }
+        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
         if (indirect)
                 kfree(desc);
--
2.32.0.3.g01195cf9f