Displaying 18 results from an estimated 18 matches for "virtqueue_unmap_sgs".
2023 Mar 21 · 1 · [PATCH vhost v3 01/11] virtio_ring: split: separate dma codes
...r_t)sg_phys(sg);
+}
+
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
@@ -520,6 +528,80 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
         return next;
 }
+static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
+                                struct scatterlist *sgs[],
+                                unsigned int total_sg,
+                                unsigned int out_sgs,
+                                unsigned int in_sgs)
+{
+        struct scatterlist *sg;
+        unsigned int n;
+
+        if (!vq->use_dma_api)
+                return;
+
+        for (n = 0; n < out_sgs; n++) {
+                for (sg = sgs[n]; sg; sg = sg...
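The snippet above is cut off inside the new helper. Judging from the matching map loop shown in the next result, the complete unmap helper plausibly takes the shape sketched below; the dma_unmap_page()/vring_dma_dev() calls and the early return on an unset dma_address are assumptions about details the excerpts do not show.

static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
                                struct scatterlist *sgs[],
                                unsigned int total_sg,
                                unsigned int out_sgs,
                                unsigned int in_sgs)
{
        struct scatterlist *sg;
        unsigned int n;

        /* Nothing was mapped if the DMA API is not in use. */
        if (!vq->use_dma_api)
                return;

        /* Driver-to-device buffers were mapped DMA_TO_DEVICE. */
        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        if (!sg->dma_address)
                                return;

                        dma_unmap_page(vring_dma_dev(vq), sg->dma_address,
                                       sg->length, DMA_TO_DEVICE);
                }
        }

        /* Device-to-driver buffers were mapped DMA_FROM_DEVICE. */
        for (; n < out_sgs + in_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        if (!sg->dma_address)
                                return;

                        dma_unmap_page(vring_dma_dev(vq), sg->dma_address,
                                       sg->length, DMA_FROM_DEVICE);
                }
        }
}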
2023 Mar 02 · 1 · [PATCH vhost v1 02/12] virtio_ring: split: separate DMA codes
...+                }
+        }
+
+        for (; n < (out_sgs + in_sgs); n++) {
+                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+                        dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+
+                        if (vring_mapping_error(vq, addr))
+                                return -ENOMEM;
+
+                        sg->dma_address = addr;
+                }
+        }
+
+        return 0;
+}
+
+static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
+                                struct scatterlist *sgs[],
+                                unsigned int total_sg,
+                                unsigned int out_sgs,
+                                unsigned int in_sgs)
+{
+        struct scatterlist *sg;
+        unsigned int n;
+
+        if (!vq->use_dma_api)
+                return;
+
+        for (n = 0; n < out_sgs; n++) {
+                for (sg = sgs[n]; sg; sg = sg...
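The "(dma_addr_t)sg_phys(sg)" fragment at the top of the first result and the vring_map_one_sg() call in the loop above suggest a per-sg helper along the following lines. This is a sketch: the sg_phys() fast path is visible in the excerpts, while the dma_map_page() branch is an assumption about how the DMA-API case is handled.

static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
                                   struct scatterlist *sg,
                                   enum dma_data_direction direction)
{
        /* Without the DMA API the device is handed the guest-physical address. */
        if (!vq->use_dma_api)
                return (dma_addr_t)sg_phys(sg);

        /* Otherwise map the page through the DMA device backing the virtqueue. */
        return dma_map_page(vring_dma_dev(vq), sg_page(sg),
                            sg->offset, sg->length, direction);
}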
2023 Mar 22 · 1 · [PATCH vhost v4 04/11] virtio_ring: split: support premapped
...ndir_desc = ctx;
+        vq->split.desc_state[head].dma_map_internal = dma_map_internal;

         /* Put entry in available array (but don't update avail->idx until they
          * do sync). */
@@ -759,7 +765,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
         return 0;

 unmap_release:
-        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+        if (dma_map_internal)
+                virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);

         if (indirect)
                 kfree(desc);
@@ -804,20 +811,22 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 {
         unsigned int i, j;
         __virtio16 nextflag...
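The dma_map_internal flag stored in desc_state above records whether the virtio core created the DMA mappings itself; only then does the error path call virtqueue_unmap_sgs(), and the detach path presumably applies the same rule. A minimal illustration of that decision is sketched below; vring_maybe_map_sgs() and the premapped parameter are hypothetical names used only for this illustration, not API from the patch.

static inline int vring_maybe_map_sgs(struct vring_virtqueue *vq,
                                      struct scatterlist *sgs[],
                                      unsigned int total_sg,
                                      unsigned int out_sgs,
                                      unsigned int in_sgs,
                                      bool premapped,
                                      bool *dma_map_internal)
{
        /* The driver already filled sg->dma_address: the core must not map
         * (or later unmap) these buffers. */
        if (premapped) {
                *dma_map_internal = false;
                return 0;
        }

        /* Otherwise the core owns the mapping and records that fact so the
         * error and detach paths know to undo it. */
        *dma_map_internal = true;
        return virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
}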
2023 Mar 21 · 1 · [PATCH vhost v3 04/11] virtio_ring: split: support premapped
...split.desc_state[head].map_inter = map_inter;
>
>         /* Put entry in available array (but don't update avail->idx until they
>          * do sync). */
> @@ -759,7 +765,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>         return 0;
>
> unmap_release:
> -        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> +        if (map_inter)
> +                virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
>
>         if (indirect)
>                 kfree(desc);
> @@ -804,20 +811,22 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> {
>         unsigned...
2023 Mar 22 · 1 · [PATCH vhost v4 01/11] virtio_ring: split: separate dma codes
...r_t)sg_phys(sg);
+}
+
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
@@ -520,6 +528,80 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
         return next;
 }
+static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
+                                struct scatterlist *sgs[],
+                                unsigned int total_sg,
+                                unsigned int out_sgs,
+                                unsigned int in_sgs)
+{
+        struct scatterlist *sg;
+        unsigned int n;
+
+        if (!vq->use_dma_api)
+                return;
+
+        for (n = 0; n < out_sgs; n++) {
+                for (sg = sgs[n]; sg; sg = sg...
2023 Apr 25 · 1 · [PATCH vhost v7 01/11] virtio_ring: split: separate dma codes
...r_t)sg_phys(sg);
+}
+
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
@@ -520,6 +528,80 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
         return next;
 }
+static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
+                                struct scatterlist *sgs[],
+                                unsigned int total_sg,
+                                unsigned int out_sgs,
+                                unsigned int in_sgs)
+{
+        struct scatterlist *sg;
+        unsigned int n;
+
+        if (!vq->use_dma_api)
+                return;
+
+        for (n = 0; n < out_sgs; n++) {
+                for (sg = sgs[n]; sg; sg = sg...
2023 Mar 07 · 2 · [PATCH vhost v1 03/12] virtio_ring: split: introduce virtqueue_add_split_premapped()
.../* Store token and indirect buffer state. */
>         vq->split.desc_state[head].data = data;
>         vq->split.desc_state[head].indir_desc = desc ? desc : ctx;
> +        vq->split.desc_state[head].dma_map = dma_map;
>
>         goto end;
>
> err:
> -        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> +        if (dma_map)
> +                virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
>
>         kfree(desc);
>
> @@ -828,20 +837,23 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> {
>...
2023 Mar 21 · 11 · [PATCH vhost v3 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero-copy
feature of xsk (XDP socket) needs to be supported by the driver, and the zero-copy
performance is very good.
ENV: QEMU with vhost.
vhost cpu | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 22 · 11 · [PATCH vhost v4 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero-copy
feature of xsk (XDP socket) needs to be supported by the driver, and the zero-copy
performance is very good.
ENV: QEMU with vhost.
vhost cpu | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 24 · 11 · [PATCH vhost v5 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero-copy
feature of xsk (XDP socket) needs to be supported by the driver, and the zero-copy
performance is very good.
ENV: QEMU with vhost.
vhost cpu | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 27 · 11 · [PATCH vhost v6 00/11] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero-copy
feature of xsk (XDP socket) needs to be supported by the driver, and the zero-copy
performance is very good.
ENV: QEMU with vhost.
vhost cpu | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Feb 20 · 2 · [PATCH vhost 01/10] virtio_ring: split: refactor virtqueue_add_split() for premapped
...OM_DEVICE);
> +
> +                        if (vring_mapping_error(vq, addr))
> +                                return -ENOMEM;
> +
> +                        sg->dma_address = addr;
> +                }
> +        }
> +
> +        return 0;
> +}
> +
> +static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
> +                                struct scatterlist *sgs[],
> +                                unsigned int total_sg,
> +                                unsigned int out_sgs,
> +                                unsigned int in_sgs)
> +{
> +        struct sc...
2023 Mar 07 · 1 · [PATCH vhost v1 03/12] virtio_ring: split: introduce virtqueue_add_split_premapped()
...state. */
> >         vq->split.desc_state[head].data = data;
> >         vq->split.desc_state[head].indir_desc = desc ? desc : ctx;
> > +        vq->split.desc_state[head].dma_map = dma_map;
> >
> >         goto end;
> >
> > err:
> > -        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> > +        if (dma_map)
> > +                virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
> >
> >         kfree(desc);
> >
> > @@ -828,20 +837,23 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned...
2023 May 09 · 12 · [PATCH vhost v8 00/12] virtio core prepares for AF_XDP
## About DMA APIs
Currently, virtio may not work with the DMA APIs when the virtio features do
not include VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let the DMA APIs return the physical address for the virtio
   device, but the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the physical address from the
   virtio-net driver as the DMA address, but the maintainers of xsk may want to use
2023 Apr 25 · 12 · [PATCH vhost v7 00/11] virtio core prepares for AF_XDP
## About DMA APIs
Currently, virtio may not work with the DMA APIs when the virtio features do
not include VIRTIO_F_ACCESS_PLATFORM.
1. I tried to let the DMA APIs return the physical address for the virtio
   device, but the DMA APIs only work with "real" devices.
2. I tried to let xsk support callbacks to get the physical address from the
   virtio-net driver as the DMA address, but the maintainers of xsk may want to use
2023 Mar 02 · 12 · [PATCH vhost v1 00/12] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero-copy
feature of xsk (XDP socket) needs to be supported by the driver, and the zero-copy
performance is very good.
ENV: QEMU with vhost.
vhost cpu | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%
2023 Mar 02 · 1 · [PATCH vhost v1 06/12] virtio_ring: packed: separate DMA codes
...virtqueue_add_packed(struct virtqueue *_vq,
                 goto end;
         }
+        err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+        if (err)
+                goto err;
+
         id = vq->free_head;
         if (desc) {
@@ -1637,6 +1601,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
         goto end;
 err:
+        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
         kfree(desc);
 end:
--
2.32.0.3.g01195cf9f
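The hunks above move all DMA mapping for the packed ring in front of the descriptor-filling code, so mapping, descriptor filling, and error unwinding become three separate steps. Roughly, the resulting control flow is the sketch below; fill_packed_descriptors() is a hypothetical stand-in for the descriptor-writing code, not a function from the patch.

static inline int virtqueue_add_packed_flow(struct vring_virtqueue *vq,
                                            struct scatterlist *sgs[],
                                            unsigned int total_sg,
                                            unsigned int out_sgs,
                                            unsigned int in_sgs)
{
        int err;

        /* 1. Map every scatterlist up front; sg->dma_address is filled in. */
        err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
        if (err)
                return err;

        /* 2. Write packed-ring descriptors purely from sg->dma_address; no
         *    DMA calls are mixed into this step any more. */
        err = fill_packed_descriptors(vq, sgs, total_sg, out_sgs, in_sgs);
        if (err)
                goto err_unmap;

        return 0;

err_unmap:
        /* 3. On failure, undo only the mappings created in step 1. */
        virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
        return err;
}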
2023 Feb 14 · 11 · [PATCH vhost 00/10] virtio core prepares for AF_XDP
XDP socket (AF_XDP) is an excellent kernel-bypass network framework. The zero-copy
feature of xsk (XDP socket) needs to be supported by the driver, and the zero-copy
performance is very good.
ENV: QEMU with vhost.
vhost cpu | Guest APP CPU | Guest Softirq CPU | PPS
-----------------------------|---------------|------------------|------------
xmit by sockperf: 90% | 100%