Xuan Zhuo
2023-Mar-21 09:34 UTC
[PATCH vhost v3 04/11] virtio_ring: split: support premapped
virtio core only supports virtual addresses; DMA mapping is completed
inside virtio core.

In some scenarios (such as AF_XDP), the memory is allocated and the DMA
mapping is completed in advance, so it is necessary for us to support
passing the DMA address to virtio core.

Drivers can use sg->dma_address to pass the mapped DMA address to virtio
core. If one sg->dma_address is used, then all sgs must use
sg->dma_address; otherwise all must be null when passing them to the
APIs of virtio.

Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c8ed4aef9462..be2ff96964c3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -70,6 +70,7 @@
 struct vring_desc_state_split {
 	void *data;			/* Data for callback. */
 	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
+	bool map_inter;			/* Do dma map internally. */
 };
 
 struct vring_desc_state_packed {
@@ -448,7 +449,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 }
 
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
-					  unsigned int i)
+					  unsigned int i, bool map_inter)
 {
 	struct vring_desc_extra *extra = vq->split.desc_extra;
 	u16 flags;
@@ -465,6 +466,9 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
 			       (flags & VRING_DESC_F_WRITE) ?
 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
+		if (!map_inter)
+			goto out;
+
 		dma_unmap_page(vring_dma_dev(vq),
 			       extra[i].addr,
 			       extra[i].len,
@@ -615,7 +619,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	struct scatterlist *sg;
 	struct vring_desc *desc;
 	unsigned int i, n, avail, descs_used, prev;
-	bool indirect;
+	bool indirect, map_inter;
 	int head;
 
 	START_USE(vq);
@@ -668,7 +672,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 		return -ENOSPC;
 	}
 
-	if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
+	map_inter = !sgs[0]->dma_address;
+	if (map_inter && virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
 		return -ENOMEM;
 
 	for (n = 0; n < out_sgs; n++) {
@@ -734,6 +739,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 		vq->split.desc_state[head].indir_desc = desc;
 	else
 		vq->split.desc_state[head].indir_desc = ctx;
+	vq->split.desc_state[head].map_inter = map_inter;
 
 	/* Put entry in available array (but don't update avail->idx until they
 	 * do sync). */
@@ -759,7 +765,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	return 0;
 
 unmap_release:
-	virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+	if (map_inter)
+		virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
 
 	if (indirect)
 		kfree(desc);
@@ -804,20 +811,22 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 {
 	unsigned int i, j;
 	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+	bool map_inter;
 
 	/* Clear data ptr. */
 	vq->split.desc_state[head].data = NULL;
+	map_inter = vq->split.desc_state[head].map_inter;
 
 	/* Put back on free list: unmap first-level descriptors and find end */
 	i = head;
 
 	while (vq->split.vring.desc[i].flags & nextflag) {
-		vring_unmap_one_split(vq, i);
+		vring_unmap_one_split(vq, i, map_inter);
 		i = vq->split.desc_extra[i].next;
 		vq->vq.num_free++;
 	}
 
-	vring_unmap_one_split(vq, i);
+	vring_unmap_one_split(vq, i, map_inter);
 	vq->split.desc_extra[i].next = vq->free_head;
 	vq->free_head = head;
 
@@ -839,8 +848,10 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 						 VRING_DESC_F_INDIRECT));
 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-		for (j = 0; j < len / sizeof(struct vring_desc); j++)
-			vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+		if (map_inter) {
+			for (j = 0; j < len / sizeof(struct vring_desc); j++)
+				vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+		}
 
 		kfree(indir_desc);
 		vq->split.desc_state[head].indir_desc = NULL;
-- 
2.32.0.3.g01195cf9f
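For context, here is a minimal, hypothetical sketch of the driver-side
usage this patch enables. None of these names come from the patch:
submit_premapped_buf is an invented helper, dma_dev is assumed to be the
device virtio core itself would map with (typically the virtio device's
parent), and error unwinding is elided.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Submit one outgoing buffer whose DMA mapping the driver did itself. */
static int submit_premapped_buf(struct virtqueue *vq, struct device *dma_dev,
				void *buf, size_t len, void *data)
{
	struct scatterlist sg;
	dma_addr_t addr;

	/* Map in advance (e.g. at AF_XDP pool setup) instead of
	 * letting virtio core map the buffer on every submission. */
	addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;

	sg_init_one(&sg, buf, len);

	/* A non-null sg->dma_address marks the sg as premapped, so
	 * virtio core skips its internal dma map/unmap. Per the commit
	 * message this is all-or-nothing: either every sg in one call
	 * carries a dma_address, or none of them does. */
	sg.dma_address = addr;

	return virtqueue_add_outbuf(vq, &sg, 1, data, GFP_ATOMIC);
}

Note the flip side: since detach_buf_split() skips unmapping when
map_inter is false, the eventual dma_unmap_single() is also the
driver's responsibility.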
Michael S. Tsirkin
2023-Mar-21 20:45 UTC
[PATCH vhost v3 04/11] virtio_ring: split: support premapped
On Tue, Mar 21, 2023 at 05:34:59PM +0800, Xuan Zhuo wrote:

[...]

>  struct vring_desc_state_split {
>  	void *data;			/* Data for callback. */
>  	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
> +	bool map_inter;			/* Do dma map internally. */

I prefer a full name. E.g. "dma_map_internal". Eschew abbreviation.

[...]
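For reference, the suggested rename would look something like this (an
illustrative sketch only; the name dma_map_internal comes from the review
comment above, and any actual rename would land in a later revision of
the series):

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
	bool dma_map_internal;		/* Do DMA mapping internally. */
};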