Xuan Zhuo
2023-Mar-02 11:58 UTC
[PATCH vhost v1 02/12] virtio_ring: split: separate DMA codes
DMA-related logic is separated out of virtqueue_add_vring_split() to
prepare for the subsequent premapped support.
The DMA address will be saved as sg->dma_address, and
virtqueue_add_vring_split() will then use it directly.
In the premapped scenario, the submitted sgs will already have their DMA
addresses saved in dma_address, so the virtio core can skip the
virtqueue_map_sgs() step.
Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
---
drivers/virtio/virtio_ring.c | 116 ++++++++++++++++++++++++-----------
1 file changed, 80 insertions(+), 36 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 3005893ecc61..17520f0d7649 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -520,6 +520,73 @@ static inline unsigned int virtqueue_add_desc_split(struct
virtqueue *vq,
return next;
}
+static int virtqueue_map_sgs(struct vring_virtqueue *vq,
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs)
+{
+ struct scatterlist *sg;
+ unsigned int n;
+
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ return -ENOMEM;
+
+ sg->dma_address = addr;
+ }
+ }
+
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+
+ if (vring_mapping_error(vq, addr))
+ return -ENOMEM;
+
+ sg->dma_address = addr;
+ }
+ }
+
+ return 0;
+}
+
+static void virtqueue_unmap_sgs(struct vring_virtqueue *vq,
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs)
+{
+ struct scatterlist *sg;
+ unsigned int n;
+
+ if (!vq->use_dma_api)
+ return;
+
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ if (!sg->dma_address)
+ return;
+
+ dma_unmap_page(vring_dma_dev(vq), sg->dma_address,
+ sg->length, DMA_TO_DEVICE);
+ }
+ }
+
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ if (!sg->dma_address)
+ return;
+
+ dma_unmap_page(vring_dma_dev(vq), sg->dma_address,
+ sg->length, DMA_FROM_DEVICE);
+ }
+ }
+}
+
/* note: return NULL means no indirect that is valid. */
static struct vring_desc *virtqueue_get_desc_split(struct vring_virtqueue *vq,
unsigned int total_sg,
@@ -577,7 +644,7 @@ static inline int virtqueue_add_vring_split(struct
vring_virtqueue *vq,
{
struct virtqueue *_vq = &vq->vq;
struct scatterlist *sg;
- unsigned int i, n, avail, descs_used, prev, err_idx;
+ unsigned int i, n, avail, descs_used, prev;
int head;
bool indirect;
@@ -598,30 +665,25 @@ static inline int virtqueue_add_vring_split(struct
vring_virtqueue *vq,
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
- goto unmap_release;
-
prev = i;
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
+ i = virtqueue_add_desc_split(_vq, desc, i,
+ sg->dma_address,
+ sg->length,
VRING_DESC_F_NEXT,
indirect);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
- goto unmap_release;
-
prev = i;
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, i, addr,
+ i = virtqueue_add_desc_split(_vq, desc, i,
+ sg->dma_address,
sg->length,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
@@ -640,7 +702,7 @@ static inline int virtqueue_add_vring_split(struct
vring_virtqueue *vq,
vq, desc, total_sg * sizeof(struct vring_desc),
DMA_TO_DEVICE);
if (vring_mapping_error(vq, addr))
- goto unmap_release;
+ return -ENOMEM;
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
head, addr,
@@ -680,30 +742,6 @@ static inline int virtqueue_add_vring_split(struct
vring_virtqueue *vq,
virtqueue_kick(_vq);
return 0;
-
-unmap_release:
- err_idx = i;
-
- if (indirect)
- i = 0;
- else
- i = head;
-
- for (n = 0; n < total_sg; n++) {
- if (i == err_idx)
- break;
- if (indirect) {
- vring_unmap_one_split_indirect(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, desc[i].next);
- } else
- i = vring_unmap_one_split(vq, i);
- }
-
- if (indirect)
- kfree(desc);
-
- END_USE(vq);
- return -ENOMEM;
}
static inline int virtqueue_add_split(struct virtqueue *_vq,
@@ -729,6 +767,10 @@ static inline int virtqueue_add_split(struct virtqueue
*_vq,
goto end;
}
+ err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+ if (err)
+ goto err;
+
head = vq->free_head;
err = virtqueue_add_vring_split(vq, sgs, total_sg, out_sgs, in_sgs, desc);
if (err)
@@ -741,6 +783,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
goto end;
err:
+ virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+
kfree(desc);
end:
--
2.32.0.3.g01195cf9f
Jason Wang
2023-Mar-07 06:43 UTC
[PATCH vhost v1 02/12] virtio_ring: split: separate DMA codes
On Thu, Mar 2, 2023 at 7:59?PM Xuan Zhuo <xuanzhuo at linux.alibaba.com> wrote:> > DMA-related logic is separated from the virtqueue_add_vring_split() to > prepare for subsequent support for premapped. > > DMA address will be saved as sg->dma_address, then > virtqueue_add_vring_split() will use it directly. > > If it is a premapped scene, the transmitted sgs should have saved DMA > address in dma_address, and in virtio core, we need to pass > virtqueue_map_sgs(). > > Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com> > --- > drivers/virtio/virtio_ring.c | 116 ++++++++++++++++++++++++----------- > 1 file changed, 80 insertions(+), 36 deletions(-) > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c > index 3005893ecc61..17520f0d7649 100644 > --- a/drivers/virtio/virtio_ring.c > +++ b/drivers/virtio/virtio_ring.c > @@ -520,6 +520,73 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > return next; > } > > +static int virtqueue_map_sgs(struct vring_virtqueue *vq, > + struct scatterlist *sgs[], > + unsigned int total_sg, > + unsigned int out_sgs, > + unsigned int in_sgs) > +{ > + struct scatterlist *sg; > + unsigned int n; > + > + for (n = 0; n < out_sgs; n++) { > + for (sg = sgs[n]; sg; sg = sg_next(sg)) { > + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); > + > + if (vring_mapping_error(vq, addr)) > + return -ENOMEM; > + > + sg->dma_address = addr; > + } > + } > + > + for (; n < (out_sgs + in_sgs); n++) { > + for (sg = sgs[n]; sg; sg = sg_next(sg)) { > + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); > + > + if (vring_mapping_error(vq, addr)) > + return -ENOMEM; > + > + sg->dma_address = addr; > + } > + } > + > + return 0; > +} > + > +static void virtqueue_unmap_sgs(struct vring_virtqueue *vq, > + struct scatterlist *sgs[], > + unsigned int total_sg, > + unsigned int out_sgs, > + unsigned int in_sgs) > +{ > + struct scatterlist *sg; > + unsigned int n; > + > + if (!vq->use_dma_api) > + 
return; > + > + for (n = 0; n < out_sgs; n++) { > + for (sg = sgs[n]; sg; sg = sg_next(sg)) { > + if (!sg->dma_address) > + return; > + > + dma_unmap_page(vring_dma_dev(vq), sg->dma_address, > + sg->length, DMA_TO_DEVICE); > + } > + } > + > + for (; n < (out_sgs + in_sgs); n++) { > + for (sg = sgs[n]; sg; sg = sg_next(sg)) { > + if (!sg->dma_address) > + return; > + > + dma_unmap_page(vring_dma_dev(vq), sg->dma_address, > + sg->length, DMA_FROM_DEVICE); > + } > + } > +} > + > /* note: return NULL means no indirect that is valid. */ > static struct vring_desc *virtqueue_get_desc_split(struct vring_virtqueue *vq, > unsigned int total_sg, > @@ -577,7 +644,7 @@ static inline int virtqueue_add_vring_split(struct vring_virtqueue *vq, > { > struct virtqueue *_vq = &vq->vq; > struct scatterlist *sg; > - unsigned int i, n, avail, descs_used, prev, err_idx; > + unsigned int i, n, avail, descs_used, prev; > int head; > bool indirect; > > @@ -598,30 +665,25 @@ static inline int virtqueue_add_vring_split(struct vring_virtqueue *vq, > > for (n = 0; n < out_sgs; n++) { > for (sg = sgs[n]; sg; sg = sg_next(sg)) { > - dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); > - if (vring_mapping_error(vq, addr)) > - goto unmap_release; > - > prev = i; > /* Note that we trust indirect descriptor > * table since it use stream DMA mapping. > */ > - i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, > + i = virtqueue_add_desc_split(_vq, desc, i, > + sg->dma_address, > + sg->length, > VRING_DESC_F_NEXT, > indirect); > } > } > for (; n < (out_sgs + in_sgs); n++) { > for (sg = sgs[n]; sg; sg = sg_next(sg)) { > - dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); > - if (vring_mapping_error(vq, addr)) > - goto unmap_release; > - > prev = i; > /* Note that we trust indirect descriptor > * table since it use stream DMA mapping. 
> */ > - i = virtqueue_add_desc_split(_vq, desc, i, addr, > + i = virtqueue_add_desc_split(_vq, desc, i, > + sg->dma_address, > sg->length, > VRING_DESC_F_NEXT | > VRING_DESC_F_WRITE, > @@ -640,7 +702,7 @@ static inline int virtqueue_add_vring_split(struct vring_virtqueue *vq, > vq, desc, total_sg * sizeof(struct vring_desc), > DMA_TO_DEVICE); > if (vring_mapping_error(vq, addr)) > - goto unmap_release; > + return -ENOMEM; > > virtqueue_add_desc_split(_vq, vq->split.vring.desc, > head, addr, > @@ -680,30 +742,6 @@ static inline int virtqueue_add_vring_split(struct vring_virtqueue *vq, > virtqueue_kick(_vq); > > return 0; > - > -unmap_release: > - err_idx = i; > - > - if (indirect) > - i = 0; > - else > - i = head; > - > - for (n = 0; n < total_sg; n++) { > - if (i == err_idx) > - break; > - if (indirect) { > - vring_unmap_one_split_indirect(vq, &desc[i]); > - i = virtio16_to_cpu(_vq->vdev, desc[i].next); > - } else > - i = vring_unmap_one_split(vq, i); > - } > - > - if (indirect) > - kfree(desc); > - > - END_USE(vq); > - return -ENOMEM; > } > > static inline int virtqueue_add_split(struct virtqueue *_vq, > @@ -729,6 +767,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > goto end; > } > > + err = virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs); > + if (err) > + goto err; > + > head = vq->free_head; > err = virtqueue_add_vring_split(vq, sgs, total_sg, out_sgs, in_sgs, desc);Any reason we don't do virtqueue_map_sgs()/virtqueue_unmap_sgs() inside virtqueue_add_vring_split()? Thanks> if (err) > @@ -741,6 +783,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > goto end; > > err: > + virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs); > + > kfree(desc); > > end: > -- > 2.32.0.3.g01195cf9f >