Xuan Zhuo
2023-Mar-02 11:58 UTC
[PATCH vhost v1 01/12] virtio_ring: split: refactor virtqueue_add_split() for premapped
This commit splits virtqueue_add_split() into two functions. The purpose
of the split is to separate the DMA operations.

The first function contains all the code that may fail before the DMA
operation; the remaining part becomes the second function.

This way, the DMA operations can be performed between the two functions:
if the first function fails, no DMA operation needs to be done, and if
the buffers are premapped, the DMA operation can be skipped entirely.

Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 131 +++++++++++++++++++++++------------
 1 file changed, 88 insertions(+), 43 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 41144b5246a8..3005893ecc61 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -520,57 +520,38 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
         return next;
 }
 
-static inline int virtqueue_add_split(struct virtqueue *_vq,
-                                      struct scatterlist *sgs[],
-                                      unsigned int total_sg,
-                                      unsigned int out_sgs,
-                                      unsigned int in_sgs,
-                                      void *data,
-                                      void *ctx,
-                                      gfp_t gfp)
+/* note: return NULL means no indirect that is valid. */
+static struct vring_desc *virtqueue_get_desc_split(struct vring_virtqueue *vq,
+                                                   unsigned int total_sg,
+                                                   unsigned int out_sgs,
+                                                   void *data,
+                                                   void *ctx,
+                                                   gfp_t gfp)
 {
-        struct vring_virtqueue *vq = to_vvq(_vq);
-        struct scatterlist *sg;
         struct vring_desc *desc;
-        unsigned int i, n, avail, descs_used, prev, err_idx;
-        int head;
-        bool indirect;
-
-        START_USE(vq);
+        unsigned int descs_used;
 
         BUG_ON(data == NULL);
         BUG_ON(ctx && vq->indirect);
 
-        if (unlikely(vq->broken)) {
-                END_USE(vq);
-                return -EIO;
-        }
+        if (unlikely(vq->broken))
+                return ERR_PTR(-EIO);
 
         LAST_ADD_TIME_UPDATE(vq);
 
         BUG_ON(total_sg == 0);
 
-        head = vq->free_head;
-
         if (virtqueue_use_indirect(vq, total_sg))
-                desc = alloc_indirect_split(_vq, total_sg, gfp);
+                desc = alloc_indirect_split(&vq->vq, total_sg, gfp);
         else {
                 desc = NULL;
                 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
         }
 
-        if (desc) {
-                /* Use a single buffer which doesn't continue */
-                indirect = true;
-                /* Set up rest to use this indirect table. */
-                i = 0;
+        if (desc)
                 descs_used = 1;
-        } else {
-                indirect = false;
-                desc = vq->split.vring.desc;
-                i = head;
+        else
                 descs_used = total_sg;
-        }
 
         if (unlikely(vq->vq.num_free < descs_used)) {
                 pr_debug("Can't add buf len %i - avail = %i\n",
@@ -580,10 +561,39 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
                  * host should service the ring ASAP. */
                 if (out_sgs)
                         vq->notify(&vq->vq);
-                if (indirect)
-                        kfree(desc);
-                END_USE(vq);
-                return -ENOSPC;
+                kfree(desc);
+                return ERR_PTR(-ENOSPC);
+        }
+
+        return desc;
+}
+
+static inline int virtqueue_add_vring_split(struct vring_virtqueue *vq,
+                                            struct scatterlist *sgs[],
+                                            unsigned int total_sg,
+                                            unsigned int out_sgs,
+                                            unsigned int in_sgs,
+                                            struct vring_desc *desc)
+{
+        struct virtqueue *_vq = &vq->vq;
+        struct scatterlist *sg;
+        unsigned int i, n, avail, descs_used, prev, err_idx;
+        int head;
+        bool indirect;
+
+        head = vq->free_head;
+
+        if (desc) {
+                /* Use a single buffer which doesn't continue */
+                indirect = true;
+                /* Set up rest to use this indirect table. */
+                i = 0;
+                descs_used = 1;
+        } else {
+                indirect = false;
+                desc = vq->split.vring.desc;
+                i = head;
+                descs_used = total_sg;
         }
 
         for (n = 0; n < out_sgs; n++) {
@@ -648,13 +658,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
         else
                 vq->free_head = i;
 
-        /* Store token and indirect buffer state. */
-        vq->split.desc_state[head].data = data;
-        if (indirect)
-                vq->split.desc_state[head].indir_desc = desc;
-        else
-                vq->split.desc_state[head].indir_desc = ctx;
-
         /* Put entry in available array (but don't update avail->idx until they
          * do sync). */
         avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
@@ -703,6 +706,48 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
         return -ENOMEM;
 }
 
+static inline int virtqueue_add_split(struct virtqueue *_vq,
+                                      struct scatterlist *sgs[],
+                                      unsigned int total_sg,
+                                      unsigned int out_sgs,
+                                      unsigned int in_sgs,
+                                      void *data,
+                                      void *ctx,
+                                      gfp_t gfp)
+{
+        struct vring_virtqueue *vq = to_vvq(_vq);
+        struct vring_desc *desc;
+        int head;
+        int err;
+
+        START_USE(vq);
+
+        /* check vq state and try to alloc desc for indirect. */
+        desc = virtqueue_get_desc_split(vq, total_sg, out_sgs, data, ctx, gfp);
+        if (IS_ERR(desc)) {
+                err = PTR_ERR(desc);
+                goto end;
+        }
+
+        head = vq->free_head;
+        err = virtqueue_add_vring_split(vq, sgs, total_sg, out_sgs, in_sgs, desc);
+        if (err)
+                goto err;
+
+        /* Store token and indirect buffer state. */
+        vq->split.desc_state[head].data = data;
+        vq->split.desc_state[head].indir_desc = desc ? desc : ctx;
+
+        goto end;
+
+err:
+        kfree(desc);
+
+end:
+        END_USE(vq);
+        return err;
+}
+
 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
 {
         struct vring_virtqueue *vq = to_vvq(_vq);
-- 
2.32.0.3.g01195cf9f
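
To see why the refactoring helps, here is a rough sketch (not part of this
patch) of how a caller could place the DMA-mapping step between the two new
halves once the rest of the series lands. The premapped flag and the
vring_map_sgs() helper below are hypothetical placeholders, not existing
kernel APIs:

        /* Phase 1: check vq state and allocate the indirect table.
         * Nothing has been mapped yet, so a failure here needs no unmap. */
        desc = virtqueue_get_desc_split(vq, total_sg, out_sgs, data, ctx, gfp);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        /* The DMA mapping would go here and can be skipped entirely when
         * the driver has premapped the buffers itself. */
        if (!premapped) {                       /* hypothetical flag */
                err = vring_map_sgs(vq, sgs, total_sg, out_sgs); /* hypothetical helper */
                if (err) {
                        kfree(desc);
                        return err;
                }
        }

        /* Phase 2: write the (pre)mapped addresses into the vring. */
        err = virtqueue_add_vring_split(vq, sgs, total_sg, out_sgs, in_sgs, desc);
        if (err)
                kfree(desc);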
Jason Wang
2023-Mar-07 06:43 UTC
[PATCH vhost v1 01/12] virtio_ring: split: refactor virtqueue_add_split() for premapped
On Thu, Mar 2, 2023 at 7:59 PM Xuan Zhuo <xuanzhuo at linux.alibaba.com> wrote:
>
> This commit splits virtqueue_add_split() into two functions. The purpose
> of the split is to separate the DMA operations.
>
> The first function contains all the code that may fail before the DMA
> operation; the remaining part becomes the second function.
>
> This way, the DMA operations can be performed between the two functions:
> if the first function fails, no DMA operation needs to be done, and if
> the buffers are premapped, the DMA operation can be skipped entirely.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> ---
>  drivers/virtio/virtio_ring.c | 131 +++++++++++++++++++++++------------
>  1 file changed, 88 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 41144b5246a8..3005893ecc61 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -520,57 +520,38 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
>          return next;
>  }
>
> -static inline int virtqueue_add_split(struct virtqueue *_vq,
> -                                      struct scatterlist *sgs[],
> -                                      unsigned int total_sg,
> -                                      unsigned int out_sgs,
> -                                      unsigned int in_sgs,
> -                                      void *data,
> -                                      void *ctx,
> -                                      gfp_t gfp)
> +/* note: return NULL means no indirect that is valid. */
> +static struct vring_desc *virtqueue_get_desc_split(struct vring_virtqueue *vq,
> +                                                   unsigned int total_sg,
> +                                                   unsigned int out_sgs,
> +                                                   void *data,
> +                                                   void *ctx,
> +                                                   gfp_t gfp)
>  {

I don't object to the changes but I don't see much value in factoring
this out, since it's not related to any dma mapping logic.

[...]

> @@ -703,6 +706,48 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>          return -ENOMEM;
>  }
>
> +static inline int virtqueue_add_split(struct virtqueue *_vq,
> +                                      struct scatterlist *sgs[],
> +                                      unsigned int total_sg,
> +                                      unsigned int out_sgs,
> +                                      unsigned int in_sgs,
> +                                      void *data,
> +                                      void *ctx,
> +                                      gfp_t gfp)
> +{
> +        struct vring_virtqueue *vq = to_vvq(_vq);
> +        struct vring_desc *desc;
> +        int head;
> +        int err;
> +
> +        START_USE(vq);
> +
> +        /* check vq state and try to alloc desc for indirect. */
> +        desc = virtqueue_get_desc_split(vq, total_sg, out_sgs, data, ctx, gfp);
> +        if (IS_ERR(desc)) {
> +                err = PTR_ERR(desc);
> +                goto end;
> +        }
> +
> +        head = vq->free_head;
> +        err = virtqueue_add_vring_split(vq, sgs, total_sg, out_sgs, in_sgs, desc);
> +        if (err)
> +                goto err;
> +
> +        /* Store token and indirect buffer state. */
> +        vq->split.desc_state[head].data = data;
> +        vq->split.desc_state[head].indir_desc = desc ? desc : ctx;
> +
> +        goto end;

Nit: this seems not elegant. I would rather duplicate

END_USE(vq);
return err;

here.

Thanks

> +
> +err:
> +        kfree(desc);
> +
> +end:
> +        END_USE(vq);
> +        return err;
> +}
> +
>  static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
>  {
>          struct vring_virtqueue *vq = to_vvq(_vq);
> --
> 2.32.0.3.g01195cf9f
>
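
For reference, the suggested shape would be roughly the following untested
sketch (an illustration of the reviewer's nit, not code from the series):
the success path keeps its own cleanup instead of jumping to the shared
"end" label.

        /* Store token and indirect buffer state. */
        vq->split.desc_state[head].data = data;
        vq->split.desc_state[head].indir_desc = desc ? desc : ctx;

        END_USE(vq);
        return err;     /* err == 0 on this path */

err:
        kfree(desc);
        END_USE(vq);
        return err;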