Jason Wang
2022-Mar-21 06:04 UTC
[PATCH 1/2] vdpa: mlx5: prevent cvq work from hogging CPU
A userspace-triggerable infinite loop could happen in
mlx5_cvq_kick_handler() if userspace keeps sending a huge amount of
cvq requests.

Fix this by introducing a quota and re-queueing the work if we run out
of budget. While at it, use a per-device workqueue entry for the cvq to
avoid on-demand memory allocation.

Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
Signed-off-by: Jason Wang <jasowang at redhat.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index d0f91078600e..d5a6fb3f9c41 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
 	u32 cur_num_vqs;
 	struct notifier_block nb;
 	struct vdpa_callback config_cb;
+	struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1600,6 +1601,8 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 	return status;
 }
 
+#define MLX5_CVQ_BUDGET 16
+
 static void mlx5_cvq_kick_handler(struct work_struct *work)
 {
 	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
@@ -1609,17 +1612,17 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 	struct mlx5_control_vq *cvq;
 	struct mlx5_vdpa_net *ndev;
 	size_t read, write;
-	int err;
+	int err, n = 0;
 
 	wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
 	mvdev = wqent->mvdev;
 	ndev = to_mlx5_vdpa_ndev(mvdev);
 	cvq = &mvdev->cvq;
 	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
-		goto out;
+		return;
 
 	if (!cvq->ready)
-		goto out;
+		return;
 
 	while (true) {
 		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
@@ -1653,9 +1656,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 
 		if (vringh_need_notify_iotlb(&cvq->vring))
 			vringh_notify(&cvq->vring);
+
+		n++;
+		if (n > MLX5_CVQ_BUDGET) {
+			queue_work(mvdev->wq, &wqent->work);
+			break;
+		}
 	}
-out:
-	kfree(wqent);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1663,7 +1670,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq;
-	struct mlx5_vdpa_wq_ent *wqent;
 
 	if (!is_index_valid(mvdev, idx))
 		return;
@@ -1672,13 +1678,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 		if (!mvdev->cvq.ready)
 			return;
 
-		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-		if (!wqent)
-			return;
-
-		wqent->mvdev = mvdev;
-		INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-		queue_work(mvdev->wq, &wqent->work);
+		queue_work(mvdev->wq, &ndev->cvq_ent.work);
 		return;
 	}
 
@@ -2668,6 +2668,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	if (err)
 		goto err_mr;
 
+	ndev->cvq_ent.mvdev = mvdev;
+	INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
 	mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
 	if (!mvdev->wq) {
 		err = -ENOMEM;
-- 
2.18.1
Jason Wang
2022-Mar-21 06:04 UTC
[PATCH 2/2] vdpa: mlx5: synchronize driver status with CVQ
Currently, the CVQ has no synchronization with the driver status, so the
CVQ emulation code can run in the middle of:

1) device reset
2) device status change
3) map update

This can lead to several unexpected issues, such as trying to execute a
CVQ command after the driver has been torn down.

Fix this by using reslock to synchronize the CVQ emulation code with
driver status changes:

- protect the whole device reset, status change and map update with
  reslock
- protect the CVQ handler with reslock and check
  VIRTIO_CONFIG_S_DRIVER_OK in the CVQ handler

This guarantees that:

1) the CVQ handler won't run if VIRTIO_CONFIG_S_DRIVER_OK is not set
2) the CVQ handler sees a consistent driver state instead of a partial
   one when it runs in the middle of teardown_driver() or setup_driver()

Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
Signed-off-by: Jason Wang <jasowang at redhat.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 42 +++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index d5a6fb3f9c41..524240f55c1c 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1618,11 +1618,17 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 	mvdev = wqent->mvdev;
 	ndev = to_mlx5_vdpa_ndev(mvdev);
 	cvq = &mvdev->cvq;
+
+	mutex_lock(&ndev->reslock);
+
+	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+		goto done;
+
 	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
-		return;
+		goto done;
 
 	if (!cvq->ready)
-		return;
+		goto done;
 
 	while (true) {
 		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
@@ -1663,6 +1669,9 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 			break;
 		}
 	}
+
+done:
+	mutex_unlock(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -2125,6 +2134,8 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
 
+	mutex_lock(&ndev->reslock);
+
 	suspend_vqs(ndev);
 	err = save_channels_info(ndev);
 	if (err)
@@ -2137,18 +2148,20 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 		goto err_mr;
 
 	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-		return 0;
+		goto err_mr;
 
 	restore_channels_info(ndev);
 	err = setup_driver(mvdev);
 	if (err)
 		goto err_setup;
 
+	mutex_unlock(&ndev->reslock);
 	return 0;
 
 err_setup:
 	mlx5_vdpa_destroy_mr(mvdev);
 err_mr:
+	mutex_unlock(&ndev->reslock);
 	return err;
 }
 
@@ -2157,7 +2170,8 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
 
-	mutex_lock(&ndev->reslock);
+	WARN_ON(!mutex_is_locked(&ndev->reslock));
+
 	if (ndev->setup) {
 		mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
 		err = 0;
@@ -2187,7 +2201,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 		goto err_fwd;
 	}
 	ndev->setup = true;
-	mutex_unlock(&ndev->reslock);
 
 	return 0;
 
@@ -2198,23 +2211,22 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 err_rqt:
 	teardown_virtqueues(ndev);
 out:
-	mutex_unlock(&ndev->reslock);
 	return err;
 }
 
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-	mutex_lock(&ndev->reslock);
+
+	WARN_ON(!mutex_is_locked(&ndev->reslock));
+
 	if (!ndev->setup)
-		goto out;
+		return;
 
 	remove_fwd_to_tir(ndev);
 	destroy_tir(ndev);
 	destroy_rqt(ndev);
 	teardown_virtqueues(ndev);
 	ndev->setup = false;
-out:
-	mutex_unlock(&ndev->reslock);
 }
 
 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2235,6 +2247,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
 	print_status(mvdev, status, true);
 
+	mutex_lock(&ndev->reslock);
+
 	if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
 		if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
 			err = setup_driver(mvdev);
@@ -2244,16 +2258,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 			}
 		} else {
 			mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-			return;
+			goto err_clear;
 		}
 	}
 
 	ndev->mvdev.status = status;
+	mutex_unlock(&ndev->reslock);
 	return;
 
 err_setup:
 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
 	ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+	mutex_unlock(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2263,6 +2280,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 
 	print_status(mvdev, 0, true);
 	mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+	mutex_lock(&ndev->reslock);
 	teardown_driver(ndev);
 	clear_vqs_ready(ndev);
 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2275,6 +2294,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 		if (mlx5_vdpa_create_mr(mvdev, NULL))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
 	}
+	mutex_unlock(&ndev->reslock);
 
 	return 0;
 }
-- 
2.18.1
Michael S. Tsirkin
2022-Mar-21 07:20 UTC
[PATCH 1/2] vdpa: mlx5: prevent cvq work from hogging CPU
On Mon, Mar 21, 2022 at 02:04:28PM +0800, Jason Wang wrote:
> A userspace-triggerable infinite loop could happen in
> mlx5_cvq_kick_handler() if userspace keeps sending a huge amount of
> cvq requests.
>
> Fix this by introducing a quota and re-queueing the work if we run out
> of budget. While at it, use a per-device workqueue entry for the cvq to
> avoid on-demand memory allocation.
>
> Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
> Signed-off-by: Jason Wang <jasowang at redhat.com>
> ---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c | 28 +++++++++++++++-------------
>  1 file changed, 15 insertions(+), 13 deletions(-)

[...]

> @@ -2668,6 +2668,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
>  	if (err)
>  		goto err_mr;
>  
> +	ndev->cvq_ent.mvdev = mvdev;
> +	INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
>  	mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
>  	if (!mvdev->wq) {
>  		err = -ENOMEM;

Shouldn't there be a flush during cleanup somewhere?

> --
> 2.18.1
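A rough sketch of the kind of cleanup flush being asked about, assuming
it would live in the existing mlx5_vdpa_dev_del() removal path; the
function body below is illustrative only and is not code from this
series:

static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);

	/* Illustrative only: stop the kick path from queueing new cvq
	 * work, then wait for any queued (or self re-queued) cvq work
	 * to finish before the device state it touches goes away.
	 */
	mvdev->cvq.ready = false;
	flush_workqueue(mvdev->wq);
	destroy_workqueue(mvdev->wq);

	/* ... the existing unregister/teardown would continue here ... */
	_vdpa_unregister_device(dev);
}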
Hillf Danton
2022-Mar-21 08:53 UTC
[PATCH 1/2] vdpa: mlx5: prevent cvq work from hogging CPU
On Mon, 21 Mar 2022 14:04:28 +0800 Jason Wang wrote:
> A userspace-triggerable infinite loop could happen in
> mlx5_cvq_kick_handler() if userspace keeps sending a huge amount of
> cvq requests.
>
> Fix this by introducing a quota and re-queueing the work if we run out
> of budget. While at it, use a per-device workqueue entry for the cvq to
> avoid on-demand memory allocation.

[...]

> @@ -1600,6 +1601,8 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
>  	return status;
>  }
>  
> +#define MLX5_CVQ_BUDGET 16
> +

This is not needed: given a single-threaded workqueue, a cond_resched()
in the worker context can do the job instead of re-queueing the work.

Hillf

>  static void mlx5_cvq_kick_handler(struct work_struct *work)
>  {
>  	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;

[...]
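A rough sketch of the alternative Hillf describes (hypothetical; not a
posted patch): keep the unbounded loop but yield the CPU from the worker
context between requests instead of counting a budget and re-queueing
the work. The loop shape and the vringh_getdesc_iotlb() call follow the
existing handler; only the cond_resched() is new.

	/* Sketch only: same loop as mlx5_cvq_kick_handler(), minus the
	 * MLX5_CVQ_BUDGET accounting and the re-queue of wqent->work.
	 */
	while (true) {
		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov,
					   &cvq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		/* ... handle the control command and post the used buffer
		 * exactly as the existing handler does ...
		 */

		/* The work runs in process context on a single-threaded
		 * workqueue, so yielding here keeps the handler from
		 * monopolizing a CPU without re-queueing the work.
		 */
		cond_resched();
	}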