Si-Wei Liu
2021-Dec-03 07:28 UTC
[PATCH 3/7] vdpa/mlx5: Support configuring max data virtqueue pairs
On 12/1/2021 11:57 AM, Eli Cohen wrote:
> Check whether the max number of data virtqueue pairs was provided when
> adding a new device and verify the new value does not exceed device
> capabilities.
>
> In addition, change the arrays holding virtqueue and callback contexts
> to be dynamically allocated.
>
> Signed-off-by: Eli Cohen <elic at nvidia.com>
> ---
>   drivers/vdpa/mlx5/net/mlx5_vnet.c | 33 ++++++++++++++++++++-----------
>   1 file changed, 21 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index b66f41ccbac2..62aba5dbd8fa 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -131,11 +131,6 @@ struct mlx5_vdpa_virtqueue {
>   	struct mlx5_vq_restore_info ri;
>   };
>
> -/* We will remove this limitation once mlx5_vdpa_alloc_resources()
> - * provides for driver space allocation

Is this comment outdated, i.e. is driver space allocation in
mlx5_vdpa_alloc_resources() already provided?

> - */
> -#define MLX5_MAX_SUPPORTED_VQS 16
> -
>   static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
>   {
>   	if (unlikely(idx > mvdev->max_idx))
> @@ -148,8 +143,8 @@ struct mlx5_vdpa_net {
>   	struct mlx5_vdpa_dev mvdev;
>   	struct mlx5_vdpa_net_resources res;
>   	struct virtio_net_config config;
> -	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
> -	struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
> +	struct mlx5_vdpa_virtqueue *vqs;
> +	struct vdpa_callback *event_cbs;
>
>   	/* Serialize vq resources creation and destruction. This is required
>   	 * since memory map might change and we need to destroy and create
> @@ -1218,7 +1213,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
>   {
>   	int i;
>
> -	for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
> +	for (i = 0; i < ndev->mvdev.max_vqs; i++)
>   		suspend_vq(ndev, &ndev->vqs[i]);
>   }
>
> @@ -1245,7 +1240,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
>   	int i, j;
>   	int err;
>
> -	max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
> +	max_rqt = min_t(int, ndev->mvdev.max_vqs / 2,
>   			1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
>   	if (max_rqt < 1)
>   		return -EOPNOTSUPP;
> @@ -2235,7 +2230,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
>   	clear_vqs_ready(ndev);
>   	mlx5_vdpa_destroy_mr(&ndev->mvdev);
>   	ndev->mvdev.status = 0;
> -	memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
> +	memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
>   	ndev->mvdev.actual_features = 0;
>   	++mvdev->generation;
>   	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
> @@ -2308,6 +2303,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
>   	}
>   	mlx5_vdpa_free_resources(&ndev->mvdev);
>   	mutex_destroy(&ndev->reslock);
> +	kfree(ndev->event_cbs);
> +	kfree(ndev->vqs);
>   }
>
>   static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
> @@ -2547,13 +2544,24 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
>
>   	/* we save one virtqueue for control virtqueue should we require it */
>   	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
> -	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
> +	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
> +		if (add_config->max_vq_pairs & (add_config->max_vq_pairs - 1) ||
> +		    add_config->max_vq_pairs > max_vqs / 2)
> +			return -EINVAL;

It'd be best to describe the failure cause here; the power-of-2 limitation is
known to me, but it is not friendly enough for a first-time user.
dev_warn would work for me.

> +		max_vqs = min_t(u32, max_vqs, 2 * add_config->max_vq_pairs);
> +	}
>
>   	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
>   				 name, false);
>   	if (IS_ERR(ndev))
>   		return PTR_ERR(ndev);
>
> +	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
> +	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
> +	if (!ndev->vqs || !ndev->event_cbs) {
> +		err = -ENOMEM;
> +		goto err_mtu;

It is not a good idea to call mutex_destroy() without having called
mutex_init() first. Introduce another err label before put_device()?

-Siwei

> +	}
>   	ndev->mvdev.max_vqs = max_vqs;
>   	mvdev = &ndev->mvdev;
>   	mvdev->mdev = mdev;
> @@ -2676,7 +2684,8 @@ static int mlx5v_probe(struct auxiliary_device *adev,
>   	mgtdev->mgtdev.ops = &mdev_ops;
>   	mgtdev->mgtdev.device = mdev->device;
>   	mgtdev->mgtdev.id_table = id_table;
> -	mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
> +	mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
> +					  BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
>   	mgtdev->madev = madev;
>
>   	err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
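For illustration, a minimal sketch of how the two points raised above could
look in the mlx5_vdpa_dev_add() flow: a dev_warn() that spells out why the
requested max_vqp value was rejected, and a separate error label that only
frees the allocations and drops the device reference, so mutex_destroy() is
never reached before mutex_init() has run. The message wording and the
err_alloc label name are hypothetical, not part of the posted patch:

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
		if (add_config->max_vq_pairs & (add_config->max_vq_pairs - 1) ||
		    add_config->max_vq_pairs > max_vqs / 2) {
			/* sketch: give the reason instead of a bare -EINVAL
			 * (hypothetical wording)
			 */
			dev_warn(mdev->device,
				 "max_vqp %u must be a power of 2 and not exceed %u\n",
				 add_config->max_vq_pairs, max_vqs / 2);
			return -EINVAL;
		}
		max_vqs = min_t(u32, max_vqs, 2 * add_config->max_vq_pairs);
	}
	...
	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
	if (!ndev->vqs || !ndev->event_cbs) {
		err = -ENOMEM;
		goto err_alloc;		/* hypothetical label, see below */
	}
	...
err_alloc:
	/* mutex_init(&ndev->reslock) has not run yet on this path, so only
	 * undo what was actually done: free the arrays and drop the device
	 * reference, without calling mutex_destroy().
	 */
	kfree(ndev->event_cbs);
	kfree(ndev->vqs);
	put_device(&ndev->mvdev.vdev.dev);
	return err;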
Parav Pandit
2021-Dec-03 07:31 UTC
[PATCH 3/7] vdpa/mlx5: Support configuring max data virtqueue pairs
> From: Si-Wei Liu <si-wei.liu at oracle.com>
> Sent: Friday, December 3, 2021 12:58 PM
>
> On 12/1/2021 11:57 AM, Eli Cohen wrote:
> > Check whether the max number of data virtqueue pairs was provided when
> > adding a new device and verify the new value does not exceed device
> > capabilities.
> >
> > In addition, change the arrays holding virtqueue and callback contexts
> > to be dynamically allocated.
> >
> > Signed-off-by: Eli Cohen <elic at nvidia.com>

[...]

> > +	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
> > +		if (add_config->max_vq_pairs & (add_config->max_vq_pairs - 1) ||
> > +		    add_config->max_vq_pairs > max_vqs / 2)
> > +			return -EINVAL;
> It'd be best to describe the failure cause here; the power-of-2 limitation is
> known to me, but it is not friendly enough for a first-time user.
> dev_warn would work for me.

User commands shouldn't be creating unwanted dmesg messages.
dev_warn_once() is better.

But instead, an extack error message should be returned; that reaches the
user who issues the iproute2 command and informs the user much better.
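For context, the extack mechanism referred to above attaches a text reason to
the netlink reply, which the iproute2 "vdpa dev add" command prints instead of
a bare errno. A minimal sketch of the general pattern, assuming a struct
netlink_ext_ack pointer were plumbed from the vdpa netlink handler
(info->extack) down into the dev_add path; the helper name below is
hypothetical and such plumbing is not part of this series:

#include <linux/netlink.h>	/* NL_SET_ERR_MSG_MOD() */

/* Hypothetical helper: assumes 'extack' has been passed down from the
 * vdpa netlink code into the mlx5 dev_add path.
 */
static int mlx5_vdpa_check_max_vqp(u32 max_vq_pairs, u32 max_vqs,
				   struct netlink_ext_ack *extack)
{
	/* Same validity check as in the patch, but with a human-readable
	 * reason attached to the netlink reply instead of dmesg output.
	 */
	if (max_vq_pairs & (max_vq_pairs - 1) || max_vq_pairs > max_vqs / 2) {
		NL_SET_ERR_MSG_MOD(extack,
				   "max_vqp must be a power of 2 and not exceed half of max_num_virtio_queues");
		return -EINVAL;
	}
	return 0;
}

iproute2 would then report something like "Error: mlx5_vdpa: max_vqp must be
a power of 2 ..." directly to the user who issued the command.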