Jason Wang
2021-Aug-24 09:09 UTC
[PATCH v4 4/6] vdpa/mlx5: Ensure valid indices are provided
在 2021/8/23 下午1:21, Eli Cohen 写道:
> Following patches add control virtqueue and multiqueue support. We want
> to verify that the index value to callbacks referencing a virtqueue is
> valid.
>
> The logic defining valid indices is as follows:
> CVQ clear: 0 and 1.
> CVQ set, MQ clear: 0, 1 and 2
> CVQ set, MQ set: 0..nvq where nvq is whatever provided to
> _vdpa_register_device()
>
> Signed-off-by: Eli Cohen <elic at nvidia.com>

Acked-by: Jason Wang <jasowang at redhat.com>

> ---
> drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
> drivers/vdpa/mlx5/net/mlx5_vnet.c | 54 ++++++++++++++++++++++++++++++
> 2 files changed, 55 insertions(+)
>
> diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> index 8d0a6f2cb3f0..41b20855ed31 100644
> --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> @@ -56,6 +56,7 @@ struct mlx5_vdpa_dev {
> u64 actual_features;
> u8 status;
> u32 max_vqs;
> + u16 max_idx;
> u32 generation;
>
> struct mlx5_vdpa_mr mr;
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 3ae2e5ae2be1..eafad2a19299 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -45,6 +45,8 @@ MODULE_LICENSE("Dual BSD/GPL");
> (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
> VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
>
> +#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
> +
> struct mlx5_vdpa_net_resources {
> u32 tisn;
> u32 tdn;
> @@ -133,6 +135,14 @@ struct mlx5_vdpa_virtqueue {
> */
> #define MLX5_MAX_SUPPORTED_VQS 16
>
> +static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
> +{
> + if (unlikely(idx > mvdev->max_idx))
> + return false;
> +
> + return true;
> +}
> +
> struct mlx5_vdpa_net {
> struct mlx5_vdpa_dev mvdev;
> struct mlx5_vdpa_net_resources res;
> @@ -1354,6 +1364,9 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
>
> + if (!is_index_valid(mvdev, idx))
> + return;
> +
> if (unlikely(!mvq->ready))
> return;
>
> @@ -1367,6 +1380,9 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
>
> + if (!is_index_valid(mvdev, idx))
> + return -EINVAL;
> +
> mvq->desc_addr = desc_area;
> mvq->device_addr = device_area;
> mvq->driver_addr = driver_area;
> @@ -1379,6 +1395,9 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> struct mlx5_vdpa_virtqueue *mvq;
>
> + if (!is_index_valid(mvdev, idx))
> + return;
> +
> mvq = &ndev->vqs[idx];
> mvq->num_ent = num;
> }
> @@ -1397,6 +1416,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
>
> + if (!is_index_valid(mvdev, idx))
> + return;
> +
> if (!ready)
> suspend_vq(ndev, mvq);
>
> @@ -1409,6 +1431,9 @@ static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
>
> + if (!is_index_valid(mvdev, idx))
> + return false;
> +
> return mvq->ready;
> }
>
> @@ -1419,6 +1444,9 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
>
> + if (!is_index_valid(mvdev, idx))
> + return -EINVAL;
> +
> if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
> mlx5_vdpa_warn(mvdev, "can't modify available index\n");
> return -EINVAL;
> @@ -1437,6 +1465,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
> struct mlx5_virtq_attr attr;
> int err;
>
> + if (!is_index_valid(mvdev, idx))
> + return -EINVAL;
> +
> /* If the virtq object was destroyed, use the value saved at
> * the last minute of suspend_vq. This caters for userspace
> * that cares about emulating the index after vq is stopped.
> @@ -1556,6 +1587,24 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
> return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
> }
>
> +static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
> +{
> + if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
> + if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
> + /* MQ supported. CVQ index is right above the last data virtqueue's */
> + mvdev->max_idx = mvdev->max_vqs;
> + } else {
> + /* Only CVQ supported. data virtqueues occupy indices 0 and 1.
> + * CVQ gets index 2
> + */
> + mvdev->max_idx = 2;
> + }
> + } else {
> + /* Two data virtqueues only: one for rx and one for tx */
> + mvdev->max_idx = 1;
> + }
> +}
> +
> static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
> {
> struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
> @@ -1571,6 +1620,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
> ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
> ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
> ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
> + update_cvq_info(mvdev);
> return err;
> }
>
> @@ -1792,6 +1842,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
> ndev->mvdev.status = 0;
> ndev->mvdev.mlx_features = 0;
> memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
> + ndev->mvdev.actual_features = 0;
> ++mvdev->generation;
> if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
> if (mlx5_vdpa_create_mr(mvdev, NULL))
> @@ -1892,6 +1943,9 @@ static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device
> struct mlx5_vdpa_net *ndev;
> phys_addr_t addr;
>
> + if (!is_index_valid(mvdev, idx))
> + return ret;
> +
> /* If SF BAR size is smaller than PAGE_SIZE, do not use direct
> * notification to avoid the risk of mapping pages that contain BAR of more
> * than one SF