On Sat, Jul 17, 2021 at 10:42:57AM +0300, Parav Pandit wrote:
> VQs may be accessed to mark the device broken while they are
> created/destroyed. Hence protect the access to the vqs list.
>
> Fixes: e2dcdfe95c0b ("virtio: virtio_break_device() to mark all virtqueues broken.")
> Signed-off-by: Parav Pandit <parav@nvidia.com>
> ---
> drivers/virtio/virtio.c | 1 +
> drivers/virtio/virtio_ring.c | 8 ++++++++
> include/linux/virtio.h | 1 +
> 3 files changed, 10 insertions(+)
>
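For reference, the race being closed here is between the unlocked list walk in
virtio_break_device() and concurrent list_add_tail()/list_del() calls from
virtqueue creation/destruction. An illustrative sketch only, not part of the
patch:

	/*
	 * CPU0                                   CPU1
	 * virtio_break_device()                  vring_del_virtqueue()
	 *   list_for_each_entry(_vq, &dev->vqs)    list_del(&_vq->list);
	 *     vq->broken = true;                    ...vq is freed...
	 *
	 * Without a lock, CPU0 can follow a list entry that CPU1 has just
	 * unlinked and freed.
	 */
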
> diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
> index 4b15c00c0a0a..a0d81e35ec4b 100644
> --- a/drivers/virtio/virtio.c
> +++ b/drivers/virtio/virtio.c
> @@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
> virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
>
> INIT_LIST_HEAD(&dev->vqs);
> + rwlock_init(&dev->vqs_list_lock);
>
> /*
> * device_add() causes the bus infrastructure to look for a matching

Let's just use a simple spinlock. I don't think we are worried about
scaling the breaking of devices to multiple CPUs.
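
As a rough, untested sketch of the spinlock variant (keeping the
vqs_list_lock name from this patch):

	/* include/linux/virtio.h */
	struct list_head vqs;
	spinlock_t vqs_list_lock;	/* protects the vqs list */

	/* drivers/virtio/virtio.c: register_virtio_device() */
	INIT_LIST_HEAD(&dev->vqs);
	spin_lock_init(&dev->vqs_list_lock);

	/* drivers/virtio/virtio_ring.c, e.g. vring_del_virtqueue() */
	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

virtio_break_device() would take the same lock around its list walk instead
of read_lock(). If any of these paths can also run from interrupt context,
the _irqsave variants would be needed instead.
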
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index d2e1a7a21171..66a91dec39d9 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -1755,7 +1755,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
> cpu_to_le16(vq->packed.event_flags_shadow);
> }
>
> + write_lock(&vdev->vqs_list_lock);
> list_add_tail(&vq->vq.list, &vdev->vqs);
> + write_unlock(&vdev->vqs_list_lock);
> return &vq->vq;
>
> err_desc_extra:
> @@ -2229,7 +2231,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
> memset(vq->split.desc_state, 0, vring.num *
> sizeof(struct vring_desc_state_split));
>
> + write_lock(&vdev->vqs_list_lock);
> list_add_tail(&vq->vq.list, &vdev->vqs);
> + write_unlock(&vdev->vqs_list_lock);
> return &vq->vq;
>
> err_extra:
> @@ -2291,7 +2295,9 @@ void vring_del_virtqueue(struct virtqueue *_vq)
> {
> struct vring_virtqueue *vq = to_vvq(_vq);
>
> + write_lock(&vq->vq.vdev->vqs_list_lock);
> list_del(&_vq->list);
> + write_unlock(&vq->vq.vdev->vqs_list_lock);
>
> if (vq->we_own_ring) {
> if (vq->packed_ring) {
> @@ -2386,12 +2392,14 @@ void virtio_break_device(struct virtio_device *dev)
> {
> struct virtqueue *_vq;
>
> + read_lock(&dev->vqs_list_lock);
> list_for_each_entry(_vq, &dev->vqs, list) {
> struct vring_virtqueue *vq = to_vvq(_vq);
>
> /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
> smp_store_release(&vq->broken, true);
> }
> + read_unlock(&dev->vqs_list_lock);
> }
> EXPORT_SYMBOL_GPL(virtio_break_device);
>
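For context, the READ_ONCE() referenced in the comment above is the per-vq
flag read in virtqueue_is_broken(); it does not walk the vqs list, so the
reader side stays lockless. Roughly:

	bool virtqueue_is_broken(struct virtqueue *_vq)
	{
		struct vring_virtqueue *vq = to_vvq(_vq);

		return READ_ONCE(vq->broken);
	}
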
> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> index b1894e0323fa..1cf77d480ef3 100644
> --- a/include/linux/virtio.h
> +++ b/include/linux/virtio.h
> @@ -115,6 +115,7 @@ struct virtio_device {
> const struct virtio_config_ops *config;
> const struct vringh_config_ops *vringh_config;
> struct list_head vqs;
> + rwlock_t vqs_list_lock;
> u64 features;
> void *priv;
> };
> --
> 2.27.0