search for: vfio_add_group_dev

Displaying 20 results from an estimated 45 matches for "vfio_add_group_dev".

2016 Apr 18
0
[PATCH RFC 2/3] vfio: report group noiommu status
When using vfio, callers might want to know whether the device is added to a regular group or a non-iommu group. Report this status from vfio_add_group_dev. Signed-off-by: Michael S. Tsirkin <mst at redhat.com> --- drivers/vfio/pci/vfio_pci.c | 2 +- drivers/vfio/platform/vfio_platform_common.c | 2 +- drivers/vfio/vfio.c | 5 ++++- Documentation/vfio.txt | 4 +++- 4 files changed...
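
For illustration, a caller-side sketch of how such a status might be consumed. The signature is hypothetical (the excerpt above does not show how the status gets reported); it assumes the RFC makes vfio_add_group_dev() return a positive value for a noiommu group:

    /* Hypothetical sketch: assumes vfio_add_group_dev() returns a
     * negative errno on failure, 0 for a regular IOMMU-backed group,
     * and a positive value for an unsafe noiommu group. */
    static int example_vfio_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
    {
        int ret = vfio_add_group_dev(&pdev->dev, &example_dev_ops, pdev);

        if (ret < 0)
            return ret;
        if (ret > 0)
            dev_warn(&pdev->dev,
                     "added to a noiommu group; DMA is not isolated\n");
        return 0;
    }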
2016 Apr 18
5
[PATCH RFC 0/3] virtio-pci: iommu support
This is an attempt to allow enabling IOMMU for DMA. Design: - new feature bit IOMMU_PLATFORM which means host won't bypass IOMMU - virtio core uses DMA API if it sees IOMMU_PLATFORM - add quirk for vfio to disable device unless IOMMU_PLATFORM is set or the no-iommu mode is enabled - while I'm not sure how it will be used, it seems like a good idea to also have
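
The "virtio core uses DMA API if it sees IOMMU_PLATFORM" point reduces to a check like the simplified sketch below; the real helper in virtio_ring.c grew more cases over time, and the bit was later renamed VIRTIO_F_ACCESS_PLATFORM:

    /* Simplified sketch: use the DMA API only when the device promises
     * not to bypass the platform IOMMU.  Without IOMMU_PLATFORM the
     * device DMAs straight to guest-physical addresses. */
    static bool vring_use_dma_api(struct virtio_device *vdev)
    {
        return virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    }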
2019 Sep 19
3
[RFC v4 0/3] vhost: introduce mdev based hardware backend
..."vfio-vhost-mdev", .open = vfio_vhost_mdev_open, .release = vfio_vhost_mdev_release, }; static int vhost_mdev_probe(struct device *dev) { struct mdev_device *mdev = to_mdev_device(dev); ... Check the mdev device_id proposed in ... ... https://lkml.org/lkml/2019/9/12/151 ... return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev); } static void vhost_mdev_remove(struct device *dev) { vfio_del_group_dev(dev); } static struct mdev_driver vhost_mdev_driver = { .name = "vhost_mdev", .probe = vhost_mdev_probe, .remove = vhost_mdev_remove, }; So we can bind above mdev driv...
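
The flattened excerpt above, restated readably; this is only what the excerpt itself shows, with the elided device_id check kept as a comment:

    static int vhost_mdev_probe(struct device *dev)
    {
        struct mdev_device *mdev = to_mdev_device(dev);

        /* ... check the mdev device_id proposed in
         * https://lkml.org/lkml/2019/9/12/151 ... */

        /* Register with vfio so userspace reaches the device through
         * the usual vfio group interfaces. */
        return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev);
    }

    static void vhost_mdev_remove(struct device *dev)
    {
        vfio_del_group_dev(dev);
    }

    static struct mdev_driver vhost_mdev_driver = {
        .name   = "vhost_mdev",
        .probe  = vhost_mdev_probe,
        .remove = vhost_mdev_remove,
    };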
2019 Sep 20
1
[RFC v4 0/3] vhost: introduce mdev based hardware backend
...h of virtio-mdev and vhost-mdev will try to take this device. We may want a way to let vhost-mdev take this device only when users explicitly ask it to do so. Or maybe we can have a different MDEV_ID for vhost-mdev but share the device ops with virtio-mdev. > > > > > > return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev); > > > And in vfio_vhost_mdev_ops, all it needs to do is implement the vhost-net > ioctls and translate them to the virtio-mdev transport API (e.g. the device_ops I proposed > or ioctls or whatever other method). I see, so my previous understanding...
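
A sketch of the "different MDEV_ID for vhost-mdev" alternative floated here, reusing the id_table shape that appears later in this thread; MDEV_ID_VHOST is hypothetical:

    /* Hypothetical: a distinct class id keeps vhost-mdev from racing
     * virtio-mdev for the same device, while the parent driver can
     * still back both ids with the same device ops. */
    static struct mdev_class_id vhost_id_table[] = {
        { MDEV_ID_VHOST },    /* hypothetical new id */
        { 0 },
    };
    /* ... and in vhost_mdev_driver: .id_table = vhost_id_table, */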
2019 Sep 17
2
[RFC v4 0/3] vhost: introduce mdev based hardware backend
...ase = vfio_vhost_mdev_release, .ioctl = vfio_vhost_mdev_unlocked_ioctl, .read = vfio_vhost_mdev_read, .write = vfio_vhost_mdev_write, .mmap = vfio_vhost_mdev_mmap, }; static int vfio_vhost_mdev_probe(struct device *dev) { struct mdev_device *mdev = to_mdev_device(dev); /* ... */ return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev); } static void vfio_vhost_mdev_remove(struct device *dev) { /* ... */ vfio_del_group_dev(dev); } static struct mdev_driver vfio_vhost_mdev_driver = { .name = "vfio_vhost_mdev", .probe = vfio_vhost_mdev_probe, .remove = vfio_vhost_mdev_remove...
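
The ops table scattered across this and the earlier excerpt, collected in one place; struct vfio_device_ops is the type vfio_add_group_dev() takes:

    static const struct vfio_device_ops vfio_vhost_mdev_dev_ops = {
        .name    = "vfio-vhost-mdev",
        .open    = vfio_vhost_mdev_open,
        .release = vfio_vhost_mdev_release,
        .ioctl   = vfio_vhost_mdev_unlocked_ioctl,
        .read    = vfio_vhost_mdev_read,
        .write   = vfio_vhost_mdev_write,
        .mmap    = vfio_vhost_mdev_mmap,
    };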
2019 Sep 18
2
[RFC v4 0/3] vhost: introduce mdev based hardware backend
...read, > > .write = vfio_vhost_mdev_write, > > .mmap = vfio_vhost_mdev_mmap, > > }; > > > > static int vfio_vhost_mdev_probe(struct device *dev) > > { > > struct mdev_device *mdev = to_mdev_device(dev); > > > > /* ... */ > > return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev); > > } > > > > static void vfio_vhost_mdev_remove(struct device *dev) > > { > > /* ... */ > > vfio_del_group_dev(dev); > > } > > > > static struct mdev_driver vfio_vhost_mdev_driver = { > > .na...
2019 Sep 20
0
[RFC v4 0/3] vhost: introduce mdev based hardware backend
...hrough the id_table fields in vhost_mdev_driver, and it should claim it supports virtio-mdev devices only: static struct mdev_class_id id_table[] = { { MDEV_ID_VIRTIO }, { 0 }, }; static struct mdev_driver vhost_mdev_driver = { ... .id_table = id_table, } > > return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev); And in vfio_vhost_mdev_ops, all it needs to do is implement the vhost-net ioctls and translate them to the virtio-mdev transport API (e.g. the device_ops I proposed or ioctls or whatever other method). And it could have a dummy ops implementation for the other...
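
A sketch of the translation idea (implement the vhost-net ioctls, forward them to the virtio-mdev transport). VHOST_GET_FEATURES is a real vhost ioctl and get_features appears elsewhere in this thread; the ops type name and the m->mdev_ops field are assumptions:

    static long vfio_vhost_mdev_unlocked_ioctl(void *device_data,
                                               unsigned int cmd,
                                               unsigned long arg)
    {
        struct vhost_mdev *m = device_data;
        const struct virtio_mdev_device_ops *ops = m->mdev_ops;

        switch (cmd) {
        case VHOST_GET_FEATURES:
            /* Translate the vhost-net ioctl into the transport's
             * feature query. */
            return put_user(ops->get_features(m->mdev),
                            (u64 __user *)arg);
        default:
            return -ENOIOCTLCMD;
        }
    }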
2019 Sep 26
6
[PATCH] vhost: introduce mdev based hardware backend
...__GFP_RETRY_MAYFAIL); + if (!m) + return -ENOMEM; + + mutex_init(&m->mutex); + + nvqs = ops->get_queue_max(mdev); + m->nvqs = nvqs; + + m->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), + GFP_KERNEL); + if (!m->vqs) { + r = -ENOMEM; + goto err; + } + + r = vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, m); + if (r) + goto err; + + m->features = ops->get_features(mdev); + m->mdev = mdev; + return 0; + +err: + kfree(m->vqs); + kfree(m); + return r; +} + +static void vhost_mdev_remove(struct device *dev) +{ + struct vhost_mdev *m; + + m = vfio_del_grou...
2016 Aug 30
6
[PATCH v2 0/2] vfio: blacklist legacy virtio devices
Legacy virtio devices always bypassed an IOMMU, so using them with vfio was never safe. This adds a quirk detecting these and disabling VFIO unless the noiommu mode is used. At the moment, this only applies to virtio-pci devices. The patch might make sense on stable as well. Michael S. Tsirkin (2): vfio: report group noiommu status vfio: add virtio pci quirk
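
For context, a sketch of what the quirk's test amounts to; the helper name and the bare ID check are illustrative only (the actual patch also has to confirm the device really operates in legacy mode):

    /* Sketch: legacy (pre-1.0) virtio-pci devices DMA to guest-physical
     * addresses unconditionally, so vfio cannot isolate them behind a
     * real IOMMU. */
    static bool example_virtio_pci_is_legacy(struct pci_dev *pdev)
    {
        if (pdev->vendor != PCI_VENDOR_ID_REDHAT_QUMRANET)
            return false;
        /* Transitional/legacy device IDs are 0x1000..0x103f; modern
         * (1.0+) IDs start at 0x1040. */
        return pdev->device >= 0x1000 && pdev->device < 0x1040;
    }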
2019 Sep 18
0
[RFC v4 0/3] vhost: introduce mdev based hardware backend
...mdev_unlocked_ioctl, > .read = vfio_vhost_mdev_read, > .write = vfio_vhost_mdev_write, > .mmap = vfio_vhost_mdev_mmap, > }; > > static int vfio_vhost_mdev_probe(struct device *dev) > { > struct mdev_device *mdev = to_mdev_device(dev); > > /* ... */ > return vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, mdev); > } > > static void vfio_vhost_mdev_remove(struct device *dev) > { > /* ... */ > vfio_del_group_dev(dev); > } > > static struct mdev_driver vfio_vhost_mdev_driver = { > .name = "vfio_vhost_mdev", > .probe = vfi...
2019 Sep 26
0
[PATCH] vhost: introduce mdev based hardware backend
...x_init(&m->mutex); > + > + nvqs = ops->get_queue_max(mdev); > + m->nvqs = nvqs; > + > + m->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), > + GFP_KERNEL); > + if (!m->vqs) { > + r = -ENOMEM; > + goto err; > + } > + > + r = vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, m); > + if (r) > + goto err; > + > + m->features = ops->get_features(mdev); > + m->mdev = mdev; > + return 0; > + > +err: > + kfree(m->vqs); > + kfree(m); > + return r; > +} > + > +static void vhost_mdev_remov...
2019 Oct 22
2
[PATCH v2] vhost: introduce mdev based hardware backend
...; + + m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!m) + return -ENOMEM; + + nvqs = VHOST_MDEV_VQ_MAX; + m->nvqs = nvqs; + + m->vqs = devm_kmalloc_array(dev, nvqs, sizeof(struct vhost_virtqueue), + GFP_KERNEL); + if (!m->vqs) + return -ENOMEM; + + r = vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, m); + if (r) + return r; + + mutex_init(&m->mutex); + m->features = ops->get_features(mdev); + m->mdev = mdev; + return 0; +} + +static void vhost_mdev_remove(struct device *dev) +{ + struct vhost_mdev *m; + + m = vfio_del_group_dev(dev); + mutex_...
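
Worth noting against the first version: with devm_kzalloc()/devm_kmalloc_array() the allocations are device-managed, so the goto-based unwind from v1 disappears. Side by side:

    /* v1: manual unwind needed on every failure path. */
    m->vqs = kmalloc_array(nvqs, sizeof(*m->vqs), GFP_KERNEL);
    if (!m->vqs) {
        r = -ENOMEM;
        goto err;    /* err: kfree(m->vqs); kfree(m); */
    }

    /* v2: device-managed, freed automatically when probe fails or the
     * device goes away, so a bare return suffices. */
    m->vqs = devm_kmalloc_array(dev, nvqs, sizeof(*m->vqs), GFP_KERNEL);
    if (!m->vqs)
        return -ENOMEM;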
2019 Sep 27
0
[PATCH] vhost: introduce mdev based hardware backend
...hat we need another API to query the maximum number of virtqueues supported by the device. Thanks > + > + m->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), > + GFP_KERNEL); > + if (!m->vqs) { > + r = -ENOMEM; > + goto err; > + } > + > + r = vfio_add_group_dev(dev, &vfio_vhost_mdev_dev_ops, m); > + if (r) > + goto err; > + > + m->features = ops->get_features(mdev); > + m->mdev = mdev; > + return 0; > + > +err: > + kfree(m->vqs); > + kfree(m); > + return r; > +} > + > +static void vhost_mdev_remov...
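
The v1 code above already queries the limit in-kernel via ops->get_queue_max(); this comment presumably asks for a way to expose it further, e.g. to userspace. One possible shape, with both the ioctl name and number invented for illustration:

    /* Hypothetical ioctl: let userspace query the device's maximum
     * virtqueue count rather than assuming a fixed limit such as the
     * VHOST_MDEV_VQ_MAX used in v2. */
    #define VHOST_MDEV_GET_QUEUE_MAX _IOR(VHOST_VIRTIO, 0x60, __u16)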