As the current virtio-mmio only supports a single irq,
some advanced features such as vhost-net with irqfd
are not supported, and net performance is not
the best without vhost-net and irqfd support.

This patch lets virtio-mmio request multiple
irqs like virtio-pci. With this patch and qemu assigning
multiple irqs for the virtio-mmio device, it's ok to use
vhost-net with irqfd on arm/arm64.

As arm doesn't support msi-x now, we use GSI for
multiple irqs. In this patch we use "vm_try_to_find_vqs"
to check whether multiple irqs are supported, like
virtio-pci.

Is this the right direction? Are there other ways to
make virtio-mmio support multiple irqs? Hope for feedback.
Thanks.

Signed-off-by: Shannon Zhao <zhaoshenglong at huawei.com>
---
 drivers/virtio/virtio_mmio.c | 234 ++++++++++++++++++++++++++++++++++++------
 1 files changed, 203 insertions(+), 31 deletions(-)

diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index c600ccf..2b7d935 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -122,6 +122,15 @@ struct virtio_mmio_device {
 	/* a list of queues so we can dispatch IRQs */
 	spinlock_t lock;
 	struct list_head virtqueues;
+
+	/* multiple irq support */
+	int single_irq_enabled;
+	/* Number of available irqs */
+	unsigned num_irqs;
+	/* Used number of irqs */
+	int used_irqs;
+	/* Name strings for interrupts. */
+	char (*vm_vq_names)[256];
 };
 
 struct virtio_mmio_vq_info {
@@ -229,33 +238,53 @@ static bool vm_notify(struct virtqueue *vq)
 	return true;
 }
 
+/* Handle a configuration change: Tell driver if it wants to know. */
+static irqreturn_t vm_config_changed(int irq, void *opaque)
+{
+	struct virtio_mmio_device *vm_dev = opaque;
+	struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver,
+			struct virtio_driver, driver);
+
+	if (vdrv && vdrv->config_changed)
+		vdrv->config_changed(&vm_dev->vdev);
+	return IRQ_HANDLED;
+}
+
 /* Notify all virtqueues on an interrupt. */
-static irqreturn_t vm_interrupt(int irq, void *opaque)
+static irqreturn_t vm_vring_interrupt(int irq, void *opaque)
 {
 	struct virtio_mmio_device *vm_dev = opaque;
 	struct virtio_mmio_vq_info *info;
-	struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver,
-			struct virtio_driver, driver);
-	unsigned long status;
+	irqreturn_t ret = IRQ_NONE;
 	unsigned long flags;
+
+	spin_lock_irqsave(&vm_dev->lock, flags);
+	list_for_each_entry(info, &vm_dev->virtqueues, node) {
+		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&vm_dev->lock, flags);
+
+	return ret;
+}
+
+/* Notify all virtqueues and handle a configuration
+ * change on an interrupt. */
+static irqreturn_t vm_interrupt(int irq, void *opaque)
+{
+	struct virtio_mmio_device *vm_dev = opaque;
+	unsigned long status;
 	irqreturn_t ret = IRQ_NONE;
 
 	/* Read and acknowledge interrupts */
 	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
 	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);
 
-	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)
-			&& vdrv && vdrv->config_changed) {
-		vdrv->config_changed(&vm_dev->vdev);
-		ret = IRQ_HANDLED;
-	}
+	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG))
+		return vm_config_changed(irq, opaque);
 
-	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
-		spin_lock_irqsave(&vm_dev->lock, flags);
-		list_for_each_entry(info, &vm_dev->virtqueues, node)
-			ret |= vring_interrupt(irq, info->vq);
-		spin_unlock_irqrestore(&vm_dev->lock, flags);
-	}
+	if (likely(status & VIRTIO_MMIO_INT_VRING))
+		return vm_vring_interrupt(irq, opaque);
 
 	return ret;
 }
@@ -284,18 +313,98 @@ static void vm_del_vq(struct virtqueue *vq)
 	kfree(info);
 }
 
-static void vm_del_vqs(struct virtio_device *vdev)
+static void vm_free_irqs(struct virtio_device *vdev)
 {
+	int i;
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+	if (vm_dev->single_irq_enabled) {
+		free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
+		vm_dev->single_irq_enabled = 0;
+	}
+
+	for (i = 0; i < vm_dev->used_irqs; ++i)
+		free_irq(platform_get_irq(vm_dev->pdev, i), vm_dev);
+
+	vm_dev->num_irqs = 0;
+	vm_dev->used_irqs = 0;
+	kfree(vm_dev->vm_vq_names);
+	vm_dev->vm_vq_names = NULL;
+}
+
+static void vm_del_vqs(struct virtio_device *vdev)
+{
 	struct virtqueue *vq, *n;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
 		vm_del_vq(vq);
 
-	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
+	vm_free_irqs(vdev);
+}
+
+static int vm_request_multiple_irqs(struct virtio_device *vdev, int nirqs,
+		bool per_vq_irq)
+{
+	int err = -ENOMEM;
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	unsigned i, v;
+	int irq = 0;
+
+	vm_dev->num_irqs = nirqs;
+	vm_dev->used_irqs = 0;
+
+	vm_dev->vm_vq_names = kmalloc_array(nirqs, sizeof(*vm_dev->vm_vq_names),
+			GFP_KERNEL);
+	if (!vm_dev->vm_vq_names)
+		goto error;
+
+	for (i = 0; i < nirqs; i++) {
+		irq = platform_get_irq(vm_dev->pdev, i);
+		if (irq == -ENXIO)
+			goto error;
+	}
+
+	/* Set the irq used for configuration */
+	v = vm_dev->used_irqs;
+	snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names),
+			"%s-config", dev_name(&vdev->dev));
+	irq = platform_get_irq(vm_dev->pdev, v);
+	err = request_irq(irq, vm_config_changed, 0,
+			vm_dev->vm_vq_names[v], vm_dev);
+	++vm_dev->used_irqs;
+	if (err)
+		goto error;
+
+	if (!per_vq_irq) {
+		/* Shared irq for all VQs */
+		v = vm_dev->used_irqs;
+		snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names),
+				"%s-virtqueues", dev_name(&vdev->dev));
+		irq = platform_get_irq(vm_dev->pdev, v);
+		err = request_irq(irq, vm_vring_interrupt, 0,
+				vm_dev->vm_vq_names[v], vm_dev);
+		if (err)
+			goto error;
+		++vm_dev->used_irqs;
+	}
+	return 0;
+error:
+	vm_free_irqs(vdev);
+	return err;
 }
 
+static int vm_request_single_irq(struct virtio_device *vdev)
+{
+	int err;
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	int irq = platform_get_irq(vm_dev->pdev, 0);
+
+	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
+			dev_name(&vdev->dev), vm_dev);
+	if (!err)
+		vm_dev->single_irq_enabled = 1;
+	return err;
+}
 
 static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 				  void (*callback)(struct virtqueue *vq),
@@ -392,29 +501,92 @@ error_available:
 	return ERR_PTR(err);
 }
 
-static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-		       struct virtqueue *vqs[],
-		       vq_callback_t *callbacks[],
-		       const char *names[])
+static int vm_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+		       struct virtqueue *vqs[],
+		       vq_callback_t *callbacks[],
+		       const char *names[],
+		       bool use_multiple_irq,
+		       bool per_vq_irq)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
-	int i, err;
+	int i, err, nirqs, irq;
+
+	if (!use_multiple_irq) {
+		/* Old style: one normal interrupt for change and all vqs. */
+		err = vm_request_single_irq(vdev);
+		if (err)
+			goto error_request;
+	} else {
+		if (per_vq_irq) {
+			/* Best option: one for change interrupt, one per vq. */
+			nirqs = 1;
+			for (i = 0; i < nvqs; ++i)
+				if (callbacks[i])
+					++nirqs;
+		} else {
+			/* Second best: one for change, shared for all vqs. */
+			nirqs = 2;
+		}
 
-	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
-			dev_name(&vdev->dev), vm_dev);
-	if (err)
-		return err;
+		err = vm_request_multiple_irqs(vdev, nirqs, per_vq_irq);
+		if (err)
+			goto error_request;
+	}
 
-	for (i = 0; i < nvqs; ++i) {
+	for (i = 0; i < nvqs; i++) {
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
 		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
 		if (IS_ERR(vqs[i])) {
-			vm_del_vqs(vdev);
-			return PTR_ERR(vqs[i]);
+			err = PTR_ERR(vqs[i]);
+			goto error_find;
+		}
+		if (!per_vq_irq || !callbacks[i])
+			continue;
+		/* allocate per-vq irq if available and necessary */
+		snprintf(vm_dev->vm_vq_names[vm_dev->used_irqs],
+			sizeof(*vm_dev->vm_vq_names),
+			"%s-%s",
+			dev_name(&vm_dev->vdev.dev), names[i]);
+		irq = platform_get_irq(vm_dev->pdev, vm_dev->used_irqs);
+		err = request_irq(irq, vring_interrupt, 0,
+			vm_dev->vm_vq_names[vm_dev->used_irqs], vqs[i]);
+		if (err) {
+			vm_del_vq(vqs[i]);
+			goto error_find;
 		}
+		++vm_dev->used_irqs;
 	}
-
 	return 0;
+error_find:
+	vm_del_vqs(vdev);
+
+error_request:
+	return err;
+}
+
+static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+		       struct virtqueue *vqs[],
+		       vq_callback_t *callbacks[],
+		       const char *names[])
+{
+	int err;
+
+	/* Try multiple irqs with one irq per queue. */
+	err = vm_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
+	if (!err)
+		return 0;
+	/* Fallback: multiple irqs with one irq for config,
+	 * one shared for queues. */
+	err = vm_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+			true, false);
+	if (!err)
+		return 0;
+	/* Finally fall back to regular single interrupts. */
+	return vm_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+			false, false);
 }
 
 static const char *vm_bus_name(struct virtio_device *vdev)
-- 
1.7.1
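For context on the host side: "vhost-net with irqfd" means the VMM binds each virtqueue's vhost "call" eventfd to a guest interrupt line, so the host kernel injects the per-queue interrupt without a round trip through userspace. The snippet below is only a minimal sketch of that wiring using the public KVM and vhost ioctl interfaces, not QEMU's actual code; vm_fd, vhost_fd, queue_index and gsi are placeholders for this example.

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <linux/vhost.h>

/* Sketch: route one virtqueue's vhost "call" eventfd to a guest GSI via
 * KVM_IRQFD, so the interrupt is injected directly by the host kernel. */
static int wire_queue_irqfd(int vm_fd, int vhost_fd, int queue_index, int gsi)
{
	struct vhost_vring_file call;
	struct kvm_irqfd irqfd = { 0 };
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	/* vhost signals this eventfd whenever the queue needs attention. */
	call.index = queue_index;
	call.fd = efd;
	if (ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call) < 0)
		return -1;

	/* KVM turns a signal on the eventfd into an injection of this GSI. */
	irqfd.fd = efd;
	irqfd.gsi = gsi;
	if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
		return -1;

	return efd;
}

With a single shared interrupt this shortcut is not usable, because the guest handler also needs VIRTIO_MMIO_INTERRUPT_STATUS to be updated, which a direct irqfd injection does not do; a dedicated GSI per queue removes that dependency.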
On 2014/11/4 17:35, Shannon Zhao wrote:
> As the current virtio-mmio only supports a single irq,
> some advanced features such as vhost-net with irqfd
> are not supported, and net performance is not
> the best without vhost-net and irqfd support.

Hi Joel, Peter, Mst,

Some virtio-net with virtio-mmio performance data on ARM, added as follows:

Type of backend         bandwidth (GBytes/sec)
virtio-net              0.66
vhost-net               1.49
vhost-net with irqfd    2.01

Test cmd: ./iperf -c 192.168.0.2 -P 1 -i 10 -p 5001 -f G -t 60

From this test data, irqfd gives a great improvement (about 30%) in performance.
So it may be necessary to enable multiple irq support to make vhost-net with
virtio-mmio on ARM able to use irqfd. What do you guys think? Looking forward
to your feedback.

Thanks,
Shannon

> This patch lets virtio-mmio request multiple
> irqs like virtio-pci. With this patch and qemu assigning
> multiple irqs for the virtio-mmio device, it's ok to use
> vhost-net with irqfd on arm/arm64.
GAUGUEY Rémy 228890
2014-Nov-05 08:26 UTC
[RFC PATCH] virtio-mmio: support for multiple irqs
Hi Shannon,

> Type of backend         bandwidth (GBytes/sec)
> virtio-net              0.66
> vhost-net               1.49
> vhost-net with irqfd    2.01
>
> Test cmd: ./iperf -c 192.168.0.2 -P 1 -i 10 -p 5001 -f G -t 60

Impressive results! Could you please detail your setup? Which platform are you
using, and which GbE controller?

As a reference, it would also be good to have a result with an iperf to the
HOST, to see how far we are from a native configuration...

Also, I assume a pending Qemu patch is necessary to assign multiple irqs?
Am I correct?

Thanks a lot,
Best regards
Rémy
Michael S. Tsirkin
2014-Nov-06 09:34 UTC
[RFC PATCH] virtio-mmio: support for multiple irqs
On Tue, Nov 04, 2014 at 05:35:12PM +0800, Shannon Zhao wrote:
> As the current virtio-mmio only supports a single irq,
> some advanced features such as vhost-net with irqfd
> are not supported, and net performance is not
> the best without vhost-net and irqfd support.
>
> This patch lets virtio-mmio request multiple
> irqs like virtio-pci. With this patch and qemu assigning
> multiple irqs for the virtio-mmio device, it's ok to use
> vhost-net with irqfd on arm/arm64.
>
> As arm doesn't support msi-x now, we use GSI for
> multiple irqs. In this patch we use "vm_try_to_find_vqs"
> to check whether multiple irqs are supported, like
> virtio-pci.
>
> Is this the right direction? Are there other ways to
> make virtio-mmio support multiple irqs? Hope for feedback.
> Thanks.
>
> Signed-off-by: Shannon Zhao <zhaoshenglong at huawei.com>

So how does the guest discover whether the host device supports multiple IRQs?

Could you please document the new interface? E.g. send a patch for the
virtio spec.

I think this really should be controlled by the hypervisor, per device.
I'm also tempted to make this a virtio 1.0 only feature.

> [...]
On 2014/11/6 17:34, Michael S. Tsirkin wrote:
> On Tue, Nov 04, 2014 at 05:35:12PM +0800, Shannon Zhao wrote:
>> As the current virtio-mmio only supports a single irq,
>> some advanced features such as vhost-net with irqfd
>> are not supported, and net performance is not
>> the best without vhost-net and irqfd support.
>>
>> This patch lets virtio-mmio request multiple
>> irqs like virtio-pci. With this patch and qemu assigning
>> multiple irqs for the virtio-mmio device, it's ok to use
>> vhost-net with irqfd on arm/arm64.
>>
>> As arm doesn't support msi-x now, we use GSI for
>> multiple irqs. In this patch we use "vm_try_to_find_vqs"
>> to check whether multiple irqs are supported, like
>> virtio-pci.
>>
>> Is this the right direction? Are there other ways to
>> make virtio-mmio support multiple irqs? Hope for feedback.
>> Thanks.
>>
>> Signed-off-by: Shannon Zhao <zhaoshenglong at huawei.com>
>
> So how does the guest discover whether the host device supports multiple IRQs?

The guest uses vm_try_to_find_vqs to check whether it can get multiple IRQs,
just as virtio-pci uses vp_try_to_find_vqs. And within vm_request_multiple_irqs,
the guest checks whether the number of IRQs the host device provides is equal
to the number we want:

	for (i = 0; i < nirqs; i++) {
		irq = platform_get_irq(vm_dev->pdev, i);
		if (irq == -ENXIO)
			goto error;
	}

If we can't get the expected number of IRQs, we return an error and this try
fails. Then the guest will try two IRQs and then a single IRQ, like virtio-pci.

> Could you please document the new interface?
> E.g. send a patch for the virtio spec.

Ok, I'll send it later. Thank you very much :)

Shannon

> I think this really should be controlled by the hypervisor, per device.
> I'm also tempted to make this a virtio 1.0 only feature.
>
> [...]

--
Shannon
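To restate the discovery logic Shannon describes outside the diff: the driver simply probes how many interrupt lines the platform device was given and picks a mode accordingly. A minimal sketch follows; the helper name is invented for illustration, and the real logic lives in vm_request_multiple_irqs/vm_try_to_find_vqs in the patch.

#include <linux/platform_device.h>

/* Sketch only: count the interrupt lines exposed by the platform device.
 * Enough lines for one-per-vq plus config selects the per-vq mode;
 * otherwise the driver falls back to a shared or single interrupt,
 * mirroring virtio-pci's fallback ladder. */
static int vm_count_available_irqs(struct platform_device *pdev, int wanted)
{
	int i;

	for (i = 0; i < wanted; i++)
		if (platform_get_irq(pdev, i) < 0)
			break;	/* no more "interrupts" entries for this device */

	return i;
}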
On Tue, 2014-11-04 at 09:35 +0000, Shannon Zhao wrote:
> As the current virtio-mmio only supports a single irq,
> some advanced features such as vhost-net with irqfd
> are not supported, and net performance is not
> the best without vhost-net and irqfd support.

Could you, please, help me understand where the main issue is? Is it about:

1. The fact that the existing implementation blindly kicks all queues,
instead of only the updated ones?

or:

2. Literally having a dedicated interrupt line (remember, we're talking
"real" interrupts here, not message signalled ones) per queue, so they
can be handled by different processors at the same time?

Now, if it's only about 1, the simplest solution would be to extend the
VIRTIO_MMIO_INTERRUPT_STATUS register to signal up to 30 queues'
"readiness" in bits 2-31, still keeping bit 0 as a "combined"
VIRTIO_MMIO_INT_VRING. In the case when VIRTIO_MMIO_INT_VRING is set and
none of the "individual" bits is (a device which doesn't support this
feature, or one that has more than 30 queues and one of those is ready), we
would fall back to the original "kick all queues" approach. This could
be a useful (and pretty simple) extension. In the worst case scenario it
could be a post-1.0 standard addition, as it would provide backward
compatibility.

However, if it's about 2, we're talking larger changes here. From the
device perspective, we can define it as having a per-queue (plus one for
config) interrupt output *and* a "combined" output, being a simple logical
"or" of all the others. Then, the Device Tree bindings would be used to
express the implementation choices (I'd keep the kernel parameter
approach supporting the single interrupt case only). This is a very
popular and well understood approach for memory mapped peripherals (for
example, see the …). It allows the system integrator to make a decision
when it comes to the latency vs. number of interrupt lines trade-off. The
main issue is that we can't really impose a limit on the number of queues,
and therefore on the number of interrupts. This would require adding a new
"interrupt acknowledge" register, which would take the number of the queue
(or a symbolic value for the config one) instead of a bit mask. And I
must say that I'm not enjoying the idea of such a substantial change to
the specification this late in the process... (in other words: you'll
have to put extra effort into convincing me :-)

> This patch lets virtio-mmio request multiple
> irqs like virtio-pci. With this patch and qemu assigning
> multiple irqs for the virtio-mmio device, it's ok to use
> vhost-net with irqfd on arm/arm64.

Could you please tell me how many queues (interrupts) we are talking
about in this case? 5? A dozen? Hundreds?

Disclaimer: I have no personal experience with virtio and network (due
to the way our Fast Models are implemented, I mostly use block devices
and the 9p protocol over virtio, and I get enough performance from
them :-).

> As arm doesn't support msi-x now,

To be precise: "ARM" does "support" MSI-X :-) (google for GICv2m)

The correct statement would be: "normal memory mapped devices have no
interface for message signalled interrupts (like MSI-X)"

> we use GSI for multiple irqs.

I'm not sure what GSI stands for, but looking at the code I assume it's
just a "normal" peripheral interrupt.

> In this patch we use "vm_try_to_find_vqs"
> to check whether multiple irqs are supported, like
> virtio-pci.

Yeah, I can see that you have followed virtio-pci quite literally. I'm
particularly not convinced by the one interrupt for config, one for all
queues option. It doesn't make any sense to me here.

> Is this the right direction? Are there other ways to
> make virtio-mmio support multiple irqs? Hope for feedback.

One point I'd like to make is that the device was intentionally designed
with simplicity in mind first, performance later (something about
"embedded" etc. :-). Changing this assumption is of course possible, but
- I must say - it makes me slightly uncomfortable... The extensions we're
discussing here seem doable, but I've noticed your other patches dealing
with a shared memory region and I didn't like them at all, sorry.

I see the subject has already been touched in the discussions, but let
me bring PCI to the surface again. We're getting more server-class SoCs
in the market, which obviously bring PCI with them to both the arm and
arm64 worlds, something unheard of in the "mobile past". I believe the
PCI patches for arm64 have already been merged in the kernel.

Therefore: I'm not your boss so, obviously, I can't tell you what to do,
but could you consider redirecting your efforts into getting "ARM PCI"
up and running in qemu, so you can simply use the existing
infrastructure? This would save us a lot of work and pain in doing late
functional changes to the standard, and it will probably be more
future-proof from your perspective (PCI will happen, sooner or later -
you can make it sooner ;-)

Regards

Pawel
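To make option 1 above concrete, here is a rough sketch of what the guest-side handler could look like if bits 2-31 of VIRTIO_MMIO_INTERRUPT_STATUS flagged individual queues, with bit 0 kept as the combined VIRTIO_MMIO_INT_VRING. The per-queue bit layout is hypothetical (it is Pawel's proposal, not part of the current spec); the vm_* helpers are the ones from the patch above.

/* Hypothetical sketch of option 1: per-queue "ready" bits in bits 2..31
 * of the status register, bit 0 still the combined VIRTIO_MMIO_INT_VRING. */
static irqreturn_t vm_interrupt_per_queue_bits(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status, flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG))
		ret = vm_config_changed(irq, opaque);

	if (status & ~3UL) {
		/* Individual queue bits set: kick only the flagged queues. */
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			if (status & (1UL << (2 + info->vq->index)))
				ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	} else if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		/* Device without per-queue bits: fall back to kicking all. */
		ret |= vm_vring_interrupt(irq, opaque);
	}

	return ret;
}

Note that this variant still relies on the device updating the status register, so on its own it would not help the irqfd case discussed below.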
On 2014/11/11 23:11, Pawel Moll wrote:
> On Tue, 2014-11-04 at 09:35 +0000, Shannon Zhao wrote:
>> As the current virtio-mmio only supports a single irq,
>> some advanced features such as vhost-net with irqfd
>> are not supported, and net performance is not
>> the best without vhost-net and irqfd support.
>
> Could you, please, help me understand where the main issue is?
> Is it about:
>
> 1. The fact that the existing implementation blindly kicks all queues,
> instead of only the updated ones?
>
> or:
>
> 2. Literally having a dedicated interrupt line (remember, we're talking
> "real" interrupts here, not message signalled ones) per queue, so they
> can be handled by different processors at the same time?

The main issue is that the current virtio-mmio supports only one interrupt,
which is shared by config and queues. Therefore the virtio-mmio driver has to
read "VIRTIO_MMIO_INTERRUPT_STATUS" to get the interrupt reason and check whom
this interrupt is for. If we use vhost-net, which uses irqfd to inject the
interrupt, vhost-net doesn't update "VIRTIO_MMIO_INTERRUPT_STATUS", so the
guest driver can't read the interrupt reason and doesn't call a handler to
process it. So we can assign a dedicated interrupt line per queue for
virtio-mmio and it can work with irqfd.

> Now, if it's only about 1, the simplest solution would be to extend the
> VIRTIO_MMIO_INTERRUPT_STATUS register to signal up to 30 queues'
> "readiness" in bits 2-31, still keeping bit 0 as a "combined"
> VIRTIO_MMIO_INT_VRING. [...]
>
> However, if it's about 2, we're talking larger changes here. [...] The
> main issue is that we can't really impose a limit on the number of queues,
> and therefore on the number of interrupts. This would require adding a new
> "interrupt acknowledge" register, which would take the number of the queue
> (or a symbolic value for the config one) instead of a bit mask.

Yes, maybe we should add a new "interrupt acknowledge" register for the backend
and frontend to agree on the number of queues.

> And I must say that I'm not enjoying the idea of such a substantial change
> to the specification this late in the process... (in other words: you'll
> have to put extra effort into convincing me :-)
>
>> This patch lets virtio-mmio request multiple
>> irqs like virtio-pci. With this patch and qemu assigning
>> multiple irqs for the virtio-mmio device, it's ok to use
>> vhost-net with irqfd on arm/arm64.
>
> Could you please tell me how many queues (interrupts) we are talking
> about in this case? 5? A dozen? Hundreds?

Theoretically the number of interrupts has no limit, but given the limit on
ARM interrupt lines, the number should be less than the number of available
ARM interrupt lines. In a real situation, I think, the number is generally
less than 17 (8 pairs of vring interrupts and one config interrupt).

> Disclaimer: I have no personal experience with virtio and network (due
> to the way our Fast Models are implemented, I mostly use block devices
> and the 9p protocol over virtio, and I get enough performance from
> them :-).
>
>> As arm doesn't support msi-x now,
>
> To be precise: "ARM" does "support" MSI-X :-) (google for GICv2m)

Sorry, I meant ARM with GICv2.

> The correct statement would be: "normal memory mapped devices have no
> interface for message signalled interrupts (like MSI-X)"

Yes, that's right.

>> we use GSI for multiple irqs.
>
> I'm not sure what GSI stands for, but looking at the code I assume it's
> just a "normal" peripheral interrupt.
>
>> In this patch we use "vm_try_to_find_vqs"
>> to check whether multiple irqs are supported, like
>> virtio-pci.
>
> Yeah, I can see that you have followed virtio-pci quite literally. I'm
> particularly not convinced by the one interrupt for config, one for all
> queues option. It doesn't make any sense to me here.

About one interrupt for all queues: it's not a typical case, but it offers one
more choice for users. Users should configure the number of interrupts
according to their situation.

>> Is this the right direction? Are there other ways to
>> make virtio-mmio support multiple irqs? Hope for feedback.
>
> One point I'd like to make is that the device was intentionally designed
> with simplicity in mind first, performance later (something about
> "embedded" etc. :-). Changing this assumption is of course possible, but
> - I must say - it makes me slightly uncomfortable...

Ah, I think ARM is not only about embedded things. Maybe it could have wider
application, such as micro servers. Just my personal opinion.

> The extensions we're discussing here seem doable, but I've noticed your
> other patches dealing with a shared memory region and I didn't like them
> at all, sorry.

The approach with a shared memory region has been dropped, as you can see from
the mailing list. The approach of this patch gets a net performance
improvement of about 30%. This may make sense for platforms without MSI
support (e.g. ARM with GICv2).

> I see the subject has already been touched in the discussions, but let
> me bring PCI to the surface again. [...] could you consider redirecting
> your efforts into getting "ARM PCI" up and running in qemu, so you can
> simply use the existing infrastructure? This would save us a lot of work
> and pain in doing late functional changes to the standard, and it will
> probably be more future-proof from your perspective (PCI will happen,
> sooner or later - you can make it sooner ;-)
>
> Regards
>
> Pawel

--
Shannon
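To spell out why the dedicated line side-steps the problem Shannon describes: with one GSI per virtqueue, the guest-side handler is vring_interrupt() itself with the virtqueue as dev_id, so it never reads VIRTIO_MMIO_INTERRUPT_STATUS; an interrupt injected directly by the host via irqfd is therefore handled even though nothing updated that register. A minimal sketch follows; the irq number, name string and virtqueue are assumed to come from the probe path, as in the patch.

/* Sketch: a per-vq interrupt needs no status-register read or ack,
 * which is exactly what makes host-side injection via irqfd work. */
static int vm_hook_per_vq_irq(unsigned int irq, const char *name,
			      struct virtqueue *vq)
{
	return request_irq(irq, vring_interrupt, 0, name, vq);
}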