search for: vp_vdpa_free_irq

Displaying 17 results from an estimated 17 matches for "vp_vdpa_free_irq".

2023 Jun 30
2
[PATCH] vp_vdpa: synchronize irq when free irq
...pa/virtio_pci/vp_vdpa.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 281287fae89f..d8ee3e68cd2d 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -116,6 +116,7 @@ static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) for (i = 0; i < vp_vdpa->queues; i++) { if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) { vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR); + synchronize_irq(vp_vdpa->vring[i].irq); devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq...
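For context, here is a minimal sketch of how the per-virtqueue teardown in vp_vdpa_free_irq() reads with this patch applied. The loop body up to devm_free_irq() follows the quoted diff; the mdev/pdev lookups, the remaining devm_free_irq() arguments, and the trailing cleanup are assumptions filled in for illustration, not taken verbatim from the posting:

        static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
        {
                struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; /* assumed field name */
                struct pci_dev *pdev = mdev->pci_dev;                   /* assumed field name */
                int i;

                for (i = 0; i < vp_vdpa->queues; i++) {
                        if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
                                /* Detach the MSI-X vector from virtqueue i. */
                                vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
                                /* New in this patch: wait for a handler that may
                                 * still be running on another CPU before the IRQ
                                 * is freed. */
                                synchronize_irq(vp_vdpa->vring[i].irq);
                                devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
                                              &vp_vdpa->vring[i]);      /* assumed args */
                                vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
                        }
                }
                /* Config-interrupt and MSI-X vector teardown omitted. */
        }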
2023 Jun 30
1
[PATCH] vp_vdpa: synchronize irq when free irq
...1 file changed, 2 insertions(+) > > diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c > index 281287fae89f..d8ee3e68cd2d 100644 > --- a/drivers/vdpa/virtio_pci/vp_vdpa.c > +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c > @@ -116,6 +116,7 @@ static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) > for (i = 0; i < vp_vdpa->queues; i++) { > if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) { > vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR); > + synchronize_irq(vp_vdpa->vring[i].irq); > devm_free_irq(&pdev->dev,...
2023 Jun 30
1
[PATCH] vp_vdpa: synchronize irq when free irq
...; 1 file changed, 2 insertions(+) > > diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c > index 281287fae89f..d8ee3e68cd2d 100644 > --- a/drivers/vdpa/virtio_pci/vp_vdpa.c > +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c > @@ -116,6 +116,7 @@ static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) > for (i = 0; i < vp_vdpa->queues; i++) { > if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) { > vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR); > + synchronize_irq(vp_vdpa->...
2020 Jun 10
2
[PATCH V3] vdpa: introduce virtio pci driver
...> + return 0; >> +} >> + >> +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) >> +{ >> + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); >> + >> + return vp_ioread8(&vp_vdpa->common->device_status); >> +} >> + >> +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) >> +{ >> + struct pci_dev *pdev = vp_vdpa->pdev; >> + int i; >> + >> + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { >> + if (vp_vdpa->vring[i].irq != -1) { >> + vp_iowrite16(i, &vp_vdpa->common->queue_select); >...
2020 Jun 10
2
[PATCH V3] vdpa: introduce virtio pci driver
...> + return 0; >> +} >> + >> +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) >> +{ >> + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); >> + >> + return vp_ioread8(&vp_vdpa->common->device_status); >> +} >> + >> +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) >> +{ >> + struct pci_dev *pdev = vp_vdpa->pdev; >> + int i; >> + >> + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { >> + if (vp_vdpa->vring[i].irq != -1) { >> + vp_iowrite16(i, &vp_vdpa->common->queue_select); >...
2020 Jun 10
2
[PATCH V3] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
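For comparison with the 2023 fix above, here is a sketch of the detach sequence as it looked in these early driver postings, reconstructed from the truncated snippet. The target of the second vp_iowrite16() and the devm_free_irq() arguments are cut off in the snippet and are assumptions for illustration:

        static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
        {
                struct pci_dev *pdev = vp_vdpa->pdev;
                int i;

                for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) {
                        if (vp_vdpa->vring[i].irq != -1) {
                                /* Select virtqueue i, then clear its MSI-X vector by
                                 * writing the common config directly (no vp_modern_*
                                 * helper yet, and no synchronize_irq() here). */
                                vp_iowrite16(i, &vp_vdpa->common->queue_select);
                                vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
                                             &vp_vdpa->common->queue_msix_vector); /* assumed field */
                                devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
                                              &vp_vdpa->vring[i]);                  /* assumed args */
                                vp_vdpa->vring[i].irq = -1;
                        }
                }
        }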
2020 Jun 10
2
[PATCH V3] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
2020 Jun 10
2
[PATCH RESEND V2] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
2020 Jun 10
2
[PATCH RESEND V2] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
2020 Jun 10
0
[PATCH V3] vdpa: introduce virtio pci driver
...> +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) > > > +{ > > > + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); > > > + > > > + return vp_ioread8(&vp_vdpa->common->device_status); > > > +} > > > + > > > +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) > > > +{ > > > + struct pci_dev *pdev = vp_vdpa->pdev; > > > + int i; > > > + > > > + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { > > > + if (vp_vdpa->vring[i].irq != -1) { > > > + vp_iowrite16(i, &...
2020 May 29
0
[PATCH 5/6] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
2020 Jun 10
0
[PATCH RESEND V2] vdpa: introduce virtio pci driver
...->common->guest_feature); > + > + return 0; > +} > + > +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) > +{ > + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); > + > + return vp_ioread8(&vp_vdpa->common->device_status); > +} > + > +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) > +{ > + struct pci_dev *pdev = vp_vdpa->pdev; > + int i; > + > + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { > + if (vp_vdpa->vring[i].irq != -1) { > + vp_iowrite16(i, &vp_vdpa->common->queue_select); > + vp_iowrite16(VIRTIO_MS...
2020 Jun 10
0
[PATCH V3] vdpa: introduce virtio pci driver
...->common->guest_feature); > + > + return 0; > +} > + > +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) > +{ > + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); > + > + return vp_ioread8(&vp_vdpa->common->device_status); > +} > + > +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) > +{ > + struct pci_dev *pdev = vp_vdpa->pdev; > + int i; > + > + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { > + if (vp_vdpa->vring[i].irq != -1) { > + vp_iowrite16(i, &vp_vdpa->common->queue_select); > + vp_iowrite16(VIRTIO_MS...
2020 Jun 10
2
[PATCH V2] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
2020 Jun 10
2
[PATCH V2] vdpa: introduce virtio pci driver
...e_select); + vp_iowrite32(features >> 32, &vp_vdpa->common->guest_feature); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return vp_ioread8(&vp_vdpa->common->device_status); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct pci_dev *pdev = vp_vdpa->pdev; + int i; + + for (i = 0; i < VP_VDPA_MAX_QUEUE; i++) { + if (vp_vdpa->vring[i].irq != -1) { + vp_iowrite16(i, &vp_vdpa->common->queue_select); + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_vdpa->...
2020 May 29
12
[PATCH 0/6] vDPA: doorbell mapping
Hi all: This series introduces basic doorbell mapping support for vhost-vDPA. A userspace program may use mmap() to map the doorbell of a specific virtqueue into its address space. This helps to reduce the syscall or vmexit overhead. A new vdpa_config_ops was introduced to report the location of the doorbell; vhost_vdpa may then choose to map the doorbell when: - The doorbell
2020 May 29
12
[PATCH 0/6] vDPA: doorbell mapping
Hi all: This series introduces basic doorbell mapping support for vhost-vDPA. A userspace program may use mmap() to map the doorbell of a specific virtqueue into its address space. This helps to reduce the syscall or vmexit overhead. A new vdpa_config_ops was introduced to report the location of the doorbell; vhost_vdpa may then choose to map the doorbell when: - The doorbell
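To illustrate the doorbell-mapping idea described in this cover letter, here is a hypothetical userspace sketch. The /dev/vhost-vdpa-0 node name, the one-page-per-virtqueue mmap offset convention, and writing the queue index as the kick value are assumptions for illustration only, not details taken from the series:

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                const unsigned int qindex = 0;              /* virtqueue to kick */
                long page = sysconf(_SC_PAGESIZE);
                int fd = open("/dev/vhost-vdpa-0", O_RDWR); /* assumed device node */
                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* Assumption: the doorbell of queue N is exposed at offset N pages. */
                void *db = mmap(NULL, page, PROT_WRITE, MAP_SHARED, fd,
                                (off_t)qindex * page);
                if (db == MAP_FAILED) {
                        perror("mmap");
                        close(fd);
                        return 1;
                }

                /* Kick the queue by writing its index to the mapped notify
                 * register instead of going through a syscall. */
                *(volatile uint16_t *)db = (uint16_t)qindex;

                munmap(db, page);
                close(fd);
                return 0;
        }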