Displaying 8 results matching "msix_irq_entri" (stemmed from msix_irq_entries).
2009 May 11
0
[PATCH 1/2] qemu-kvm: add MSI-X support
...bar_nr);
+    pci_add_capability(pdev, PCI_CAP_ID_MSIX, PCI_CAPABILITY_CONFIG_MSIX_LENGTH);
+    pdev->cap.msix = config_offset;
+    return 0;
+}
+
+static void msix_free_irq_entries(PCIDevice *dev)
+{
+    int i;
+
+    /* TODO: handle errors */
+    for (i = 0; i < dev->msix_irq_entries_nr; i++)
+        msix_vector_unuse(dev, i);
+}
+
+static void msix_enable(PCIDevice *dev)
+{
+    uint32_t ctrl, data;
+    int i;
+
+    if (!dev->msix_irq_entries_nr) {
+        fprintf(stderr, "MSI-X entry number is zero!\n");
+        return;
+    }
+
+    for (i = 0; i < dev...
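The hunk above registers the MSI-X capability and then, in msix_free_irq_entries(), releases every vector through msix_vector_unuse(). A minimal sketch of the reference counting such a use/unuse pair implies, assuming the msix_irq_entries_nr and msix_entry_used fields visible in these patches (the struct name and bound are illustrative, not taken from the patch):

#include <assert.h>

#define MSIX_MAX_ENTRIES 256            /* illustrative bound */

struct msix_state {                     /* illustrative container */
    int msix_irq_entries_nr;            /* vectors the device exposes */
    unsigned msix_entry_used[MSIX_MAX_ENTRIES]; /* per-vector refcount */
};

static int msix_vector_use(struct msix_state *s, int vector)
{
    if (vector < 0 || vector >= s->msix_irq_entries_nr)
        return -1;
    ++s->msix_entry_used[vector];
    return 0;
}

static void msix_vector_unuse(struct msix_state *s, int vector)
{
    assert(vector >= 0 && vector < s->msix_irq_entries_nr);
    if (s->msix_entry_used[vector])     /* tolerate over-unuse */
        --s->msix_entry_used[vector];
}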
2009 May 20
0
[PATCHv2-RFC 1/2] qemu-kvm: add MSI-X support
...+ MSIX_PBA_OFFSET, (bar_size + MSIX_PAGE_SIZE / 2) |
+                     bar_nr);
+    pdev->cap.msix = config_offset;
+    return 0;
+}
+
+static void msix_free_irq_entries(PCIDevice *dev)
+{
+    int vector, changed = 0;
+
+    /* TODO: handle errors */
+    for (vector = 0; vector < dev->msix_irq_entries_nr; ++vector)
+        if (dev->msix_entry_used[vector]) {
+            kvm_del_routing_entry(kvm_context, &dev->msix_irq_entries[vector]);
+            dev->msix_entry_used[vector] = 0;
+            changed = 1;
+        }
+    if (changed)
+        kvm_commit_irq_routes(kvm_context...
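In this revision the free path is KVM-aware: each used vector's routing entry is deleted from the userspace GSI table and the table is committed to the kernel once at the end. A sketch of that stage-then-commit pattern, assuming a vm fd from KVM_CREATE_VM (the helper names are illustrative; struct kvm_irq_routing and KVM_SET_GSI_ROUTING are from linux/kvm.h):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Drop one staged entry by GSI; the kernel sees nothing until commit. */
static void del_routing_entry(struct kvm_irq_routing *rt, uint32_t gsi)
{
    for (uint32_t i = 0; i < rt->nr; i++) {
        if (rt->entries[i].gsi == gsi) {
            rt->entries[i] = rt->entries[--rt->nr]; /* swap-remove */
            return;
        }
    }
}

/* One KVM_SET_GSI_ROUTING ioctl replaces the kernel's whole table,
 * mirroring kvm_commit_irq_routes() in the patch. */
static int commit_irq_routes(int vm_fd, struct kvm_irq_routing *rt)
{
    return ioctl(vm_fd, KVM_SET_GSI_ROUTING, rt);
}

Batching matters here: committing once after the loop costs a single system call however many vectors change, instead of one ioctl per vector.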
2009 Aug 13
0
[PATCHv2 3/3] qemu-kvm: vhost-net implementation
...{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_irqfd call = { };
+    int r;
+
+    if (vector >= proxy->pci_dev.msix_entries_nr)
+        return -EINVAL;
+    if (!proxy->pci_dev.msix_entry_used[vector])
+        return -ENOENT;
+    call.fd = fd;
+    call.gsi = proxy->pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+...
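The function above resolves an MSI-X vector to its GSI and hands an eventfd to the kernel via KVM_IRQFD, letting vhost signal guest interrupts without bouncing through userspace. A self-contained sketch of that binding, assuming a vm fd and a resolved gsi (eventfd(2), ioctl(2), and struct kvm_irqfd are the real kernel interfaces; bind_irqfd is an illustrative name):

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Attach an eventfd to a guest GSI; writing 1 to the returned fd
 * (e.g. from vhost) injects the interrupt with no userspace exit. */
static int bind_irqfd(int vm_fd, uint32_t gsi)
{
    int efd = eventfd(0, 0);
    if (efd < 0)
        return -1;

    struct kvm_irqfd call = {
        .fd  = (uint32_t)efd, /* signal source */
        .gsi = gsi,           /* interrupt to inject */
    };
    if (ioctl(vm_fd, KVM_IRQFD, &call) < 0) {
        close(efd);
        return -1;
    }
    return efd;
}

The companion virtio_pci_queuefd() does the mirror-image binding with KVM_IOEVENTFD: a guest write matching .datamatch kicks an eventfd that the vhost worker waits on, so kicks also bypass userspace.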
2009 Aug 17
1
[PATCHv3 3/4] qemu-kvm: vhost-net implementation
...{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_irqfd call = { };
+    int r;
+
+    if (vector >= proxy->pci_dev.msix_entries_nr)
+        return -EINVAL;
+    if (!proxy->pci_dev.msix_entry_used[vector])
+        return -ENOENT;
+    call.fd = fd;
+    call.gsi = proxy->pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+...
2009 May 20
0
[PATCHv2-RFC 2/2] qemu-kvm: use common code for assigned msix
...y number is zero!\n");
-        return -EINVAL;
-    }
     msix_nr.assigned_dev_id = calc_assigned_dev_id(adev->h_busnr,
                                                    (uint8_t)adev->h_devfn);
-    msix_nr.entry_nr = entries_nr;
+    msix_nr.entry_nr = 0;
+    for (i = 0; i < pci_dev->msix_irq_entries_nr; ++i)
+        if (msix_vector_is_used(pci_dev, i))
+            ++msix_nr.entry_nr;
+
     r = kvm_assign_set_msix_nr(kvm_context, &msix_nr);
     if (r != 0) {
         fprintf(stderr, "fail to set MSI-X entry number for MSIX! %s\n",
@@ -845,65 +817,29 @@ static int assigned_de...
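The hunk derives entry_nr by scanning shared per-vector state instead of trusting the caller-supplied entries_nr, so assigned devices and the common MSI-X code agree on which vectors are live. The counting idiom in isolation, reusing the illustrative msix_state struct from the sketch after the first result (msix_vector_is_used() is assumed to test msix_entry_used[]):

/* Derive the MSI-X entry count from per-vector state rather than a
 * caller-supplied entries_nr; both functions are illustrative. */
static int msix_vector_is_used(const struct msix_state *s, int vector)
{
    return s->msix_entry_used[vector] != 0;
}

static int count_used_vectors(const struct msix_state *s)
{
    int used = 0;
    for (int i = 0; i < s->msix_irq_entries_nr; ++i)
        if (msix_vector_is_used(s, i))
            ++used;
    return used;
}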
2009 Aug 10
0
[PATCH 3/3] qemu-kvm: vhost-net implementation
...{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_irqfd call = { };
+    int r;
+
+    if (vector >= proxy->pci_dev.msix_entries_nr)
+        return -EINVAL;
+    if (!proxy->pci_dev.msix_entry_used[vector])
+        return -ENOENT;
+    call.fd = fd;
+    call.gsi = proxy->pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+...
2009 May 11
0
[PATCH 2/2] qemu-kvm: use common code for assigned msix
...y number is zero!\n");
-        return -EINVAL;
-    }
     msix_nr.assigned_dev_id = calc_assigned_dev_id(adev->h_busnr,
                                                    (uint8_t)adev->h_devfn);
-    msix_nr.entry_nr = entries_nr;
+    msix_nr.entry_nr = 0;
+    for (i = 0; i < pci_dev->msix_irq_entries_nr; ++i)
+        if (msix_vector_is_used(pci_dev, i))
+            ++msix_nr.entry_nr;
+
     r = kvm_assign_set_msix_nr(kvm_context, &msix_nr);
     if (r != 0) {
         fprintf(stderr, "fail to set MSI-X entry number for MSIX! %s\n",
@@ -844,67 +816,29 @@ static int assigned_de...
2009 Nov 02
2
[PATCHv4 6/6] qemu-kvm: vhost-net implementation
...{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_irqfd call = { };
+    int r;
+
+    if (vector >= proxy->pci_dev.msix_entries_nr)
+        return -EINVAL;
+    if (!proxy->pci_dev.msix_entry_used[vector])
+        return -ENOENT;
+    call.fd = fd;
+    call.gsi = proxy->pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+...