Displaying 16 results from an estimated 16 matches for "datamatch".
2013 Apr 04  (1)  [PATCH RFC] kvm: add PV MMIO EVENTFD
...{
 enum kvm_bus {
         KVM_MMIO_BUS,
         KVM_PIO_BUS,
+        KVM_PV_MMIO_BUS,
         KVM_NR_BUSES
 };
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 3c56ba3..61783ee 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -449,11 +449,19 @@ enum {
         kvm_ioeventfd_flag_nr_datamatch,
         kvm_ioeventfd_flag_nr_pio,
         kvm_ioeventfd_flag_nr_deassign,
+        kvm_ioeventfd_flag_nr_pv_mmio,
         kvm_ioeventfd_flag_nr_max,
 };
 #define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
 #define KVM_IOEVENTFD_FLAG_PIO       (1 << kvm_ioeventfd_flag_nr_pio)
+/*
+ *...
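A minimal userspace sketch of how such an eventfd might be registered, assuming the truncated #define above follows the existing naming pattern as KVM_IOEVENTFD_FLAG_PV_MMIO; the flag is specific to this RFC, so the whole helper is illustrative:

/* Sketch only: KVM_IOEVENTFD_FLAG_PV_MMIO is from this RFC, not an
 * upstream flag; its name is assumed from the enum quoted above. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int register_pv_mmio_eventfd(int vm_fd, __u64 gpa, int efd)
{
        struct kvm_ioeventfd pvio = {
                .addr  = gpa,   /* guest physical address to trap */
                .len   = 2,     /* width of the trapped write */
                .fd    = efd,   /* eventfd(2) descriptor KVM signals */
                .flags = KVM_IOEVENTFD_FLAG_PV_MMIO,
        };

        /* vm_fd is the VM descriptor obtained from KVM_CREATE_VM */
        return ioctl(vm_fd, KVM_IOEVENTFD, &pvio);
}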
2013 Apr 04  (0)  [PATCH v2 0/6] kvm: pci PORT IO MMIO and PV MMIO speed tests
...for this instead. Used together with the
kvm unittest patches I posted on the kvm mailing list.
To use, simply add the device to the PCI bus.
Example test output:
vmcall                            1519
....
outl_to_kernel                    1745
....
mmio-no-eventfd:pci-mem           9075
mmio-wildcard-eventfd:pci-mem     3529
mmio-datamatch-eventfd:pci-mem    3509
mmio-pv-eventfd:pci-mem           1878
portio-no-eventfd:pci-io          5535
portio-wildcard-eventfd:pci-io    1846
portio-datamatch-eventfd:pci-io   1848
portio-pv-eventfd:pci-io          1842
The first interesting conclusion is that the overhead of an MMIO
exit to QEMU as compared to PIO is d...
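For orientation, numbers like these typically come from timing a tight loop of trapping accesses; a purely illustrative sketch of such a measurement (the helper names and the cycle-counter approach are ours, not taken from the series):

#include <stdint.h>

static inline uint64_t rdtsc(void)
{
        uint32_t lo, hi;
        __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
        return ((uint64_t)hi << 32) | lo;
}

/* Illustrative only: average cycles per trapping MMIO write.  When run
 * inside a guest, each store below exits to the host's ioeventfd path. */
static uint64_t time_mmio_kick(volatile uint32_t *notify, int iters)
{
        uint64_t start = rdtsc();

        for (int i = 0; i < iters; i++)
                *notify = 0;
        return (rdtsc() - start) / iters;
}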
2012 Apr 07  (0)  [PATCH 05/14] kvm tools: Add virtio-mmio support
...>ioeventfds[vq] = (struct virtio_mmio_ioevent_param) {
+                .vdev = vdev,
+                .vq   = vq,
+        };
+
+        ioevent = (struct ioevent) {
+                .io_addr   = vmmio->addr + VIRTIO_MMIO_QUEUE_NOTIFY,
+                .io_len    = sizeof(u32),
+                .fn        = virtio_mmio_ioevent_callback,
+                .fn_ptr    = &vmmio->ioeventfds[vq],
+                .datamatch = vq,
+                .fn_kvm    = kvm,
+                .fd        = eventfd(0, 0),
+        };
+
+        err = ioeventfd__add_event(&ioevent, false);
+        if (err)
+                return err;
+
+        if (vdev->ops->notify_vq_eventfd)
+                vdev->ops->notify_vq_eventfd(kvm, vmmio->dev, vq, ioevent.fd);
+
+        return 0;
+}
+
+int virtio_mmio_signal_vq(...
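A companion sketch of the guest-side kick that an ioeventfd like the one above catches: the guest stores the queue index to the QUEUE_NOTIFY register and KVM compares the value against .datamatch. Offset 0x50 is VIRTIO_MMIO_QUEUE_NOTIFY in the virtio-mmio register layout; the helper itself is illustrative:

#include <stdint.h>

/* Illustrative guest-side kick: a 32-bit store of the queue index to
 * VIRTIO_MMIO_QUEUE_NOTIFY (offset 0x50) is matched by .datamatch == vq. */
static void virtio_mmio_kick(volatile uint8_t *mmio_base, uint32_t vq)
{
        *(volatile uint32_t *)(mmio_base + 0x50) = vq;
}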
2009 Aug 13  (0)  [PATCHv2 3/3] qemu-kvm: vhost-net implementation
...>pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+        .addr = proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
+        .len = 2,
+        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
+        .fd = fd,
+    };
+    int r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+    if (r < 0)
+        return r;
+    return 0;...
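The guest-side counterpart of this registration is a 16-bit port write of the queue index, which is exactly what .len = 2 plus .datamatch = n match against; an illustrative sketch (offset 16 is the legacy virtio-pci VIRTIO_PCI_QUEUE_NOTIFY offset within the device's I/O BAR):

#include <stdint.h>

/* Illustrative guest-side kick for the ioeventfd above: outw of the
 * queue index to the legacy virtio-pci QUEUE_NOTIFY port (BAR0 + 16). */
static inline void virtio_pci_kick(uint16_t iobase, uint16_t n)
{
        __asm__ __volatile__("outw %0, %1"
                             : : "a"(n), "Nd"((uint16_t)(iobase + 16)));
}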
2009 Aug 17  (1)  [PATCHv3 3/4] qemu-kvm: vhost-net implementation
...>pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+        .addr = proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
+        .len = 2,
+        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
+        .fd = fd,
+    };
+    int r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+    if (r < 0)
+        return r;
+    return 0;...
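Callers of code like the above normally probe for kernel support first; a minimal sketch using the real KVM_CHECK_EXTENSION / KVM_CAP_IOEVENTFD interface (the helper name is ours):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns nonzero if KVM_IOEVENTFD is available.  kvm_fd is the
 * /dev/kvm system descriptor, not the per-VM descriptor. */
static int have_ioeventfd(int kvm_fd)
{
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD) > 0;
}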
2009 Aug 10  (0)  [PATCH 3/3] qemu-kvm: vhost-net implementation
...>pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+        .addr = proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
+        .len = 2,
+        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
+        .fd = fd,
+    };
+    int r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+    if (r < 0)
+        return r;
+    return 0;...
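The same uapi enum quoted earlier also defines a deassign flag, so a registration like this one has a symmetric teardown; an illustrative sketch (the helper name is ours):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Undo a previous KVM_IOEVENTFD registration: pass the same struct
 * (address, len, fd) with KVM_IOEVENTFD_FLAG_DEASSIGN added. */
static int virtio_pci_unqueuefd(int vm_fd, struct kvm_ioeventfd *kick)
{
        kick->flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
        return ioctl(vm_fd, KVM_IOEVENTFD, kick);
}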
2009 Nov 02  (2)  [PATCHv4 6/6] qemu-kvm: vhost-net implementation
...>pci_dev.msix_irq_entries[vector].gsi;
+    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &call);
+    if (r < 0)
+        return r;
+    return 0;
+}
+
+static int virtio_pci_queuefd(void *opaque, int n, int fd)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    struct kvm_ioeventfd kick = {
+        .datamatch = n,
+        .addr = proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
+        .len = 2,
+        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
+        .fd = fd,
+    };
+    int r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+    if (r < 0)
+        return r;
+    return 0;...
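The truncated top of the excerpt is the interrupt half of the wiring: KVM_IRQFD binds an eventfd to a guest GSI so the host kernel can inject the virtqueue interrupt directly; a stand-alone sketch of that call (the helper name is ours):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch of the KVM_IRQFD call made above: whenever 'fd' is signalled,
 * KVM injects the interrupt mapped to 'gsi' into the guest. */
static int assign_irqfd(int vm_fd, int fd, int gsi)
{
        struct kvm_irqfd call = {
                .fd  = fd,
                .gsi = gsi,
        };

        return ioctl(vm_fd, KVM_IRQFD, &call);
}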
2015 Nov 18  (2)  [RFC] kvmtool: add support for modern virtio-pci
...o_pci_modern *vpci = vdev->virtio;
+        int i, r, flags = 0;
+        int fds[2];
+
+        vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) {
+                .vdev = vdev,
+                .vq   = vq,
+        };
+
+        ioevent = (struct ioevent) {
+                .fn        = virtio_pcim__ioevent_callback,
+                .fn_ptr    = &vpci->ioeventfds[vq],
+                .datamatch = vq,
+                .fn_kvm    = kvm,
+        };
+
+        /*
+         * Vhost will poll the eventfd on the host kernel side;
+         * otherwise we need to poll in userspace.
+         */
+        if (!vdev->use_vhost)
+                flags |= IOEVENTFD_FLAG_USER_POLL;
+
+        /* ioport */
+        ioevent.io_addr = vpci->port_addr + 0x80 + vq * 2;
+        ioevent.io_len =...
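When vhost is not in use, IOEVENTFD_FLAG_USER_POLL means kvmtool itself consumes the eventfd; an illustrative stand-alone sketch of what such a userspace poll loop amounts to (not kvmtool's actual loop):

#include <stdint.h>
#include <unistd.h>

/* Block on the eventfd that KVM signals for matching guest writes,
 * dispatching the registered callback once per kick. */
static void user_poll_ioevent(int efd, void (*fn)(void *), void *arg)
{
        uint64_t ticks;

        while (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
                fn(arg);
}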