Displaying 20 results from an estimated 103 matches for "virtio_add_queue".
2008 Jan 09
2
[PATCH] Increase the tx queue to 512 descriptors to fix performance problem.
...t.c
index 777fe2c..3d07b65 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -293,7 +293,7 @@ void *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
n->vdev.update_config = virtio_net_update_config;
n->vdev.get_features = virtio_net_get_features;
n->rx_vq = virtio_add_queue(&n->vdev, 512, virtio_net_handle_rx);
- n->tx_vq = virtio_add_queue(&n->vdev, 128, virtio_net_handle_tx);
+ n->tx_vq = virtio_add_queue(&n->vdev, 512, virtio_net_handle_tx);
n->can_receive = 0;
memcpy(n->mac, nd->macaddr, 6);
n->vc = qemu_...
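For context, virtio_add_queue() is what fixes a virtqueue's ring size when the device is created. A rough sketch of the prototype as it looked in the QEMU of that era (reproduced from memory, so treat it as an approximation):

    typedef struct VirtIODevice VirtIODevice;
    typedef struct VirtQueue VirtQueue;

    /* queue_size is the number of ring descriptors; handle_output runs when
     * the guest kicks the queue. Bumping the TX queue from 128 to 512 lets
     * the guest post roughly four times as many pending descriptors before
     * it must wait for the host to drain the ring. Split virtqueue sizes
     * are powers of two. */
    VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                                void (*handle_output)(VirtIODevice *vdev,
                                                      VirtQueue *vq));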
2016 Mar 03
0
[RFC qemu 2/4] virtio-balloon: Add a new feature to balloon device
...virtio_balloon_free_pages, s);
>
> if (ret < 0) {
> error_setg(errp, "Only one balloon device is supported");
> @@ -440,6 +518,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
> s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
> s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
> s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);
> + s->fvq = virtio_add_queue(vdev, 128, virtio_balloon_get_free_pages);
>
>...
2016 Mar 03
2
[RFC qemu 2/4] virtio-balloon: Add a new feature to balloon device
...irtio_balloon_stat,
+ virtio_balloon_free_pages, s);
if (ret < 0) {
error_setg(errp, "Only one balloon device is supported");
@@ -440,6 +518,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);
+ s->fvq = virtio_add_queue(vdev, 128, virtio_balloon_get_free_pages);
reset_stats(s);
dif...
2014 Aug 25
2
help? looking for limits on in-flight write operations for virtio-blk
...full amount of data to read/write.
Monitoring the "inflight" numbers in the guest, I've seen them go as high
as 184.
I'm trying to figure out if there are any limits on how high the
inflight numbers can go, but I'm not having much luck.
I was hopeful when I saw qemu calling virtio_add_queue() with a queue
size, but the queue size was 128, which didn't match the inflight numbers
I was seeing, and after changing the queue size down to 16 I still saw
the number of inflight requests go up to 184, and then the guest hit a
kernel panic in virtqueue_add_buf().
Can someone with more k...
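For reference, the call the poster is describing comes from QEMU's virtio-blk setup code; it looked roughly like this in that period (the exact file and form may differ, so treat it as an approximation):

    /* hw/block/virtio-blk.c, approximately: the second argument is the
     * virtqueue size, i.e. the number of ring descriptors. With indirect
     * descriptors (VIRTIO_RING_F_INDIRECT_DESC) each request occupies a
     * single descriptor, so the ring itself holds at most queue_size
     * requests at a time; how that relates to the guest's "inflight"
     * statistic is exactly what the poster is asking about. */
    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);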
2020 Jul 16
0
[RFC for qemu v4 2/2] virtio_balloon: Add dcvq to deflate continuous pages
...else {
> g_assert_not_reached();
> }
> @@ -838,6 +841,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
>
> if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_CONT_PAGES)) {
> s->icvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
> + s->dcvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
> }
>
> reset_stats(s);
> diff --git a/include/hw/virtio/virtio-balloon.h b/include/hw/virtio/virtio-balloon.h
> index 6a2514d..848a7fb 100644
> ...
2007 Dec 21
0
[Virtio-for-kvm] [PATCH 2/7] userspace virtio
..., "virtio-net", 6900, 0x1000,
+ 0, VIRTIO_ID_NET,
+ 0x02, 0x00, 0x00,
+ 6, sizeof(VirtIONet));
+
+ n->vdev.update_config = virtio_net_update_config;
+ n->vdev.get_features = virtio_net_get_features;
+ n->rx_vq = virtio_add_queue(&n->vdev, 512, virtio_net_handle_rx);
+ n->tx_vq = virtio_add_queue(&n->vdev, 128, virtio_net_handle_tx);
+ n->can_receive = 0;
+ memcpy(n->mac, nd->macaddr, 6);
+ n->vc = qemu_new_vlan_client(nd->vlan, virtio_net_receive,
+ virtio_net_can...
2016 Mar 03
16
[RFC qemu 0/4] A PV solution for live migration optimization
The current QEMU live migration implementation marks all of the
guest's RAM pages as dirtied in the ram bulk stage; all of these pages
are then processed, which takes quite a lot of CPU cycles.
From the guest's point of view, it doesn't care about the content of free
pages. We can make use of this fact and skip processing the free
pages in the ram bulk stage, which can save a lot of CPU cycles.
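As a purely illustrative sketch of the idea (the helper and bitmap names are hypothetical, not QEMU's actual migration code): once the guest has reported which pages are free, the bulk-stage dirty bitmap can be pruned so those pages are neither scanned nor sent.

    #include <limits.h>

    #define BITS_PER_ULONG (sizeof(unsigned long) * CHAR_BIT)

    /* Hypothetical helper: clear every bit in the dirty bitmap that the
     * guest marked as free, so the bulk stage skips those pages entirely.
     * (Assumes nr_pages is a multiple of the word size, for brevity.) */
    static void skip_free_pages(unsigned long *dirty_bitmap,
                                const unsigned long *free_bitmap,
                                unsigned long nr_pages)
    {
        for (unsigned long i = 0; i < nr_pages / BITS_PER_ULONG; i++) {
            dirty_bitmap[i] &= ~free_bitmap[i];
        }
    }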
2009 Aug 13
0
[PATCHv2 3/3] qemu-kvm: vhost-net implementation
...ONet *n;
@@ -837,6 +865,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev)
n->vdev.set_features = virtio_net_set_features;
n->vdev.bad_features = virtio_net_bad_features;
n->vdev.reset = virtio_net_reset;
+ n->vdev.driver_ok = virtio_net_driver_ok;
n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
@@ -863,6 +892,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev)
n->vdev.n...
2009 Aug 17
1
[PATCHv3 3/4] qemu-kvm: vhost-net implementation
...ONet *n;
@@ -837,6 +865,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev)
n->vdev.set_features = virtio_net_set_features;
n->vdev.bad_features = virtio_net_bad_features;
n->vdev.reset = virtio_net_reset;
+ n->vdev.driver_ok = virtio_net_driver_ok;
n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
@@ -863,6 +892,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev)
n->vdev.n...
2009 Aug 10
0
[PATCH 3/3] qemu-kvm: vhost-net implementation
...ONet *n;
@@ -838,6 +866,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev)
n->vdev.set_features = virtio_net_set_features;
n->vdev.bad_features = virtio_net_bad_features;
n->vdev.reset = virtio_net_reset;
+ n->vdev.driver_ok = virtio_net_driver_ok;
n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
@@ -864,6 +893,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev)
n->vdev.n...
2012 Jun 25
4
[RFC V2 PATCH 0/4] Multiqueue support for tap and virtio-net/vhost
Hello all:
This series is an update of the last version of multiqueue support, adding
multiqueue capability to both tap and virtio-net.
Some kinds of tap backend already have (macvtap in Linux) or will have (tap)
multiqueue support. In such a backend, each file descriptor of a tap is a
queue, and ioctls are provided to attach an existing tap file descriptor to the
tun/tap device. So the patches let qemu ...
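For readers unfamiliar with the tap side, the multi-queue tun/tap interface that eventually landed in Linux (linux/if_tun.h) works roughly as sketched below; this is an illustration of that kernel interface, not code from this patch series. Every fd opened on /dev/net/tun and bound to the same device with IFF_MULTI_QUEUE becomes one queue, and TUNSETQUEUE attaches or detaches it.

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    /* Open one queue of a multi-queue tap device: each fd that binds to
     * the same interface name with IFF_MULTI_QUEUE set is one queue. */
    static int open_tap_queue(const char *ifname)
    {
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0) {
            return -1;
        }
        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }

    /* Temporarily detach or re-attach an existing queue fd. */
    static int set_queue_attached(int fd, int attach)
    {
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
        return ioctl(fd, TUNSETQUEUE, &ifr);
    }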
2012 Jul 06
5
[RFC V3 0/5] Multiqueue support for tap and virtio-net/vhost
Hello all:
This series is an update of the last version of multiqueue support, adding
multiqueue capability to both tap and virtio-net.
Some kinds of tap backend already have (macvtap in Linux) or will have (tap)
multiqueue support. In such a backend, each file descriptor of a tap is a
queue, and ioctls are provided to attach an existing tap file descriptor to the
tun/tap device. So the patches let qemu ...
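On the QEMU device side, multiqueue essentially means registering one rx/tx virtqueue pair per queue instead of a single pair. The sketch below only illustrates that shape; the field names and queue sizes are made up and are not taken from this patch series.

    /* Illustrative only: one rx/tx virtqueue pair per queue, plus the
     * usual single control virtqueue. */
    static void virtio_net_add_queues(VirtIONet *n, VirtIODevice *vdev)
    {
        int i;

        for (i = 0; i < n->max_queues; i++) {
            n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
            n->vqs[i].tx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_tx);
        }
        n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    }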