search for: max_queue_pair

Displaying 20 results from an estimated 321 matches for "max_queue_pair".

2016 Nov 25
7
[PATCH net-next] virtio-net: enable multiqueue by default
...t.c
@@ -1886,8 +1886,11 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->any_header_sg)
 		dev->needed_headroom = vi->hdr_len;
-	/* Use single tx/rx queue pair as default */
-	vi->curr_queue_pairs = 1;
+	/* Enable multiqueue by default */
+	if (num_online_cpus() >= max_queue_pairs)
+		vi->curr_queue_pairs = max_queue_pairs;
+	else
+		vi->curr_queue_pairs = num_online_cpus();
 	vi->max_queue_pairs = max_queue_pairs;
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
@@ -1918,6 +1921,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 	goto f...
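A minimal standalone sketch (plain userspace C, not the driver itself) of the default selection this hunk introduces: cap the initial queue pairs at the smaller of the online CPU count and the device's max_queue_pairs. The 8-pair device limit is an arbitrary example value.

/* Sketch of the default queue-pair selection; not kernel code. */
#include <stdio.h>
#include <unistd.h>

static unsigned int default_queue_pairs(unsigned int online_cpus,
					unsigned int max_queue_pairs)
{
	/* Mirrors the hunk: curr = (cpus >= max) ? max : cpus */
	return online_cpus >= max_queue_pairs ? max_queue_pairs : online_cpus;
}

int main(void)
{
	unsigned int cpus = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);
	unsigned int max_queue_pairs = 8;	/* arbitrary example device limit */

	printf("curr_queue_pairs = %u\n",
	       default_queue_pairs(cpus, max_queue_pairs));
	return 0;
}
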
2014 Oct 14
4
[PATCH RFC] virtio_net: enable tx interrupt
...ble_cb_delayed(sq->vq)) {
+		free_old_xmit_skbs(sq, qsize);
 	}
 	return NETDEV_TX_OK;
@@ -1124,8 +1161,10 @@ static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_disable(&vi->rq[i].napi);
+		napi_disable(&vi->sq[i].napi);
+	}
 	return 0;
 }
@@ -1438,8 +1477,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 {
 	int i;
-	for (i = 0; i < vi->max_queue_pairs...
2015 Mar 12
2
[PATCH net] virtio-net: correctly delete napi hash
...(+), 5 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff366..59b0e97 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 {
 	int i;
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		napi_hash_del(&vi->rq[i].napi);
 		netif_napi_del(&vi->rq[i].napi);
+	}
 	kfree(vi->rq);
 	kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
 	cancel_delayed_work_sync(&...
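A userspace model of the bug class this patch fixes: each rx NAPI context is registered in two places (the netdev's list and the global busy-poll hash), so teardown has to unhook both, and skipping the hash removal leaves dangling entries behind. Everything below is a simplified stand-in, not the real NAPI API.

/* Model of paired add/del bookkeeping; not kernel code. */
#include <stdio.h>

struct napi_model {
	int in_hash;	/* registered for busy polling */
	int in_list;	/* registered with the netdev */
};

static int hash_entries;

static void napi_hash_add_model(struct napi_model *n) { n->in_hash = 1; hash_entries++; }
static void napi_hash_del_model(struct napi_model *n) { n->in_hash = 0; hash_entries--; }
static void netif_napi_del_model(struct napi_model *n) { n->in_list = 0; }

int main(void)
{
	struct napi_model rq[2] = { { 0, 0 }, { 0, 0 } };
	int i;

	for (i = 0; i < 2; i++) {
		napi_hash_add_model(&rq[i]);	/* done when the rx queue is set up */
		rq[i].in_list = 1;
	}

	/* Teardown: forgetting the hash del would leave hash_entries at 2. */
	for (i = 0; i < 2; i++) {
		napi_hash_del_model(&rq[i]);
		netif_napi_del_model(&rq[i]);
	}

	printf("dangling hash entries after free: %d\n", hash_entries);
	return 0;
}
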
2014 Oct 15
1
[PATCH RFC v2 1/3] virtio_net: enable tx interrupt
...>= 2+MAX_SKB_FRAGS)
+			netif_start_subqueue(dev, qnum);
+	}
+
 	return NETDEV_TX_OK;
 }
@@ -1137,8 +1178,10 @@ static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_disable(&vi->rq[i].napi);
+		napi_disable(&vi->sq[i].napi);
+	}
 	return 0;
 }
@@ -1457,8 +1500,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 {
 	int i;
-	for (i = 0; i < vi->max_queue_pairs...
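For context, a userspace model (not driver code) of the stop/restart pattern visible in this hunk: the tx path stops a subqueue when free descriptors run low and restarts it once completions have freed at least 2 + MAX_SKB_FRAGS of them. The ring size and the MAX_SKB_FRAGS value are illustrative assumptions.

/* Model of tx subqueue stop/restart hysteresis; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17	/* typical value of the era; illustrative only */

struct txq {
	int num_free;		/* free descriptors in the ring */
	bool stopped;
};

static void tx_submit(struct txq *q, int used)
{
	q->num_free -= used;
	/* Stop the subqueue if the next worst-case skb may not fit. */
	if (q->num_free < 2 + MAX_SKB_FRAGS) {
		q->stopped = true;
		printf("stop subqueue (num_free=%d)\n", q->num_free);
	}
}

static void tx_complete(struct txq *q, int freed)
{
	q->num_free += freed;
	/* Restart the subqueue once a full worst-case skb fits again. */
	if (q->stopped && q->num_free >= 2 + MAX_SKB_FRAGS) {
		q->stopped = false;
		printf("restart subqueue (num_free=%d)\n", q->num_free);
	}
}

int main(void)
{
	struct txq q = { .num_free = 256, .stopped = false };

	tx_submit(&q, 240);	/* ring nearly full: queue stops */
	tx_complete(&q, 32);	/* completions free space: queue restarts */
	return 0;
}
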
2012 Dec 04
3
[PATCH net-next 0/3] Multiqueue support for virtio-net
...essing Rusty's comments:
- align the implementation (location of cvq) to v5.
- fix the style issue.
- use a global refill instead of per-vq one.
- check the VIRTIO_NET_F_RFS before calling virtnet_set_queues()
Addressing Michael's comments:
- rename the curr_queue_pairs in virtnet_probe() to max_queue_pairs
- validate the number of queue pairs supported by the device against VIRTIO_NET_CTRL_RFS_VQ_PAIRS_MIN and VIRTIO_NET_CTRL_RFS_VQ_PAIRS_MAX.
- don't crash when failing to change the number of virtqueues
- don't set the affinity hint when only a single queue is used or there's too much...
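A sketch of the probe-time validation the changelog describes: the queue pair count advertised by the device is checked against the spec-defined window before it is used as max_queue_pairs. The MIN/MAX values below (1 and 0x8000) follow the later virtio spec and are assumptions here, as is the example advertised value.

/* Sketch of validating the device-advertised queue pair count; not kernel code. */
#include <stdio.h>

#define VIRTIO_NET_CTRL_RFS_VQ_PAIRS_MIN 1	/* assumed value */
#define VIRTIO_NET_CTRL_RFS_VQ_PAIRS_MAX 0x8000	/* assumed value */

static int validate_max_queue_pairs(unsigned int advertised)
{
	if (advertised < VIRTIO_NET_CTRL_RFS_VQ_PAIRS_MIN ||
	    advertised > VIRTIO_NET_CTRL_RFS_VQ_PAIRS_MAX)
		return -1;	/* reject a bogus device config */
	return 0;
}

int main(void)
{
	unsigned int advertised = 4;	/* example value read from config space */

	if (validate_max_queue_pairs(advertised))
		fprintf(stderr, "device advertised an invalid queue pair count\n");
	else
		printf("max_queue_pairs = %u\n", advertised);
	return 0;
}
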
2014 Jul 16
9
[PATCH net-next V2 0/3] rx busy polling support for virtio-net
Hi all: This series introduces support for rx busy polling, which is useful for reducing latency for a kvm guest. Patches 1-2 introduce the helpers used for rx busy polling. Patch 3 implements the main function. The test was done between a kvm guest and an external host; the two hosts were connected through 40Gb mlx4 cards. With both busy_poll and busy_read set to 50 in the guest, 1
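A small sketch of how the quoted guest-side settings could be applied programmatically, assuming the standard net.core.busy_poll and net.core.busy_read sysctls and root privileges; the sysctl command-line tool would do the same.

/* Sketch: set busy_poll/busy_read to the values quoted above. */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	int err = 0;

	err |= write_sysctl("/proc/sys/net/core/busy_poll", "50");
	err |= write_sysctl("/proc/sys/net/core/busy_read", "50");
	return err ? 1 : 0;
}
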
2012 Dec 05
3
[PATCH net-next v2 0/3] Multiqueue support in virtio-net
...irnet_del_vqs to patch 1
- change the meaningless kzalloc() to kmalloc()
- open code the err handling
- store the name of the virtqueue in the send/receive queue
- avoid the type cast in virtnet_find_vqs()
- fix the mem leak and freeing issue of names in virtnet_find_vqs()
- check cvq before setting the max_queue_pairs in virtnet_probe()
- check the cvq and VIRTIO_NET_F_RFS in virtnet_set_queues()
- set the curr_queue_pairs in virtnet_set_queue()
- use the error reported by virtnet_set_queue() as the return value of ethtool_set_channels()
Changes from RFC v7:
Addressing Rusty's comments:
- align the implement...
2014 Dec 01
1
[PATCH RFC v4 net-next 1/5] virtio_net: enable tx interrupt
...;napi);
> +	}
> +
>  	return NETDEV_TX_OK;
>  }
>
> @@ -1138,8 +1170,10 @@ static int virtnet_close(struct net_device *dev)
>  	/* Make sure refill_work doesn't re-enable napi! */
>  	cancel_delayed_work_sync(&vi->refill);
>
> -	for (i = 0; i < vi->max_queue_pairs; i++)
> +	for (i = 0; i < vi->max_queue_pairs; i++) {
>  		napi_disable(&vi->rq[i].napi);
> +		napi_disable(&vi->sq[i].napi);
> +	}
>
>  	return 0;
>  }
> @@ -1452,8 +1486,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
>  {
>  	in...
2014 Apr 18
3
[PATCH] virtio_net: zero is an invalid queue_pairs number
...ex 7b68746..8a852b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	if (channels->rx_count || channels->tx_count || channels->other_count)
 		return -EINVAL;
-	if (queue_pairs > vi->max_queue_pairs)
+	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
 		return -EINVAL;
 	get_online_cpus();
--
1.9.0
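A userspace model of the ethtool .set_channels check this patch tightens: besides rejecting requests above max_queue_pairs, a request for zero queue pairs must also fail with -EINVAL. The helper below is a simplified stand-in for virtnet_set_channels(), not the driver function itself.

/* Model of the tightened channel-count validation; not kernel code. */
#include <errno.h>
#include <stdio.h>

static int set_channels(unsigned int queue_pairs, unsigned int max_queue_pairs)
{
	if (queue_pairs > max_queue_pairs || queue_pairs == 0)
		return -EINVAL;
	/* ...would go on to ask the device to switch to queue_pairs pairs... */
	return 0;
}

int main(void)
{
	printf("set 0 -> %d\n", set_channels(0, 4));	/* rejected */
	printf("set 8 -> %d\n", set_channels(8, 4));	/* rejected */
	printf("set 4 -> %d\n", set_channels(4, 4));	/* accepted */
	return 0;
}
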
2016 Jun 02
1
[PATCH -next 2/2] virtio_net: Read the advised MTU
...ead16(vdev,
			       offsetof(struct virtio_net_config,
					mtu));
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net...
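A userspace model of the probe-time read shown above: the advised MTU sits in the device config space as a 16-bit field and is read only when the device offers the MTU feature. The trimmed config struct, the feature flag, and reading from a local buffer in place of virtio_cread16() are all stand-ins.

/* Model of reading the advised MTU from virtio-net config space; not kernel code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct virtio_net_config_model {
	uint8_t  mac[6];
	uint16_t status;
	uint16_t max_virtqueue_pairs;
	uint16_t mtu;	/* advised MTU, valid only if the MTU feature is offered */
};

static uint16_t cread16(const void *cfg, size_t off)
{
	uint16_t v;

	memcpy(&v, (const uint8_t *)cfg + off, sizeof(v));
	return v;	/* assumes a little-endian host for simplicity */
}

int main(void)
{
	struct virtio_net_config_model cfg = { .mtu = 9000 };
	int mtu_feature_offered = 1;	/* stand-in for the feature-bit check */
	uint16_t dev_mtu = 0;

	if (mtu_feature_offered)
		dev_mtu = cread16(&cfg, offsetof(struct virtio_net_config_model, mtu));

	printf("advised MTU: %u\n", dev_mtu);
	return 0;
}
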