Displaying results from an estimated 202 matches for "curr_queue_pair".
2016 Nov 25
7
[PATCH net-next] virtio-net: enable multiqueue by default
...index d4ac7a6..a21d93a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1886,8 +1886,11 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
- /* Use single tx/rx queue pair as default */
- vi->curr_queue_pairs = 1;
+ /* Enable multiqueue by default */
+ if (num_online_cpus() >= max_queue_pairs)
+ vi->curr_queue_pairs = max_queue_pairs;
+ else
+ vi->curr_queue_pairs = num_online_cpus();
vi->max_queue_pairs = max_queue_pairs;
/* Allocate/initialize the rx/tx queues, and invoke find_vqs...
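The default above amounts to clamping the initial queue-pair count to the number of online CPUs. A minimal sketch of the same selection, assuming the usual kernel helpers (min_t(), num_online_cpus()); field names follow the diff, and this is not the committed hunk:

	/* Sketch: enable as many queue pairs as there are online CPUs,
	 * but never more than the device-advertised maximum. */
	vi->curr_queue_pairs = min_t(u16, num_online_cpus(), max_queue_pairs);
	vi->max_queue_pairs = max_queue_pairs;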
2016 Jun 02
1
[PATCH -next 2/2] virtio_net: Read the advised MTU
...)) {
		dev->mtu = virtio_cread16(vdev,
					  offsetof(struct virtio_net_config,
						   mtu));
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sy...
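For context, the excerpt above reads the device-advised MTU from virtio config space during probe. A minimal sketch of that read, assuming it is gated on the VIRTIO_NET_F_MTU feature bit (the truncated condition at the top of the excerpt is not shown in full, so the exact guard is an assumption):

	/* Sketch: only read the mtu config field when the device offers
	 * VIRTIO_NET_F_MTU; virtio_cread16() fetches a 16-bit field from
	 * virtio config space. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU))
		dev->mtu = virtio_cread16(vdev,
					  offsetof(struct virtio_net_config, mtu));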
2017 Jan 02
2
[PATCH net 9/9] virtio-net: XDP support for small buffers
...+1898,10 @@ static void free_receive_page_frags(struct virtnet_info *vi)
static bool is_xdp_queue(struct virtnet_info *vi, int q)
{
+ /* For small receive mode always use kfree_skb variants */
+ if (!vi->mergeable_rx_bufs)
+ return false;
+
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
Patch is untested, just spotted it while doing code review.
Thanks,
John
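Reading the diff as a whole, the helper short-circuits for the small-buffer (non-mergeable) receive path before doing the queue-index arithmetic. A hedged reconstruction of the full function as the hunk suggests it would look (not checked against the tree at that point in history):

	static bool is_xdp_queue(struct virtnet_info *vi, int q)
	{
		/* Small receive mode never carries XDP-transmitted buffers,
		 * so callers should always take the kfree_skb() path. */
		if (!vi->mergeable_rx_bufs)
			return false;
		/* Queues below this boundary are ordinary TX queues. */
		if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
			return false;
		/* Queues in [curr - xdp, curr) are the XDP TX queues. */
		else if (q < vi->curr_queue_pairs)
			return true;
		else
			return false;
	}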
2013 Jul 03
4
[PATCH net] virtio-net: fix the race between channels setting and refill
...struct net_device *dev = vi->dev;
- int i;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
@@ -915,10 +914,8 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
queue_pairs);
return -EINVAL;
} else {
- for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
vi->curr_queue_pairs = queue_pairs;
+ schedule_delayed_work(&vi->refill, 0);
}
return 0;
--
1.7.1
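The fix above hinges on ordering: publish the new queue count first, then kick the refill worker, which itself walks queues up to curr_queue_pairs. A short sketch of the resulting tail of virtnet_set_queues() with that reasoning spelled out (following the diff, not quoted from the tree):

	} else {
		/* Update curr_queue_pairs before scheduling the refill
		 * work, so the worker sees the new count and fills the
		 * newly enabled receive queues; filling them inline here
		 * raced with the worker running on the old count. */
		vi->curr_queue_pairs = queue_pairs;
		schedule_delayed_work(&vi->refill, 0);
	}
	return 0;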
2014 Sep 05
1
[PATCH 1/1] add selftest for virtio-net v1.0
...,6 +845,9 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
int i;
+ /* disallow open during test */
+ if (test_bit(__VIRTNET_TESTING, &vi->flags))
+ return -EBUSY;
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
@@ -1363,12 +1398,166 @@ static void virtnet_get_channels(struct net_device *dev,
channels->other_count = 0;
}
+static int virtnet_reset(struct virtnet_info *vi, u64 *data);
+
+static void virtnet_create_lb_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data...
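The guard added to virtnet_open() relies on an atomic flag bit so the loopback selftest and the open path cannot run concurrently. A minimal sketch of the flag handling, assuming a __VIRTNET_TESTING bit in vi->flags as in the excerpt (test_bit()/set_bit()/clear_bit() are the standard kernel bit atomics):

	/* Selftest side (sketch): mark the device as under test for the
	 * duration of the loopback run. */
	set_bit(__VIRTNET_TESTING, &vi->flags);
	/* ... put queues into loopback, send and verify test frames ... */
	clear_bit(__VIRTNET_TESTING, &vi->flags);

	/* ndo_open side, as in the excerpt: refuse to open mid-test. */
	if (test_bit(__VIRTNET_TESTING, &vi->flags))
		return -EBUSY;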
2017 Jan 03
1
[PATCH net 9/9] virtio-net: XDP support for small buffers
...gt;>
>> static bool is_xdp_queue(struct virtnet_info *vi, int q)
>> {
>> + /* For small receive mode always use kfree_skb variants */
>> + if (!vi->mergeable_rx_bufs)
>> + return false;
>> +
>> if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
>> return false;
>> else if (q < vi->curr_queue_pairs)
>>
>>
>> Patch is untested, just spotted it while doing code review.
>>
>> Thanks,
>> John
>
> We probably need a better name for this func...
2013 Dec 26
2
[PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...to
block on GFP_KERNEL allocations is to allow some asynchronous behavior.
I have a hard time convincing myself virtio_net is safe anyway with this
work queue thing.
virtnet_open() seems racy, for example:
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(&vi->rq[i]);
What if t...
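The worry is that the refill work item and the open/close paths can touch the same receive queues concurrently. One common way the driver closes such a window elsewhere is to cancel the refill work synchronously before the queues are torn down; a one-line sketch (illustrative placement, not a proposed fix for the race discussed above):

	/* Sketch: wait for any in-flight refill worker to finish (and stop
	 * it re-enabling NAPI) before the queues are reconfigured or freed. */
	cancel_delayed_work_sync(&vi->refill);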
2023 Mar 06
4
[PATCH net 0/2] add checking sq is full inside xdp xmit
If the XDP xmit queue is not an independent queue, then once XDP xmit
has used up all the descriptors, transmission from __dev_queue_xmit() may
hit the following error.
net ens4: Unexpected TXQ (0) queue failure: -28
This patch set adds a check for whether the sq is full in XDP xmit.
Thanks.
Xuan Zhuo (2):
virtio_net: separate the logic of checking whether sq is full
virtio_net: add checking sq is full
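The cover letter's point is to detect a full send queue before an XDP transmit, instead of hitting the -28 (ENOSPC-style) TXQ failure above. A rough sketch of what such a check looks like, assuming the send virtqueue's free-descriptor count is the gauge; the helper name and exact threshold here are illustrative, not the committed code:

	/* Sketch only: the sq is considered full when fewer descriptors
	 * remain than a worst-case frame could need (the 2 + MAX_SKB_FRAGS
	 * bound mirrors the driver's usual per-skb estimate). */
	static bool sq_is_full(struct send_queue *sq)
	{
		return sq->vq->num_free < 2 + MAX_SKB_FRAGS;
	}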
2017 Feb 15
3
[PATCH net-next] virtio-net: set queues after reset during xdp_set
We set the queues before reset, which causes a crash [1]. This is
because is_xdp_raw_buffer_queue() depends on the old number of XDP queue
pairs to do the correct detection. So fix this by:
- setting the queues after reset, to keep the old vi->curr_queue_pairs (in
fact, setting the queues before reset does not work since, after the
feature set, all queue pairs are enabled by default during reset);
- changing xdp_queue_pairs only after virtnet_reset() has succeeded.
[1]
[ 74.328168] general protection fault: 0000 [#1] SMP
[ 74.328625] Modules linked in: nf...
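Boiled down, the ordering described in the changelog is: reset first, while the old vi->curr_queue_pairs is still what is_xdp_raw_buffer_queue() expects; then program the new queue count; and only then expose the new XDP TX queue count. A hedged outline of that ordering (curr_qp and xdp_qp are illustrative local names, error handling elided):

	/* 1) Reset while the old queue bookkeeping is still intact. */
	err = virtnet_reset(vi);
	if (err)
		return err;
	/* 2) Then program the new number of queue pairs... */
	virtnet_set_queues(vi, curr_qp + xdp_qp);
	/* 3) ...and only now publish the new XDP TX queue count. */
	vi->xdp_queue_pairs = xdp_qp;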
2013 Jan 25
5
[PATCH V8 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
...info *vi, bool set)
{
int i;
+ int cpu;
/* In multiqueue mode, when the number of cpu is equal to the number of
* queue pairs, we let the queue pairs to be private to one cpu by
@@ -1023,22 +1027,40 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
*/
if ((vi->curr_queue_pairs == 1 ||
vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
+ if (vi->affinity_hint_set) {
set = false;
- else
+ } else {
+ i = 0;
+ for_each_online_cpu(cpu)
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pai...
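The underlying issue is that CPU IDs can have gaps (e.g. after hot-unplug), so mapping the raw CPU number straight to a queue index can point past curr_queue_pairs. The change walks the online-CPU mask instead; a sketch of that walk, following the truncated hunk (vq_index being a per-CPU variable as above):

	int i = 0;
	int cpu;

	/* Assign each online CPU a queue-pair index round-robin rather
	 * than using the raw CPU number, so gaps in the CPU ID space do
	 * not produce out-of-range indices. */
	for_each_online_cpu(cpu)
		*per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs;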
2012 Nov 27
4
[net-next rfc v7 0/3] Multiqueue virtio-net
...netdev tap,queues=2,... -device virtio-net-pci,queues=2,...
then enable multiqueue through ethtool with:
ethtool -L eth0 combined 2
Changes from V6:
- Align the implementation with the RFC spec update v5
- Addressing Rusty's comments:
* split the patches
* rename to max_queue_pairs and curr_queue_pairs
* remove the useless status
* fix the hibernation bug
- Addressing Ben's comments:
* check other parameters in ethtool_set_queues
Changes from v5:
- Align the implementation with the RFC spec update v4
- Switch the mode between single mode and multiqueue mode without reset
- Remove the...