Displaying 10 results from an estimated 10 matches for "virtnet_napi_disable_all".
2014 Jul 15
3
[PATCH net-next] virtio-net: rx busy polling support
...+ return received;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+static void virtnet_napi_enable_all(struct virtnet_info *vi)
+{
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_rq_init_lock(&vi->rq[i]);
+ virtnet_napi_enable(&vi->rq[i]);
+ }
+}
+
+static void virtnet_napi_disable_all(struct virtnet_info *vi)
+{
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ napi_disable(&vi->rq[i].napi);
+ while (!virtnet_rq_disable(&vi->rq[i])) {
+ pr_info("RQ %d locked\n", i);
+ usleep_range(1000, 20000);
+ }
+ }
+}
+
static int virtnet_open(s...
2014 Jul 15
3
[PATCH net-next] virtio-net: rx busy polling support
...+ return received;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+static void virtnet_napi_enable_all(struct virtnet_info *vi)
+{
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_rq_init_lock(&vi->rq[i]);
+ virtnet_napi_enable(&vi->rq[i]);
+ }
+}
+
+static void virtnet_napi_disable_all(struct virtnet_info *vi)
+{
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ napi_disable(&vi->rq[i].napi);
+ while (!virtnet_rq_disable(&vi->rq[i])) {
+ pr_info("RQ %d locked\n", i);
+ usleep_range(1000, 20000);
+ }
+ }
+}
+
static int virtnet_open(s...
2014 Jul 16
9
[PATCH net-next V2 0/3] rx busy polling support for virtio-net
Hi all:
This series introduces support for rx busy polling. This
is useful for reducing the latency for a KVM guest. Patches 1-2
introduce helpers which are used for rx busy polling. Patch 3
implements the main function.
Test was done between a KVM guest and an external host. The two hosts were
connected through 40Gb mlx4 cards. With both busy_poll and busy_read
set to 50 in the guest, 1
2014 Jul 16
9
[PATCH net-next V2 0/3] rx busy polling support for virtio-net
Hi all:
This series introduces support for rx busy polling. This
is useful for reducing the latency for a KVM guest. Patches 1-2
introduce helpers which are used for rx busy polling. Patch 3
implements the main function.
Test was done between a KVM guest and an external host. The two hosts were
connected through 40Gb mlx4 cards. With both busy_poll and busy_read
set to 50 in the guest, 1
2014 Jul 20
1
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...t virtnet_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> + virtnet_rq_init_lock(&vi->rq[i]);
> virtnet_napi_enable(&vi->rq[i]);
> + }
> }
>
> static void virtnet_napi_disable_all(struct virtnet_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_disable(&vi->rq[i].napi);
> + while (!virtnet_rq_disable(&vi->rq[i])) {
> + pr_info("RQ %d l...
2014 Jul 20
1
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...t virtnet_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> + virtnet_rq_init_lock(&vi->rq[i]);
> virtnet_napi_enable(&vi->rq[i]);
> + }
> }
>
> static void virtnet_napi_disable_all(struct virtnet_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_disable(&vi->rq[i].napi);
> + while (!virtnet_rq_disable(&vi->rq[i])) {
> + pr_info("RQ %d l...
2014 Jul 16
0
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...SY_POLL */
+
static void virtnet_napi_enable_all(struct virtnet_info *vi)
{
int i;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_rq_init_lock(&vi->rq[i]);
virtnet_napi_enable(&vi->rq[i]);
+ }
}
static void virtnet_napi_disable_all(struct virtnet_info *vi)
{
int i;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ while (!virtnet_rq_disable(&vi->rq[i])) {
+ pr_info("RQ %d locked\n", i);
+ usleep_range(1000...
2014 Jul 15
0
[PATCH net-next] virtio-net: rx busy polling support
...n
> rate was increased from 9151.94 to 19787.37.
This is a misleading changelog.
You forgot to describe how you allowed busy polling, as it is not
'automatic'.
If not, this patch adds two spinlock/unlock pairs in the fast path.
Also, it looks like you could split this in smaller patches.
virtnet_napi_disable_all() & virtnet_napi_enable_all() could be added in
a separate patch for example.
2014 Jul 16
2
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...et_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> + virtnet_rq_init_lock(&vi->rq[i]);
> virtnet_napi_enable(&vi->rq[i]);
> + }
> }
>
> static void virtnet_napi_disable_all(struct virtnet_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_disable(&vi->rq[i].napi);
> + while (!virtnet_rq_disable(&vi->rq[i])) {
> + pr_info("RQ...
2014 Jul 16
2
[PATCH net-next V2 3/3] virtio-net: rx busy polling support
...et_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> + virtnet_rq_init_lock(&vi->rq[i]);
> virtnet_napi_enable(&vi->rq[i]);
> + }
> }
>
> static void virtnet_napi_disable_all(struct virtnet_info *vi)
> {
> int i;
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_disable(&vi->rq[i].napi);
> + while (!virtnet_rq_disable(&vi->rq[i])) {
> + pr_info("RQ...