Displaying results matching "tun_rx_batched" (searched for "tun_rx_batch").
2017 Jan 03 | 2 | [PATCH net-next V2 3/3] tun: rx batching
On Wed, Dec 28, 2016 at 04:09:31PM +0800, Jason Wang wrote:
> +static int tun_rx_batched(struct tun_file *tfile, struct sk_buff *skb,
> + int more)
> +{
> + struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
> + struct sk_buff_head process_queue;
> + int qlen;
> + bool rcv = false;
> +
> + spin_lock(&queue->lock);
Should this be spin_lo...
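Every quote of this helper in these results is truncated, so a hedged reconstruction may help. Only the opening lines above are from the patch (V2 declares it int, V3 void; the void variant is sketched); the flush condition and the receive loop below are informed guesses, and rx_batched is the module parameter the V2/V3 patches add as the batch-size limit:

static int rx_batched; /* module parameter in V2/V3: batch-size limit */

static void tun_rx_batched(struct tun_file *tfile, struct sk_buff *skb,
                           int more)
{
        struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
        struct sk_buff_head process_queue;
        int qlen;
        bool rcv = false;

        spin_lock(&queue->lock);
        qlen = skb_queue_len(queue);
        __skb_queue_tail(queue, skb);
        /* Assumed flush condition: the batch limit is reached, or the
         * sender cleared MSG_MORE to mark the end of a burst. */
        if (!more || qlen + 1 >= rx_batched) {
                __skb_queue_head_init(&process_queue);
                skb_queue_splice_tail_init(queue, &process_queue);
                rcv = true;
        }
        spin_unlock(&queue->lock);

        /* Deliver the spliced batch outside the queue lock. */
        if (rcv) {
                local_bh_disable();
                while ((skb = __skb_dequeue(&process_queue)) != NULL)
                        netif_receive_skb(skb);
                local_bh_enable();
        }
}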
2016 Dec 31 | 1 | [PATCH net-next V3 3/3] tun: rx batching
...>
> rxhash = skb_get_hash(skb);
> +
> #ifndef CONFIG_4KSTACKS
> - local_bh_disable();
> - netif_receive_skb(skb);
> - local_bh_enable();
> + if (!rx_batched) {
> + local_bh_disable();
> + netif_receive_skb(skb);
> + local_bh_enable();
> + } else {
> + tun_rx_batched(tfile, skb, more);
> + }
> #else
> netif_rx_ni(skb);
> #endif
If rx_batched has been set, and we are talking to clients not using
this new MSG_MORE facility (or such clients don't have multiple TX
packets to send to you, thus MSG_MORE is often clear), you are doing a
lot more...
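(The later revisions in this thread address exactly this concern: the V4 and V5 snippets below add a fast path that bypasses the queue and its lock whenever batching is disabled, or MSG_MORE is clear and the queue is empty, so unbatched senders keep the old single-packet behavior; see the sketch after the V5 3/3 entry.)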
2016 Dec 30 | 0 | [PATCH net-next V3 3/3] tun: rx batching
...b_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1140,10 +1145,36 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_file *tfile, struct sk_buff *skb,
+ int more)
+{
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ int qlen;
+ bool rcv = false;
+
+ spin_lock(&queue->lock);
+ qlen = skb_queue_len(queue);
+ __skb_queue_tail(queue, skb);...
2016 Dec 28 | 0 | [PATCH net-next V2 3/3] tun: rx batching
...kb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1140,10 +1145,44 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static int tun_rx_batched(struct tun_file *tfile, struct sk_buff *skb,
+ int more)
+{
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ int qlen;
+ bool rcv = false;
+
+ spin_lock(&queue->lock);
+ qlen = skb_queue_len(queue);
+ if (qlen > rx_batched)
+ g...
2016 Dec 30 | 5 | [PATCH net-next V3 0/3] vhost_net tx batching
Hi:
This series tries to implement tx batching support for vhost. This was
done by using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over
mlx4 (noqueue) on the host:
Mpps -+%
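The MSG_MORE hint itself is produced by the sender's sendmsg() flags. As a simplified illustration, with a hypothetical helper that is not the vhost_net code: keep the hint set while more packets are known to follow, and clear it on the last one so the backend knows to flush its batch.

/* send_burst() is hypothetical; it only illustrates the hint. */
static int send_burst(struct socket *sock, struct kvec *vecs,
                      size_t *lens, int n)
{
        struct msghdr msg = {};
        int i, ret;

        for (i = 0; i < n; i++) {
                /* More packets follow: let the backend batch this one. */
                msg.msg_flags = (i + 1 < n) ? MSG_MORE : 0;
                ret = kernel_sendmsg(sock, &msg, &vecs[i], 1, lens[i]);
                if (ret < 0)
                        return ret;
        }
        return 0;
}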
2017 Jan 18 | 0 | [PATCH net-next V5 3/3] tun: rx batching
...b_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1139,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
+ struct sk_buff *skb, int more)
+{
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ u32 rx_batched = tun->rx_batched;
+ bool rcv = false;
+
+ if (!rx_batched || (!more && skb_qu...
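The condition above is cut off at skb_qu...; assuming it completes as a queue-emptiness test (which matches the driver that was eventually merged), the V5 logic sketches out as below. The fast path answers the earlier review concern: with batching off, or with no hint and no backlog, a packet is delivered immediately and the queue lock is never taken. Treat everything past the quoted lines as a reconstruction, not the literal patch:

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
                           struct sk_buff *skb, int more)
{
        struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
        struct sk_buff_head process_queue;
        u32 rx_batched = tun->rx_batched;
        bool rcv = false;

        /* Fast path: batching off, or no MSG_MORE hint and nothing
         * queued -- receive at once without touching the queue lock. */
        if (!rx_batched || (!more && skb_queue_empty(queue))) {
                local_bh_disable();
                netif_receive_skb(skb);
                local_bh_enable();
                return;
        }

        /* Slow path: enqueue, or splice the backlog out for delivery
         * once the batch fills or the burst ends. */
        spin_lock(&queue->lock);
        if (!more || skb_queue_len(queue) == rx_batched) {
                __skb_queue_head_init(&process_queue);
                skb_queue_splice_tail_init(queue, &process_queue);
                rcv = true;
        } else {
                __skb_queue_tail(queue, skb);
        }
        spin_unlock(&queue->lock);

        if (rcv) {
                struct sk_buff *nskb;

                local_bh_disable();
                while ((nskb = __skb_dequeue(&process_queue)))
                        netif_receive_skb(nskb);
                netif_receive_skb(skb); /* current packet closes the batch */
                local_bh_enable();
        }
}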
2017 Jan 06 | 0 | [PATCH V4 net-next 3/3] tun: rx batching
...b_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1140,10 +1142,45 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
+ struct sk_buff *skb, int more)
+{
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ u32 rx_batched = tun->rx_batched;
+ bool rcv = false;
+
+ if (!rx_batched || (!more && skb_qu...
2017 Jan 06 | 5 | [PATCH V4 net-next 0/3] vhost_net tx batching
Hi:
This series tries to implement tx batching support for vhost. This was
done by using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over
mlx4 (noqueue) on the host:
Mpps -+%
2016 Dec 28 | 7 | [PATCH net-next V2 0/3] vhost net tx batching
Hi:
This series tries to implement tx batching support for vhost. This was
done by using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over
mlx4 (noqueue) on the host:
Mpps -+%
2017 Jan 18 | 7 | [PATCH net-next V5 0/3] vhost_net tx batching
Hi:
This series tries to implement tx batching support for vhost. This was
done by using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over
mlx4 (noqueue) on the host:
Mpps -+%
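For completeness: in the driver as merged, the per-device batch limit is exposed through ethtool's receive-coalescing interface (ethtool -C <tap> rx-frames N). Whether V5 is byte-for-byte identical is not visible in these snippets, so take this as a sketch modeled on the merged code:

static int tun_set_coalesce(struct net_device *dev,
                            struct ethtool_coalesce *ec)
{
        struct tun_struct *tun = netdev_priv(dev);

        /* Cap the batch size at one NAPI poll budget. */
        if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
                return -EINVAL;

        tun->rx_batched = ec->rx_max_coalesced_frames;
        return 0;
}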
2017 Jan 06 | 2 | [PATCH V4 net-next 3/3] tun: rx batching
...kfree_skb(skb);
>
> + skb_queue_purge(&tfile->sk.sk_write_queue);
> skb_queue_purge(&tfile->sk.sk_error_queue);
> }
>
> @@ -1140,10 +1142,45 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
> return skb;
> }
>
> +static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
> + struct sk_buff *skb, int more)
> +{
> + struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
> + struct sk_buff_head process_queue;
> + u32 rx_batched = tun->rx_batched;
> + bool rcv = false;
> +
> + if...
2017 Jan 04 | 0 | [PATCH net-next V2 3/3] tun: rx batching
On 2017-01-03 21:33, Stefan Hajnoczi wrote:
> On Wed, Dec 28, 2016 at 04:09:31PM +0800, Jason Wang wrote:
>> +static int tun_rx_batched(struct tun_file *tfile, struct sk_buff *skb,
>> + int more)
>> +{
>> + struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
>> + struct sk_buff_head process_queue;
>> + int qlen;
>> + bool rcv = false;
>> +
>> + spin_lock(&queue->...
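This reply's snippet ends before the answer to the locking question. As general background (an illustration of the concern, not the thread's resolution, and enqueue_safe is a hypothetical name): if queue->lock can also be taken from softirq context, process-context users must disable bottom halves, or a softirq firing on the same CPU while the lock is held would deadlock trying to acquire it again.

static void enqueue_safe(struct sk_buff_head *queue, struct sk_buff *skb)
{
        /* _bh variant: block softirqs on this CPU while the lock is
         * held, so a softirq user of queue->lock cannot deadlock us. */
        spin_lock_bh(&queue->lock);
        __skb_queue_tail(queue, skb);
        spin_unlock_bh(&queue->lock);
}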