Displaying 20 results from an estimated 24 matches for "tx_array".
2016 Jun 30
0
[PATCH net-next V3 6/6] tun: switch to use skb array for tx
@@ -71,6 +71,7 @@
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
+#include <linux/skb_array.h>
#include <asm/uaccess.h>
@@ -167,6 +168,7 @@ struct tun_file {
};
struct list_head next;
struct tun_struct *detached;
+ struct skb_array tx_array;
};
struct tun_flow_entry {
@@ -515,7 +517,11 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
static void tun_queue_purge(struct tun_file *tfile)
{
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ struct sk_buff *skb;
+
+ while ((skb = skb_array_consume(&tf...
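The hunk is cut short here, but the rx-batching results further down in this list show the completed consume loop, so the converted purge routine plausibly reads as follows (a reconstruction from those later hunks, not the verbatim patch):

static void tun_queue_purge(struct tun_file *tfile)
{
	struct sk_buff *skb;

	/* Drain the per-queue tx ring and free each pending skb. */
	while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
		kfree_skb(skb);

	skb_queue_purge(&tfile->sk.sk_error_queue);
}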
2016 Jun 17
0
[PATCH net-next V2] tun: introduce tx skb ring
...0, Jason Wang wrote:
> We used to queue tx packets in sk_receive_queue, which is less
> efficient since it requires spinlocks to synchronize between the
> producer and consumer.
>
> This patch tries to address this by:
>
> - introduce a new mode, enabled only when IFF_TX_ARRAY is set,
>   which switches from sk_receive_queue to a fixed-size skb array
>   with 256 entries.
> - introduce a new proto_ops peek_len, used to peek at the skb
>   length.
> - implement a tun version of peek_len for vhost_net to use and convert
> vhost_net to...
2016 Jun 15
7
[PATCH net-next V2] tun: introduce tx skb ring
We used to queue tx packets in sk_receive_queue, which is less
efficient since it requires spinlocks to synchronize between the
producer and consumer.
This patch tries to address this by:
- introduce a new mode, enabled only when IFF_TX_ARRAY is set,
  which switches from sk_receive_queue to a fixed-size skb array
  with 256 entries.
- introduce a new proto_ops peek_len, used to peek at the skb
  length.
- implement a tun version of peek_len for vhost_net to use and convert
vhost_net to use peek_len if possible.
Pkt...
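As an illustration of the peek_len bullet above, here is a minimal sketch of a tun-side implementation, assuming the skb_array_peek_len() helper from <linux/skb_array.h> and tun's existing __tun_get()/tun_put() reference helpers; the upstream version may differ in detail:

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file,
					      socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = __tun_get(tfile);
	if (!tun)
		return 0;

	/* Report the length of the head skb without dequeuing it,
	 * so vhost_net can size its receive buffer up front.
	 */
	ret = skb_array_peek_len(&tfile->tx_array);
	tun_put(tun);

	return ret;
}

vhost_net can then prefer sock->ops->peek_len when it is non-NULL instead of peeking at sk_receive_queue directly.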
2016 Jun 30
9
[PATCH net-next V3 0/6] switch to use tx skb array in tun
Hi all:
This series switches tun to using an skb array, which eliminates the
spinlock contention between producer and consumer. The conversion was
straightforward: just introduce a tx skb array and use it instead of
sk_receive_queue.
A minor issue is keeping the tx_queue_len behaviour, since tun used to
use it as the length of sk_receive_queue. This is done through:
- add the
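The list is truncated at this point. The shape of the fix, though, is to resize every queue's ring whenever dev->tx_queue_len changes; below is a hedged sketch using the kernel's skb_array_resize_multiple() helper, with the tun-internal fields (numqueues, numdisabled, tfiles, disabled, and the per-file next link shown in the first result above) assumed from the driver:

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct skb_array **arrays;
	int n = tun->numqueues + tun->numdisabled;
	int i, ret;

	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	/* Collect the per-queue rings, including detached queues. */
	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		arrays[i] = &tfile->tx_array;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		arrays[i++] = &tfile->tx_array;

	/* Resize all rings to the new tx_queue_len in one shot. */
	ret = skb_array_resize_multiple(arrays, n, dev->tx_queue_len,
					GFP_KERNEL);

	kfree(arrays);
	return ret;
}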
2016 Jun 30
10
[PATCH net-next V4 0/6] switch to use tx skb array in tun
Hi all:
This series switches tun to using an skb array, which eliminates the
spinlock contention between producer and consumer. The conversion was
straightforward: just introduce a tx skb array and use it instead of
sk_receive_queue.
A minor issue is keeping the tx_queue_len behaviour, since tun used to
use it as the length of sk_receive_queue. This is done through:
- add the
2016 Dec 30
0
[PATCH net-next V3 3/3] tun: rx batching
...module_param(rx_batched, int, 0444);
+MODULE_PARM_DESC(rx_batched, "Number of packets batched in rx");
+
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
@@ -522,6 +526,7 @@ static void tun_queue_purge(struct tun_file *tfile)
while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1140,10 +1145,36 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_file *tfile,...
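The helper itself is truncated in every excerpt. Here is a sketch of the batching logic these hunks imply — park skbs on sk_write_queue while the producer signals more are coming, and flush once rx_batched of them have accumulated — reconstructed under the tun_struct-taking signature shown in the V4/V5 results below, not quoted from the patch:

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	/* Batching disabled, or nothing pending and nothing more to
	 * come: deliver immediately.
	 */
	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		/* Flush threshold hit: splice the backlog off-lock. */
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}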
2016 Dec 28
0
[PATCH net-next V2 3/3] tun: rx batching
...module_param(rx_batched, int, 0444);
+MODULE_PARM_DESC(rx_batched, "Number of packets batched in rx");
+
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
@@ -522,6 +526,7 @@ static void tun_queue_purge(struct tun_file *tfile)
while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1140,10 +1145,44 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static int tun_rx_batched(struct tun_file *tfile, s...
2017 Jan 18
0
[PATCH net-next V5 3/3] tun: rx batching
...n.c
@@ -218,6 +218,7 @@ struct tun_struct {
struct list_head disabled;
void *security;
u32 flow_count;
+ u32 rx_batched;
struct tun_pcpu_stats __percpu *pcpu_stats;
};
@@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile)
while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1139,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_struct *tun,...
2017 Jan 06
0
[PATCH V4 net-next 3/3] tun: rx batching
...n.c
@@ -218,6 +218,7 @@ struct tun_struct {
struct list_head disabled;
void *security;
u32 flow_count;
+ u32 rx_batched;
struct tun_pcpu_stats __percpu *pcpu_stats;
};
@@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile)
while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1140,10 +1142,45 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
+static void tun_rx_batched(struct tun_struct *tun,...
2017 Jan 06
5
[PATCH V4 net-next 0/3] vhost_net tx batching
Hi:
This series implements tx batching support for vhost. This was done by
using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over mlx4(noqueue)
on the host:
Mpps -+%
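On the vhost side the hint amounts to a couple of lines in handle_tx() before the sendmsg() call; a sketch, with the "more descriptors pending" test assumed to be vhost_vq_avail_empty() (illustrative, not necessarily the exact upstream condition):

	msg.msg_flags &= ~MSG_MORE;
	/* Hint the backend to keep batching while the guest still
	 * has tx descriptors queued in the virtqueue.
	 */
	if (!vhost_vq_avail_empty(&net->dev, vq))
		msg.msg_flags |= MSG_MORE;

	err = sock->ops->sendmsg(sock, &msg, len);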
2017 Jan 18
7
[PATCH net-next V5 0/3] vhost_net tx batching
Hi:
This series implements tx batching support for vhost. This was done by
using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over mlx4(noqueue)
on the host:
Mpps -+%
2017 Jan 06
2
[PATCH V4 net-next 3/3] tun: rx batching
...> struct list_head disabled;
> void *security;
> u32 flow_count;
> + u32 rx_batched;
> struct tun_pcpu_stats __percpu *pcpu_stats;
> };
>
> @@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile)
> while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
> kfree_skb(skb);
>
> + skb_queue_purge(&tfile->sk.sk_write_queue);
> skb_queue_purge(&tfile->sk.sk_error_queue);
> }
>
> @@ -1140,10 +1142,45 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
> return skb;
> }
>
>...
2016 Dec 30
5
[PATCH net-next V3 0/3] vhost_net tx batching
Hi:
This series implements tx batching support for vhost. This was done by
using MSG_MORE as a hint for the underlying socket. The backend
(e.g. tap) can then batch the packets temporarily in a list and
submit them all once the number of batched packets exceeds a limit.
Tests show an obvious improvement with guest pktgen over mlx4(noqueue)
on the host:
Mpps -+%