search for: tun_get_user

Displaying 20 results from an estimated 89 matches for "tun_get_user".

2014 Oct 23
0
[PATCH RFC] tun: fix sparse warnings for virtio headers
...6 typedefs come in. drivers/net/tun.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 186ce54..ee27ecb 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1043,10 +1043,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EFAULT; if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && - gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) - gso.hdr_len = gso.csum_start + gso.csum_offset + 2; + __virtio16_to_cpu(false, gso.csum_start)...
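The patch above replaces raw arithmetic on the __virtio16 header fields with __virtio16_to_cpu() conversions, so the values are in CPU byte order before they are compared. Below is a rough userspace analogue of that conversion step, not the kernel code: le16toh() stands in for __virtio16_to_cpu(false, ...), which treats the field as little-endian on the legacy virtio interface, and the struct and values are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

/* Illustrative stand-in for the kernel's __virtio16 fields: raw
 * little-endian 16-bit values as they arrive from the guest. */
struct vnet_hdr_le {
    uint16_t csum_start;   /* little-endian on the wire */
    uint16_t csum_offset;  /* little-endian on the wire */
    uint16_t hdr_len;      /* little-endian on the wire */
};

/* Userspace analogue of __virtio16_to_cpu(false, x): for the legacy
 * interface the value is always little-endian. */
static uint16_t virtio16_to_cpu(uint16_t le_val)
{
    return le16toh(le_val);
}

int main(void)
{
    struct vnet_hdr_le gso = {
        .csum_start  = htole16(14),
        .csum_offset = htole16(16),
        .hdr_len     = htole16(20),
    };

    /* Convert to CPU byte order *before* doing arithmetic, which is what
     * the patch adds; comparing raw __virtio16 values would be wrong on
     * big-endian hosts (and is what sparse complains about). */
    uint16_t start  = virtio16_to_cpu(gso.csum_start);
    uint16_t offset = virtio16_to_cpu(gso.csum_offset);
    uint16_t hdrlen = virtio16_to_cpu(gso.hdr_len);

    if (start + offset + 2 > hdrlen)
        hdrlen = start + offset + 2;

    printf("effective hdr_len = %u\n", (unsigned)hdrlen);
    return 0;
}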
2016 Dec 30
0
[PATCH net-next V3 3/3] tun: rx batching
...tail_init(queue, &process_queue); + rcv = true; + } + spin_unlock(&queue->lock); + + if (rcv) { + local_bh_disable(); + while ((skb = __skb_dequeue(&process_queue))) + netif_receive_skb(skb); + local_bh_enable(); + } +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1283,10 +1314,15 @@ static ssize_t tun_get_user(struct tun_struct *...
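The flush path quoted above drains a per-file backlog into netif_receive_skb() once batching stops. The following is a simplified, self-contained sketch of that control flow only: a made-up packet list stands in for the skb queue, a printf stands in for netif_receive_skb(), and RX_BATCH_LIMIT is an illustrative name and value, not the module parameter from the patch.

#include <stdbool.h>
#include <stdio.h>

#define RX_BATCH_LIMIT 4   /* illustrative batching threshold */

struct packet { int id; struct packet *next; };

/* Pending backlog, analogue of tfile->sk.sk_write_queue. */
static struct packet *backlog_head, *backlog_tail;
static int backlog_len;

static void deliver(struct packet *p)
{
    /* Stand-in for netif_receive_skb(). */
    printf("deliver packet %d\n", p->id);
}

/* Analogue of the batched receive helper: queue while the sender hints
 * that more is coming, flush the whole batch otherwise or when the
 * limit is hit.  The kernel code does the splice under the queue lock
 * and delivers with BHs disabled; that part is omitted here. */
static void rx_batched(struct packet *p, bool more)
{
    p->next = NULL;
    if (backlog_tail)
        backlog_tail->next = p;
    else
        backlog_head = p;
    backlog_tail = p;
    backlog_len++;

    if (more && backlog_len < RX_BATCH_LIMIT)
        return;                       /* keep batching */

    struct packet *q = backlog_head;  /* splice to a local list */
    backlog_head = backlog_tail = NULL;
    backlog_len = 0;
    while (q) {
        struct packet *next = q->next;
        deliver(q);
        q = next;
    }
}

int main(void)
{
    static struct packet pkts[6];
    for (int i = 0; i < 6; i++) {
        pkts[i].id = i;
        rx_batched(&pkts[i], i != 5 /* MSG_MORE on all but the last */);
    }
    return 0;
}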
2016 Dec 28
0
[PATCH net-next V2 3/3] tun: rx batching
...+ + if (rcv) { + local_bh_disable(); + while ((skb = __skb_dequeue(&process_queue))) + netif_receive_skb(skb); + local_bh_enable(); + } + + return 0; +drop: + spin_unlock(&queue->lock); + kfree_skb(skb); + return -EFAULT; +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1283,18 +1322,27 @@ static ssize_t tun_get_user(struct tun_struct *...
2017 Jan 18
0
[PATCH net-next V5 3/3] tun: rx batching
...+ spin_unlock(&queue->lock); + + if (rcv) { + struct sk_buff *nskb; + + local_bh_disable(); + while ((nskb = __skb_dequeue(&process_queue))) + netif_receive_skb(nskb); + netif_receive_skb(skb); + local_bh_enable(); + } +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1283,9 +1321,7 @@ static ssize_t tun_get_user(struct tun_struct *tu...
2017 Jan 06
0
[PATCH V4 net-next 3/3] tun: rx batching
...} + spin_unlock(&queue->lock); + + if (rcv) { + struct sk_buff *nskb; + local_bh_disable(); + while ((nskb = __skb_dequeue(&process_queue))) + netif_receive_skb(nskb); + netif_receive_skb(skb); + local_bh_enable(); + } +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1283,10 +1320,9 @@ static ssize_t tun_get_user(struct tun_struct *t...
2017 Jan 06
5
[PATCH V4 net-next 0/3] vhost_net tx batching
Hi: This series tries to implement tx batching support for vhost. This was done by using MSG_MORE as a hint for the lower-layer socket. The backend (e.g. tap) can then batch the packets temporarily in a list and submit them all at once when the number of batched packets exceeds a limit. Tests show an obvious improvement for guest pktgen over mlx4 (noqueue) on the host: Mpps -+%
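On the transmit side the hint is simply the MSG_MORE flag passed to sendmsg()/send(): vhost_net sets it while more descriptors are pending, so the tap backend knows it may keep batching. Below is a minimal userspace illustration of the flag's plumbing over a UDP socket; the address and port are arbitrary, and with UDP the kernel coalesces the chunks into one datagram, so this is only an analogue of the flag usage, not of the vhost/tap path itself.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    struct sockaddr_in dst = {
        .sin_family = AF_INET,
        .sin_port   = htons(9999),   /* arbitrary example port */
    };
    inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
    if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
        perror("connect");
        return 1;
    }

    const char *chunks[] = { "first ", "second ", "last" };
    int n = 3;

    for (int i = 0; i < n; i++) {
        /* MSG_MORE on everything but the final chunk tells the lower
         * layer that more data follows, so it may hold the data back
         * and combine it; the last call (without the flag) flushes. */
        int flags = (i < n - 1) ? MSG_MORE : 0;
        if (send(fd, chunks[i], strlen(chunks[i]), flags) < 0)
            perror("send");
    }

    close(fd);
    return 0;
}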
2017 Jan 18
7
[PATCH net-next V5 0/3] vhost_net tx batching
Hi: This series tries to implement tx batching support for vhost. This was done by using MSG_MORE as a hint for the lower-layer socket. The backend (e.g. tap) can then batch the packets temporarily in a list and submit them all at once when the number of batched packets exceeds a limit. Tests show an obvious improvement for guest pktgen over mlx4 (noqueue) on the host: Mpps -+%
2016 Dec 30
5
[PATCH net-next V3 0/3] vhost_net tx batching
Hi: This series tries to implement tx batching support for vhost. This was done by using MSG_MORE as a hint for the lower-layer socket. The backend (e.g. tap) can then batch the packets temporarily in a list and submit them all at once when the number of batched packets exceeds a limit. Tests show an obvious improvement for guest pktgen over mlx4 (noqueue) on the host: Mpps -+%
2017 Jan 06
2
[PATCH V4 net-next 3/3] tun: rx batching
...> + struct sk_buff *nskb; > + local_bh_disable(); > + while ((nskb = __skb_dequeue(&process_queue))) > + netif_receive_skb(nskb); > + netif_receive_skb(skb); > + local_bh_enable(); > + } > +} > + > /* Get packet from user space buffer */ > static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, > void *msg_control, struct iov_iter *from, > - int noblock) > + int noblock, bool more) > { > struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; > struct sk_buff *skb; > @@ -1283,10 +1320,9 @@ static ssize_...
2016 Dec 28
7
[PATCH net-next V2 0/3] vhost net tx batching
Hi: This series tries to implement tx batching support for vhost. This was done by using MSG_MORE as a hint for the lower-layer socket. The backend (e.g. tap) can then batch the packets temporarily in a list and submit them all at once when the number of batched packets exceeds a limit. Tests show an obvious improvement for guest pktgen over mlx4 (noqueue) on the host: Mpps -+%
2017 Jan 03
2
[PATCH net-next V2 3/3] tun: rx batching
...le, struct sk_buff *skb, > + int more) > +{ > + struct sk_buff_head *queue = &tfile->sk.sk_write_queue; > + struct sk_buff_head process_queue; > + int qlen; > + bool rcv = false; > + > + spin_lock(&queue->lock); Should this be spin_lock_bh()? Below and in tun_get_user() there are explicit local_bh_disable() calls so I guess BHs can interrupt us here and this would deadlock.
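The worry is a classic self-deadlock: if process context takes the queue lock with plain spin_lock() and a BH on the same CPU then tries to take the same lock, the CPU spins forever, whereas spin_lock_bh() keeps BHs off while the lock is held. The following is a userspace analogue of the reviewer's point only, not the code that was merged: a signal handler plays the role of the BH, and blocking the signal around the critical section plays the role of disabling BHs.

#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;
static volatile sig_atomic_t handler_ran;

static void take_lock(void) { while (atomic_flag_test_and_set(&lock)) ; }
static void drop_lock(void) { atomic_flag_clear(&lock); }

/* The "BH": it also wants the lock.  If it interrupted code that already
 * held the lock, it would spin here forever on a single CPU. */
static void bh_like_handler(int sig)
{
    (void)sig;
    take_lock();
    handler_ran = 1;
    drop_lock();
}

int main(void)
{
    signal(SIGALRM, bh_like_handler);

    sigset_t block, old;
    sigemptyset(&block);
    sigaddset(&block, SIGALRM);

    /* Analogue of spin_lock_bh(): keep the interrupting context out
     * while we hold the lock, then let it back in after unlock. */
    sigprocmask(SIG_BLOCK, &block, &old);
    take_lock();
    raise(SIGALRM);              /* delivery is deferred until unblocked */
    drop_lock();
    sigprocmask(SIG_SETMASK, &old, NULL);

    while (!handler_ran)
        ;                        /* handler runs safely after the unlock */

    printf("no deadlock: handler ran after the lock was released\n");
    return 0;
}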
2015 Jan 30
0
[PATCH v2 2/3] Revert "drivers/net, ipv6: Select IPv6 fragment idents for virtio UFO packets"
...@@ -65,7 +65,6 @@ #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> -#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> @@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, break; } - skb_reset_network_header(skb); - if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -1189,8 +1186,6 @@ static ssize_t tun_get_user(struct tun_...