search for: skb_peek

Displaying 20 results from an estimated 36 matches for "skb_peek".

2011 Jan 17
11
[PATCH 1/3] vhost-net: check the support of mergeable buffer outside the receive loop
No need to check the support of mergeable buffer inside the receive loop as the whole handle_rx() is in the read critical region. So this patch moves it ahead of the receiving loop. Signed-off-by: Jason Wang <jasowang at redhat.com> --- drivers/vhost/net.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index
2011 Jan 17
11
[PATCH 1/3] vhost-net: check the support of mergeable buffer outside the receive loop
No need to check the support of mergeable buffer inside the receive loop as the whole handle_rx() is in the read critical region. So this patch moves it ahead of the receiving loop. Signed-off-by: Jason Wang <jasowang at redhat.com> --- drivers/vhost/net.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index
2012 Aug 13
9
[PATCH RFC] xen/netback: Count ring slots properly when larger MTU sizes are used
Hi, I ran into an issue where netback driver is crashing with BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)). It is happening in Intel 10Gbps network when larger mtu values are used. The problem seems to be the way the slots are counted. After applying this patch things ran fine in my environment. I request that you validate my changes. Thanks Siva
2005 Mar 30
5
netem with prio hangs on duplicate
hi i tried the example given on the examples page to duplicate selected traffic like tc qdisc add dev eth0 root handle 1: prio tc qdisc add dev eth0 parent 1:3 handle 3: netem duplicate 40% tc filter add dev eth0 protocol ip parent 1:0 prio 3 u32 match ip dst 11.0.2.2 flowid 1:3 when i ping from 11.0.2.2 to this interface my machine hangs. the same thing works for drop or delay. i would
2016 Jun 17
0
[PATCH net-next V2] tun: introduce tx skb ring
...+ tun = __tun_get(tfile); > + if (!tun) > + return 0; > + > + if (tun->flags & IFF_TX_ARRAY) { > + ret = skb_array_peek_len(&tfile->tx_array); > + } else { > + struct sk_buff *head; > + > + spin_lock_bh(&sk->sk_receive_queue.lock); > + head = skb_peek(&sk->sk_receive_queue); > + if (likely(head)) { > + ret = head->len; > + if (skb_vlan_tag_present(head)) > + ret += VLAN_HLEN; > + } > + spin_unlock_bh(&sk->sk_receive_queue.lock); > + } > + > + tun_put(tun); > + return ret; > +} > + &g...
2010 Jun 28
3
[PATCHv2] vhost-net: add dhclient work-around from userspace
...udp.h> +#include <linux/netdevice.h> + #include <net/sock.h> #include "vhost.h" @@ -186,6 +190,44 @@ static void handle_tx(struct vhost_net *net) unuse_mm(net->dev.mm); } +static int peek_head(struct sock *sk) +{ + struct sk_buff *skb; + + lock_sock(sk); + skb = skb_peek(&sk->sk_receive_queue); + if (unlikely(!skb)) { + release_sock(sk); + return 0; + } + /* Userspace virtio server has the following hack so + * guests rely on it, and we have to replicate it, too: */ + /* Use port number to detect incoming IPv4 DHCP response packets, + * and fill in the c...
2010 Jun 28
3
[PATCHv2] vhost-net: add dhclient work-around from userspace
...udp.h> +#include <linux/netdevice.h> + #include <net/sock.h> #include "vhost.h" @@ -186,6 +190,44 @@ static void handle_tx(struct vhost_net *net) unuse_mm(net->dev.mm); } +static int peek_head(struct sock *sk) +{ + struct sk_buff *skb; + + lock_sock(sk); + skb = skb_peek(&sk->sk_receive_queue); + if (unlikely(!skb)) { + release_sock(sk); + return 0; + } + /* Userspace virtio server has the following hack so + * guests rely on it, and we have to replicate it, too: */ + /* Use port number to detect incoming IPv4 DHCP response packets, + * and fill in the c...
2016 Jun 15
7
[PATCH net-next V2] tun: introduce tx skb ring
...>sk; + struct tun_struct *tun; + int ret = 0; + + tun = __tun_get(tfile); + if (!tun) + return 0; + + if (tun->flags & IFF_TX_ARRAY) { + ret = skb_array_peek_len(&tfile->tx_array); + } else { + struct sk_buff *head; + + spin_lock_bh(&sk->sk_receive_queue.lock); + head = skb_peek(&sk->sk_receive_queue); + if (likely(head)) { + ret = head->len; + if (skb_vlan_tag_present(head)) + ret += VLAN_HLEN; + } + spin_unlock_bh(&sk->sk_receive_queue.lock); + } + + tun_put(tun); + return ret; +} + /* Ops structure to mimic raw sockets with tun */ static con...
2016 Jun 15
7
[PATCH net-next V2] tun: introduce tx skb ring
...>sk; + struct tun_struct *tun; + int ret = 0; + + tun = __tun_get(tfile); + if (!tun) + return 0; + + if (tun->flags & IFF_TX_ARRAY) { + ret = skb_array_peek_len(&tfile->tx_array); + } else { + struct sk_buff *head; + + spin_lock_bh(&sk->sk_receive_queue.lock); + head = skb_peek(&sk->sk_receive_queue); + if (likely(head)) { + ret = head->len; + if (skb_vlan_tag_present(head)) + ret += VLAN_HLEN; + } + spin_unlock_bh(&sk->sk_receive_queue.lock); + } + + tun_put(tun); + return ret; +} + /* Ops structure to mimic raw sockets with tun */ static con...
2023 Mar 06
0
[RFC PATCH v2 3/4] virtio/vsock: free skb on data copy failure
...the user's buffer, I think we should queue it again. In fact, before commit 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff"), we used to remove the packet from the rx_queue, only if memcpy_to_msg() was successful. Maybe it is better to do as we did before and use skb_peek() at the beginning of the loop and __skb_unlink() when skb->len == 0. Thanks, Stefano > goto out; >+ } > > spin_lock_bh(&vvs->rx_lock); > >-- >2.25.1 >
2023 Mar 10
0
[RFC PATCH v4 0/4] several updates to virtio/vsock
...n't need to change skbuff state to update > 'rx_bytes' and 'fwd_cnt' correctly. >2) For SOCK_STREAM, when copying data to user fails, current skbuff is > not dropped. Next read attempt will use same skbuff and last offset. > Instead of 'skb_dequeue()', 'skb_peek()' + '__skb_unlink()' are used. > This behaviour was implemented before skbuff support. >3) For SOCK_SEQPACKET it removes unneeded 'skb_pull()' call, because for > this type of socket each skbuff is used only once: after removing it > from socket's queue, i...
2010 Jun 27
0
[PATCH RFC] vhost-net: add dhclient work-around from userspace
...nclude <linux/netdevice.h> + #include <net/sock.h> #include "vhost.h" @@ -191,6 +195,42 @@ static void handle_tx(struct vhost_net *net) unuse_mm(net->dev.mm); } +static int peek_head(struct sock *sk) +{ + struct sk_buff *head; + int ret; + + lock_sock(sk); + head = skb_peek(&sk->sk_receive_queue); + if (likely(head)) { + ret = 1; + /* Userspace virtio server has the following hack so + * guests rely on it, and we have to replicate it, too: */ + /* On linux guests, some apps that use recvmsg with AF_PACKET + * sockets, don't know how to handle CHECKSU...
2010 Jun 27
0
[PATCH RFC] vhost-net: add dhclient work-around from userspace
...nclude <linux/netdevice.h> + #include <net/sock.h> #include "vhost.h" @@ -191,6 +195,42 @@ static void handle_tx(struct vhost_net *net) unuse_mm(net->dev.mm); } +static int peek_head(struct sock *sk) +{ + struct sk_buff *head; + int ret; + + lock_sock(sk); + head = skb_peek(&sk->sk_receive_queue); + if (likely(head)) { + ret = 1; + /* Userspace virtio server has the following hack so + * guests rely on it, and we have to replicate it, too: */ + /* On linux guests, some apps that use recvmsg with AF_PACKET + * sockets, don't know how to handle CHECKSU...
2023 Mar 09
0
[RFC PATCH v3 0/4] several updates to virtio/vsock
...o update >>> 'rx_bytes' and 'fwd_cnt' correctly. >>> 2) For SOCK_STREAM, when copying data to user fails, current skbuff is >>> not dropped. Next read attempt will use same skbuff and last offset. >>> Instead of 'skb_dequeue()', 'skb_peek()' + '__skb_unlink()' are used. >>> This behaviour was implemented before skbuff support. >>> 3) For SOCK_SEQPACKET it removes unneeded 'skb_pull()' call, because for >>> this type of socket each skbuff is used only once: after removing it >>...
2010 Mar 03
1
[RFC][ PATCH 1/3] vhost-net: support multiple buffer heads in receiver
...len += len; if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); @@ -182,12 +181,22 @@ unuse_mm(net->dev.mm); } +static int skb_head_len(struct sk_buff_head *skq) +{ + struct sk_buff *head; + + head = skb_peek(skq); + if (head) + return head->len; + return 0; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) { struct vhost_virtqueue *vq = &net->d...
2010 Mar 03
1
[RFC][ PATCH 1/3] vhost-net: support multiple buffer heads in receiver
...len += len; if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); @@ -182,12 +181,22 @@ unuse_mm(net->dev.mm); } +static int skb_head_len(struct sk_buff_head *skq) +{ + struct sk_buff *head; + + head = skb_peek(skq); + if (head) + return head->len; + return 0; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) { struct vhost_virtqueue *vq = &net->d...
2016 Jun 30
0
[PATCH net-next V3 6/6] tun: switch to use skb array for tx
...tatic int peek_head_len(struct sock *sk) { + struct socket *sock = sk->sk_socket; struct sk_buff *head; int len = 0; unsigned long flags; + if (sock->ops->peek_len) + return sock->ops->peek_len(sock); + spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); head = skb_peek(&sk->sk_receive_queue); if (likely(head)) { @@ -497,6 +501,16 @@ static int peek_head_len(struct sock *sk) return len; } +static int sk_has_rx_data(struct sock *sk) +{ + struct socket *sock = sk->sk_socket; + + if (sock->ops->peek_len) + return sock->ops->peek_len(sock...
2010 Apr 06
1
[PATCH v3] Add Mergeable receive buffer support to vhost_net
...signal(&net->dev, vq, head, + 0); + } break; } if (err != len) @@ -186,12 +192,25 @@ static void handle_tx(struct vhost_net * unuse_mm(net->dev.mm); } +static int vhost_head_len(struct sock *sk) +{ + struct sk_buff *head; + int len = 0; + + lock_sock(sk); + head = skb_peek(&sk->sk_receive_queue); + if (head) + len = head->len; + release_sock(sk); + return len; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) { struct vhost_virtqueue *vq =...
2010 Apr 06
1
[PATCH v3] Add Mergeable receive buffer support to vhost_net
...signal(&net->dev, vq, head, + 0); + } break; } if (err != len) @@ -186,12 +192,25 @@ static void handle_tx(struct vhost_net * unuse_mm(net->dev.mm); } +static int vhost_head_len(struct sock *sk) +{ + struct sk_buff *head; + int len = 0; + + lock_sock(sk); + head = skb_peek(&sk->sk_receive_queue); + if (head) + len = head->len; + release_sock(sk); + return len; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) { struct vhost_virtqueue *vq =...
2010 Apr 19
2
[PATCH v4] Add mergeable RX bufs support to vhost
...rd_desc(vq, 1); tx_poll_start(net, sock); break; } @@ -186,12 +186,25 @@ static void handle_tx(struct vhost_net * unuse_mm(net->dev.mm); } +static int vhost_head_len(struct vhost_virtqueue *vq, struct sock *sk) +{ + struct sk_buff *head; + int len = 0; + + lock_sock(sk); + head = skb_peek(&sk->sk_receive_queue); + if (head) + len = head->len + vq->sock_hlen; + release_sock(sk); + return len; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) { struct vh...