Displaying results from an estimated 83 matches for "skb_reserv".
2018 Sep 06
2
[PATCH net-next 06/11] tuntap: split out XDP logic
...);
> local_bh_enable();
> @@ -1729,18 +1747,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
> build:
> skb = build_skb(buf, buflen);
> if (!skb) {
> + put_page(alloc_frag->page);
> skb = ERR_PTR(-ENOMEM);
> goto out;
> }
>
> - skb_reserve(skb, pad - delta);
> + skb_reserve(skb, pad);
> skb_put(skb, len);
>
> return skb;
>
> err_xdp:
> - alloc_frag->offset -= buflen;
> - put_page(alloc_frag->page);
> + this_cpu_inc(tun->pcpu_stats->rx_dropped);
This fixes a bug in the previous patch which...
2018 Sep 06
0
[PATCH net-next 06/11] tuntap: split out XDP logic
...len = xdp.data_end - xdp.data;
}
rcu_read_unlock();
local_bh_enable();
@@ -1729,18 +1747,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
build:
skb = build_skb(buf, buflen);
if (!skb) {
+ put_page(alloc_frag->page);
skb = ERR_PTR(-ENOMEM);
goto out;
}
- skb_reserve(skb, pad - delta);
+ skb_reserve(skb, pad);
skb_put(skb, len);
return skb;
err_xdp:
- alloc_frag->offset -= buflen;
- put_page(alloc_frag->page);
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
out:
rcu_read_unlock();
local_bh_enable();
--
2.17.1
2018 Sep 07
0
[PATCH net-next 06/11] tuntap: split out XDP logic
...18 +1747,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
>> build:
>> skb = build_skb(buf, buflen);
>> if (!skb) {
>> + put_page(alloc_frag->page);
>> skb = ERR_PTR(-ENOMEM);
>> goto out;
>> }
>>
>> - skb_reserve(skb, pad - delta);
>> + skb_reserve(skb, pad);
>> skb_put(skb, len);
>>
>> return skb;
>>
>> err_xdp:
>> - alloc_frag->offset -= buflen;
>> - put_page(alloc_frag->page);
>> + this_cpu_inc(tun->pcpu_stats->rx_dropped);...
2008 Apr 18
4
[0/6] [NET]: virtio SG/TSO patches
Hi:
Here are the patches I used for testing KVM with virtio-net using
TSO. There are three patches for the tun device which are basically
Rusty's patches with the mmap turned into copying (for correctness).
Two patches are for the virtio-net frontend, one required to support
receiving SG/TSO, and the other useful for testing SG per se. The
other patch is to the KVM backend to make all this
2018 Sep 06
22
[PATCH net-next 00/11] Vhost_net TX batching
Hi all:
This series tries to batch submitting packets to the underlying socket
through msg_control during sendmsg(). This is done by:
1) Doing userspace copy inside vhost_net
2) Build XDP buff
3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once
through msg_control during sendmsg().
4) Underlying sockets can use XDP buffs directly when XDP is enabled,
or build skb based on XDP
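As an aside, here is a minimal userspace sketch of the batch-and-flush idea in points 3) and 4) above: buffers are collected into a fixed-size array and submitted in a single call once the array fills up (or at the end of a burst). The names here (xdp_slot, queue_packet, flush_batch) are hypothetical stand-ins, not the actual vhost_net/tun internals.

#include <stddef.h>
#include <stdio.h>

#define BATCH_MAX 64                    /* mirrors VHOST_NET_BATCH above */

struct xdp_slot {                       /* stand-in for one XDP buff descriptor */
	void *data;
	size_t len;
};

static struct xdp_slot batch[BATCH_MAX];
static int batched;

/* Stand-in for submitting the whole array at once (the series does this
 * through msg_control during a single sendmsg()). */
static void flush_batch(void)
{
	if (!batched)
		return;
	printf("submitting %d buffers in one call\n", batched);
	batched = 0;
}

/* Queue one packet; flush automatically when the batch is full. */
static void queue_packet(void *data, size_t len)
{
	batch[batched].data = data;
	batch[batched].len = len;
	if (++batched == BATCH_MAX)
		flush_batch();
}

int main(void)
{
	char pkt[64] = { 0 };

	for (int i = 0; i < 150; i++)
		queue_packet(pkt, sizeof(pkt));
	flush_batch();                  /* submit the final, partially filled batch */
	return 0;
}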
2018 Sep 12
14
[PATCH net-next V2 00/11] vhost_net TX batching
Hi all:
This series tries to batch submitting packets to the underlying socket
through msg_control during sendmsg(). This is done by:
1) Doing userspace copy inside vhost_net
2) Build XDP buff
3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once
through msg_control during sendmsg().
4) Underlying sockets can use XDP buffs directly when XDP is enabled,
or build skb based on XDP
2018 Sep 06
1
[PATCH net-next 10/11] tap: accept an array of XDP buffs through sendmsg()
...s & IFF_VNET_HDR)
> + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
> +
> + skb = build_skb(xdp->data_hard_start, buflen);
> + if (!skb) {
> + err = -ENOMEM;
> + goto err;
> + }
So fundamentally why is it called XDP?
We just build an skb, don't we?
> +
> + skb_reserve(skb, xdp->data - xdp->data_hard_start);
> + skb_put(skb, xdp->data_end - xdp->data);
> +
> + skb_set_network_header(skb, ETH_HLEN);
> + skb_reset_mac_header(skb);
> + skb->protocol = eth_hdr(skb)->h_proto;
> +
> + if (vnet_hdr_len) {
> + err = virtio_net_...
2008 Jan 23
1
[PATCH 1/3] Cleanup and simplify virtnet header
1) Turn GSO on virtio net into an all-or-nothing (keep checksumming
separate). Having multiple bits is a pain: if you can't support something
you should handle it in software, which is still a performance win.
2) Make VIRTIO_NET_HDR_GSO_ECN a flag in the header, so it can apply to
IPv6 or v4.
3) Rename VIRTIO_NET_F_NO_CSUM to VIRTIO_NET_F_CSUM (i.e. it means we do
checksumming).
4)
2018 Sep 06
2
[PATCH net-next 04/11] tuntap: simplify error handling in tun_build_skb()
...skb = build_skb(buf, buflen);
> - if (!skb)
> - return ERR_PTR(-ENOMEM);
> + if (!skb) {
> + skb = ERR_PTR(-ENOMEM);
> + goto out;
So goto out will skip put_page(), even though
we did get_page() above. Seems wrong. You should
goto err_skb or something like this.
> + }
>
> skb_reserve(skb, pad - delta);
> skb_put(skb, len);
> - get_page(alloc_frag->page);
> - alloc_frag->offset += buflen;
>
> return skb;
>
> -err_redirect:
> - put_page(alloc_frag->page);
> err_xdp:
> + alloc_frag->offset -= buflen;
> + put_page(alloc_frag->...
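The objection above is the usual balanced-unwind rule for goto-style error handling: any reference taken before the failure point has to be dropped on the error path, so jumping to a label that skips the release leaks it. Here is a minimal sketch of the pattern, with get_ref()/put_ref() as hypothetical stand-ins for get_page()/put_page():

#include <stdlib.h>

struct page_ref { int refcnt; };

static void get_ref(struct page_ref *p) { p->refcnt++; }
static void put_ref(struct page_ref *p) { p->refcnt--; }

/* Take a reference, then try an allocation. If the allocation fails,
 * the error path must drop the reference it already took; a bare
 * "goto out" past the release would leak it. */
static void *build_object(struct page_ref *page, size_t len)
{
	void *obj;

	get_ref(page);                  /* analogue of get_page(alloc_frag->page) */

	obj = malloc(len);
	if (!obj)
		goto err_ref;           /* unwind, do not skip the release */

	return obj;                     /* on success the reference travels with obj */

err_ref:
	put_ref(page);                  /* analogue of put_page() on the error path */
	return NULL;
}

int main(void)
{
	struct page_ref page = { .refcnt = 1 };
	void *obj = build_object(&page, 64);

	if (obj) {
		free(obj);
		put_ref(&page);         /* drop the reference that came with obj */
	}
	return 0;
}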
2016 Feb 21
1
[PATCH] virtio_net: switch to build_skb for mrg_rxbuf
...tio_net_hdr_mrg_rxbuf))
+#define VNET_SKB_OFF VNET_SKB_LEN(VNET_SKB_PAD)
+
+static struct sk_buff *vnet_build_skb(struct virtnet_info *vi,
+ void *buf,
+ unsigned int len, unsigned int truesize)
+{
+ struct sk_buff *skb = build_skb(buf, truesize);
+
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, VNET_SKB_PAD);
+ skb_put(skb, VNET_SKB_LEN(len));
+
+ return skb;
+}
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -354,14 +374,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,...
2018 Sep 06
0
[PATCH net-next 03/11] tuntap: enable bh early during processing XDP
...tun.c
@@ -1726,22 +1726,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
goto err_xdp;
}
}
+ rcu_read_unlock();
+ local_bh_enable();
skb = build_skb(buf, buflen);
- if (!skb) {
- rcu_read_unlock();
- local_bh_enable();
+ if (!skb)
return ERR_PTR(-ENOMEM);
- }
skb_reserve(skb, pad - delta);
skb_put(skb, len);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
- rcu_read_unlock();
- local_bh_enable();
-
return skb;
err_redirect:
--
2.17.1
2018 Sep 06
0
[PATCH net-next 04/11] tuntap: simplify error handling in tun_build_skb()
...orig_data - xdp.data;
len = xdp.data_end - xdp.data;
@@ -1730,23 +1725,23 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
local_bh_enable();
skb = build_skb(buf, buflen);
- if (!skb)
- return ERR_PTR(-ENOMEM);
+ if (!skb) {
+ skb = ERR_PTR(-ENOMEM);
+ goto out;
+ }
skb_reserve(skb, pad - delta);
skb_put(skb, len);
- get_page(alloc_frag->page);
- alloc_frag->offset += buflen;
return skb;
-err_redirect:
- put_page(alloc_frag->page);
err_xdp:
+ alloc_frag->offset -= buflen;
+ put_page(alloc_frag->page);
+out:
rcu_read_unlock();
local_bh_enable()...
2008 Aug 26
0
[PATCH] xen-netfront: Avoid unaligned accesses to IP header.
...n(&np->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
+ /* Align ip header to a 16 bytes boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page) {
kfree_skb(skb);
--
1.6.0.rc0.42.g186458
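For context on why that small skb_reserve() helps: the Ethernet header is 14 bytes, so without extra headroom the IP header would start at offset 14, which is not 4-byte aligned. Reserving NET_IP_ALIGN (2 on most architectures) first pushes the IP header to offset 16. A trivial illustration of the offset arithmetic, using the usual constant values rather than anything taken from the patch itself:

#include <stdio.h>

#define ETH_HLEN     14   /* Ethernet header length */
#define NET_IP_ALIGN  2   /* typical value; it is 0 on some architectures */

int main(void)
{
	/* Without the reserve, the IP header follows the Ethernet header
	 * directly at offset 14, a misaligned position. */
	printf("IP header offset without reserve: %d\n", ETH_HLEN);

	/* Reserving NET_IP_ALIGN bytes of headroom first shifts the whole
	 * frame, so the IP header lands at offset 16. */
	printf("IP header offset with reserve:    %d\n", NET_IP_ALIGN + ETH_HLEN);
	return 0;
}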
2008 Jul 03
0
[PATCH] xen/netfront: Avoid unaligned accesses to IP datagrams.
...n(&np->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
+ /* Align ip header to a 16 bytes boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page) {
kfree_skb(skb);
--
1.5.3
--
yamahata