Displaying 20 results from an estimated 23 matches for "ewma_pkt_len_add".
2017 Sep 19
0
[PATCH net-next 2/3] virtio-net: add packet len average only when needed during XDP
...0ef4b0..db5924c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -656,6 +656,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data_end = xdp.data + (len - vi->hdr_len);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ if (act != XDP_PASS)
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+
switch (act) {
case XDP_PASS:
/* recalculate offset to account for any header
@@ -671,14 +674,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page,
offset...
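The hunk above moves the average update ahead of the verdict switch: for any verdict other than XDP_PASS the buffer is consumed before an skb ever exists, so the raw receive length is recorded once there, while the XDP_PASS path keeps recording the final skb length later. A minimal userspace sketch of that placement (names and the handler are illustrative assumptions, not the driver's code):

/* Placement sketch only -- illustrative, not the virtio_net code. */
#include <stdio.h>

enum verdict { VERDICT_PASS, VERDICT_TX, VERDICT_DROP };

static void record_len(unsigned int len)
{
        printf("recorded %u bytes into the running average\n", len);
}

/* 'len' is the raw receive length; 'skb_len' is the length of the skb
 * that only exists on the PASS path. */
static void handle_frame(enum verdict act, unsigned int len,
                         unsigned int skb_len)
{
        /* Non-PASS verdicts consume the buffer before any skb is built,
         * so record the raw length once, ahead of the dispatch. */
        if (act != VERDICT_PASS)
                record_len(len);

        switch (act) {
        case VERDICT_PASS:
                record_len(skb_len);    /* skb path records the final length */
                break;
        case VERDICT_TX:
        case VERDICT_DROP:
                break;
        }
}

int main(void)
{
        handle_frame(VERDICT_TX, 1500, 0);
        handle_frame(VERDICT_PASS, 1500, 1514);
        return 0;
}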
2016 Dec 23
0
[PATCH net 6/9] virtio-net: make rx buf size estimation works for XDP
...t/virtio_net.c
index 0778dc8..77ae358 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -584,10 +584,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page,
0, len, PAGE_SIZE);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
return head_skb;
}
break;
case XDP_TX:
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
if (unlikely(xdp_page != page))
goto err_xdp;
rcu_read_unlock();
@@ -596,6 +598,7 @@ static struct sk_buff *receive_mergeable(struct net_dev...
2017 Sep 19
6
[PATCH net-next 1/3] virtio-net: remove unnecessary parameter of virtnet_xdp_xmit()
...goto xdp_xmit;
@@ -677,7 +676,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
break;
case XDP_TX:
- if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ if (unlikely(!virtnet_xdp_xmit(vi, &xdp)))
trace_xdp_exception(vi->dev, xdp_prog, act);
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
if (unlikely(xdp_page != page))
--
2.7.4
2018 Mar 01
0
[PATCH net-next 1/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
..._hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
xdp_set_data_meta_invalid(&xdp);
@@ -736,9 +755,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- if (act != XDP_PASS)
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
switch (act) {
case XDP_PASS:
/* recalculate offset to account for any header
@@ -746,28 +762,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* skb and avoid using offset
*/
offset = xdp.data -
- page_address(xd...
2018 Mar 01
1
[PATCH net-next 1/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...+ vi->hdr_len;
> xdp.data = data + vi->hdr_len;
> xdp_set_data_meta_invalid(&xdp);
> @@ -736,9 +755,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>
> act = bpf_prog_run_xdp(xdp_prog, &xdp);
>
> - if (act != XDP_PASS)
> - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
> -
> switch (act) {
> case XDP_PASS:
> /* recalculate offset to account for any header
> @@ -746,28 +762,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> * skb and avoid using offset
> */
> o...
2018 Mar 01
7
[PATCH net-next 0/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
Hi:
This series tries to re-enable XDP_REDIRECT for mergeable buffers, which
was removed by commit 7324f5399b06 ("virtio_net: disable
XDP_REDIRECT in receive_mergeable() case"). The main concerns were:
- not enough tailroom was reserved, which breaks cpumap
- complex logic like EWMA estimation and linearizing during XDP processing
Fix those by:
- reserve enough tailroom during refill
- disable the EWMA update during XDP processing
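For the tailroom concern above: XDP_REDIRECT targets such as cpumap build an skb around the redirected frame and need space after the data for shared-info metadata, so the refill path has to reserve that space in addition to the XDP headroom. A rough userspace model of the accounting, with the constants as illustrative assumptions rather than the driver's values:

/* Rough headroom/tailroom accounting model -- illustrative only. */
#include <stdio.h>

#define BUF_TRUESIZE     4096u  /* assumed per-buffer allocation */
#define XDP_HEADROOM      256u  /* assumed headroom reserved for XDP */
#define SHINFO_TAILROOM   320u  /* assumed aligned skb_shared_info size */

/* Packet space left once both reserves are carved out of the buffer. */
static unsigned int usable_len(unsigned int truesize)
{
        return truesize - XDP_HEADROOM - SHINFO_TAILROOM;
}

int main(void)
{
        printf("usable packet bytes per buffer: %u of %u\n",
               usable_len(BUF_TRUESIZE), BUF_TRUESIZE);
        return 0;
}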
2016 Dec 23
21
[PATCH net 0/9] several fixups for virtio-net XDP
Merry Xmas and a Happy New Year to all:
This series tries to fix several issues in virtio-net XDP, which
can be categorized into several parts:
- fix several issues during XDP linearizing
- allow csumed packets to work for XDP_PASS
- make EWMA rxbuf size estimation work for XDP
- forbid XDP when GUEST_UFO is supported
- remove big packet XDP support
- add XDP support for small buffers
Please
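On the rxbuf size estimation item: the mergeable-buffer path sizes the next receive buffers from a running average of recent packet lengths, clamped between a minimum and the page size, so an average that XDP never feeds leads to poorly sized buffers. A small sketch of that clamping, under assumed constants (not the driver's actual values):

/* Average-driven buffer sizing sketch -- illustrative, not the driver code. */
#include <stdio.h>

#define PAGE_SIZE     4096u
#define MIN_BUF_LEN    256u  /* assumed lower bound */
#define HDR_LEN         12u  /* assumed virtio-net header size */

static unsigned int clamp_u32(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* Pick the next rx buffer length from the running average packet length. */
static unsigned int mergeable_buf_len(unsigned int avg_pkt_len)
{
        return HDR_LEN + clamp_u32(avg_pkt_len, MIN_BUF_LEN, PAGE_SIZE - HDR_LEN);
}

int main(void)
{
        printf("buf len for avg 300:  %u\n", mergeable_buf_len(300));
        printf("buf len for avg 9000: %u\n", mergeable_buf_len(9000));
        return 0;
}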
2023 Mar 22
1
[PATCH net-next 7/8] virtio_net: introduce receive_mergeable_xdp()
...o_cpu(vi->vdev, hdr->num_buffers);
+ page = virt_to_head_page(buf);
+ offset = buf - page_address(page);
+
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
curr_skb = head_skb;
@@ -1458,9 +1486,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
-err_xdp:
- rcu_read_unlock();
- stats->xdp_drops++;
err_skb:
put_page(page);
mergeable_buf_free(rq, num_buf, dev, stats);
@@ -1468,7 +1493,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
err_buf:...
2015 Aug 19
0
[PATCH 1/4] virtio_net: use DECLARE_EWMA
...ewma mrg_avg_pkt_len;
+ struct ewma_pkt_len mrg_avg_pkt_len;
/* Page frag for packet buffer allocation. */
struct page_frag alloc_frag;
@@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
}
- ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
err_skb:
@@ -600,12 +600,12 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
return err;
}
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeab...
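The conversion above replaces the generic struct ewma / ewma_add() pair with a type and helpers generated per use site by the DECLARE_EWMA() macro (struct ewma_pkt_len, ewma_pkt_len_add(), and so on). A userspace imitation of such a macro, to show roughly what the generated helpers do; the precision/weight handling here is an assumption about a generic fixed-point EWMA, not a copy of the kernel header:

/* Userspace imitation of a DECLARE_EWMA-style macro -- illustrative only. */
#include <stdio.h>

#define DECLARE_MY_EWMA(name, precision, weight_rcp_log2)                   \
        struct ewma_##name { unsigned long internal; };                     \
        static void ewma_##name##_add(struct ewma_##name *e,                \
                                      unsigned long val)                    \
        {                                                                   \
                unsigned long old = e->internal;                            \
                unsigned long scaled = val << (precision);                  \
                /* new = old - old/weight + val/weight, in fixed point */   \
                e->internal = old ?                                         \
                        old - (old >> (weight_rcp_log2)) +                  \
                        (scaled >> (weight_rcp_log2)) : scaled;             \
        }                                                                   \
        static unsigned long ewma_##name##_read(struct ewma_##name *e)      \
        {                                                                   \
                return e->internal >> (precision);                          \
        }

/* weight here is 2^6 = 64: old samples decay with a factor of 63/64. */
DECLARE_MY_EWMA(pkt_len, 4, 6)

int main(void)
{
        struct ewma_pkt_len avg = { 0 };

        ewma_pkt_len_add(&avg, 1500);
        ewma_pkt_len_add(&avg, 64);
        printf("avg = %lu\n", ewma_pkt_len_read(&avg));
        return 0;
}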
2018 Mar 02
6
[PATCH net V2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...formance.
+ */
if (unlikely(num_buf > 1 ||
headroom < virtnet_get_headroom(vi))) {
/* linearize data for XDP */
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- if (act != XDP_PASS)
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
switch (act) {
case XDP_PASS:
/* recalculate offset to account for any header
@@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
+ case XDP_REDIRECT:
+ e...
2018 Mar 02
0
[PATCH net V2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...> 1 ||
> headroom < virtnet_get_headroom(vi))) {
> /* linearize data for XDP */
> @@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>
> act = bpf_prog_run_xdp(xdp_prog, &xdp);
>
> - if (act != XDP_PASS)
> - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
> -
> switch (act) {
> case XDP_PASS:
> /* recalculate offset to account for any header
> @@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> goto err_xdp;
> rcu_read_unlock();
> go...
2020 May 06
4
performance bug in virtio net xdp
So for mergeable buffers, we use the EWMA machinery to guess the correct
buffer size. If we don't guess correctly, XDP has to do aggressive copies.
The problem is that the XDP paths do not update the EWMA at all, except
sometimes with XDP_PASS, so whatever estimate we happened to have before
attaching XDP will mostly stay around.
The fix is probably to update the EWMA unconditionally.
--
MST
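A toy illustration of the staleness described above, with a simple weighted average standing in for the kernel EWMA (an assumption for illustration, not the driver's code): once the XDP path stops feeding lengths into the estimator, the buffer-size guess keeps reflecting the pre-XDP workload.

/* Toy demonstration of the stale-estimate problem -- illustrative only. */
#include <stdio.h>

static unsigned long estimate;

static void ewma_feed(unsigned long len)
{
        /* crude stand-in for the kernel's fixed-point EWMA */
        estimate = estimate ? (estimate * 63 + len) / 64 : len;
}

int main(void)
{
        int i;

        /* Before XDP is attached: small packets train the estimator. */
        for (i = 0; i < 1000; i++)
                ewma_feed(64);

        /* After XDP is attached, 1500-byte frames arrive, but the XDP
         * path never calls the update, so the estimate stays frozen. */
        printf("estimate while receiving 1500-byte frames: %lu bytes\n",
               estimate);
        return 0;
}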
2018 Mar 02
2
[PATCH net] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...formance.
+ */
if (unlikely(num_buf > 1 ||
headroom < virtnet_get_headroom(vi))) {
/* linearize data for XDP */
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- if (act != XDP_PASS)
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
switch (act) {
case XDP_PASS:
/* recalculate offset to account for any header
@@ -770,6 +774,19 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
+ case XDP_REDIRECT:
+ e...