search for: mrg_avg_pkt_len

Displaying 20 results from an estimated 120 matches for "mrg_avg_pkt_len".

2015 Aug 19
0
[PATCH 1/4] virtio_net: use DECLARE_EWMA
...E_AVG_WEIGHT 64 +DECLARE_EWMA(pkt_len, 1, 64) /* Minimum alignment for mergeable packet buffers. */ #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) @@ -85,7 +85,7 @@ struct receive_queue { struct page *pages; /* Average packet length for mergeable receive buffers. */ - struct ewma mrg_avg_pkt_len; + struct ewma_pkt_len mrg_avg_pkt_len; /* Page frag for packet buffer allocation. */ struct page_frag alloc_frag; @@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } } - ewma_add(&rq->mrg_avg_pkt_len, head_skb->len); + ewma_pkt_len_add(&...
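The hunk above replaces the generic struct ewma / ewma_add() pair (with its run-time RECEIVE_AVG_WEIGHT parameter) by a type generated at compile time: DECLARE_EWMA(pkt_len, 1, 64) emits struct ewma_pkt_len and ewma_pkt_len_add(), with precision and weight baked into the type. The following is a minimal userspace sketch of the arithmetic only, assuming the usual new = (old * (weight - 1) + sample) / weight update in fixed point; it is not the kernel's <linux/average.h> implementation, and all model_* names are made up for illustration.

#include <stdio.h>

#define MODEL_PRECISION 1   /* fraction bits, like the "1" in DECLARE_EWMA(pkt_len, 1, 64) */
#define MODEL_WEIGHT    64  /* averaging factor, like the "64" */

struct model_ewma_pkt_len {
	unsigned long internal;   /* average << MODEL_PRECISION; 0 means "no samples yet" */
};

void model_ewma_pkt_len_add(struct model_ewma_pkt_len *e, unsigned long val)
{
	unsigned long scaled = val << MODEL_PRECISION;

	if (!e->internal)                         /* first sample seeds the average */
		e->internal = scaled;
	else
		e->internal = (e->internal * (MODEL_WEIGHT - 1) + scaled) / MODEL_WEIGHT;
}

unsigned long model_ewma_pkt_len_read(const struct model_ewma_pkt_len *e)
{
	return e->internal >> MODEL_PRECISION;
}

int main(void)
{
	struct model_ewma_pkt_len avg = { 0 };
	unsigned long lens[] = { 1500, 1500, 64, 64, 9000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		model_ewma_pkt_len_add(&avg, lens[i]);
		printf("sample %lu -> avg %lu\n", lens[i], model_ewma_pkt_len_read(&avg));
	}
	return 0;
}

With a weight of 64 the average moves slowly, so a single unusually large or small packet barely shifts the buffer-size estimate derived from mrg_avg_pkt_len.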
2014 Jan 16
2
[PATCH net-next v3 5/5] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...lude <linux/average.h> > +#include <linux/seqlock.h> > > static int napi_weight = NAPI_POLL_WEIGHT; > module_param(napi_weight, int, 0444); > @@ -89,6 +90,12 @@ struct receive_queue { > /* Average packet length for mergeable receive buffers. */ > struct ewma mrg_avg_pkt_len; > > + /* Sequence counter to allow sysfs readers to safely access stats. > + * Assumes a single virtio-net writer, which is enforced by virtio-net > + * and NAPI. > + */ > + seqcount_t sysfs_seq; > + > /* Page frag for packet buffer allocation. */ > struct page_...
2014 Jan 16
0
[PATCH net-next v3 5/5] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...#include <linux/cpu.h> #include <linux/average.h> +#include <linux/seqlock.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -89,6 +90,12 @@ struct receive_queue { /* Average packet length for mergeable receive buffers. */ struct ewma mrg_avg_pkt_len; + /* Sequence counter to allow sysfs readers to safely access stats. + * Assumes a single virtio-net writer, which is enforced by virtio-net + * and NAPI. + */ + seqcount_t sysfs_seq; + /* Page frag for packet buffer allocation. */ struct page_frag alloc_frag; @@ -416,7 +423,9 @@ static...
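The seqcount added above lets a sysfs reader sample mrg_avg_pkt_len (and the buffer size derived from it) without locking against the single NAPI writer. A hypothetical userspace model of that single-writer sequence-counter protocol, using C11 atomics rather than the kernel's seqcount_t API, looks roughly like this (all model_* names are invented):

#include <stdatomic.h>

struct model_rq_stats {
	atomic_uint  seq;                /* even: stable, odd: write in progress */
	atomic_ulong mrg_avg_pkt_len;    /* the stat the NAPI path updates */
};

/* Writer side: only ever runs in one context (NAPI in virtio-net). */
void model_stats_update(struct model_rq_stats *s, unsigned long len)
{
	atomic_fetch_add(&s->seq, 1);           /* counter goes odd */
	atomic_store(&s->mrg_avg_pkt_len, len);
	atomic_fetch_add(&s->seq, 1);           /* counter back to even */
}

/* Reader side (the sysfs show() path): retry if a write raced with us. */
unsigned long model_stats_read(struct model_rq_stats *s)
{
	unsigned int start;
	unsigned long val;

	for (;;) {
		start = atomic_load(&s->seq);
		if (start & 1)
			continue;                       /* writer active, sample again */
		val = atomic_load(&s->mrg_avg_pkt_len);
		if (atomic_load(&s->seq) == start)
			return val;                     /* no write raced with us */
	}
}

The assumption spelled out in the quoted comment (a single writer, enforced by virtio-net and NAPI) is what makes the plain increments on the writer side sufficient.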
2017 Mar 29
1
[PATCH] virtio_net: fix support for small rings
...GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); + rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); return ALIGN(len, L1_CACHE_BYTES); } @@ -914,7 +918,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, int err; unsigned int len, hole; - len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) return -ENOMEM; @@ -2086,6 +2090,21 @@ static void virtnet_del_vqs(struct virtnet_info *vi) virtnet_free_queues(vi); } +/* How large should a single bu...
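The change above replaces the fixed GOOD_PACKET_LEN lower bound with a per-ring rq->min_buf_len, so the estimated buffer size can never drop below what a small ring needs to hold a packet at all. Stripped of driver specifics, the estimate is the EWMA of recent packet lengths, clamped between that minimum and PAGE_SIZE minus the header, then rounded up to a cache line. A sketch with made-up constants (the values and model_* helpers are illustrative, not the driver's):

#include <stdio.h>

#define MODEL_HDR_LEN     12      /* e.g. a mergeable rx buffer header */
#define MODEL_PAGE_SIZE   4096
#define MODEL_CACHE_LINE  64

unsigned int model_align(unsigned int len, unsigned int a)
{
	return (len + a - 1) & ~(a - 1);
}

unsigned int model_clamp(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* avg_pkt_len: EWMA of recent packet lengths; min_buf_len: per-ring floor. */
unsigned int model_mergeable_buf_len(unsigned int avg_pkt_len,
				     unsigned int min_buf_len)
{
	unsigned int len = MODEL_HDR_LEN +
		model_clamp(avg_pkt_len,
			    min_buf_len - MODEL_HDR_LEN,
			    MODEL_PAGE_SIZE - MODEL_HDR_LEN);

	return model_align(len, MODEL_CACHE_LINE);
}

int main(void)
{
	/* A tiny average is pulled up to the ring minimum... */
	printf("%u\n", model_mergeable_buf_len(100, 1536));     /* -> 1536 */
	/* ...and a huge one is capped so a buffer never exceeds one page. */
	printf("%u\n", model_mergeable_buf_len(60000, 1536));   /* -> 4096 */
	return 0;
}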
2018 Mar 01
0
[PATCH net-next 1/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...DP_HEADROOM + vi->hdr_len; xdp.data = data + vi->hdr_len; xdp_set_data_meta_invalid(&xdp); @@ -736,9 +755,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, act = bpf_prog_run_xdp(xdp_prog, &xdp); - if (act != XDP_PASS) - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); - switch (act) { case XDP_PASS: /* recalculate offset to account for any header @@ -746,28 +762,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, * skb and avoid using offset */ offset = xdp.data - - page_address(xdp_page) - vi->hdr_len; -...
2017 Mar 29
1
[PATCH v2] virtio_net: fix support for small rings
...GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); + rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); return ALIGN(len, L1_CACHE_BYTES); } @@ -914,7 +919,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, int err; unsigned int len, hole; - len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) return -ENOMEM; @@ -2086,6 +2091,21 @@ static void virtnet_del_vqs(struct virtnet_info *vi) virtnet_free_queues(vi); } +/* How large should a single bu...
2017 Sep 19
0
[PATCH net-next 2/3] virtio-net: add packet len average only when needed during XDP
...drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -656,6 +656,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.data_end = xdp.data + (len - vi->hdr_len); act = bpf_prog_run_xdp(xdp_prog, &xdp); + if (act != XDP_PASS) + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); + switch (act) { case XDP_PASS: /* recalculate offset to account for any header @@ -671,14 +674,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, offset, len, PAGE_SIZE); - ewma...
2014 Jan 16
6
[PATCH net-next v3 1/5] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs unless GFP_WAIT is used. Change skb_page_frag_refill to attempt higher-order page allocations whether or not GFP_WAIT is used. If memory cannot be allocated, the allocator will fall back to successively smaller page allocs (down to order-0 page allocs). This change brings skb_page_frag_refill in line with the existing page allocation
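The description above is about letting skb_page_frag_refill try multi-page (higher-order) allocations even for atomic callers, retrying with successively smaller orders until an order-0 page is attempted. The shape of that fallback, as a userspace sketch (try_alloc_order() is a hypothetical stand-in for an atomic allocation that may fail, not a kernel API):

#include <stdlib.h>

#define MODEL_MAX_ORDER 3    /* e.g. start by asking for 2^3 = 8 pages (32KB) */

void *try_alloc_order(unsigned int order)
{
	/* Stand-in: a real atomic allocation of 2^order pages can fail. */
	return malloc((size_t)4096 << order);
}

/* Returns the chunk, and via *order_out the order actually obtained. */
void *model_frag_refill(unsigned int *order_out)
{
	for (int order = MODEL_MAX_ORDER; order >= 0; order--) {
		void *buf = try_alloc_order((unsigned int)order);

		if (buf) {
			*order_out = (unsigned int)order;
			return buf;            /* largest chunk we could get */
		}
		/* Failed: halve the request and try again, down to one page. */
	}
	return NULL;
}

Larger chunks mean the refill path hits the page allocator less often, while the fallback keeps the atomic path from failing outright under memory fragmentation.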
2018 Mar 01
1
[PATCH net-next 1/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...p.data = data + vi->hdr_len; > xdp_set_data_meta_invalid(&xdp); > @@ -736,9 +755,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, > > act = bpf_prog_run_xdp(xdp_prog, &xdp); > > - if (act != XDP_PASS) > - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); > - > switch (act) { > case XDP_PASS: > /* recalculate offset to account for any header > @@ -746,28 +762,22 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, > * skb and avoid using offset > */ > offset = xdp.data - > -...
2018 Mar 02
6
[PATCH net V2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...ely(num_buf > 1 || headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ @@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, act = bpf_prog_run_xdp(xdp_prog, &xdp); - if (act != XDP_PASS) - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); - switch (act) { case XDP_PASS: /* recalculate offset to account for any header @@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_xdp; rcu_read_unlock(); goto xdp_xmit; + case XDP_REDIRECT: + err = xdp_do_redirect(dev, &...
2018 Mar 01
7
[PATCH net-next 0/2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
Hi: This series tries to re-enable XDP_REDIRECT for mergeable buffer which was removed since commit 7324f5399b06 ("virtio_net: disable XDP_REDIRECT in receive_mergeable() case"). Main concerns are: - not enough tailroom was reserved which breaks cpumap - complex logic like EWMA and linearizing during XDP processing Fix those by: - reserve enough tailroom during refill - disable EWMA
2018 Mar 02
0
[PATCH net V2] virtio-net: re enable XDP_REDIRECT for mergeable buffer
...room < virtnet_get_headroom(vi))) { > /* linearize data for XDP */ > @@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, > > act = bpf_prog_run_xdp(xdp_prog, &xdp); > > - if (act != XDP_PASS) > - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); > - > switch (act) { > case XDP_PASS: > /* recalculate offset to account for any header > @@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, > goto err_xdp; > rcu_read_unlock(); > goto xdp_xmit; > + case XD...
2016 Dec 23
0
[PATCH net 6/9] virtio-net: make rx buf size estimation works for XDP
....77ae358 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -584,10 +584,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, 0, len, PAGE_SIZE); + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); return head_skb; } break; case XDP_TX: + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) goto err_xdp; rcu_read_unlock(); @@ -596,6 +598,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, default: if...