Displaying 20 results from an estimated 50 matches for "free_receive_buf".
2014 Jan 16
0
[PATCH net-next v4 2/6] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...if (err < 0)
put_page(virt_to_head_page(buf));
@@ -617,6 +610,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
int err;
bool oom;
+ gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(rq, gfp);
@@ -1377,6 +1371,14 @@ static void free_receive_bufs(struct virtnet_info *vi)
}
}
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+ int i;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->rq[i].alloc_frag.page)
+ put_page(vi->rq[i].alloc_frag.page);
+}
+
static void free_unused_bufs(struct virtnet_info *vi)...
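For orientation, a minimal sketch of the allocation scheme this patch moves to: each receive queue keeps its own struct page_frag and carves mergeable receive buffers out of it with skb_page_frag_refill(). The function name rq_frag_add_buf() and the BUF_LEN constant below are illustrative placeholders, not identifiers from the driver.
/* Sketch only: refill the per-queue page_frag and post one mergeable
 * receive buffer of BUF_LEN bytes to the virtqueue.
 */
static int rq_frag_add_buf(struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	int err;

	/* May fall back to smaller page orders under memory pressure. */
	if (unlikely(!skb_page_frag_refill(BUF_LEN, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);	/* the posted buffer holds its own ref */
	alloc_frag->offset += BUF_LEN;

	sg_init_one(rq->sg, buf, BUF_LEN);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));	/* undo the ref on failure */
	return err;
}
On teardown, free_receive_page_frags() in the hunk above then drops the last reference each queue still holds on its alloc_frag.page.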
2016 Jun 02
1
[PATCH -next 2/2] virtio_net: Read the advised MTU
...free_netdev(dev);
1968 return err;
1969 }
1970
1971 static void remove_vq_common(struct virtnet_info *vi)
1972 {
1973 vi->vdev->config->reset(vi->vdev);
1974
1975 /* Free unused buffers in both send and recv, if any. */
1976 free_unused_bufs(vi);
1977
1978 free_receive_bufs(vi);
1979
1980 free_receive_page_frags(vi);
1981
1982 virtnet_del_vqs(vi);
1983 }
1984
1985 static void virtnet_remove(struct virtio_device *vdev)
1986 {
1987 struct virtnet_info *vi = vdev->priv;
1988
1989 unregister_hotcpu_notifier(&vi->nb);
1990
1991...
2014 Oct 05
0
[PATCH 16/16] virtio_net: fix use after free on allocation failure
...t a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7afc990..85e6098 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1830,6 +1830,8 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_recv_bufs:
+ vi->vdev->config->reset(vdev);
+
free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
--
MST
2013 Dec 17
0
[PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...N) {
+ len += hole;
+ alloc_frag->offset += hole;
+ }
- sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
+ sg_init_one(rq->sg, buf, len);
err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -1377,6 +1376,16 @@ static void free_receive_bufs(struct virtnet_info *vi)
}
}
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+ int i;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->rq[i].atomic_frag.page)
+ put_page(vi->rq[i].atomic_frag.page);
+ if (vi->sleep_frag.page)
+ put_page(vi->sleep_fr...
2013 Nov 12
0
[PATCH net-next 3/4] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...N) {
+ len += hole;
+ alloc_frag->offset += hole;
+ }
- sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
+ sg_init_one(rq->sg, buf, len);
err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -1335,6 +1335,16 @@ static void free_receive_bufs(struct virtnet_info *vi)
}
}
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+ int i;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->rq[i].atomic_frag.page)
+ put_page(vi->rq[i].atomic_frag.page);
+ if (vi->sleep_frag.page)
+ put_page(vi->sleep_fr...
2014 Jan 16
13
[PATCH net-next v4 1/6] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
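A rough sketch of the fallback behaviour described here, assuming a helper operating on struct page_frag and an upper bound of order 3 (both assumptions, not the patch itself):
/* Sketch only: try a high-order page first, then successively smaller
 * orders, down to order 0.
 */
static bool frag_refill_fallback(struct page_frag *pfrag, gfp_t gfp)
{
	unsigned int order = 3;		/* assumed maximum order */

	for (;;) {
		gfp_t this_gfp = gfp;

		/* Don't warn or retry hard for the opportunistic high orders. */
		if (order)
			this_gfp |= __GFP_NOWARN | __GFP_NORETRY;

		pfrag->page = alloc_pages(this_gfp, order);
		if (pfrag->page) {
			pfrag->size = PAGE_SIZE << order;
			pfrag->offset = 0;
			return true;
		}
		if (order-- == 0)
			return false;	/* even an order-0 page failed */
	}
}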
2013 Dec 23
2
[PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...pages() to recycle the
pages like before, which may help performance. We can also do some
optimizations for this in vhost.
> err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
> if (err < 0)
> put_page(virt_to_head_page(buf));
> @@ -1377,6 +1376,16 @@ static void free_receive_bufs(struct virtnet_info *vi)
> }
> }
>
> +static void free_receive_page_frags(struct virtnet_info *vi)
> +{
> + int i;
> + for (i = 0; i < vi->max_queue_pairs; i++)
> + if (vi->rq[i].atomic_frag.page)
> + put_page(vi->rq[i].atomic_frag.page);
> + if (v...
2012 Nov 27
4
[net-next rfc v7 0/3] Multiqueue virtio-net
Hi all:
This series is an updated version of the multiqueue virtio-net driver, based on
Krishna Kumar's work, that lets virtio-net use multiple rx/tx queues for
packet reception and transmission. Please review and comment.
A prototype implementation of qemu-kvm support can be found at
git://github.com/jasowang/qemu-kvm-mq.git. To start a guest with two queues, you
could specify the queues
2012 Dec 04
3
[PATCH net-next 0/3] Multiqueue support for virtio-net
Hi all:
This series is an updated version of the multiqueue virtio-net driver, based on
Krishna Kumar's work, that lets virtio-net use multiple rx/tx queues for
packet reception and transmission. Please review and comment.
A prototype implementation of qemu-kvm support can be found at
git://github.com/jasowang/qemu-kvm-mq.git. To start a guest with two queues, you
could specify the queues
2014 Jan 17
7
[PATCH net-next v5 0/6] virtio-net: mergeable rx buffer size auto-tuning
The virtio-net device currently uses aligned MTU-sized mergeable receive
packet buffers. Network throughput for workloads with large average
packet size can be improved by posting larger receive packet buffers.
However, due to SKB truesize effects, posting large (e.g., PAGE_SIZE)
buffers reduces the throughput of workloads that do not benefit from GRO
and have no large inbound packets.
This
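The tuning mechanism itself is not shown in this excerpt. Purely as an assumption, one way to auto-tune the size is to track a running average of received packet lengths and clamp it between an MTU-sized floor and PAGE_SIZE, roughly as sketched below; the 1/8 weight, the 1536-byte floor and the helper name are hypothetical.
/* Hypothetical sketch: size the next mergeable receive buffer from an
 * EWMA of recent packet lengths.
 */
#define RX_BUF_LEN_FLOOR 1536u	/* roughly MTU-sized; assumed value */

static unsigned int next_mergeable_buf_len(unsigned int *avg_pkt_len,
					   unsigned int last_pkt_len)
{
	int delta = (int)last_pkt_len - (int)*avg_pkt_len;

	*avg_pkt_len += delta / 8;	/* EWMA update with weight 1/8 */
	return clamp_t(unsigned int, *avg_pkt_len,
		       RX_BUF_LEN_FLOOR, (unsigned int)PAGE_SIZE);
}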
2012 Dec 05
3
[PATCH net-next v2 0/3] Multiqueue support in virtio-net
Hi all:
This series is an updated version of the multiqueue virtio-net driver, based on
Krishna Kumar's work, that lets virtio-net use multiple rx/tx queues for
packet reception and transmission. Please review and comment.
A prototype implementation of qemu-kvm support can be found at
git://github.com/jasowang/qemu-kvm-mq.git. To start a guest with two queues, you
could specify the queues
2012 Oct 30
6
[rfc net-next v6 0/3] Multiqueue virtio-net
Hi all:
This series is an updated version of the multiqueue virtio-net driver, based on
Krishna Kumar's work, that lets virtio-net use multiple rx/tx queues for
packet reception and transmission. Please review and comment.
Changes from v5:
- Align the implementation with the RFC spec update v4
- Switch the mode between single mode and multiqueue mode without reset
- Remove the 256 limitation