search for: sock_async_nospac

Displaying results from an estimated 35 matches for "sock_async_nospac".

2013 Mar 07
3
[PATCH] vhost_net: remove tx polling state
...n; zcopy = vq->ubufs;
@@ -287,7 +256,7 @@ static void handle_tx(struct vhost_net *net)
 		wmem = atomic_read(&sock->sk->sk_wmem_alloc);
 		if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
-			tx_poll_start(net, sock);
+			vhost_poll_start(poll, sock->file);
 			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
 			break;
 		}
@@ -298,7 +267,7 @@ static void handle_tx(struct vhost_net *net)
 			(vq->upend_idx - vq->done_idx) :
 			(vq->upend_idx + UIO_MAXIOV - vq->done_idx);
 		if (unlikely(num_pends > VHOST_MAX_PEND)) {
-			tx_poll_start(net, sock);
+...
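
The hunk shows vhost_net's tx backpressure: once the socket's allocated write memory reaches 3/4 of its send buffer, handle_tx stops, re-arms the poll on the socket file, and sets SOCK_ASYNC_NOSPACE so the worker is woken when space frees up. A minimal userspace sketch of just the threshold test (the buffer size and sample values are illustrative, not from the patch):

    /* Standalone sketch of the 3/4-of-sndbuf backpressure test quoted
     * above; mirrors `wmem >= sock->sk->sk_sndbuf * 3 / 4`. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool tx_should_throttle(unsigned int wmem, unsigned int sndbuf)
    {
        return wmem >= sndbuf * 3 / 4;   /* stop tx, poll for free space */
    }

    int main(void)
    {
        const unsigned int sndbuf = 212992;   /* illustrative sk_sndbuf value */
        const unsigned int samples[] = { 4096, 159743, 159744, 210000 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("wmem=%6u -> %s\n", samples[i],
                   tx_should_throttle(samples[i], sndbuf) ? "throttle" : "send");
        return 0;
    }

With a 212992-byte buffer the cutoff lands exactly at 159744, so the 159743/159744 pair exercises both sides of the boundary.
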
2013 Jan 06
2
[PATCH V3 0/2] handle polling errors
This is an updated version of the previous series that fixes the handling of polling errors in vhost/vhost_net. Currently, vhost and vhost_net ignore polling errors, which can crash the kernel when it tries to remove itself from the waitqueue after a polling failure. Fix this by checking poll->wqh before the removal and by reporting an error when polling fails.
Changes from v2:
- check poll->wqh
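
The guard being described relies on poll->wqh being non-NULL only if the initial poll actually attached to a waitqueue, so the detach must be skipped after a polling failure. A hedged sketch of that pattern with stand-in types (not the real vhost structures):

    /* Sketch of the poll->wqh guard: only detach from the waitqueue if
     * the poll attached in the first place. Types are stand-ins. */
    #include <stddef.h>

    struct wait_queue_head;                 /* opaque stand-in */

    struct vhost_poll_sketch {
        struct wait_queue_head *wqh;        /* NULL if polling never attached */
    };

    static void remove_wait_queue_stub(struct wait_queue_head *wqh) { (void)wqh; }

    static void vhost_poll_stop_sketch(struct vhost_poll_sketch *poll)
    {
        /* Without this check, a failed poll leaves wqh unset and the
         * removal dereferences garbage -- the crash this series fixes. */
        if (poll->wqh) {
            remove_wait_queue_stub(poll->wqh);
            poll->wqh = NULL;
        }
    }

    int main(void)
    {
        struct vhost_poll_sketch p = { .wqh = NULL };
        vhost_poll_stop_sketch(&p);         /* safe even after a poll error */
        return 0;
    }
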
2013 Apr 11
1
[PATCH] vhost_net: remove tx polling state
...copy = vq->ubufs;
@@ -285,23 +243,14 @@ static void handle_tx(struct vhost_net *net)
 		if (head == vq->num) {
 			int num_pends;
-			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
-			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
-				tx_poll_start(net, sock);
-				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
-				break;
-			}
 			/* If more outstanding DMAs, queue the work.
 			 * Handle upend_idx wrap around */
 			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
 				(vq->upend_idx - vq->done_idx) :
 				(vq->upend_idx + UIO_MAXIOV - vq->do...
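
The ternary at the end of this snippet counts in-flight zerocopy DMAs on a ring of UIO_MAXIOV slots, covering the case where upend_idx has wrapped around past done_idx. The arithmetic in isolation (UIO_MAXIOV is 1024 in the kernel; the sample indices are made up):

    /* Ring-index arithmetic from the hunk above: number of pending
     * zerocopy DMAs between done_idx and upend_idx, modulo UIO_MAXIOV. */
    #include <stdio.h>

    #define UIO_MAXIOV 1024

    static int num_pends(int upend_idx, int done_idx)
    {
        return upend_idx >= done_idx ? upend_idx - done_idx
                                     : upend_idx + UIO_MAXIOV - done_idx;
    }

    int main(void)
    {
        printf("no wrap: %d\n", num_pends(10, 4));     /* 6 in flight */
        printf("wrapped: %d\n", num_pends(3, 1020));   /* 7 in flight */
        return 0;
    }
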
2012 Dec 27
3
[PATCH 1/2] vhost_net: correct error handling in vhost_net_set_backend()
Fix the leaking of oldubufs and the fd refcnt when initialization of the used ring fails.

Signed-off-by: Jason Wang <jasowang at redhat.com>
---
 drivers/vhost/net.c | 14 +++++++++++---
 1 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b2..629d6b5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -834,8 +834,10 @@ static
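
The bug class here is a partial-failure leak: references taken earlier in vhost_net_set_backend() were not dropped when used-ring initialization failed. A generic sketch of the goto-based unwind pattern such fixes use (all names below are stand-ins; the actual diff body is truncated above):

    /* Generic error-unwind sketch: everything acquired before the failing
     * step gets a matching release on the error path. Names illustrative. */
    #include <stdio.h>

    static int  get_fd_ref(void)         { return 0; }
    static void put_fd_ref(void)         { puts("fd ref dropped"); }
    static void take_oldubufs(void)      { }
    static void put_oldubufs(void)       { puts("oldubufs released"); }
    static int  init_used_ring(int fail) { return fail ? -1 : 0; }

    static int set_backend_sketch(int fail)
    {
        int err = get_fd_ref();             /* acquisition #1 */
        if (err)
            return err;
        take_oldubufs();                    /* acquisition #2 */

        err = init_used_ring(fail);         /* the step the patch fixes */
        if (err)
            goto err_used;                  /* unwind instead of leaking */

        return 0;                           /* success: keep both refs */

    err_used:
        put_oldubufs();                     /* undo acquisition #2 */
        put_fd_ref();                       /* undo acquisition #1 */
        return err;
    }

    int main(void)
    {
        printf("success path -> %d\n", set_backend_sketch(0));
        printf("failure path -> %d\n", set_backend_sketch(1));
        return 0;
    }
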
2011 Jul 17
3
[PATCHv9] vhost: experimental tx zero-copy support
...r (;;) {
+		/* Release DMAs done buffers first */
+		if (zcopy)
+			vhost_zerocopy_signal_used(vq);
+
 		head = vhost_get_vq_desc(&net->dev, vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in,
@@ -166,6 +187,12 @@ static void handle_tx(struct vhost_net *net)
 			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
 			break;
 		}
+		/* If more outstanding DMAs, queue the work */
+		if (vq->upend_idx - vq->done_idx > VHOST_MAX_PEND) {
+			tx_poll_start(net, sock);
+			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+			break;
+		}
 		if (unlikely(vhost_enable_notify...
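
Besides the socket-buffer check, this version adds a second stop condition: once more than VHOST_MAX_PEND zerocopy DMAs are outstanding, tx pauses until completions drain. A standalone sketch of that throttle decision (VHOST_MAX_PEND is assumed here to be 128, the kernel define of this period; this early version compares the raw, unwrapped index difference):

    /* Sketch of the in-flight DMA cap added in this patch. The value
     * 128 for VHOST_MAX_PEND is an assumption matching the kernel
     * headers of the era, not taken from the snippet itself. */
    #include <stdbool.h>
    #include <stdio.h>

    #define VHOST_MAX_PEND 128

    static bool tx_should_wait(int upend_idx, int done_idx)
    {
        return upend_idx - done_idx > VHOST_MAX_PEND;  /* too many pending */
    }

    int main(void)
    {
        printf("120 pending -> %s\n", tx_should_wait(130, 10) ? "pause" : "send");
        printf("190 pending -> %s\n", tx_should_wait(200, 10) ? "pause" : "send");
        return 0;
    }
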
2011 Jul 18
1
[PATCHv10] vhost: vhost TX zero-copy support
...r (;;) {
+		/* Release DMAs done buffers first */
+		if (zcopy)
+			vhost_zerocopy_signal_used(vq);
+
 		head = vhost_get_vq_desc(&net->dev, vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in,
@@ -166,6 +187,12 @@ static void handle_tx(struct vhost_net *net)
 			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
 			break;
 		}
+		/* If more outstanding DMAs, queue the work */
+		if (vq->upend_idx - vq->done_idx > VHOST_MAX_PEND) {
+			tx_poll_start(net, sock);
+			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+			break;
+		}
 		if (unlikely(vhost_enable_notify...
2011 Jul 18
1
[PATCHv11] vhost: vhost TX zero-copy support
...r (;;) {
+		/* Release DMAs done buffers first */
+		if (zcopy)
+			vhost_zerocopy_signal_used(vq);
+
 		head = vhost_get_vq_desc(&net->dev, vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in,
@@ -166,6 +188,13 @@ static void handle_tx(struct vhost_net *net)
 			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
 			break;
 		}
+		/* If more outstanding DMAs, queue the work */
+		if (unlikely(vq->upend_idx - vq->done_idx >
+			     VHOST_MAX_PEND)) {
+			tx_poll_start(net, sock);
+			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+			break;
+		}
 		if (unlikely...
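
The visible change from v10 in this hunk is wrapping the throttle check in unlikely(), the branch-prediction hint telling the compiler to lay the throttle path out as the cold branch. Outside the kernel the same hint is spelled with __builtin_expect on GCC and Clang:

    /* How unlikely() is typically defined outside the kernel (GCC/Clang). */
    #include <stdio.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
        int pending = 3;

        if (unlikely(pending > 128))    /* compiler treats this branch as cold */
            puts("throttle tx");
        else
            puts("fast path");
        return 0;
    }
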
2010 Jan 27
9
[Bridge] [PATCH 0/3 v3] macvtap driver
This is the third version of the macvtap device driver, following another major restructuring and a lot of bug fixes:
* Change macvtap to be based around a struct sock
* macvtap: fix initialization
* return 0 to netlink
* don't use rcu for q->file and q->vlan pointers
* macvtap: checkpatch.pl fixes
* macvtap: fix tun IFF flags
* Use a struct socket to make tx flow control work
* disable
2013 Apr 27
0
[PATCH] vhost: Move vhost-net zerocopy support fields to net.c
...->done_idx);
+		num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
+			(nvq->upend_idx - nvq->done_idx) :
+			(nvq->upend_idx + UIO_MAXIOV -
+			 nvq->done_idx);
 		if (unlikely(num_pends > VHOST_MAX_PEND)) {
 			tx_poll_start(net, sock);
 			set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
@@ -329,34 +431,34 @@ static void handle_tx(struct vhost_net *net)
 			break;
 		}
 		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
-				       vq->upend_idx != vq->done_idx);
+				       nvq->upend_idx != nvq->done_idx);
 		/* use msg_control...
2011 Nov 11
10
[RFC] [ver3 PATCH 0/6] Implement multiqueue virtio-net
This patch series resurrects the earlier multiple TX/RX queue functionality for virtio_net and addresses the issues pointed out. It also includes an API to share irqs, e.g. amongst the TX vqs. I plan to run TCP/UDP STREAM and RR tests for local->host and local->remote, and send the results in the next couple of days.
patch #1: Introduce VIRTIO_NET_F_MULTIQUEUE
patch #2: Move