Displaying 20 results from an estimated 54 matches for "tx_poll_start".
2013 Mar 07
3
[PATCH] vhost_net: remove tx polling state
...t have TX VQ lock */
-static void tx_poll_stop(struct vhost_net *net)
-{
- if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
- return;
- vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
- net->tx_poll_state = VHOST_NET_POLL_STOPPED;
-}
-
-/* Caller must have TX VQ lock */
-static int tx_poll_start(struct vhost_net *net, struct socket *sock)
-{
- int ret;
-
- if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
- return 0;
- ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
- if (!ret)
- net->tx_poll_state = VHOST_NET_POLL_STARTED;
- return ret;
-}
-
/* In...
2013 Mar 07
3
[PATCH] vhost_net: remove tx polling state
...t have TX VQ lock */
-static void tx_poll_stop(struct vhost_net *net)
-{
- if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
- return;
- vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
- net->tx_poll_state = VHOST_NET_POLL_STOPPED;
-}
-
-/* Caller must have TX VQ lock */
-static int tx_poll_start(struct vhost_net *net, struct socket *sock)
-{
- int ret;
-
- if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
- return 0;
- ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
- if (!ret)
- net->tx_poll_state = VHOST_NET_POLL_STARTED;
- return ret;
-}
-
/* In...
2013 Apr 11
1
[PATCH] vhost_net: remove tx polling state
...t have TX VQ lock */
-static void tx_poll_stop(struct vhost_net *net)
-{
- if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
- return;
- vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
- net->tx_poll_state = VHOST_NET_POLL_STOPPED;
-}
-
-/* Caller must have TX VQ lock */
-static int tx_poll_start(struct vhost_net *net, struct socket *sock)
-{
- int ret;
-
- if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
- return 0;
- ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
- if (!ret)
- net->tx_poll_state = VHOST_NET_POLL_STARTED;
- return ret;
-}
-
/* In...
2013 Apr 11
1
[PATCH] vhost_net: remove tx polling state
...t have TX VQ lock */
-static void tx_poll_stop(struct vhost_net *net)
-{
- if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
- return;
- vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
- net->tx_poll_state = VHOST_NET_POLL_STOPPED;
-}
-
-/* Caller must have TX VQ lock */
-static int tx_poll_start(struct vhost_net *net, struct socket *sock)
-{
- int ret;
-
- if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
- return 0;
- ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
- if (!ret)
- net->tx_poll_state = VHOST_NET_POLL_STARTED;
- return ret;
-}
-
/* In...
2012 Dec 27
3
[PATCH 1/2] vhost_net: correct error handling in vhost_net_set_backend()
Fix the leaking of oldubufs and fd refcnt when failing to initialize the used ring.
Signed-off-by: Jason Wang <jasowang at redhat.com>
---
drivers/vhost/net.c | 14 +++++++++++---
1 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b2..629d6b5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -834,8 +834,10 @@ static
2012 Dec 27
3
[PATCH 1/2] vhost_net: correct error handling in vhost_net_set_backend()
Fix the leaking of oldubufs and fd refcnt when failing to initialize the used ring.
Signed-off-by: Jason Wang <jasowang at redhat.com>
---
drivers/vhost/net.c | 14 +++++++++++---
1 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b2..629d6b5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -834,8 +834,10 @@ static
2013 Jan 06
2
[PATCH V3 0/2] handle polling errors
This is an updated version of the last series, fixing the handling of polling errors
in vhost/vhost_net.
Currently, vhost and vhost_net ignore polling errors, which can lead to a kernel
crash when it tries to remove itself from the waitqueue after the polling
failure. Fix this by checking poll->wqh before the removal and reporting an
error when polling errors are encountered.
Changes from v2:
- check poll->wqh
2013 Jan 06
2
[PATCH V3 0/2] handle polling errors
This is an updated version of the last series, fixing the handling of polling errors
in vhost/vhost_net.
Currently, vhost and vhost_net ignore polling errors, which can lead to a kernel
crash when it tries to remove itself from the waitqueue after the polling
failure. Fix this by checking poll->wqh before the removal and reporting an
error when polling errors are encountered.
Changes from v2:
- check poll->wqh
2010 Mar 03
1
[RFC][ PATCH 2/3] vhost-net: handle vnet_hdr processing for MRG_RX_BUF
...break;
}
/* TODO: Check specific error and bomb out unless ENOBUFS?
*/
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard(vq, 1);
- tx_poll_start(net, sock);
+ if (err == -EAGAIN) {
+ tx_poll_start(net, sock);
+ } else {
+ vq_err(vq, "sendmsg: errno %d\n", -err);
+ /* drop packet; do not discard/rese...
2010 Mar 03
1
[RFC][ PATCH 2/3] vhost-net: handle vnet_hdr processing for MRG_RX_BUF
...break;
}
/* TODO: Check specific error and bomb out unless ENOBUFS?
*/
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard(vq, 1);
- tx_poll_start(net, sock);
+ if (err == -EAGAIN) {
+ tx_poll_start(net, sock);
+ } else {
+ vq_err(vq, "sendmsg: errno %d\n", -err);
+ /* drop packet; do not discard/rese...
2010 Mar 03
1
[RFC][ PATCH 1/3] vhost-net: support multiple buffer heads in receiver
...filled. */
- if (head == vq->num) {
+ if (head.iov_base == (void *)vq->num) {
wmem = atomic_read(&sock->sk->sk_wmem_alloc);
if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
tx_poll_start(net, sock);
@@ -152,7 +151,7 @@
/* Skip header. TODO: support TSO. */
s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
msg.msg_iovlen = out;
- len = iov_length(vq->iov, out);
+ head.iov_len = len = iov_length(v...
2010 Mar 03
1
[RFC][ PATCH 1/3] vhost-net: support multiple buffer heads in receiver
...filled. */
- if (head == vq->num) {
+ if (head.iov_base == (void *)vq->num) {
wmem = atomic_read(&sock->sk->sk_wmem_alloc);
if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
tx_poll_start(net, sock);
@@ -152,7 +151,7 @@
/* Skip header. TODO: support TSO. */
s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
msg.msg_iovlen = out;
- len = iov_length(vq->iov, out);
+ head.iov_len = len = iov_length(v...
2011 Jul 17
3
[PATCHv9] vhost: experimental tx zero-copy support
...vq->iov),
&out, &in,
@@ -166,6 +187,12 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
+ /* If more outstanding DMAs, queue the work */
+ if (vq->upend_idx - vq->done_idx > VHOST_MAX_PEND) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -188,9 +215,39 @@ static void handle_tx(struct vhost_net *net)
iov_length...
2011 Jul 17
3
[PATCHv9] vhost: experimental tx zero-copy support
...vq->iov),
&out, &in,
@@ -166,6 +187,12 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
+ /* If more outstanding DMAs, queue the work */
+ if (vq->upend_idx - vq->done_idx > VHOST_MAX_PEND) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -188,9 +215,39 @@ static void handle_tx(struct vhost_net *net)
iov_length...
2011 Nov 11
10
[RFC] [ver3 PATCH 0/6] Implement multiqueue virtio-net
This patch series resurrects the earlier multiple TX/RX queues
functionality for virtio_net, and addresses the issues pointed
out. It also includes an API to share irq's, f.e. amongst the
TX vqs.
I plan to run TCP/UDP STREAM and RR tests for local->host and
local->remote, and send the results in the next couple of days.
patch #1: Introduce VIRTIO_NET_F_MULTIQUEUE
patch #2: Move
2011 Nov 11
10
[RFC] [ver3 PATCH 0/6] Implement multiqueue virtio-net
This patch series resurrects the earlier multiple TX/RX queues
functionality for virtio_net, and addresses the issues pointed
out. It also includes an API to share irq's, f.e. amongst the
TX vqs.
I plan to run TCP/UDP STREAM and RR tests for local->host and
local->remote, and send the results in the next couple of days.
patch #1: Introduce VIRTIO_NET_F_MULTIQUEUE
patch #2: Move
2010 Apr 06
1
[PATCH v3] Add Mergeable receive buffer support to vhost_net
...ey refilled. */
if (head == vq->num) {
@@ -167,8 +166,15 @@ static void handle_tx(struct vhost_net *
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard_vq_desc(vq);
- tx_poll_start(net, sock);
+ if (err == -EAGAIN) {
+ vhost_discard_desc(vq, 1);
+ tx_poll_start(net, sock);
+ } else {
+ vq_err(vq, "sendmsg: errno %d\n", -err);
+ /* drop packet; do not discard/resend */
+ vhost_add_used_and_signal(&net->dev, vq, head,
+ 0);
+ }...
2010 Apr 06
1
[PATCH v3] Add Mergeable receive buffer support to vhost_net
...ey refilled. */
if (head == vq->num) {
@@ -167,8 +166,15 @@ static void handle_tx(struct vhost_net *
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard_vq_desc(vq);
- tx_poll_start(net, sock);
+ if (err == -EAGAIN) {
+ vhost_discard_desc(vq, 1);
+ tx_poll_start(net, sock);
+ } else {
+ vq_err(vq, "sendmsg: errno %d\n", -err);
+ /* drop packet; do not discard/resend */
+ vhost_add_used_and_signal(&net->dev, vq, head,
+ 0);
+ }...
2011 Jul 18
1
[PATCHv10] vhost: vhost TX zero-copy support
...vq->iov),
&out, &in,
@@ -166,6 +187,12 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
+ /* If more outstanding DMAs, queue the work */
+ if (vq->upend_idx - vq->done_idx > VHOST_MAX_PEND) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -188,9 +215,39 @@ static void handle_tx(struct vhost_net *net)
iov_length...
2011 Jul 18
1
[PATCHv10] vhost: vhost TX zero-copy support
...vq->iov),
&out, &in,
@@ -166,6 +187,12 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
+ /* If more outstanding DMAs, queue the work */
+ if (vq->upend_idx - vq->done_idx > VHOST_MAX_PEND) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -188,9 +215,39 @@ static void handle_tx(struct vhost_net *net)
iov_length...