search for: vhost_net_vq_tx

Displaying 20 results from an estimated 260 matches for "vhost_net_vq_tx".

2012 Dec 27
3
[PATCH 1/2] vhost_net: correct error handling in vhost_net_set_backend()
Fix the leak of oldubufs and the fd refcount when initialization of the used ring fails. Signed-off-by: Jason Wang <jasowang at redhat.com> --- drivers/vhost/net.c | 14 +++++++++++--- 1 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ebd08b2..629d6b5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -834,8 +834,10 @@ static
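The fix follows the usual kernel unwind pattern: take references up front, then release them on every failure path via goto labels in reverse acquisition order. A minimal sketch of that shape (the _sketch helpers and labels are illustrative, not the actual net.c code; sockfd_put() and the IS_ERR/PTR_ERR helpers are real kernel APIs):

#include <linux/err.h>
#include <linux/net.h>

static long set_backend_sketch(struct vhost_net *n, int fd)
{
	struct socket *sock = sockfd_lookup_sketch(fd);	/* takes an fd ref */
	struct ubufs_sketch *ubufs;
	int r;

	if (IS_ERR(sock))
		return PTR_ERR(sock);

	ubufs = ubufs_alloc_sketch(n);
	if (IS_ERR(ubufs)) {
		r = PTR_ERR(ubufs);
		goto err_sock;
	}

	r = init_used_ring_sketch(n);
	if (r)
		goto err_ubufs;		/* before the fix, this path leaked */

	return 0;

err_ubufs:
	ubufs_put_sketch(ubufs);	/* drop the freshly taken ubufs ref */
err_sock:
	sockfd_put(sock);		/* drop the fd refcount */
	return r;
}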
2013 Apr 27
2
[PATCH v6 0/2] tcm_vhost flush
Changes in v6: - Allow device specific fields per vq - Track cmd per vq - Do not track evt - Switch to static array for inflight allocation, completely get rid of the pain to handle inflight allocation failure. Asias He (2): vhost: Allow device specific fields per vq tcm_vhost: Wait for pending requests in vhost_scsi_flush() drivers/vhost/net.c | 60 +++++++++++--------
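The "device specific fields per vq" part of the series is the standard wrapper-struct idiom: the generic struct vhost_virtqueue is embedded as the first member of a device-specific struct, and callbacks recover the wrapper with container_of(). A minimal sketch of the idiom (struct and helper names assumed):

#include <linux/kernel.h>

struct my_virtqueue {
	struct vhost_virtqueue vq;	/* generic vhost state, embedded */
	int inflight;			/* device-specific per-vq field */
};

/* Recover the device-specific wrapper from the generic vq pointer
 * that the vhost core hands to kick/poll callbacks. */
static struct my_virtqueue *to_my_vq(struct vhost_virtqueue *vq)
{
	return container_of(vq, struct my_virtqueue, vq);
}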
2013 Mar 07
3
[PATCH] vhost_net: remove tx polling state
...ets; @@ -155,28 +145,6 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, } } -/* Caller must have TX VQ lock */ -static void tx_poll_stop(struct vhost_net *net) -{ - if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED)) - return; - vhost_poll_stop(net->poll + VHOST_NET_VQ_TX); - net->tx_poll_state = VHOST_NET_POLL_STOPPED; -} - -/* Caller must have TX VQ lock */ -static int tx_poll_start(struct vhost_net *net, struct socket *sock) -{ - int ret; - - if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) - return 0; - ret = vhost_poll_start(net->poll + VHO...
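The underlying idea, as I read the patch: the vhost poll core can already tell whether a poll is armed (via poll->wqh), so the extra VHOST_NET_POLL_* state machine duplicated that bookkeeping and could drift out of sync. After the change, call sites use the primitives directly, roughly:

/* Before: wrappers guarded by net->tx_poll_state. */
tx_poll_start(net, sock);

/* After: the vhost poll core tracks its own armed/disarmed state,
 * so the wrappers and tx_poll_state can be deleted outright. */
vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);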
2019 Jun 06
1
memory leak in vhost_net_ioctl
...EL | __GFP_RETRY_MAYFAIL); if (!n) return -ENOMEM; + n->ld = false; vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL); if (!vqs) { kvfree(n); @@ -1376,7 +1378,10 @@ static void vhost_net_flush(struct vhost_net *n) n->tx_flush = true; mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); /* Wait for all lower device DMAs done. */ - vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); + if (n->ld) + vhost_net_ubuf_put_wait_and_free(n->vqs[VHOST_NET_VQ_TX].ubufs); + else + vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); mutex_lo...
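The two helpers in the diff differ only in whether the ubuf structure itself is freed after the wait; the leak was flush taking the non-freeing path on teardown. A simplified sketch of the pair (struct name and field layout assumed, logic condensed):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/slab.h>

struct ubufs_sketch {
	atomic_t refcount;
	wait_queue_head_t wait;
};

static void ubuf_put_and_wait(struct ubufs_sketch *ubufs)
{
	if (atomic_dec_and_test(&ubufs->refcount))
		wake_up(&ubufs->wait);
	/* Block until every in-flight zerocopy DMA drops its ref. */
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void ubuf_put_wait_and_free(struct ubufs_sketch *ubufs)
{
	ubuf_put_and_wait(ubufs);
	kfree(ubufs);	/* the extra step the n->ld path needs */
}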
2013 Apr 27
0
[PATCH] vhost: Move vhost-net zerocopy support fields to net.c
...ct vhost_net *net, break; } if (j) - vq->done_idx = i; + nvq->done_idx = i; return j; } @@ -235,6 +335,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) static void handle_tx(struct vhost_net *net) { struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_TX].vq; + struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; unsigned out, in, s; int head; struct msghdr msg = { @@ -271,7 +372,7 @@ static void handle_tx(struct vhost_net *net) if (wmem < sock->sk->sk_sndbuf / 2) tx_poll_stop(net); hdr_size = vq->vhost_hlen...
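After the move, handle_tx() works with two pointers into the same array slot: the embedded generic vq for ring operations, and the containing net-specific struct for zerocopy bookkeeping. Condensed from the diff above (the done_idx write is illustrative):

struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;	/* generic ring state */

mutex_lock(&vq->mutex);
/* ring ops go through vq; zerocopy indices now live in nvq */
nvq->done_idx = 0;
mutex_unlock(&vq->mutex);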
2018 Jul 03
2
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...eue *rvq, > + struct vhost_virtqueue *tvq, > + bool rx) > +{ > + unsigned long uninitialized_var(endtime); > + unsigned long busyloop_timeout; > + struct socket *sock; > + struct vhost_virtqueue *vq = rx ? tvq : rvq; > + > + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); > + > + vhost_disable_notify(&net->dev, vq); > + sock = rvq->private_data; > + busyloop_timeout = rx ? rvq->busyloop_timeout : tvq->busyloop_timeout; > + > + preempt_disable(); > + endtime = busy_clock() + busyloop_timeout; > + while (vhost...
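The factored-out helper boils down to a bounded spin on the local clock with preemption disabled, checking the avail ring until work shows up or the per-vq timeout expires. A condensed sketch of that core loop (argument list simplified; busy_clock() is net.c's local_clock-based helper, vhost_can_busy_poll() and vhost_vq_avail_empty() are the real helpers from this era of the code):

#include <linux/preempt.h>

static void busy_poll_core_sketch(struct vhost_dev *dev,
				  struct vhost_virtqueue *vq,
				  unsigned long timeout)
{
	unsigned long endtime;

	preempt_disable();		/* keep the clock and CPU stable */
	endtime = busy_clock() + timeout;
	while (vhost_can_busy_poll(dev, endtime)) {
		if (!vhost_vq_avail_empty(dev, vq))
			break;		/* work arrived: stop spinning */
		cpu_relax();
	}
	preempt_enable();
}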
2014 Feb 13
2
[PATCH net v2] vhost: fix ref cnt checking deadlock
...->ubufs; - kref_get(&ubufs->kref); + atomic_inc(&ubufs->refcount); nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; } else { msg.msg_control = NULL; @@ -785,7 +784,7 @@ static void vhost_net_flush(struct vhost_net *n) vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = false; - kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); + atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); } } -- MST
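The fix mechanically swaps struct kref for a bare atomic_t because flush needs to wait for the count to drain and then re-arm it with atomic_set(), which kref's release-on-zero semantics get in the way of. The resulting pattern, sketched (struct name assumed, per the visible diff):

static void ubufs_get(struct ubufs_sketch *u)
{
	atomic_inc(&u->refcount);		/* was: kref_get() */
}

static void ubufs_put(struct ubufs_sketch *u)
{
	/* was: kref_put(); now the flusher is simply woken when the
	 * count drains... */
	if (atomic_dec_and_test(&u->refcount))
		wake_up(&u->wait);
}

/* ...and after waiting, flush can re-arm the counter directly,
 * as the diff's atomic_set(&ubufs->refcount, 1) does. */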
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
...d5b4 100644 > --- a/drivers/vhost/net.c > +++ b/drivers/vhost/net.c > @@ -513,7 +513,6 @@ static void vhost_net_busy_poll(struct vhost_net *net, > struct socket *sock; > struct vhost_virtqueue *vq = poll_rx ? tvq : rvq; > > - mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); > vhost_disable_notify(&net->dev, vq); > sock = rvq->private_data; > > @@ -543,8 +542,6 @@ static void vhost_net_busy_poll(struct vhost_net *net, > vhost_net_busy_poll_try_queue(net, vq); > else if (!poll_rx) /* On tx here, sock has no rx da...
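The rework moves lock acquisition out of the helper: rather than vhost_net_busy_poll() taking the second vq mutex itself via mutex_lock_nested(), the caller holds both mutexes, taken in one fixed order everywhere, which keeps the ordering acyclic and lockdep quiet. One common shape for that contract (simplified; names assumed, not the exact patch):

#include <linux/mutex.h>

static void poll_both_sketch(struct vhost_virtqueue *rvq,
			     struct vhost_virtqueue *tvq)
{
	/* Fixed order: RX mutex first, then TX, on every path. */
	mutex_lock(&rvq->mutex);
	mutex_lock_nested(&tvq->mutex, SINGLE_DEPTH_NESTING);

	/* ... busy poll with both rings stable ... */

	mutex_unlock(&tvq->mutex);
	mutex_unlock(&rvq->mutex);
}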
2019 Jun 13
0
memory leak in vhost_net_ioctl
...> + n->ld = false; > vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL); > if (!vqs) { > kvfree(n); > @@ -1376,7 +1378,10 @@ static void vhost_net_flush(struct vhost_net *n) > n->tx_flush = true; > mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); > /* Wait for all lower device DMAs done. */ > - vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); > + if (n->ld) > + vhost_net_ubuf_put_wait_and_free(n->vqs[VHOST_NET_VQ_TX].ubufs); > + else > + vhost_net_ubuf_put_and_wait(n->...
2013 Apr 11
1
[PATCH] vhost_net: remove tx polling state
...ets; @@ -155,28 +145,6 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, } } -/* Caller must have TX VQ lock */ -static void tx_poll_stop(struct vhost_net *net) -{ - if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED)) - return; - vhost_poll_stop(net->poll + VHOST_NET_VQ_TX); - net->tx_poll_state = VHOST_NET_POLL_STOPPED; -} - -/* Caller must have TX VQ lock */ -static int tx_poll_start(struct vhost_net *net, struct socket *sock) -{ - int ret; - - if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) - return 0; - ret = vhost_poll_start(net->poll + VHO...
2018 Jul 02
1
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...socket *sock = rvq->private_data; > + struct vhost_virtqueue *vq = rx ? tvq : rvq; > + unsigned long busyloop_timeout = rx ? rvq->busyloop_timeout : > + tvq->busyloop_timeout; As simple as vq->busyloop_timeout? > + > + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); We need move sock = rvq->private_data under the protection of vq mutex if rx is false. > + vhost_disable_notify(&net->dev, vq); > + > + preempt_disable(); > + endtime = busy_clock() + busyloop_timeout; > + while (vhost_can_busy_poll(tvq->dev, endtim...
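The second review comment is about a read-vs-teardown race: rvq->private_data (the backing socket) is published and cleared by vhost_net_set_backend() under the vq mutex, so on the !rx path it must not be sampled before that same mutex is taken (on the rx path the caller presumably already holds the rvq mutex). A sketch of the corrected ordering (condensed; only the prologue is shown):

static void busy_poll_prologue_sketch(struct vhost_virtqueue *rvq,
				      struct vhost_virtqueue *tvq, bool rx)
{
	struct vhost_virtqueue *vq = rx ? tvq : rvq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX : VHOST_NET_VQ_RX);
	/* Read only after the lock: vhost_net_set_backend() flips
	 * private_data under this same mutex. */
	sock = rvq->private_data;
	if (sock) {
		/* ... busy poll against a stable socket ... */
	}
	mutex_unlock(&vq->mutex);
}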