Displaying 20 results from an estimated 54 matches for "handle_tx_zerocopy".
2019 Apr 25
2
[PATCH net] vhost_net: fix possible infinite loop
...+sent_pkts, total_len)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+ if (next_round)
+ vhost_poll_queue(&vq->poll);
vhost_tx_batch(net, nvq, sock, &msg);
}
@@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy_used;
int sent_pkts = 0;
+ bool next_round = false;
- for (;;) {
+ do {
bool busyloop_intr;
/* Release DMAs done buffers first */
@@ -951,11 +952,10 @@ static void handle_tx_z...
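The fix above bounds the TX loop by folding the weight check into the loop
condition and moving the requeue after the loop. A minimal userspace sketch
of that control-flow pattern, with the vhost helpers stubbed out
(exceeds_weight(), send_one_packet() and requeue_work() are stand-ins, not
the real vhost API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define WEIGHT_LIMIT 64  /* stand-in for the vhost weight limits */

static bool exceeds_weight(int sent_pkts, size_t total_len)
{
	/* Stub: stop after a fixed packet budget; the real check
	 * also bounds total_len. */
	(void)total_len;
	return sent_pkts >= WEIGHT_LIMIT;
}

static bool send_one_packet(size_t *len)
{
	/* Stub packet source: an endless stream of 1500-byte packets,
	 * as a guest flooding the TX ring would produce. */
	*len = 1500;
	return true;
}

static void requeue_work(void)
{
	/* Stand-in for vhost_poll_queue(): reschedule the handler
	 * instead of spinning forever in one invocation. */
	puts("requeue: more work pending");
}

static void handle_tx_sketch(void)
{
	size_t len, total_len = 0;
	int sent_pkts = 0;
	bool next_round = false;

	/* do/while instead of for(;;): the weight check now guards
	 * every iteration, so no code path can loop unbounded. */
	do {
		if (!send_one_packet(&len))
			break;
		total_len += len;
	} while (!(next_round = exceeds_weight(++sent_pkts, total_len)));

	/* Requeue once, outside the loop, if the budget ran out. */
	if (next_round)
		requeue_work();
}

int main(void)
{
	handle_tx_sketch();
	return 0;
}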
2020 Jun 03
1
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
> - vq->heads[nvq->done_idx].len = 0;
> + nvq->bufs[nvq->done_idx] = buf;
> ++nvq->done_idx;
> } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
>
> @@ -850,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> struct vhost_virtqueue *vq = &nvq->vq;
> unsigned out, in;
> - int head;
> + int ret;
> struct msghdr msg = {
> .msg_name = NULL,
>...
2019 Apr 26
2
[PATCH net] vhost_net: fix possible infinite loop
...>> - }
>> + } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
>> +
>> + if (next_round)
>> + vhost_poll_queue(&vq->poll);
>>
>> vhost_tx_batch(net, nvq, sock, &msg);
>> }
>> @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
>> struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
>> bool zcopy_used;
>> int sent_pkts = 0;
>> + bool next_round = false;
>>
>> - for (;;) {
>> + do {
>> bool busyloop_intr;
>>...
2020 Jun 02
0
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...err, len);
done:
- vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
- vq->heads[nvq->done_idx].len = 0;
+ nvq->bufs[nvq->done_idx] = buf;
++nvq->done_idx;
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
@@ -850,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned out, in;
- int head;
+ int ret;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
@@ -864,6 +878,7 @@ static...
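The conversion above replaces the shared vq->heads[] array of {id, len}
used-ring entries with an array of opaque buffer descriptors owned by the
net-specific queue. A rough sketch of the before/after bookkeeping; struct
vhost_buf and the field names are illustrative stand-ins, not the series'
actual API (the real code also byte-swaps the id with cpu_to_vhost32()):

#include <stdint.h>

#define UIO_MAXIOV 1024

struct vring_used_elem { uint32_t id, len; };
struct vhost_buf { unsigned id; /* plus iov bookkeeping in the real series */ };

struct vhost_virtqueue_sketch {
	struct vring_used_elem heads[UIO_MAXIOV];   /* old API */
};

struct vhost_net_virtqueue_sketch {
	struct vhost_virtqueue_sketch *vq;
	struct vhost_buf bufs[UIO_MAXIOV];          /* new API */
	int done_idx;
};

/* Old style: record the head id and a zero length for later signaling. */
static void record_old(struct vhost_net_virtqueue_sketch *nvq, uint32_t head)
{
	nvq->vq->heads[nvq->done_idx].id = head;
	nvq->vq->heads[nvq->done_idx].len = 0;
	++nvq->done_idx;
}

/* New style: stash the whole descriptor; id/len are derived at signal time. */
static void record_new(struct vhost_net_virtqueue_sketch *nvq, struct vhost_buf buf)
{
	nvq->bufs[nvq->done_idx] = buf;
	++nvq->done_idx;
}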
2019 May 12
2
[PATCH net] vhost_net: fix possible infinite loop
...> > > total_len)));
> > > > +
> > > > +    if (next_round)
> > > > +        vhost_poll_queue(&vq->poll);
> > > >
> > > >       vhost_tx_batch(net, nvq, sock, &msg);
> > > >   }
> > > > @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct
> > > > vhost_net *net, struct socket *sock)
> > > >       struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
> > > >       bool zcopy_used;
> > > >       int sent_pkts = 0;
> > > > +    bool next_round = false;
> > > >...
2018 Sep 06
1
[PATCH net-next 08/11] tun: switch to new type of msg_control
...msg_flags & MSG_DONTWAIT,
> m->msg_flags & MSG_MORE);
> tun_put(tun);
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index 4e656f89cb22..fb01ce6d981c 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -620,6 +620,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> .msg_controllen = 0,
> .msg_flags = MSG_DONTWAIT,
> };
> + struct tun_msg_ctl ctl;
> size_t len, total_len = 0;
> int err;
> struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
> @@ -664,8 +665,10 @@ static void ha...
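The patch excerpted above routes zerocopy metadata through a small typed
wrapper instead of a bare msg_control pointer, so tun can tell what the
control data carries. A hedged sketch of that shape; the field layout and
the TUN_MSG_UBUF value are assumptions for illustration, not the patch's
exact definition:

#include <stddef.h>

#define TUN_MSG_UBUF 1  /* tag value assumed for illustration */

struct tun_msg_ctl {
	int type;   /* what kind of object ptr carries */
	void *ptr;  /* e.g. a struct ubuf_info * on the zerocopy path */
};

/* Trimmed stand-in for struct msghdr; only the control fields matter here. */
struct msghdr_sketch {
	void *msg_control;
	size_t msg_controllen;
};

/* Sender side, as in handle_tx_zerocopy(): wrap the ubuf in a typed
 * control block so the receiver can dispatch on ctl->type. */
static void attach_ubuf(struct msghdr_sketch *msg, struct tun_msg_ctl *ctl,
			void *ubuf)
{
	ctl->type = TUN_MSG_UBUF;
	ctl->ptr = ubuf;
	msg->msg_control = ctl;
	msg->msg_controllen = sizeof(*ctl);
}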
2019 Apr 25
0
[PATCH net] vhost_net: fix possible infinite loop
...>poll);
> - break;
> - }
> - }
> + } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
> +
> + if (next_round)
> + vhost_poll_queue(&vq->poll);
>
> vhost_tx_batch(net, nvq, sock, &msg);
> }
> @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
> bool zcopy_used;
> int sent_pkts = 0;
> + bool next_round = false;
>
> - for (;;) {
> + do {
> bool busyloop_intr;
>
> /* Release DMAs done buffers firs...
2019 May 05
0
[PATCH net] vhost_net: fix possible infinite loop
...und = vhost_exceeds_weight(++sent_pkts,
>>> total_len)));
>>> +
>>> +    if (next_round)
>>> +        vhost_poll_queue(&vq->poll);
>>>
>>>       vhost_tx_batch(net, nvq, sock, &msg);
>>>   }
>>> @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net
>>> *net, struct socket *sock)
>>>       struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
>>>       bool zcopy_used;
>>>       int sent_pkts = 0;
>>> +    bool next_round = false;
>>>
>>> -    for (;;) {
>>> +    do...
2018 May 21
0
[RFC PATCH net-next 04/12] vhost_net: split out datacopy logic
...pkts, total_len)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+out:
+ mutex_unlock(&vq->mutex);
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
-static void handle_tx(struct vhost_net *net)
+static void handle_tx_zerocopy(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
@@ -512,7 +598,7 @@ static void handle_tx(struct vhost_net *net)
size_t hdr_size;
struct socket *sock;
struct vhost_net_ubuf_ref *uninitialized_var(...
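This split leaves two specialized TX paths; the top-level handler can then
pick one path per invocation instead of testing zcopy_used inside a shared
loop. A sketch of that dispatch, with the helpers reduced to stubs (the
signatures here are illustrative; in this RFC handle_tx_zerocopy takes only
the net pointer):

struct vhost_net;
struct socket;

/* Stubs standing in for the real helpers; bodies elided. */
static int vhost_sock_zcopy(struct socket *sock) { (void)sock; return 0; }
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{ (void)net; (void)sock; }
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{ (void)net; (void)sock; }

/* Top-level TX handler after the split: choose the specialized path once. */
static void handle_tx(struct vhost_net *net, struct socket *sock)
{
	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);
}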
2019 May 13
0
[PATCH net] vhost_net: fix possible infinite loop
...> total_len)));
>>>>> +
>>>>> +    if (next_round)
>>>>> +        vhost_poll_queue(&vq->poll);
>>>>>
>>>>>       vhost_tx_batch(net, nvq, sock, &msg);
>>>>>   }
>>>>> @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct
>>>>> vhost_net *net, struct socket *sock)
>>>>>       struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
>>>>>       bool zcopy_used;
>>>>>       int sent_pkts = 0;
>>>>> +    bool next_round = false;
>>>...
2018 Sep 06
0
[PATCH net-next 08/11] tun: switch to new type of msg_control
...&m->msg_iter,
m->msg_flags & MSG_DONTWAIT,
m->msg_flags & MSG_MORE);
tun_put(tun);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4e656f89cb22..fb01ce6d981c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -620,6 +620,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
.msg_controllen = 0,
.msg_flags = MSG_DONTWAIT,
};
+ struct tun_msg_ctl ctl;
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
@@ -664,8 +665,10 @@ static void handle_tx_zerocopy(struct vhost_net *net,...
2019 May 14
1
[PATCH net] vhost_net: fix possible infinite loop
...> > > > > +    if (next_round)
> > > > > > +        vhost_poll_queue(&vq->poll);
> > > > > >
> > > > > >       vhost_tx_batch(net, nvq, sock, &msg);
> > > > > >   }
> > > > > > @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct
> > > > > > vhost_net *net, struct socket *sock)
> > > > > >       struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
> > > > > >       bool zcopy_used;
> > > > > >       int sent_pkts = 0;
> > > > >...
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
..._queue(net, vq);
> else if (!poll_rx) /* On tx here, sock has no rx data. */
> vhost_enable_notify(&net->dev, rvq);
> -
> - mutex_unlock(&vq->mutex);
> }
>
> static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
> @@ -913,10 +910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> static void handle_tx(struct vhost_net *net)
> {
> struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX];
> struct vhost_virtqueue *vq = &...
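The excerpt above removes the unlock from the busy-poll helper so that lock
and unlock stay balanced in the caller. A hedged userspace sketch of the
resulting shape; the acquisition order and types are purely illustrative
(the real patch nests the vq mutexes with its own ordering rules):

#include <pthread.h>

struct vq_sketch {
	pthread_mutex_t mutex;
};

/* After the rework: the busy-poll helper runs with both mutexes already
 * held and neither acquires nor releases them itself. */
static void busy_poll(struct vq_sketch *tx, struct vq_sketch *rx)
{
	(void)tx;
	(void)rx;
	/* ... peek at the other ring while busy waiting ... */
}

static void handle_tx_sketch(struct vq_sketch *tx, struct vq_sketch *rx)
{
	/* One fixed acquisition order shared by the TX and RX handlers
	 * prevents an ABBA deadlock between them. */
	pthread_mutex_lock(&tx->mutex);
	pthread_mutex_lock(&rx->mutex);

	busy_poll(tx, rx);

	pthread_mutex_unlock(&rx->mutex);
	pthread_mutex_unlock(&tx->mutex);
}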
2018 Nov 23
5
[PATCH net-next 0/3] basic in order support for vhost_net
Hi:
This series implements basic in order feature support for
vhost_net. This feature requires both driver and device to use
descriptors in order, which can simplify the implementation and
optimization on both sides. The series also implements a simple
optimization that avoids reading the available ring. Tests show a 10%
performance improvement.
More optimizations could be done on top.
Jason Wang (3):
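The available-ring optimization mentioned in the cover letter follows from
the in-order guarantee: if buffers are used in the order they were made
available, the next head can be derived from a running counter rather than
a read of avail->ring[]. A hedged sketch of the idea; the types and modulo
arithmetic are illustrative, not the series' code:

#include <stdint.h>

#define QUEUE_SIZE 256  /* illustrative; a power of two here */

struct vring_avail_sketch {
	uint16_t idx;               /* written by the driver */
	uint16_t ring[QUEUE_SIZE];  /* read only on the classic path */
};

/* Classic path: fetch the head id through the available ring. */
static uint16_t next_head_classic(const struct vring_avail_sketch *avail,
				  uint16_t *last_avail_idx)
{
	return avail->ring[(*last_avail_idx)++ % QUEUE_SIZE];
}

/* In-order path: with descriptors used in the order they were made
 * available, the head is just the running counter, so the (often
 * cache-cold) avail->ring[] read can be skipped entirely. */
static uint16_t next_head_in_order(uint16_t *last_avail_idx)
{
	return (*last_avail_idx)++ % QUEUE_SIZE;
}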
2018 Dec 11
2
[PATCH net 2/4] vhost_net: rework on the lock ordering for busy polling
..., sock has no rx data. */
> > > vhost_enable_notify(&net->dev, rvq);
> > > -
> > > - mutex_unlock(&vq->mutex);
> > > }
> > > static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
> > > @@ -913,10 +910,16 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
> > > static void handle_tx(struct vhost_net *net)
> > > {
> > > struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
> > > + struct vhost_net_virtqueue *nvq_rx = &net->vqs[VHOST_NET_VQ_RX]...