search for: vhost_net_tx_packet

Displaying 20 results from an estimated 95 matches for "vhost_net_tx_packet".

2017 Sep 28
1
[PATCH net-next RFC 5/5] vhost_net: basic tx virtqueue batched processing
...vq, head, 0); While batching, perhaps can also move this producer index update out of the loop and using vhost_add_used_and_signal_n. > + } else > + vhost_zerocopy_signal_used(net, vq); > + vhost_net_tx_packet(net); > + if (unlikely(total_len >= VHOST_NET_WEIGHT)) { > + vhost_poll_queue(&vq->poll); > + goto out; > } > - vhost_discard_vq_desc(vq, 1); >...
2017 Sep 28
1
[PATCH net-next RFC 5/5] vhost_net: basic tx virtqueue batched processing
...vq, head, 0); While batching, perhaps can also move this producer index update out of the loop and using vhost_add_used_and_signal_n. > + } else > + vhost_zerocopy_signal_used(net, vq); > + vhost_net_tx_packet(net); > + if (unlikely(total_len >= VHOST_NET_WEIGHT)) { > + vhost_poll_queue(&vq->poll); > + goto out; > } > - vhost_discard_vq_desc(vq, 1); >...
2018 Mar 28
2
[PATCH] vhost-net: add time limitation for tx polling(Internet mail)
...hers. >> >>> >>> mutex_lock(&vq->mutex); >>> sock = vq->private_data; >>> @@ -580,7 +581,7 @@ static void handle_tx(struct vhost_net *net) >>> else >>> vhost_zerocopy_signal_used(net, vq); >>> vhost_net_tx_packet(net); >>> - if (unlikely(total_len >= VHOST_NET_WEIGHT)) { >>> + if (unlikely(total_len >= VHOST_NET_WEIGHT) || unlikely(jiffies - start >= 1)) { >> How value 1 is determined here? And we need a complete test to make sure >> this won't affect other use c...
2018 Mar 28
2
[PATCH] vhost-net: add time limitation for tx polling(Internet mail)
...hers. >> >>> >>> mutex_lock(&vq->mutex); >>> sock = vq->private_data; >>> @@ -580,7 +581,7 @@ static void handle_tx(struct vhost_net *net) >>> else >>> vhost_zerocopy_signal_used(net, vq); >>> vhost_net_tx_packet(net); >>> - if (unlikely(total_len >= VHOST_NET_WEIGHT)) { >>> + if (unlikely(total_len >= VHOST_NET_WEIGHT) || unlikely(jiffies - start >= 1)) { >> How value 1 is determined here? And we need a complete test to make sure >> this won't affect other use c...
2018 May 21
1
[RFC PATCH net-next 02/12] vhost_net: introduce vhost_exceeds_weight()
...pace changes? > total_len += len; > if (total_len < VHOST_NET_WEIGHT && > !vhost_vq_avail_empty(&net->dev, vq) && > @@ -600,8 +605,7 @@ static void handle_tx(struct vhost_net *net) > else > vhost_zerocopy_signal_used(net, vq); > vhost_net_tx_packet(net); > - if (unlikely(total_len >= VHOST_NET_WEIGHT) || > - unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) { > + if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { > vhost_poll_queue(&vq->poll); > break; > } > @@ -887,8 +891,7 @@ stati...
2018 May 21
1
[RFC PATCH net-next 03/12] vhost_net: introduce vhost_has_more_pkts()
...ould encapsulate everything inside the if, unless I'm mistaken. > msg.msg_flags |= MSG_MORE; > } else { > msg.msg_flags &= ~MSG_MORE; > @@ -605,7 +611,7 @@ static void handle_tx(struct vhost_net *net) > else > vhost_zerocopy_signal_used(net, vq); > vhost_net_tx_packet(net); > - if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { > + if (vhost_exceeds_weight(++sent_pkts, total_len)) { You should have kept the unlikely here, and not had it inside the helper (as per the previous patch. Also, why wasn't this change part of the previous patch?...
2017 Sep 01
2
[PATCH net-next] virtio-net: invoke zerocopy callback on xmit path if no tx napi
...that that is a trade-off worth making compared to > the alternative drop in throughput. It probably would be good to be > able to measure this without kernel instrumentation: export > counters similar to net->tx_zcopy_err and net->tx_packets (though > without reset to zero, as in vhost_net_tx_packet). > >> 1) sndbuf is not INT_MAX > > You mean the case where the device stalls, later zerocopy notifications > are queued, but these are never cleaned in free_old_xmit_skbs, > because it requires a start_xmit and by now the (only) socket is out of > descriptors? Typo, sorry....
2017 Sep 01
2
[PATCH net-next] virtio-net: invoke zerocopy callback on xmit path if no tx napi
...that that is a trade-off worth making compared to > the alternative drop in throughput. It probably would be good to be > able to measure this without kernel instrumentation: export > counters similar to net->tx_zcopy_err and net->tx_packets (though > without reset to zero, as in vhost_net_tx_packet). > >> 1) sndbuf is not INT_MAX > > You mean the case where the device stalls, later zerocopy notifications > are queued, but these are never cleaned in free_old_xmit_skbs, > because it requires a start_xmit and by now the (only) socket is out of > descriptors? Typo, sorry....
2018 Apr 24
2
[PATCH] vhost_net: use packet weight for rx handler, too
...nts one virtqueue from starving others with small + * pkts. + */ +#define VHOST_NET_PKT_WEIGHT 256 /* MAX number of TX used buffers for outstanding zerocopy */ #define VHOST_MAX_PEND 128 @@ -587,7 +589,7 @@ static void handle_tx(struct vhost_net *net) vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT) || - unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) { + unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) { vhost_poll_queue(&vq->poll); break; } @@ -769,6 +771,7 @@ static void handle_rx(struct vhost_net *ne...
2017 Sep 05
1
[PATCH net-next] virtio-net: invoke zerocopy callback on xmit path if no tx napi
...making compared to >>> the alternative drop in throughput. It probably would be good to be >>> able to measure this without kernel instrumentation: export >>> counters similar to net->tx_zcopy_err and net->tx_packets (though >>> without reset to zero, as in vhost_net_tx_packet). > > > I think it's acceptable if extra cycles were spent if we detect HOL anyhow. > >>> >>>> 1) sndbuf is not INT_MAX >>> >>> You mean the case where the device stalls, later zerocopy notifications >>> are queued, but these are never...
2017 Sep 05
1
[PATCH net-next] virtio-net: invoke zerocopy callback on xmit path if no tx napi
...making compared to >>> the alternative drop in throughput. It probably would be good to be >>> able to measure this without kernel instrumentation: export >>> counters similar to net->tx_zcopy_err and net->tx_packets (though >>> without reset to zero, as in vhost_net_tx_packet). > > > I think it's acceptable if extra cycles were spent if we detect HOL anyhow. > >>> >>>> 1) sndbuf is not INT_MAX >>> >>> You mean the case where the device stalls, later zerocopy notifications >>> are queued, but these are never...
2017 Sep 22
0
[PATCH net-next RFC 5/5] vhost_net: basic tx virtqueue batched processing
...; + " len %d != %zd\n", err, len); + if (!zcopy) { + vhost_add_used_idx(vq, 1); + vhost_signal(&net->dev, vq); + } else if (!zcopy_used) { + vhost_add_used_and_signal(&net->dev, + vq, head, 0); + } else + vhost_zerocopy_signal_used(net, vq); + vhost_net_tx_packet(net); + if (unlikely(total_len >= VHOST_NET_WEIGHT)) { + vhost_poll_queue(&vq->poll); + goto out; } - vhost_discard_vq_desc(vq, 1); - break; - } - if (err != len) - pr_debug("Truncated TX packet: " - " len %d != %zd\n", err, len); - if (!zcopy_...
2019 Apr 25
2
[PATCH net] vhost_net: fix possible infinite loop
...ol zcopy_used; int sent_pkts = 0; + bool next_round = false; - for (;;) { + do { bool busyloop_intr; /* Release DMAs done buffers first */ @@ -951,11 +952,10 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) else vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); - if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { - vhost_poll_queue(&vq->poll); - break; - } - } + } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len))); + + if (next_round) + vhost_poll_queue(&vq->poll); } /* Expects to be always run f...
2019 Apr 25
2
[PATCH net] vhost_net: fix possible infinite loop
...ol zcopy_used; int sent_pkts = 0; + bool next_round = false; - for (;;) { + do { bool busyloop_intr; /* Release DMAs done buffers first */ @@ -951,11 +952,10 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) else vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); - if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { - vhost_poll_queue(&vq->poll); - break; - } - } + } while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len))); + + if (next_round) + vhost_poll_queue(&vq->poll); } /* Expects to be always run f...
2017 Sep 22
17
[PATCH net-next RFC 0/5] batched tx processing in vhost_net
Hi: This series tries to implement basic tx batched processing. This is done by prefetching descriptor indices and update used ring in a batch. This intends to speed up used ring updating and improve the cache utilization. Test shows about ~22% improvement in tx pps. Please review. Jason Wang (5): vhost: split out ring head fetching logic vhost: introduce helper to prefetch desc index
2017 Sep 22
17
[PATCH net-next RFC 0/5] batched tx processing in vhost_net
Hi: This series tries to implement basic tx batched processing. This is done by prefetching descriptor indices and update used ring in a batch. This intends to speed up used ring updating and improve the cache utilization. Test shows about ~22% improvement in tx pps. Please review. Jason Wang (5): vhost: split out ring head fetching logic vhost: introduce helper to prefetch desc index
2014 Aug 15
2
[PATCH net-next] vhost_net: stop rx net polling when possible
...utex); sock = vq->private_data; + rxsock = rx_vq->private_data; if (!sock) goto out; + vhost_poll_stop(rx_poll); vhost_disable_notify(&net->dev, vq); hdr_size = nvq->vhost_hlen; @@ -451,11 +456,17 @@ static void handle_tx(struct vhost_net *net) total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { - vhost_poll_queue(&vq->poll); + poll = true; break; } } + + if (rxsock) + vhost_poll_start(rx_poll, rxsock->file); + if (poll) + vhost_poll_queue(&vq->poll); out: + mutex_unlock(&rx_vq->mutex); mut...
2014 Aug 15
2
[PATCH net-next] vhost_net: stop rx net polling when possible
...utex); sock = vq->private_data; + rxsock = rx_vq->private_data; if (!sock) goto out; + vhost_poll_stop(rx_poll); vhost_disable_notify(&net->dev, vq); hdr_size = nvq->vhost_hlen; @@ -451,11 +456,17 @@ static void handle_tx(struct vhost_net *net) total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { - vhost_poll_queue(&vq->poll); + poll = true; break; } } + + if (rxsock) + vhost_poll_start(rx_poll, rxsock->file); + if (poll) + vhost_poll_queue(&vq->poll); out: + mutex_unlock(&rx_vq->mutex); mut...
2016 Dec 28
0
[PATCH net-next V2 2/3] vhost_net: tx batching
...ENOBUFS? */ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { @@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net) vhost_add_used_and_signal(&net->dev, vq, head, 0); else vhost_zerocopy_signal_used(net, vq); - total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); -- 2.7.4
2017 Jan 18
0
[PATCH net-next V5 2/3] vhost_net: tx batching
...ENOBUFS? */ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { @@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net) vhost_add_used_and_signal(&net->dev, vq, head, 0); else vhost_zerocopy_signal_used(net, vq); - total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); -- 2.7.4