search for: get_tx_buf

Displaying 20 results from an estimated 28 matches for "get_tx_buf".

2020 Jun 03
1
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...NULL, NULL); > + r = vhost_get_avail_buf(tvq, buf, tvq->iov, ARRAY_SIZE(tvq->iov), > + out_num, in_num, NULL, NULL); > } > > return r; > @@ -607,6 +620,7 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, > > static int get_tx_bufs(struct vhost_net *net, > struct vhost_net_virtqueue *nvq, > + struct vhost_buf *buf, > struct msghdr *msg, > unsigned int *out, unsigned int *in, > size_t *len, bool *busyloop_intr) > @@ -614,9 +628,9 @@ static int get_tx_bufs(s...
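The hunks above show get_tx_bufs() growing a struct vhost_buf parameter. A minimal sketch of the converted helper, assuming the vhost_buf and vhost_get_avail_buf() definitions from this RFC series (busy-polling and some error paths omitted):

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct vhost_buf *buf,	/* out: fetched descriptor */
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	/* Fetch one available buffer into *buf instead of returning a
	 * bare head index, as the old heads-based API did. */
	ret = vhost_get_avail_buf(vq, buf, vq->iov, ARRAY_SIZE(vq->iov),
				  out, in, NULL, NULL);
	if (ret <= 0)
		return ret;

	/* TX descriptors must be readable (out) only. */
	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
		       *out, *in);
		return -EFAULT;
	}

	/* Strip the vnet header and compute the payload length. */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
		       *len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}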
2020 Jun 02
0
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...ZE(tvq->iov), - out_num, in_num, NULL, NULL); + r = vhost_get_avail_buf(tvq, buf, tvq->iov, ARRAY_SIZE(tvq->iov), + out_num, in_num, NULL, NULL); } return r; @@ -607,6 +620,7 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, static int get_tx_bufs(struct vhost_net *net, struct vhost_net_virtqueue *nvq, + struct vhost_buf *buf, struct msghdr *msg, unsigned int *out, unsigned int *in, size_t *len, bool *busyloop_intr) @@ -614,9 +628,9 @@ static int get_tx_bufs(struct vhost_net *net, struct vho...
2018 Jul 20
12
[PATCH net-next 0/9] TX used ring batched updating for vhost
...he split of datapath will also be helpful for future implementation like in order completion. Please review. Thanks Jason Wang (9): vhost_net: drop unnecessary parameter vhost_net: introduce helper to initialize tx iov iter vhost_net: introduce vhost_exceeds_weight() vhost_net: introduce get_tx_bufs() vhost_net: introduce tx_can_batch() vhost_net: split out datacopy logic vhost_net: rename vhost_rx_signal_used() to vhost_net_signal_used() vhost_net: rename VHOST_RX_BATCH to VHOST_NET_BATCH vhost_net: batch update used ring for datacopy TX drivers/vhost/net.c | 249 ++++++++++++++++...
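The core of the batching in this series is flushing a run of completed heads to the used ring at once. A minimal sketch, assuming the done_idx bookkeeping the patches add to struct vhost_net_virtqueue (names follow the subjects above; details are simplified):

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	/* Publish every head accumulated since the last flush with a
	 * single used-ring update, then signal the guest once instead
	 * of once per packet. */
	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}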
2018 Sep 07
1
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...ot INT_MAX. But what does batching have to do with sndbuf? > > > for (;;) { > > > bool busyloop_intr = false; > > > + if (nvq->done_idx == VHOST_NET_BATCH) > > > + vhost_tx_batch(net, nvq, sock, &msg); > > > + > > > head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, > > > &busyloop_intr); > > > /* On error, stop handling until the next kick. */ > > > @@ -577,14 +692,34 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) > > > break;...
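For context, the vhost_tx_batch() being discussed flushes the accumulated XDP buffers to the socket in one sendmsg() call. A sketch of its shape as posted in the series (the tun_msg_ctl plumbing here is my reading of the patches, not a quote):

static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	/* Hand the whole array of batched XDP buffers to the socket
	 * via a control message, in a single sendmsg(). */
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0))
		vq_err(&nvq->vq, "Fail to batch sending packets\n");

signal_used:
	/* Flush the used-ring entries accumulated for this batch. */
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}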
2018 Sep 06
2
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...ck_zcopy(vq->private_data)) > - vhost_net_signal_used(nvq); > + vhost_tx_batch(net, nvq, vq->private_data, msghdr); > preempt_disable(); > endtime = busy_clock() + vq->busyloop_timeout; > while (vhost_can_busy_poll(endtime)) { > @@ -512,7 +546,7 @@ static int get_tx_bufs(struct vhost_net *net, > struct vhost_virtqueue *vq = &nvq->vq; > int ret; > > - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr); > + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr); > > if (ret < 0 || ret == vq->nu...
2018 Sep 06
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...kets first */ if (!vhost_sock_zcopy(vq->private_data)) - vhost_net_signal_used(nvq); + vhost_tx_batch(net, nvq, vq->private_data, msghdr); preempt_disable(); endtime = busy_clock() + vq->busyloop_timeout; while (vhost_can_busy_poll(endtime)) { @@ -512,7 +546,7 @@ static int get_tx_bufs(struct vhost_net *net, struct vhost_virtqueue *vq = &nvq->vq; int ret; - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr); + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr); if (ret < 0 || ret == vq->num) return ret; @@ -540,6 +574,83...
2018 Sep 12
0
[PATCH net-next V2 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...kets first */ if (!vhost_sock_zcopy(vq->private_data)) - vhost_net_signal_used(nvq); + vhost_tx_batch(net, nvq, vq->private_data, msghdr); preempt_disable(); endtime = busy_clock() + vq->busyloop_timeout; while (vhost_can_busy_poll(endtime)) { @@ -512,7 +549,7 @@ static int get_tx_bufs(struct vhost_net *net, struct vhost_virtqueue *vq = &nvq->vq; int ret; - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr); + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr); if (ret < 0 || ret == vq->num) return ret; @@ -540,6 +577,80...
2018 Sep 07
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...a)) >> - vhost_net_signal_used(nvq); >> + vhost_tx_batch(net, nvq, vq->private_data, msghdr); >> preempt_disable(); >> endtime = busy_clock() + vq->busyloop_timeout; >> while (vhost_can_busy_poll(endtime)) { >> @@ -512,7 +546,7 @@ static int get_tx_bufs(struct vhost_net *net, >> struct vhost_virtqueue *vq = &nvq->vq; >> int ret; >> >> - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr); >> + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr); >> >> i...
2020 Jun 02
21
[PATCH RFC 00/13] vhost: format independence
We let the specifics of the ring format seep through to vhost API callers - mostly because there was only one format, so it was hard to imagine what an independent API would look like. Now that there's an alternative in the form of the packed ring, it's easier to see the issues, and fixing them is perhaps the cleanest way to add support for more formats. This patchset does this by introducing
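The independent API in this patchset revolves around a small per-buffer struct that hides whether the descriptor came from a split or a packed ring. A plausible sketch, based on my reading of the series (the field layout is an assumption, not a quote):

/* One guest buffer, independent of the ring format it was fetched from. */
struct vhost_buf {
	u32 out_len;	/* total readable (guest -> host) bytes */
	u32 in_len;	/* total writable (host -> guest) bytes */
	u16 descs;	/* number of ring descriptors consumed */
	u16 id;		/* id to return in the used ring */
};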
2018 Jul 22
0
[PATCH net-next 0/9] TX used ring batched updating for vhost
...hat.com> I'm very happy with the split, the mixed data path became hard to maintain. > Jason Wang (9): > vhost_net: drop unnecessary parameter > vhost_net: introduce helper to initialize tx iov iter > vhost_net: introduce vhost_exceeds_weight() > vhost_net: introduce get_tx_bufs() > vhost_net: introduce tx_can_batch() > vhost_net: split out datacopy logic > vhost_net: rename vhost_rx_signal_used() to vhost_net_signal_used() > vhost_net: rename VHOST_RX_BATCH to VHOST_NET_BATCH > vhost_net: batch update used ring for datacopy TX > > drivers...
2020 Jun 08
14
[PATCH RFC v6 00/11] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that, converting to iov, later. The used ring is similar: we fetch into an independent struct first and convert that to an IOV later. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will
2019 Sep 25
0
[PATCH] vhost: It's better to use size_t for the 3rd parameter of vhost_exceeds_weight()
...ael > > Thanks for your fast reply. > > As the following code shows, the 2nd branch of iov_iter_advance() does not check if i->count < size; when this happens, i->count -= size may cause len to exceed INT_MAX, and then total_len to exceed INT_MAX. > > handle_tx_copy() -> > get_tx_bufs(..., &len, ...) -> > init_iov_iter() -> > iov_iter_advance(iter, ...) // has 3 branches: > pipe_advance() // has checked the size: if (unlikely(i->count < size)) size = i->count; > iov_iter_is_discard() ... // no check. Yes, but I don't th...
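A reduced userspace illustration of the overflow being discussed (hypothetical values, not the driver code): accumulating unclamped lengths into a signed int can wrap past INT_MAX, while a size_t accumulator stays well defined, which is the motivation for the size_t parameter:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t total_len = 0;
	size_t len = (size_t)INT_MAX / 2 + 1;	/* one oversized transfer */

	total_len += len;
	total_len += len;	/* past INT_MAX, but no wraparound */
	printf("total_len = %zu (INT_MAX = %d)\n", total_len, INT_MAX);

	/* This is the case a plain 'int' accumulator would misjudge. */
	return total_len > (size_t)INT_MAX ? 0 : 1;
}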
2020 Jun 07
17
[PATCH RFC v5 00/13] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that, converting to iov, later. The used ring is similar: we fetch into an independent struct first and convert that to an IOV later. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will
2019 May 17
0
[PATCH V2 1/4] vhost: introduce vhost_exceeds_weight()
...-604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, return iov_iter_count(iter); } -static bool vhost_exceeds_weight(int pkts, int total_len) -{ - return total_len >= VHOST_NET_WEIGHT || - pkts >= VHOST_NET_PKT_WEIGHT; -} - static int get_tx_bufs(struct vhost_net *net, struct vhost_net_virtqueue *nvq, struct msghdr *msg, @@ -845,10 +839,8 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->done_idx].len = 0;...
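The hunk above deletes the net.c-local helper; it was replaced by a common one taking the virtqueue, so each vhost device can set its own weights. A sketch of that generalized helper, based on the shape vhost_exceeds_weight() took in drivers/vhost/vhost.c (treat the exact conditions as an approximation):

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		/* Requeue the worker instead of hogging the cpu when
		 * one virtqueue stays busy. */
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}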
2020 Jun 10
18
[PATCH RFC v7 00/14] vhost: ring format independence
This intentionally leaves "fixup" changes separate - hopefully that is enough to fix vhost-net crashes reported here, but it helps me keep track of what changed. I will naturally squash them later when we are done. This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that
2019 Jul 17
17
[PATCH V3 00/15] Packed virtqueue support for vhost
Hi all: This series implements packed virtqueues, which were described at [1]. In this version we try to address the performance regression seen with V2. The root cause is that packed virtqueues need more userspace memory accesses, which turn out to be very expensive. Thanks to the help of 7f466032dc9e ("vhost: access vq metadata through kernel virtual address"), such overhead could be