search for: vhost_tx_batch

Displaying 16 results from an estimated 40 matches for "vhost_tx_batch".

2018 Sep 06
2
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...used to track end of used idx, done_idx is used to track head
> * of used idx. Once lower device DMA done contiguously, we will signal KVM
> @@ -444,10 +451,36 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
> 	nvq->done_idx = 0;
> }
>
> +static void vhost_tx_batch(struct vhost_net *net,
> +			    struct vhost_net_virtqueue *nvq,
> +			    struct socket *sock,
> +			    struct msghdr *msghdr)
> +{
> +	struct tun_msg_ctl ctl = {
> +		.type = nvq->batched_xdp << 16 | TUN_MSG_PTR,
> +		.ptr = nvq->xdp,
> +	};
> +	int err;
> ...
2018 Sep 06
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -444,10 +451,36 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
 	nvq->done_idx = 0;
 }

+static void vhost_tx_batch(struct vhost_net *net,
+			   struct vhost_net_virtqueue *nvq,
+			   struct socket *sock,
+			   struct msghdr *msghdr)
+{
+	struct tun_msg_ctl ctl = {
+		.type = nvq->batched_xdp << 16 | TUN_MSG_PTR,
+		.ptr = nvq->xdp,
+	};
+	int err;
+
+	if (nvq->batched_xdp == 0)
+		goto signal_...
2018 Sep 12
0
[PATCH net-next V2 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -444,10 +453,37 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
 	nvq->done_idx = 0;
 }

+static void vhost_tx_batch(struct vhost_net *net,
+			   struct vhost_net_virtqueue *nvq,
+			   struct socket *sock,
+			   struct msghdr *msghdr)
+{
+	struct tun_msg_ctl ctl = {
+		.type = TUN_MSG_PTR,
+		.num = nvq->batched_xdp,
+		.ptr = nvq->xdp,
+	};
+	int err;
+
+	if (nvq->batched_xdp == 0)
+		goto signal_use...
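Both versions of this excerpt cut off inside the function. Completed from the fragments visible across these results, the V2 function plausibly continues as sketched below; the tail past the truncation point is a best-effort reconstruction, not a quote from the posting.

static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,	/* V2 carries the count in .num */
		.ptr = nvq->xdp,
	};
	int err;

	/* Nothing batched yet: just signal used descriptors to the guest. */
	if (nvq->batched_xdp == 0)
		goto signal_used;

	/* Submit the whole XDP array in one sendmsg() via msg_control. */
	msghdr->msg_control = &ctl;
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		/* Reconstructed error path: details here are an assumption. */
		vq_err(&nvq->vq, "Fail to batch sending packets\n");
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}
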
2018 Sep 07
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...x, done_idx is used to track head
>> * of used idx. Once lower device DMA done contiguously, we will signal KVM
>> @@ -444,10 +451,36 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
>> 	nvq->done_idx = 0;
>> }
>>
>> +static void vhost_tx_batch(struct vhost_net *net,
>> +			    struct vhost_net_virtqueue *nvq,
>> +			    struct socket *sock,
>> +			    struct msghdr *msghdr)
>> +{
>> +	struct tun_msg_ctl ctl = {
>> +		.type = nvq->batched_xdp << 16 | TUN_MSG_PTR,
>> +		.ptr = nvq->xdp,
>> ...
2018 May 21
0
[RFC PATCH net-next 12/12] vhost_net: batch submitting XDP buffers to underlayer sockets
...t_net_virtqueue *nvq,
 	void *buf;
 	int copied;

-	if (len < nvq->sock_hlen)
+	if (unlikely(len < nvq->sock_hlen))
 		return -EFAULT;

 	if (SKB_DATA_ALIGN(len + pad) +
@@ -567,11 +568,37 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	return 0;
 }

+static void vhost_tx_batch(struct vhost_net *net,
+			   struct vhost_net_virtqueue *nvq,
+			   struct socket *sock,
+			   struct msghdr *msghdr, int n)
+{
+	struct tun_msg_ctl ctl = {
+		.type = n << 16 | TUN_MSG_PTR,
+		.ptr = nvq->xdp,
+	};
+	int err;
+
+	if (n == 0)
+		return;
+
+	msghdr->msg_control = &...
2018 Sep 13
1
[PATCH net-next V2 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
On Wed, Sep 12, 2018 at 11:17:09AM +0800, Jason Wang wrote:
> +static void vhost_tx_batch(struct vhost_net *net,
> +			    struct vhost_net_virtqueue *nvq,
> +			    struct socket *sock,
> +			    struct msghdr *msghdr)
> +{
> +	struct tun_msg_ctl ctl = {
> +		.type = TUN_MSG_PTR,
> +		.num = nvq->batched_xdp,
> +		.ptr = nvq->xdp,
> +	};
> +	int err;
> ...
2018 Sep 07
1
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...ns whether we can do batching. For simplicity,
> I disable batching if sndbuf is not INT_MAX.

But what does batching have to do with sndbuf?

> > >	for (;;) {
> > >		bool busyloop_intr = false;
> > > +		if (nvq->done_idx == VHOST_NET_BATCH)
> > > +			vhost_tx_batch(net, nvq, sock, &msg);
> > > +
> > >		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
> > >				   &busyloop_intr);
> > >		/* On error, stop handling until the next kick. */
> > > @@ -577,14 +692,34 @@ static void hand...
2020 Jun 03
1
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...E(tvq->iov),
> +				 out_num, in_num, NULL, NULL);
>
> -	if (r == tvq->num && tvq->busyloop_timeout) {
> +	if (!r && tvq->busyloop_timeout) {
> 		/* Flush batched packets first */
> 		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
> 			vhost_tx_batch(net, tnvq,
> @@ -577,8 +590,8 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
>
> 	vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
>
> -	r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
> -			      out_num, in_num, NULL, NULL);
> ...
2018 Sep 06
22
[PATCH net-next 00/11] Vhost_net TX batching
Hi all: This series tries to batch submitting packets to underlayer socket through msg_control during sendmsg(). This is done by:
1) Doing userspace copy inside vhost_net
2) Build XDP buff
3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once through msg_control during sendmsg().
4) Underlayer sockets can use XDP buffs directly when XDP is enabled, or build skb based on XDP
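Condensed from the handle_tx_copy fragments quoted elsewhere in these results, the submission path this cover letter describes looks roughly like the sketch below; helper names follow the patches, and the per-packet loop body is abridged.

for (;;) {
	/* Flush once VHOST_NET_BATCH (64) XDP buffs are pending. */
	if (nvq->done_idx == VHOST_NET_BATCH)
		vhost_tx_batch(net, nvq, sock, &msg);

	head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
			   &busyloop_intr);
	/* ... copy the payload from the guest, build an XDP buff,
	 * and queue it in nvq->xdp[] instead of sending it alone ... */
}
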
2018 Sep 12
14
[PATCH net-next V2 00/11] vhost_net TX batching
Hi all: This series tries to batch submitting packets to underlayer socket through msg_control during sendmsg(). This is done by:
1) Doing userspace copy inside vhost_net
2) Build XDP buff
3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once through msg_control during sendmsg().
4) Underlayer sockets can use XDP buffs directly when XDP is enabled, or build skb based on XDP
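Against the first posting, the visible change in this V2 is how the batch size rides in tun_msg_ctl: V1 folded the count into the high bits of .type, while V2 gives it a dedicated field. Side by side, as reconstructed from the excerpts in these results:

/* V1 encoding: count packed into the type word */
struct tun_msg_ctl ctl = {
	.type = nvq->batched_xdp << 16 | TUN_MSG_PTR,
	.ptr = nvq->xdp,
};

/* V2 encoding: explicit .num field */
struct tun_msg_ctl ctl = {
	.type = TUN_MSG_PTR,
	.num = nvq->batched_xdp,
	.ptr = nvq->xdp,
};
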
2020 Jun 02
0
[PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs
...il_buf(tvq, buf, tvq->iov, ARRAY_SIZE(tvq->iov),
+			 out_num, in_num, NULL, NULL);

-	if (r == tvq->num && tvq->busyloop_timeout) {
+	if (!r && tvq->busyloop_timeout) {
 		/* Flush batched packets first */
 		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
 			vhost_tx_batch(net, tnvq,
@@ -577,8 +590,8 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,

 	vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

-	r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
-			      out_num, in_num, NULL, NULL);
+	r = vhost_get_avail_buf(tvq, buf...
2018 Sep 25
6
[REBASE PATCH net-next v9 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com>

These patches improve the guest receive performance. On the handle_tx side, we poll the sock receive queue at the same time; handle_rx does the same. For the full performance report, see patch 4.

Tonghao Zhang (4):
  net: vhost: lock the vqs one by one
  net: vhost: replace magic number of lock annotation
  net: vhost: factor out busy
2018 May 21
20
[RFC PATCH net-next 00/12] XDP batching for TUN/vhost_net
Hi all: We do not support XDP batching for TUN since it can only receive one packet at a time from vhost_net. This series tries to remove this limitation by:
- introduce a TUN specific msg_control that can hold a pointer to an array of XDP buffs
- try copy and build XDP buff in vhost_net
- store XDP buffs in an array and submit them once for every N packets from vhost_net
- since TUN can only
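The TUN-specific msg_control this first bullet describes ends up, in the later postings, as a small struct. The sketch below shows that layout plus a hypothetical consuming loop on the TUN side; the helper name tun_xdp_one and the loop shape are illustrative assumptions, not quotes from the series.

struct tun_msg_ctl {
	unsigned short type;	/* e.g. TUN_MSG_PTR for an XDP array */
	unsigned short num;	/* number of XDP buffs behind ptr */
	void *ptr;		/* points at an array of struct xdp_buff */
};

/* Hypothetical consumer inside the socket's sendmsg() implementation: */
if (ctl && ctl->type == TUN_MSG_PTR) {
	struct xdp_buff *xdp = ctl->ptr;
	int i;

	for (i = 0; i < ctl->num; i++)
		tun_xdp_one(tun, tfile, &xdp[i]);	/* one buff per pass */
}
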
2019 Apr 25
2
[PATCH net] vhost_net: fix possible infinite loop
...q->done_idx].len = 0;
 	++nvq->done_idx;
-	if (vhost_exceeds_weight(++sent_pkts, total_len)) {
-		vhost_poll_queue(&vq->poll);
-		break;
-	}
-	}
+	} while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+	if (next_round)
+		vhost_poll_queue(&vq->poll);

 	vhost_tx_batch(net, nvq, sock, &msg);
 }

@@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
 	bool zcopy_used;
 	int sent_pkts = 0;
+	bool next_round = false;

-	for (;;) {
+	do {
 		bool busyloop_intr;
 		/*...
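Stripped of diff markers, the reshaped copy path in this fix reads as follows; this is a condensed rendition of the fragment above, with the per-packet work elided.

bool next_round = false;

do {
	/* ... transmit one packet, then record its used descriptor ... */
	vq->heads[nvq->done_idx].len = 0;
	++nvq->done_idx;
} while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));

/* Defer further work instead of looping indefinitely in the worker. */
if (next_round)
	vhost_poll_queue(&vq->poll);

vhost_tx_batch(net, nvq, sock, &msg);
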
2018 Dec 20
0
4.20-rc6: WARNING: CPU: 30 PID: 197360 at net/core/flow_dissector.c:764 __skb_flow_dissect
...cf0c80000 mvhi 200(%r15),0
000000000092e332: c01b00000008 nilf %r1,8
[85109.572129] Call Trace:
[85109.572130] ([<0000000000000000>] (null))
[85109.572134] [<000003ff800c81e4>] tap_sendmsg+0x384/0x430 [tap]
[85109.572137] [<000003ff801acdee>] vhost_tx_batch.isra.10+0x66/0xe0 [vhost_net]
[85109.572138] [<000003ff801ad61c>] handle_tx_copy+0x18c/0x568 [vhost_net]
[85109.572140] [<000003ff801adab4>] handle_tx+0xbc/0x100 [vhost_net]
[85109.572145] [<000003ff8019bbe8>] vhost_worker+0xc8/0x128 [vhost]
[85109.572148] [<00000000001...
2019 Apr 26
2
[PATCH net] vhost_net: fix possible infinite loop
...al_len)) {
>> -		vhost_poll_queue(&vq->poll);
>> -		break;
>> -	}
>> -	}
>> +	} while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
>> +
>> +	if (next_round)
>> +		vhost_poll_queue(&vq->poll);
>>
>> 	vhost_tx_batch(net, nvq, sock, &msg);
>> }
>> @@ -873,8 +873,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
>> 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
>> 	bool zcopy_used;
>> 	int sent_pkts = 0;
>> +	bool next_round = f...