search for: vhost_net_build_xdp

Displaying 20 results from an estimated 23 matches for "vhost_net_build_xdp".

2018 May 21
0
[RFC PATCH net-next 12/12] vhost_net: batch submitting XDP buffers to underlayer sockets
...-- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -118,6 +118,7 @@ struct vhost_net_virtqueue { struct ptr_ring *rx_ring; struct vhost_net_buf rxq; struct xdp_buff xdp[VHOST_RX_BATCH]; + struct vring_used_elem heads[VHOST_RX_BATCH]; }; struct vhost_net { @@ -511,7 +512,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, void *buf; int copied; - if (len < nvq->sock_hlen) + if (unlikely(len < nvq->sock_hlen)) return -EFAULT; if (SKB_DATA_ALIGN(len + pad) + @@ -567,11 +568,37 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, return 0; } +...
2018 Nov 15
3
[PATCH net-next 1/2] vhost_net: mitigate page reference counting during page frag refill
...age = alloc_page(gfp); + if (likely(pfrag->page)) { + pfrag->size = PAGE_SIZE; + goto done; + } + return false; + +done: + net->refcnt_bias = USHRT_MAX; + page_ref_add(pfrag->page, USHRT_MAX - 1); + return true; +} + #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, struct iov_iter *from) { struct vhost_virtqueue *vq = &nvq->vq; + struct vhost_net *net = container_of(vq->dev, struct vhost_net, + dev); struct socket *sock = vq->private_data; - struct page_frag *alloc_frag = &current->...
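
The snippet above uses reference-count biasing: take a large number of page references up front and hand them out one per packet, so the atomic page_ref operations are amortized across roughly USHRT_MAX allocations instead of paid on every frag. A rough illustration of the idea follows; the struct and helper names are invented for this sketch and are not the actual vhost_net code.

/* Sketch of the page-refcount biasing idea (illustrative only; assumes
 * kernel context: <linux/gfp.h>, <linux/mm.h>, <linux/kernel.h>). */
struct frag_cache {
	struct page *page;
	unsigned int offset;
	unsigned int refcnt_bias;	/* page references we still "own" */
};

static bool frag_cache_refill(struct frag_cache *c, gfp_t gfp)
{
	c->page = alloc_page(gfp);
	if (!c->page)
		return false;
	/* Take USHRT_MAX references in one shot instead of one per packet. */
	c->refcnt_bias = USHRT_MAX;
	page_ref_add(c->page, USHRT_MAX - 1);
	c->offset = 0;
	return true;
}

static void *frag_cache_alloc(struct frag_cache *c, unsigned int len)
{
	void *buf = page_address(c->page) + c->offset;

	c->offset += len;
	c->refcnt_bias--;		/* consume one pre-taken reference */
	return buf;
}

When refcnt_bias drops to zero the cache would release any leftover references and refill, which is the mitigation the patch subject refers to.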
2018 May 21
20
[RFC PATCH net-next 00/12] XDP batching for TUN/vhost_net
Hi all: We do not support XDP batching for TUN since it can only receive one packet at a time from vhost_net. This series tries to remove this limitation by: - introduce a TUN specific msg_control that can hold a pointer to an array of XDP buffs - try to copy and build XDP buff in vhost_net - store XDP buffs in an array and submit them once for every N packets from vhost_net - since TUN can only
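
The core idea, a msg_control that carries a whole array of XDP buffs through a single sendmsg() call, can be sketched roughly as below. The control structure and its fields are invented for illustration; the series defines its own TUN-specific type.

/* Rough sketch of passing an XDP array via msg_control (assumes kernel
 * context: <linux/net.h>, <linux/socket.h>, <net/xdp.h>).  Not the type
 * the series actually adds. */
struct xdp_array_ctl {
	void *ptr;	/* points at an array of struct xdp_buff */
	int num;	/* number of buffs in the array */
};

static int submit_xdp_array(struct socket *sock, struct xdp_buff *xdp, int n)
{
	struct xdp_array_ctl ctl = { .ptr = xdp, .num = n };
	struct msghdr msg = {
		.msg_control = &ctl,
		.msg_controllen = sizeof(ctl),
	};

	/* One sendmsg() hands the whole batch to the TUN socket, which can
	 * then process all n buffs in a single pass. */
	return sock->ops->sendmsg(sock, &msg, 0);
}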
2018 May 21
2
[RFC PATCH net-next 10/12] vhost_net: build xdp buff
...> +++ b/drivers/vhost/net.c > @@ -492,6 +492,80 @@ static bool vhost_has_more_pkts(struct vhost_net *net, > likely(!vhost_exceeds_maxpend(net)); > } > > +#define VHOST_NET_HEADROOM 256 > +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) > + > +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, > + struct iov_iter *from, > + struct xdp_buff *xdp) > +{ > + struct vhost_virtqueue *vq = &nvq->vq; > + struct page_frag *alloc_frag = &current->task_frag; > + struct virtio_net_hdr *gso; > + size_t len = iov_ite...
2018 Sep 06
2
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...> @@ -540,6 +574,83 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) > !vhost_vq_avail_empty(vq->dev, vq); > } > > +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) I wonder whether NET_IP_ALIGN makes sense for XDP. > + > +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, > + struct iov_iter *from) > +{ > + struct vhost_virtqueue *vq = &nvq->vq; > + struct socket *sock = vq->private_data; > + struct page_frag *alloc_frag = &current->task_frag; > + struct virtio_net_hdr *gso; > + struct x...
2018 Sep 06
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...out, in, msg, busyloop_intr); if (ret < 0 || ret == vq->num) return ret; @@ -540,6 +574,83 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) !vhost_vq_avail_empty(vq->dev, vq); } +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, + struct iov_iter *from) +{ + struct vhost_virtqueue *vq = &nvq->vq; + struct socket *sock = vq->private_data; + struct page_frag *alloc_frag = &current->task_frag; + struct virtio_net_hdr *gso; + struct xdp_buff *xdp = &nvq->xdp[nvq...
2018 Sep 12
0
[PATCH net-next V2 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...out, in, msg, busyloop_intr); if (ret < 0 || ret == vq->num) return ret; @@ -540,6 +577,80 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) !vhost_vq_avail_empty(vq->dev, vq); } +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, + struct iov_iter *from) +{ + struct vhost_virtqueue *vq = &nvq->vq; + struct socket *sock = vq->private_data; + struct page_frag *alloc_frag = &current->task_frag; + struct virtio_net_hdr *gso; + struct xdp_buff *xdp = &nvq->xdp[nvq...
2018 Sep 07
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...; !vhost_vq_avail_empty(vq->dev, vq); >> } >> >> +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) > I wonder whether NET_IP_ALIGN makes sense for XDP. XDP is not the only consumer, socket may build skb based on this. > >> + >> +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, >> + struct iov_iter *from) >> +{ >> + struct vhost_virtqueue *vq = &nvq->vq; >> + struct socket *sock = vq->private_data; >> + struct page_frag *alloc_frag = &current->task_frag; >> + struct virtio_net_h...
2018 May 21
0
[RFC PATCH net-next 10/12] vhost_net: build xdp buff
...9d7..1209e84 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -492,6 +492,80 @@ static bool vhost_has_more_pkts(struct vhost_net *net, likely(!vhost_exceeds_maxpend(net)); } +#define VHOST_NET_HEADROOM 256 +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + +static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, + struct iov_iter *from, + struct xdp_buff *xdp) +{ + struct vhost_virtqueue *vq = &nvq->vq; + struct page_frag *alloc_frag = &current->task_frag; + struct virtio_net_hdr *gso; + size_t len = iov_iter_count(from); + int buflen = SKB_D...
2019 Apr 26
2
[PATCH net] vhost_net: fix possible infinite loop
...ested. > > Signed-off-by: Michael S. Tsirkin <mst at redhat.com> > > --- > > diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c > index df51a35cf537..a0f89a504cd9 100644 > --- a/drivers/vhost/net.c > +++ b/drivers/vhost/net.c > @@ -761,6 +761,23 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, > return 0; > } > > +/* Returns true if caller needs to go back and re-read the ring. */ > +static bool empty_ring(struct vhost_net *net, struct vhost_virtqueue *vq, > + int pkts, size_t total_len, bool busyloop_intr) > +{ > +...
2019 May 12
2
[PATCH net] vhost_net: fix possible infinite loop
...> > > > > --- > > > > > > diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c > > > index df51a35cf537..a0f89a504cd9 100644 > > > --- a/drivers/vhost/net.c > > > +++ b/drivers/vhost/net.c > > > @@ -761,6 +761,23 @@ static int vhost_net_build_xdp(struct > > > vhost_net_virtqueue *nvq, > > > return 0; > > > } > > > +/* Returns true if caller needs to go back and re-read the ring. */ > > > +static bool empty_ring(struct vhost_net *net, struct > > > vhost_virtqueue *vq, > >...
2019 Apr 25
2
[PATCH net] vhost_net: fix possible infinite loop
When the rx buffer is too small for a packet, we will discard the vq descriptor and retry it for the next packet: while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk, &busyloop_intr))) { ... /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); err = sock->ops->recvmsg(sock,
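
The danger with truncate-and-discard is that nothing above guarantees forward progress: a stream of oversized packets can keep the loop spinning indefinitely. The fixes discussed in this thread add an explicit exit condition; the general shape is something like the sketch below, where the queue type, helpers, and quota values are all illustrative rather than the actual patch.

/* Illustration of bounding an rx loop with a quota (hypothetical helpers;
 * not the real vhost_net fix).  The point is that truncated/discarded
 * packets must still count against a limit so the loop terminates. */
#define RX_PKT_QUOTA	256
#define RX_BYTE_QUOTA	(256 * 1024)

static void handle_rx_sketch(struct rx_queue *rq)
{
	size_t total_len = 0;
	size_t len;
	int pkts = 0;

	while ((len = peek_head_len(rq)) != 0) {
		/* May truncate and discard when the buffer is too small,
		 * but the packet still counts toward the quota below. */
		receive_one(rq, len);

		total_len += len;
		if (++pkts >= RX_PKT_QUOTA || total_len >= RX_BYTE_QUOTA) {
			/* Yield: reschedule the rx work and return instead of
			 * spinning forever in this loop. */
			reschedule_rx(rq);
			break;
		}
	}
}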
2019 Apr 25
0
[PATCH net] vhost_net: fix possible infinite loop
...n? E.g. like the below. Warning: completely untested. Signed-off-by: Michael S. Tsirkin <mst at redhat.com> --- diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index df51a35cf537..a0f89a504cd9 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -761,6 +761,23 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, return 0; } +/* Returns true if caller needs to go back and re-read the ring. */ +static bool empty_ring(struct vhost_net *net, struct vhost_virtqueue *vq, + int pkts, size_t total_len, bool busyloop_intr) +{ + if (unlikely(busyloop_intr)) { + vhost_po...
2019 May 05
0
[PATCH net] vhost_net: fix possible infinite loop
...Michael S. Tsirkin <mst at redhat.com> >> >> --- >> >> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c >> index df51a35cf537..a0f89a504cd9 100644 >> --- a/drivers/vhost/net.c >> +++ b/drivers/vhost/net.c >> @@ -761,6 +761,23 @@ static int vhost_net_build_xdp(struct >> vhost_net_virtqueue *nvq, >> return 0; >> } >> +/* Returns true if caller needs to go back and re-read the ring. */ >> +static bool empty_ring(struct vhost_net *net, struct vhost_virtqueue >> *vq, >> + int pkts, size_t to...
2019 May 13
0
[PATCH net] vhost_net: fix possible infinite loop
...>>>> --- >>>> >>>> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c >>>> index df51a35cf537..a0f89a504cd9 100644 >>>> --- a/drivers/vhost/net.c >>>> +++ b/drivers/vhost/net.c >>>> @@ -761,6 +761,23 @@ static int vhost_net_build_xdp(struct >>>> vhost_net_virtqueue *nvq, >>>> return 0; >>>> } >>>> +/* Returns true if caller needs to go back and re-read the ring. */ >>>> +static bool empty_ring(struct vhost_net *net, struct >>>> vhost_virtqueue...
2018 Sep 06
22
[PATCH net-next 00/11] Vhost_net TX batching
Hi all: This series tries to batch submitting packets to underlayer socket through msg_control during sendmsg(). This is done by: 1) Doing userspace copy inside vhost_net 2) Build XDP buff 3) Batch at most 64 (VHOST_NET_BATCH) XDP buffs and submit them once through msg_control during sendmsg(). 4) Underlayer sockets can use XDP buffs directly when XDP is enabled, or build skb based on XDP
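
Steps 1) to 4) map onto a fairly simple per-descriptor loop. Below is a condensed sketch of that flow; build_one_xdp(), flush_batch(), and struct tx_ctx are placeholders for the real vhost_net helpers, and only the batch-and-flush structure is meant to match the description above.

/* Condensed sketch of the TX batching flow (illustrative names only). */
#define BATCH_MAX 64	/* the series calls this VHOST_NET_BATCH */

static void handle_tx_sketch(struct tx_ctx *tx)
{
	int batched = 0;

	for (;;) {
		/* 1) copy the guest buffer into vhost_net's own memory,
		 * 2) wrap it in an xdp_buff */
		if (build_one_xdp(tx) < 0)
			break;

		/* 3) once BATCH_MAX buffs are queued, submit them all with a
		 * single sendmsg(), passing the array through msg_control */
		if (++batched == BATCH_MAX) {
			flush_batch(tx);
			batched = 0;
		}
	}
	/* 4) the socket either runs XDP on the buffs directly or builds
	 * skbs from them; flush whatever is left over. */
	if (batched)
		flush_batch(tx);
}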