
Displaying 20 results from an estimated 222 matches for "vhost_net_virtqueu".

2016 May 30
1
[PATCH V2 1/2] vhost_net: stop polling socket during rx processing
...ers/vhost/net.c > +++ b/drivers/vhost/net.c > @@ -301,6 +301,32 @@ static bool vhost_can_busy_poll(struct vhost_dev *dev, > !vhost_has_work(dev); > } > > +static void vhost_net_disable_vq(struct vhost_net *n, > + struct vhost_virtqueue *vq) > +{ > + struct vhost_net_virtqueue *nvq = > + container_of(vq, struct vhost_net_virtqueue, vq); > + struct vhost_poll *poll = n->poll + (nvq - n->vqs); > + if (!vq->private_data) > + return; > + vhost_poll_stop(poll); > +} > + > +static int vhost_net_enable_vq(struct vhost_net *n, > + struct...
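The excerpt is cut off at the enable side. A plausible completion, mirroring the container_of() pattern of vhost_net_disable_vq() above (a sketch only; the body in the actual patch may differ in details):

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	/* Sketch: map the generic vq back to its vhost_net wrapper, find the
	 * matching vhost_poll slot, and re-arm polling on the backing socket. */
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock = vq->private_data;

	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}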
2018 Jul 03
2
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...; + } > + > + mutex_unlock(&vq->mutex); > +} > + > + > static int vhost_net_tx_get_vq_desc(struct vhost_net *net, > struct vhost_virtqueue *vq, > struct iovec iov[], unsigned int iov_size, > @@ -621,16 +667,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) > return len; > } > > -static int sk_has_rx_data(struct sock *sk) > -{ > - struct socket *sock = sk->sk_socket; > - > - if (sock->ops->peek_len) > - return sock->ops->peek_len(sock); > - > - return skb_queue_empty(&...
2016 May 30
4
[PATCH V2 0/2] vhost_net polling optimization
Hi: This series tries to optimize vhost_net polling at two points: - Stop rx polling for reducing the unnecessary wakeups during handle_rx(). - Conditionally enable tx polling for reducing the unnecessary traversing and spinlock touching. Test shows about 17% improvement on rx pps. Please review Changes from V1: - use vhost_net_disable_vq()/vhost_net_enable_vq() instead of open coding. -
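In outline, the rx handler from patch 1/2 brackets its receive loop with the two helpers so the socket cannot wake the vhost worker while it is already draining the queue. A sketch of that usage (not the patch text; the receive loop is elided):

static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;

	/* Socket wakeups are pointless while we are already processing the
	 * receive queue, so stop polling it for the duration. */
	vhost_net_disable_vq(net, vq);

	/* ... per-buffer receive loop elided ... */

	/* Re-arm socket polling before returning to the vhost worker. */
	vhost_net_enable_vq(net, vq);
}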
2018 Jul 02
1
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...; + } > + > + mutex_unlock(&vq->mutex); > +} > + > + > static int vhost_net_tx_get_vq_desc(struct vhost_net *net, > struct vhost_virtqueue *vq, > struct iovec iov[], unsigned int iov_size, > @@ -621,16 +665,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) > return len; > } > > -static int sk_has_rx_data(struct sock *sk) > -{ > - struct socket *sock = sk->sk_socket; > - > - if (sock->ops->peek_len) > - return sock->ops->peek_len(sock); > - > - return skb_queue_empty(&...
2018 Jul 03
1
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...> static int vhost_net_tx_get_vq_desc(struct vhost_net *net, >>> struct vhost_virtqueue *vq, >>> struct iovec iov[], unsigned int iov_size, >>> @@ -621,16 +667,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) >>> return len; >>> } >>> >>> -static int sk_has_rx_data(struct sock *sk) >>> -{ >>> - struct socket *sock = sk->sk_socket; >>> - >>> - if (sock->ops->peek_len) >>> -...
2016 May 30
0
[PATCH V2 1/2] vhost_net: stop polling socket during rx processing
...et.c index 10ff494..e91603b 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -301,6 +301,32 @@ static bool vhost_can_busy_poll(struct vhost_dev *dev, !vhost_has_work(dev); } +static void vhost_net_disable_vq(struct vhost_net *n, + struct vhost_virtqueue *vq) +{ + struct vhost_net_virtqueue *nvq = + container_of(vq, struct vhost_net_virtqueue, vq); + struct vhost_poll *poll = n->poll + (nvq - n->vqs); + if (!vq->private_data) + return; + vhost_poll_stop(poll); +} + +static int vhost_net_enable_vq(struct vhost_net *n, + struct vhost_virtqueue *vq) +{ + struct vhost_net_v...
2017 Mar 22
2
[PATCH net-next 7/8] vhost_net: try batch dequing from skb array
...e <linux/if_vlan.h> > +#include <linux/skb_array.h> > +#include <linux/skbuff.h> > > #include <net/sock.h> > > @@ -85,6 +87,7 @@ struct vhost_net_ubuf_ref { > struct vhost_virtqueue *vq; > }; > > +#define VHOST_RX_BATCH 64 > struct vhost_net_virtqueue { > struct vhost_virtqueue vq; > size_t vhost_hlen; > @@ -99,6 +102,10 @@ struct vhost_net_virtqueue { > /* Reference counting for outstanding ubufs. > * Protected by vq mutex. Writers must also take device mutex. */ > struct vhost_net_ubuf_ref *ubufs; > + struct sk...
2016 Jun 01
7
[PATCH V3 0/2] vhost_net polling optimization
Hi: This series tries to optimize vhost_net polling at two points: - Stop rx polling for reducing the unnecessary wakeups during handle_rx(). - Conditionally enable tx polling for reducing the unnecessary traversing and spinlock touching. Test shows about 17% improvement on rx pps. Please review Changes from V2: - Don't enable rx vq if we meet an error or rx vq is empty Changes from V1:
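Relative to V2, the new rule only changes the exit path: re-arm socket polling when handle_rx() stopped because the socket ran dry, not when an error occurred or the avail ring was empty (in those cases the error path or the guest's next kick reschedules the handler anyway). A sketch with hypothetical names; in the patch this check presumably lives inside handle_rx() itself:

static void handle_rx_finish(struct vhost_net *net, struct vhost_virtqueue *vq,
			     int err, bool ring_empty)
{
	/* Hypothetical helper illustrating the V3 condition. */
	if (!err && !ring_empty)
		vhost_net_enable_vq(net, vq);
}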
2016 Jun 01
0
[PATCH V3 1/2] vhost_net: stop polling socket during rx processing
...et.c index f744eeb..1d3e45f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -301,6 +301,32 @@ static bool vhost_can_busy_poll(struct vhost_dev *dev, !vhost_has_work(dev); } +static void vhost_net_disable_vq(struct vhost_net *n, + struct vhost_virtqueue *vq) +{ + struct vhost_net_virtqueue *nvq = + container_of(vq, struct vhost_net_virtqueue, vq); + struct vhost_poll *poll = n->poll + (nvq - n->vqs); + if (!vq->private_data) + return; + vhost_poll_stop(poll); +} + +static int vhost_net_enable_vq(struct vhost_net *n, + struct vhost_virtqueue *vq) +{ + struct vhost_net_v...
2018 Jul 02
5
[PATCH net-next v4 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com> These patches improve the guest receive and transmit performance. On the handle_tx side, we poll the sock receive queue at the same time; handle_rx does the same. For the performance report, see patch 4. v3 -> v4: fix some issues v2 -> v3: These patches are split from the previous big patch:
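The core of the series is the shared helper named in patch 3/4: from the tx or rx handler, briefly busy-poll the other virtqueue as well and, if work shows up there, queue that queue's handler. A simplified sketch of the idea (helper name, locking and loop conditions here are assumptions, not the patch's exact code):

static void vhost_net_busy_poll_other(struct vhost_net *net,
				      struct vhost_virtqueue *vq)
{
	unsigned long endtime;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	preempt_disable();
	endtime = busy_clock() + vq->busyloop_timeout;
	/* Spin until the budget expires, other vhost work is pending, or the
	 * guest posts buffers on this queue. */
	while (vhost_can_busy_poll(&net->dev, endtime) &&
	       vhost_vq_avail_empty(&net->dev, vq))
		cpu_relax();
	preempt_enable();

	if (!vhost_vq_avail_empty(&net->dev, vq))
		vhost_poll_queue(&vq->poll);	/* work arrived: kick its handler */
	else
		vhost_enable_notify(&net->dev, vq);

	mutex_unlock(&vq->mutex);
}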
2017 Mar 21
0
[PATCH net-next 7/8] vhost_net: try batch dequing from skb array
...macvlan.h> #include <linux/if_tap.h> #include <linux/if_vlan.h> +#include <linux/skb_array.h> +#include <linux/skbuff.h> #include <net/sock.h> @@ -85,6 +87,7 @@ struct vhost_net_ubuf_ref { struct vhost_virtqueue *vq; }; +#define VHOST_RX_BATCH 64 struct vhost_net_virtqueue { struct vhost_virtqueue vq; size_t vhost_hlen; @@ -99,6 +102,10 @@ struct vhost_net_virtqueue { /* Reference counting for outstanding ubufs. * Protected by vq mutex. Writers must also take device mutex. */ struct vhost_net_ubuf_ref *ubufs; + struct skb_array *rx_array; + void *rxq[VHOS...
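The struct additions visible here (an skb_array pointer plus a small per-queue rxq cache) suggest the consume path: pull a burst of skbs out of the tap/tun skb_array in one go and answer the per-packet length peeks from that cache until it runs dry. A sketch under that assumption (field and helper names are mine, not the patch's), using the batched consume helper skb_array_consume_batched() from <linux/skb_array.h>; the caller advances head as it actually hands packets to the guest:

#define VHOST_RX_BATCH 64

struct vhost_rx_cache {
	struct sk_buff *q[VHOST_RX_BATCH];
	int head, tail;			/* valid entries live in [head, tail) */
};

static int vhost_rx_cache_peek_len(struct vhost_rx_cache *c,
				   struct skb_array *rx_array)
{
	if (c->head == c->tail) {	/* cache empty: refill in one burst */
		c->head = 0;
		c->tail = skb_array_consume_batched(rx_array, c->q,
						    VHOST_RX_BATCH);
		if (!c->tail)
			return 0;	/* nothing queued on the socket */
	}
	return c->q[c->head]->len;	/* length of the next packet to receive */
}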
2018 Jun 30
0
[PATCH net-next v3 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...;dev, vq); + vhost_poll_queue(&vq->poll); + } + + mutex_unlock(&vq->mutex); +} + + static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, @@ -621,16 +665,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) return len; } -static int sk_has_rx_data(struct sock *sk) -{ - struct socket *sock = sk->sk_socket; - - if (sock->ops->peek_len) - return sock->ops->peek_len(sock); - - return skb_queue_empty(&sk->sk_receive_queue); -} - static void vhost_rx_sign...
2018 Jul 02
0
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...;dev, vq); + vhost_poll_queue(&vq->poll); + } + + mutex_unlock(&vq->mutex); +} + + static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, @@ -621,16 +667,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) return len; } -static int sk_has_rx_data(struct sock *sk) -{ - struct socket *sock = sk->sk_socket; - - if (sock->ops->peek_len) - return sock->ops->peek_len(sock); - - return skb_queue_empty(&sk->sk_receive_queue); -} - static void vhost_rx_sign...
2018 Jul 04
0
[PATCH net-next v5 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...;dev, vq); + vhost_poll_queue(&vq->poll); + } + + mutex_unlock(&vq->mutex); +} + + static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, @@ -621,16 +667,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) return len; } -static int sk_has_rx_data(struct sock *sk) -{ - struct socket *sock = sk->sk_socket; - - if (sock->ops->peek_len) - return sock->ops->peek_len(sock); - - return skb_queue_empty(&sk->sk_receive_queue); -} - static void vhost_rx_sign...
2018 Jul 03
0
[PATCH net-next v4 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
...> + > > static int vhost_net_tx_get_vq_desc(struct vhost_net *net, > > struct vhost_virtqueue *vq, > > struct iovec iov[], unsigned int iov_size, > > @@ -621,16 +667,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) > > return len; > > } > > > > -static int sk_has_rx_data(struct sock *sk) > > -{ > > - struct socket *sock = sk->sk_socket; > > - > > - if (sock->ops->peek_len) > > - return sock->o...