
Displaying 10 results from an estimated 31 matches for "virtqu".

2017 Jun 01
4
[PATCH] virtio_net: lower limit on buffer size
...n + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-			rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+			rq->min_buf_len, PAGE_SIZE - hdr_len);
 	return ALIGN(len, L1_CACHE_BYTES);
 }

@@ -2039,7 +2039,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
 	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
 	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
-	return max(min_buf_len, hdr_len);
+	return max(max(min_buf_len, hdr_len) - hdr_len,
+		   (unsigned int)GOOD_PACKET_LEN);
 }

 static int virtnet_find_vqs(struct v...
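The hunk above is easier to follow outside the driver. Below is a minimal user-space sketch of the before/after arithmetic; the constants (GOOD_PACKET_LEN, the 12-byte mergeable header, the queue size) are assumptions chosen to mirror the driver of that era, not values taken from this patch.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ETH_HLEN        14
#define VLAN_HLEN       4
#define ETH_DATA_LEN    1500
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)

static unsigned int toy_min_buf_len(unsigned int hdr_len,
                                    unsigned int packet_len,
                                    unsigned int rq_size, int patched)
{
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
        unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
        unsigned int m = min_buf_len > hdr_len ? min_buf_len : hdr_len;

        if (!patched)
                return m;       /* old: can be far smaller than one MTU */
        /* new: never offer the device less than one good-sized packet */
        return m - hdr_len > GOOD_PACKET_LEN ? m - hdr_len : GOOD_PACKET_LEN;
}

int main(void)
{
        /* 12-byte mergeable header, 64K GSO packet, 1024-entry RX ring */
        printf("old=%u new=%u\n",
               toy_min_buf_len(12, 65535, 1024, 0),
               toy_min_buf_len(12, 65535, 1024, 1));
        return 0;
}

With a 1024-entry ring the old formula offers the device a 65-byte buffer; the patched one clamps that up to a full good-sized packet (1518 bytes here).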
2011 Feb 10
2
[PATCH] virtio_net: Add schedule check to napi_enable call
...;
---
 drivers/net/virtio_net.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtque
 	}
 }

+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+	napi_enable(&vi->napi);
+
+	/* If all buffers were filled by other side before we napi_enabled, we
+	 * won't get another interrupt, so process any outstanding packets
+	 * now. virtnet_poll wants re-enable the que...
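For readers reconstructing the truncated fix: the idea is the standard close of a lost-wakeup window. A user-space model of it, with napi_schedule_prep()'s test-and-set reduced to an atomic flag (the names and the one-packet setup are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sched;   /* models NAPI_STATE_SCHED */
static int pending = 1;     /* buffers the host filled while NAPI was off */

static bool schedule_prep(void)
{
        /* like napi_schedule_prep(): true iff we won the SCHED bit */
        return !atomic_exchange(&sched, true);
}

static void poll_once(void)
{
        /* stands in for virtnet_poll() */
        printf("drained %d outstanding packet(s)\n", pending);
        pending = 0;
        atomic_store(&sched, false);
}

static void napi_enable_fixed(void)
{
        /* enable ... then poll once ourselves: buffers consumed while
         * we were disabled will never raise another interrupt */
        if (schedule_prep())
                poll_once();
}

int main(void)
{
        napi_enable_fixed();
        return 0;
}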
2008 Apr 18
1
[PATCH 0/5] High-speed tun receive and xmit
kvm (and lguest!) want to get more speed out of the tun device. We already have an ABI for guest<->host comms, called virtio_ring; extending tun to understand this (with its async nature and batching) makes for an efficient network. But moreover: the same things that make virtio a good guest<->host transport make it appealing as a userspace<->kernel transport. So rather
2013 Jul 27
1
Merge of "virtio_net: fix race in RX VQ processing" for linux-3.2.48
On Fri, 2013-07-12 at 23:13 +0200, Wolfram Gloger wrote:
> Hi,
>
> Today I merged M. Tsirkin's patch v2 "virtio_net: fix race in RX VQ
> processing":
>
> http://lkml.indiana.edu/hypermail/linux/kernel/1307.1/00503.html
>
> into 3.2.48 and lightly tested the result (vhost-net, virtio-disk
> and 9pfs using virtio).

This sounds like it could be suitable
2007 Dec 14
3
virtio_net and SMP guests
...io/virtio_ring.c | 2 --
 1 file changed, 2 deletions(-)

Index: kvm/drivers/virtio/virtio_ring.c
===================================================================
--- kvm.orig/drivers/virtio/virtio_ring.c
+++ kvm/drivers/virtio/virtio_ring.c
@@ -225,8 +225,6 @@ static bool vring_restart(struct virtque
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	START_USE(vq);

-	BUG_ON(!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT));
-
 	/* We optimistically turn back on interrupts, then check if there was
 	 * more to do. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT...
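The removed BUG_ON aside, vring_restart() is the canonical enable-then-recheck idiom. A toy model of that idiom, loosely following the legacy split-ring field names (the struct and values here are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_AVAIL_F_NO_INTERRUPT 1

struct toy_vq {
        uint16_t avail_flags;   /* driver's interrupt-suppression hint */
        uint16_t used_idx;      /* device write position */
        uint16_t last_used_idx; /* driver read position */
};

static bool toy_restart(struct toy_vq *vq)
{
        /* Optimistically turn interrupts back on ... */
        vq->avail_flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        /* (the kernel issues a memory barrier here) */
        /* ... then re-check, so a buffer that raced in is not missed. */
        if (vq->used_idx != vq->last_used_idx)
                return false;   /* more work: caller keeps polling */
        return true;            /* safe to sleep until the next irq */
}

int main(void)
{
        struct toy_vq vq = { VRING_AVAIL_F_NO_INTERRUPT, 3, 2 };
        printf("can sleep: %s\n", toy_restart(&vq) ? "yes" : "no");
        return 0;
}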
2009 May 29
0
[PATCH 4/4] lguest: don't force VIRTIO_F_NOTIFY_ON_EMPTY
...lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -130,6 +130,9 @@ struct device
 	/* Is it operational */
 	bool running;

+	/* Does Guest want an interrupt on empty? */
+	bool irq_on_empty;
+
 	/* Device-specific data. */
 	void *priv;
 };

@@ -569,11 +572,14 @@ static void trigger_irq(struct virtqueue
 		return;
 	vq->pending_used = 0;

-	/* If they don't want an interrupt, don't send one, unless empty. */
-	if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-	    && lg_last_avail(vq) != vq->vring.avail->idx) {
-		(*vq->irq_suppressed)++;
-		retur...
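The decision the truncated hunk ends up encoding can be stated as a single predicate: interrupt unless suppression is requested, except that a device which negotiated VIRTIO_F_NOTIFY_ON_EMPTY still interrupts on draining the ring. A sketch under that reading, with irq_on_empty standing in for the negotiated feature state and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

#define VRING_AVAIL_F_NO_INTERRUPT 1

static bool should_irq(unsigned avail_flags, bool ring_empty,
                       bool irq_on_empty)
{
        /* Guest asked for no interrupt: honour it, unless the ring just
         * went empty and notify-on-empty was negotiated. */
        if (avail_flags & VRING_AVAIL_F_NO_INTERRUPT)
                return ring_empty && irq_on_empty;
        return true;
}

int main(void)
{
        printf("%d %d %d\n",
               should_irq(0, false, false),                          /* 1 */
               should_irq(VRING_AVAIL_F_NO_INTERRUPT, false, true),  /* 0 */
               should_irq(VRING_AVAIL_F_NO_INTERRUPT, true,  true)); /* 1 */
        return 0;
}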
2009 Sep 21
0
[PATCH 5/5] lguest: don't force VIRTIO_F_NOTIFY_ON_EMPTY
...uest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -134,6 +134,9 @@ struct device {
 	/* Is it operational */
 	bool running;

+	/* Does Guest want an interrupt on empty? */
+	bool irq_on_empty;
+
 	/* Device-specific data. */
 	void *priv;
 };

@@ -624,10 +627,13 @@ static void trigger_irq(struct virtqueue
 		return;
 	vq->pending_used = 0;

-	/* If they don't want an interrupt, don't send one, unless empty. */
-	if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-	    && lg_last_avail(vq) != vq->vring.avail->idx)
-		return;
+	/* If they don't want an...
2010 Jun 03
0
[PATCH 3/3][STABLE] KVM: add schedule check to napi_enable call
...Make sure napi is scheduled subsequent to each napi_enable.

Signed-off-by: Bruce Rogers <brogers at novell.com>
Signed-off-by: Olaf Kirch <okir at suse.de>
---
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -388,6 +388,20 @@ static void skb_recv_done(struct virtque
 	}
 }

+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+	napi_enable(&vi->napi);
+
+	/* If all buffers were filled by other side before we napi_enabled, we
+	 * won't get another interrupt, so process any outstanding packets
+	 * now. virtnet...
2017 Jun 02
1
[PATCH v2] virtio_net: lower limit on buffer size
...n + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-			rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+			rq->min_buf_len, PAGE_SIZE - hdr_len);
 	return ALIGN(len, L1_CACHE_BYTES);
 }

@@ -2039,7 +2039,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
 	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
 	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
-	return max(min_buf_len, hdr_len);
+	return max(max(min_buf_len, hdr_len) - hdr_len,
+		   (unsigned int)GOOD_PACKET_LEN);
 }

 static int virtnet_find_vqs(struct v...
2009 May 29
2
[PATCH 2/4] virtio_net: return NETDEV_TX_BUSY instead of queueing an extra skb.
..._info
 	struct napi_struct napi;
 	unsigned int status;

-	/* The skb we couldn't send because buffers were full. */
-	struct sk_buff *last_xmit_skb;
-
 	/* If we need to free in a timer, this is it. */
 	struct timer_list xmit_free_timer;

@@ -116,9 +113,8 @@ static void skb_xmit_done(struct virtque
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);

-	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
-	 * queued, start_xmit won't be called. */
-	tasklet_schedule(&vi->tasklet);
+	if (vi->free_in_tasklet)
+		tasklet_schedu...
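The policy the patch returns to is the generic netdev one: on a full ring, stop the queue and let the core requeue the skb instead of parking it in a driver-private last_xmit_skb slot. A toy model of that contract (the ring struct and sizes are invented for illustration):

#include <stdio.h>

#define NETDEV_TX_OK   0
#define NETDEV_TX_BUSY 1

struct toy_ring { int free_slots; };

static int ring_add(struct toy_ring *r)
{
        if (r->free_slots == 0)
                return -1;      /* ring full */
        r->free_slots--;
        return 0;
}

static int start_xmit(struct toy_ring *r)
{
        if (ring_add(r) != 0) {
                /* The real driver stops the queue here
                 * (netif_stop_queue); the core requeues and
                 * retries this skb for us. */
                return NETDEV_TX_BUSY;
        }
        return NETDEV_TX_OK;
}

int main(void)
{
        struct toy_ring r = { 1 };
        printf("%d %d\n", start_xmit(&r), start_xmit(&r));  /* 0 1 */
        return 0;
}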