Displaying 20 results from an estimated 72 matches for "netif_xmit_stopped".
2018 Dec 06
7
[PATCH RFC 1/2] virtio-net: bql support
...> if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> netif_start_subqueue(dev, qnum);
> virtqueue_disable_cb(sq->vq);
> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> }
> }
>
> - if (kick || netif_xmit_stopped(txq)) {
> + if (use_napi)
> + kick = __netdev_tx_sent_queue(txq, bytes, more);
> + else
> + kick = !more || netif_xmit_stopped(txq);
> +
> + if (kick) {
> if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
> u64_stats_update_begin(&...
2018 Dec 06
7
[PATCH RFC 1/2] virtio-net: bql support
...> if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> netif_start_subqueue(dev, qnum);
> virtqueue_disable_cb(sq->vq);
> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> }
> }
>
> - if (kick || netif_xmit_stopped(txq)) {
> + if (use_napi)
> + kick = __netdev_tx_sent_queue(txq, bytes, more);
> + else
> + kick = !more || netif_xmit_stopped(txq);
> +
> + if (kick) {
> if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
> u64_stats_update_begin(&...
2018 Dec 27
2
[PATCH RFC 1/2] virtio-net: bql support
...MAX_SKB_FRAGS) {
>>> netif_start_subqueue(dev, qnum);
>>> virtqueue_disable_cb(sq->vq);
>>> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>>> }
>>> }
>>> - if (kick || netif_xmit_stopped(txq)) {
>>> + if (use_napi)
>>> + kick = __netdev_tx_sent_queue(txq, bytes, more);
>>> + else
>>> + kick = !more || netif_xmit_stopped(txq);
>>> +
>>> + if (kick) {
>>> if (virtqueue_kick_prepare(sq->vq) && virtqueue_n...
2018 Dec 27
2
[PATCH RFC 1/2] virtio-net: bql support
...MAX_SKB_FRAGS) {
>>> netif_start_subqueue(dev, qnum);
>>> virtqueue_disable_cb(sq->vq);
>>> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>>> }
>>> }
>>> - if (kick || netif_xmit_stopped(txq)) {
>>> + if (use_napi)
>>> + kick = __netdev_tx_sent_queue(txq, bytes, more);
>>> + else
>>> + kick = !more || netif_xmit_stopped(txq);
>>> +
>>> + if (kick) {
>>> if (virtqueue_kick_prepare(sq->vq) && virtqueue_n...
2018 Dec 05
3
[PATCH RFC 0/2] virtio-net: interrupt related improvements
Now that we have brought the virtio overhead way down with a fast packed
ring implementation, we seem to be actually observing TCP drops
indicative of bufferbloat. So let's try to enable TSQ. Note: it isn't
clear that the default pacing is great for the virt usecase. It's worth
trying to play with sk_pacing_shift_update to see what happens.
For this reason, and for a more important
2014 Oct 15
6
[PATCH] virtio_net: fix use after free
...!skb->xmit_more;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
@@ -956,7 +958,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more)
+ if (kick || netif_xmit_stopped(txq))
virtqueue_kick(sq->vq);
return NETDEV_TX_OK;
--
MST
2014 Oct 15
6
[PATCH] virtio_net: fix use after free
...!skb->xmit_more;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
@@ -956,7 +958,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more)
+ if (kick || netif_xmit_stopped(txq))
virtqueue_kick(sq->vq);
return NETDEV_TX_OK;
--
MST
2019 Oct 07
3
[PATCH RFC net-next 2/2] drivers: net: virtio_net: Add tx_timeout function
...i = netdev_priv(dev);
> + u32 i;
> +
> + /* find the stopped queue the same way dev_watchdog() does */
not really - the watchdog actually looks at trans_start.
> + for (i = 0; i < vi->curr_queue_pairs; i++) {
> + struct send_queue *sq = &vi->sq[i];
> +
> + if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
> + continue;
> +
> + u64_stats_update_begin(&sq->stats.syncp);
> + sq->stats.tx_timeouts++;
> + u64_stats_update_end(&sq->stats.syncp);
> +
> + netdev_warn(dev, "TX timeout on send queue: %d, sq: %s, vq: %d, name: %s\n"...
2019 Oct 07
3
[PATCH RFC net-next 2/2] drivers: net: virtio_net: Add tx_timeout function
...i = netdev_priv(dev);
> + u32 i;
> +
> + /* find the stopped queue the same way dev_watchdog() does */
not really - the watchdog actually looks at trans_start.
> + for (i = 0; i < vi->curr_queue_pairs; i++) {
> + struct send_queue *sq = &vi->sq[i];
> +
> + if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
> + continue;
> +
> + u64_stats_update_begin(&sq->stats.syncp);
> + sq->stats.tx_timeouts++;
> + u64_stats_update_end(&sq->stats.syncp);
> +
> + netdev_warn(dev, "TX timeout on send queue: %d, sq: %s, vq: %d, name: %s\n"...
2018 Dec 27
2
[PATCH RFC 1/2] virtio-net: bql support
...MAX_SKB_FRAGS) {
>>> netif_start_subqueue(dev, qnum);
>>> virtqueue_disable_cb(sq->vq);
>>> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>>> }
>>> }
>>> - if (kick || netif_xmit_stopped(txq)) {
>>> + if (use_napi)
>>> + kick = __netdev_tx_sent_queue(txq, bytes, more);
>>> + else
>>> + kick = !more || netif_xmit_stopped(txq);
>>> +
>>> + if (kick) {
>>> if (virtqueue_kick_prepare(sq->vq) && virtqueue_n...
2018 Dec 27
2
[PATCH RFC 1/2] virtio-net: bql support
...MAX_SKB_FRAGS) {
>>> netif_start_subqueue(dev, qnum);
>>> virtqueue_disable_cb(sq->vq);
>>> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>>> }
>>> }
>>> - if (kick || netif_xmit_stopped(txq)) {
>>> + if (use_napi)
>>> + kick = __netdev_tx_sent_queue(txq, bytes, more);
>>> + else
>>> + kick = !more || netif_xmit_stopped(txq);
>>> +
>>> + if (kick) {
>>> if (virtqueue_kick_prepare(sq->vq) && virtqueue_n...
2018 Dec 05
0
[PATCH RFC 1/2] virtio-net: bql support
...sq);
+ free_old_xmit_skbs(sq, txq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (kick || netif_xmit_stopped(txq)) {
+ if (use_napi)
+ kick = __netdev_tx_sent_queue(txq, bytes, more);
+ else
+ kick = !more || netif_xmit_stopped(txq);
+
+ if (kick) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
sq->stats.kic...
2019 Jan 02
2
[PATCH RFC 1/2] virtio-net: bql support
...start_subqueue(dev, qnum);
>>>>> virtqueue_disable_cb(sq->vq);
>>>>> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>>>>> }
>>>>> }
>>>>> - if (kick || netif_xmit_stopped(txq)) {
>>>>> + if (use_napi)
>>>>> + kick = __netdev_tx_sent_queue(txq, bytes, more);
>>>>> + else
>>>>> + kick = !more || netif_xmit_stopped(txq);
>>>>> +
>>>>> + if (kick) {
>>>>> if...
2019 Jan 02
2
[PATCH RFC 1/2] virtio-net: bql support
...start_subqueue(dev, qnum);
>>>>> virtqueue_disable_cb(sq->vq);
>>>>> @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>>>>> }
>>>>> }
>>>>> - if (kick || netif_xmit_stopped(txq)) {
>>>>> + if (use_napi)
>>>>> + kick = __netdev_tx_sent_queue(txq, bytes, more);
>>>>> + else
>>>>> + kick = !more || netif_xmit_stopped(txq);
>>>>> +
>>>>> + if (kick) {
>>>>> if...
2018 Dec 26
0
[PATCH RFC 1/2] virtio-net: bql support
...->num_free >= 2+MAX_SKB_FRAGS) {
> > netif_start_subqueue(dev, qnum);
> > virtqueue_disable_cb(sq->vq);
> > @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> > }
> > }
> > - if (kick || netif_xmit_stopped(txq)) {
> > + if (use_napi)
> > + kick = __netdev_tx_sent_queue(txq, bytes, more);
> > + else
> > + kick = !more || netif_xmit_stopped(txq);
> > +
> > + if (kick) {
> > if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>...
2018 Dec 26
0
[PATCH RFC 1/2] virtio-net: bql support
...->num_free >= 2+MAX_SKB_FRAGS) {
> > netif_start_subqueue(dev, qnum);
> > virtqueue_disable_cb(sq->vq);
> > @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> > }
> > }
> > - if (kick || netif_xmit_stopped(txq)) {
> > + if (use_napi)
> > + kick = __netdev_tx_sent_queue(txq, bytes, more);
> > + else
> > + kick = !more || netif_xmit_stopped(txq);
> > +
> > + if (kick) {
> > if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>...
2018 Dec 26
0
[PATCH RFC 1/2] virtio-net: bql support
...->num_free >= 2+MAX_SKB_FRAGS) {
> > netif_start_subqueue(dev, qnum);
> > virtqueue_disable_cb(sq->vq);
> > @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> > }
> > }
> > - if (kick || netif_xmit_stopped(txq)) {
> > + if (use_napi)
> > + kick = __netdev_tx_sent_queue(txq, bytes, more);
> > + else
> > + kick = !more || netif_xmit_stopped(txq);
> > +
> > + if (kick) {
> > if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>...
2018 Dec 30
0
[PATCH RFC 1/2] virtio-net: bql support
...netif_start_subqueue(dev, qnum);
> > > > virtqueue_disable_cb(sq->vq);
> > > > @@ -1560,7 +1566,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> > > > }
> > > > }
> > > > - if (kick || netif_xmit_stopped(txq)) {
> > > > + if (use_napi)
> > > > + kick = __netdev_tx_sent_queue(txq, bytes, more);
> > > > + else
> > > > + kick = !more || netif_xmit_stopped(txq);
> > > > +
> > > > + if (kick) {
> > > > if (virtque...
2014 Dec 02
2
[PATCH RFC v4 net-next 0/5] virtio_net: enabling tx interrupts
...> >And transaction rate does increase if we coalesces more tx interurpts.
> >>
> >>
> >>It's possible that we are deferring kicks too much due to BQL.
> >>
> >>As an experiment: do we get any of it back if we do
> >>- if (kick || netif_xmit_stopped(txq))
> >>- virtqueue_kick(sq->vq);
> >>+ virtqueue_kick(sq->vq);
> >>?
> >
> >
> >I will try, but during TCP_RR, at most 1 packets were pending,
> >I suspect if BQL can help in this case.
>
> Looks like this help...
2014 Dec 02
2
[PATCH RFC v4 net-next 0/5] virtio_net: enabling tx interrupts
...> >And transaction rate does increase if we coalesces more tx interurpts.
> >>
> >>
> >>It's possible that we are deferring kicks too much due to BQL.
> >>
> >>As an experiment: do we get any of it back if we do
> >>- if (kick || netif_xmit_stopped(txq))
> >>- virtqueue_kick(sq->vq);
> >>+ virtqueue_kick(sq->vq);
> >>?
> >
> >
> >I will try, but during TCP_RR, at most 1 packets were pending,
> >I suspect if BQL can help in this case.
>
> Looks like this help...