Displaying 20 results from an estimated 68 matches for "__netif_tx_unlock".
2018 Dec 06
7
[PATCH RFC 1/2] virtio-net: bql support
...sq->stats.bytes += bytes;
> sq->stats.packets += packets;
> @@ -1364,7 +1368,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> return;
>
> if (__netif_tx_trylock(txq)) {
> - free_old_xmit_skbs(sq);
> + free_old_xmit_skbs(sq, txq, true);
> __netif_tx_unlock(txq);
> }
>
> @@ -1440,7 +1444,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
>
> __netif_tx_lock(txq, raw_smp_processor_id());
> - free_old_xmit_skbs(sq);
>...
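The hunk above threads the netdev_queue and an in_napi flag into free_old_xmit_skbs() so the completion path can report freed work to BQL. A minimal sketch of what such a completion function looks like with the BQL hook added (the stats fields mirror the snippet above; the body is illustrative, not the literal patch):

/* Sketch: tx reclaim with BQL accounting added at the end.
 * netdev_tx_completed_queue() is the standard BQL completion hook;
 * it pairs with netdev_tx_sent_queue() on the transmit side (see the
 * transmit-side sketch further down this listing).
 */
static void free_old_xmit_skbs(struct send_queue *sq,
			       struct netdev_queue *txq, bool in_napi)
{
	unsigned int packets = 0, bytes = 0, len;
	struct sk_buff *skb;

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		bytes += skb->len;
		packets++;
		napi_consume_skb(skb, in_napi);
	}

	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);

	/* Let BQL adapt the queue limit to what actually completed. */
	netdev_tx_completed_queue(txq, packets, bytes);
}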
2018 Dec 27
2
[PATCH RFC 1/2] virtio-net: bql support
...ts.packets += packets;
>>> @@ -1364,7 +1368,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
>>> return;
>>> if (__netif_tx_trylock(txq)) {
>>> - free_old_xmit_skbs(sq);
>>> + free_old_xmit_skbs(sq, txq, true);
>>> __netif_tx_unlock(txq);
>>> }
>>> @@ -1440,7 +1444,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>>> struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
>>> __netif_tx_lock(txq, raw_smp_processor_id());
>>> -...
2018 Dec 05
3
[PATCH RFC 0/2] virtio-net: interrupt related improvements
Now that we have brought the virtio overhead way down with a fast packed
ring implementation, we seem to be actually observing TCP drops
indicative of bufferbloat. So let's try to enable TSQ. Note: it isn't
clear that the default pacing is great for the virt use case. It's worth
trying to play with sk_pacing_shift_update to see what happens.
For this reason, and for a more important...
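For context on the knob mentioned above: TSQ limits the data a TCP socket may queue below the stack to roughly sk_pacing_rate >> sk_pacing_shift, about 1 ms worth at the default shift of 10, and sk_pacing_shift_update() lets a driver lower the shift so more bytes stay in flight while completions are delayed. A hedged sketch of one way to experiment from a driver's xmit path (the helper name and the value 8 are arbitrary examples, not taken from this thread):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Experimental sketch: relax TSQ for sockets sending through this
 * device. Shift 8 allows roughly 4x the default amount of queued
 * data (shift 10); which value suits virtio is exactly the open
 * question the cover letter raises.
 */
static void relax_tsq_for_virt(struct sk_buff *skb)
{
	if (skb->sk)	/* sk_pacing_shift_update() also tolerates NULL */
		sk_pacing_shift_update(skb->sk, 8);
}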
2014 Oct 15
2
[RFC PATCH net-next 5/6] virtio-net: enable tx interrupt
...rocessor_id());
>> > + virtqueue_disable_cb(sq->vq);
>> > + sent += free_old_xmit_skbs(sq, budget - sent);
>> > +
>> > + if (sent < budget) {
>> > + r = virtqueue_enable_cb_prepare(sq->vq);
>> > + napi_complete(napi);
>> > + __netif_tx_unlock(txq);
>> > + if (unlikely(virtqueue_poll(sq->vq, r)) &&
> So you are enabling callback on the next packet,
> which is almost sure to cause an interrupt storm
> on the guest.
>
>
> I think it's a bad idea, this is why I used
> enable_cb_delayed in my pat...
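The objection: virtqueue_enable_cb_prepare() arms a callback for the very next used buffer, so under a steady stream nearly every completion raises an interrupt. virtqueue_enable_cb_delayed() instead hints the device to hold the interrupt until a batch of buffers has been consumed, and returns false when buffers are already pending so the driver can close the race by polling again; the v2 posting further down this listing switches to it. A minimal sketch of that pattern (clean_tx_completions() is a hypothetical stand-in for the driver's reclaim function):

/* Sketch of the batched re-enable loop the review suggests. */
virtqueue_disable_cb(vq);
for (;;) {
	clean_tx_completions(vq);	/* hypothetical reclaim helper */
	if (virtqueue_enable_cb_delayed(vq))
		break;			/* armed; interrupt will be batched */
	virtqueue_disable_cb(vq);	/* buffers raced in: reclaim again */
}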
2018 Dec 05
0
[PATCH RFC 1/2] virtio-net: bql support
...update_begin(&sq->stats.syncp);
sq->stats.bytes += bytes;
sq->stats.packets += packets;
@@ -1364,7 +1368,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, txq, true);
__netif_tx_unlock(txq);
}
@@ -1440,7 +1444,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
__netif_tx_lock(txq, raw_smp_processor_id());
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, txq, true);
__ne...
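BQL only balances if the transmit path also records what it hands to the device. A minimal sketch of that half, pairing with the netdev_tx_completed_queue() call above (placement and surrounding logic are assumptions; error handling and xmit_more batching are elided):

/* Sketch: the send-side BQL hook. */
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);

	/* ... queue the skb into sq->vq (xmit_skb) and handle errors ... */

	netdev_tx_sent_queue(txq, skb->len);	/* BQL: these bytes are now in flight */

	virtqueue_kick(sq->vq);
	return NETDEV_TX_OK;
}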
2014 Oct 15
2
[RFC PATCH net-next 5/6] virtio-net: enable tx interrupt
...vi->dev, vq2txq(sq->vq));
+ unsigned int r, sent = 0;
+
+again:
+ __netif_tx_lock(txq, smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+ sent += free_old_xmit_skbs(sq, budget - sent);
+
+ if (sent < budget) {
+ r = virtqueue_enable_cb_prepare(sq->vq);
+ napi_complete(napi);
+ __netif_tx_unlock(txq);
+ if (unlikely(virtqueue_poll(sq->vq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(sq->vq);
+ __napi_schedule(napi);
+ goto again;
+ }
+ } else {
+ __netif_tx_unlock(txq);
+ }
+
+ netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
+ return sent;
+}...
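The ordering above is the delicate part: the ring is snapshotted with virtqueue_enable_cb_prepare() while the tx lock is still held, NAPI is completed, and only then does virtqueue_poll() check whether completions slipped in after the snapshot, in which case the driver reschedules itself rather than waiting for an interrupt. The same steps restated with comments (abridged from the snippet above):

r = virtqueue_enable_cb_prepare(sq->vq);	/* snapshot used index, arm callback */
napi_complete(napi);				/* leave polling mode */
__netif_tx_unlock(txq);
if (unlikely(virtqueue_poll(sq->vq, r)) &&	/* did work arrive after the snapshot? */
    napi_schedule_prep(napi)) {
	virtqueue_disable_cb(sq->vq);		/* we will poll; no interrupt needed */
	__napi_schedule(napi);			/* re-arm NAPI polling */
}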
2014 Oct 14
4
[PATCH RFC] virtio_net: enable tx interrupt
...vi->dev, vq2txq(sq->vq));
+ unsigned int r, sent = 0;
+
+again:
+ __netif_tx_lock(txq, smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+ sent += free_old_xmit_skbs(sq, budget - sent);
+
+ if (sent < budget) {
+ r = virtqueue_enable_cb_prepare(sq->vq);
+ napi_complete(napi);
+ __netif_tx_unlock(txq);
+ if (unlikely(virtqueue_poll(sq->vq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(sq->vq);
+ __napi_schedule(napi);
+ goto again;
+ }
+ } else {
+ __netif_tx_unlock(txq);
+ }
+
+ netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
+ return sent;
+}...
2019 Jan 02
2
[PATCH RFC 1/2] virtio-net: bql support
...4,7 +1368,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
>>>>> return;
>>>>> if (__netif_tx_trylock(txq)) {
>>>>> - free_old_xmit_skbs(sq);
>>>>> + free_old_xmit_skbs(sq, txq, true);
>>>>> __netif_tx_unlock(txq);
>>>>> }
>>>>> @@ -1440,7 +1444,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>>>>> struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
>>>>> __netif_tx_lock(txq, raw_s...
2014 Oct 15
1
[PATCH RFC v2 1/3] virtio_net: enable tx interrupt
...q));
+ unsigned int sent = 0;
+ bool enable_done;
+
+again:
+ __netif_tx_lock(txq, smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+ sent += free_old_xmit_skbs(sq, budget - sent);
+
+ if (sent < budget) {
+ enable_done = virtqueue_enable_cb_delayed(sq->vq);
+ napi_complete(napi);
+ __netif_tx_unlock(txq);
+ if (unlikely(enable_done) && napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(sq->vq);
+ __napi_schedule(napi);
+ goto again;
+ }
+ } else {
+ __netif_tx_unlock(txq);
+ }
+
+ netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
+ return sent;
+}
+
#ifdef CONFIG_NET_RX_...
2018 Dec 26
0
[PATCH RFC 1/2] virtio-net: bql support
...> > sq->stats.packets += packets;
> > @@ -1364,7 +1368,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > return;
> > if (__netif_tx_trylock(txq)) {
> > - free_old_xmit_skbs(sq);
> > + free_old_xmit_skbs(sq, txq, true);
> > __netif_tx_unlock(txq);
> > }
> > @@ -1440,7 +1444,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> > struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
> > __netif_tx_lock(txq, raw_smp_processor_id());
> > - free_old_xmit_skbs...