search for: napi_schedule_prep

Displaying 20 results from an estimated 89 matches for "napi_schedule_prep".

2008 May 26
2
virtio_net: another race with virtio_net and enable_cb
...k the following scenario happened. Please look at virtnet_poll: [...] /* Out of packets? */ if (received < budget) { netif_rx_complete(vi->dev, napi); if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && napi_schedule_prep(napi)) { vi->rvq->vq_ops->disable_cb(vi->rvq); __netif_rx_schedule(vi->dev, napi); goto again; } } If an interrupt arrives after netif_rx_complete, a second poll routine can run on a dif...
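For readability, here is the poll-exit path quoted above, reindented (2008-era API: netif_rx_complete()/__netif_rx_schedule() and the vq_ops indirection, exactly as in the excerpt). The race being reported sits right after netif_rx_complete(), which clears NAPI_STATE_SCHED: an interrupt landing in that window can schedule a second poll instance on another CPU while this one is still running.

    /* Out of packets? */
    if (received < budget) {
            netif_rx_complete(vi->dev, napi);        /* clears NAPI_STATE_SCHED */
            /* an interrupt arriving here can start a second poll elsewhere */
            if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
                napi_schedule_prep(napi)) {          /* try to re-take SCHED */
                    vi->rvq->vq_ops->disable_cb(vi->rvq);
                    __netif_rx_schedule(vi->dev, napi);
                    goto again;
            }
    }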
2013 Jul 08
4
[PATCH 2/2] virtio_net: fix race in RX VQ processing
...ebug build, this can trigger panic within START_USE. Jason Wang reports that he can trigger the races artificially, by adding udelay() in virtqueue_enable_cb() after virtio_mb(). However, we must call napi_complete to clear NAPI_STATE_SCHED before polling the virtqueue for used buffers, otherwise napi_schedule_prep in a callback will fail, causing us to lose RX events. To fix, call virtqueue_enable_cb_prepare with NAPI_STATE_SCHED set (under napi lock), later call virtqueue_poll with NAPI_STATE_SCHED clear (outside the lock). Reported-by: Jason Wang <jasowang at redhat.com> Signed-off-by: Michael S. T...
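The ordering that commit message describes can be sketched as follows against the multiqueue driver of that era (rq standing for the per-queue struct receive_queue); this is a paraphrase of the described fix, not the verbatim hunk. virtqueue_enable_cb_prepare() re-arms the callback and returns an opaque index, and virtqueue_poll() later checks, outside the napi lock, whether new used buffers appeared since then:

    /* Out of packets? */
    if (received < budget) {
            unsigned r;

            /* re-arm the callback while NAPI_STATE_SCHED is still set ... */
            r = virtqueue_enable_cb_prepare(rq->vq);
            /* ... then clear NAPI_STATE_SCHED ... */
            napi_complete(napi);
            /* ... and only now look for buffers that raced in: a callback
             * firing at this point succeeds in napi_schedule_prep()
             * instead of being lost. */
            if (unlikely(virtqueue_poll(rq->vq, r)) &&
                napi_schedule_prep(napi)) {
                    virtqueue_disable_cb(rq->vq);
                    __napi_schedule(napi);
                    goto again;
            }
    }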
2011 Feb 10
2
[PATCH] virtio_net: Add schedule check to napi_enable call
...->napi); + + /* If all buffers were filled by other side before we napi_enabled, we + * won't get another interrupt, so process any outstanding packets + * now. virtnet_poll wants re-enable the queue, so we disable here. + * We synchronize against interrupts via NAPI_STATE_SCHED */ + if (napi_schedule_prep(&vi->napi)) { + virtqueue_disable_cb(vi->rvq); + __napi_schedule(&vi->napi); + } +} + static void refill_work(struct work_struct *work) { struct virtnet_info *vi; @@ -454,7 +468,7 @@ static void refill_work(struct work_stru vi = container_of(work, struct virtnet_info, refil...
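Assembled from the quoted hunk, the helper looks roughly like this (virtnet_napi_enable() in the mainline version; vi->rvq is the single receive virtqueue of the pre-multiqueue driver):

    static void virtnet_napi_enable(struct virtnet_info *vi)
    {
            napi_enable(&vi->napi);

            /* If all buffers were filled by the other side before NAPI was
             * enabled, no further interrupt will arrive, so process any
             * outstanding packets now.  virtnet_poll re-enables the
             * callback itself, so only disable it here; NAPI_STATE_SCHED
             * serializes this against the interrupt handler. */
            if (napi_schedule_prep(&vi->napi)) {
                    virtqueue_disable_cb(vi->rvq);
                    __napi_schedule(&vi->napi);
            }
    }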
2014 Oct 15
2
[RFC PATCH net-next 5/6] virtio-net: enable tx interrupt
...i->sq[vq2txq(vq)]; >> > >> > - /* Suppress further interrupts. */ >> > - virtqueue_disable_cb(vq); >> > - >> > - /* We were probably waiting for more output buffers. */ >> > - netif_wake_subqueue(vi->dev, vq2txq(vq)); >> > + if (napi_schedule_prep(&sq->napi)) { >> > + __napi_schedule(&sq->napi); >> > + } >> > } >> > >> > static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) >> > @@ -774,7 +801,39 @@ again: >> > return received; >> >...
2014 Oct 14
4
[PATCH RFC] virtio_net: enable tx interrupt
...rtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; + struct send_queue *sq = &vi->sq[vq2txq(vq)]; - /* Suppress further interrupts. */ - virtqueue_disable_cb(vq); - - /* We were probably waiting for more output buffers. */ - netif_wake_subqueue(vi->dev, vq2txq(vq)); + if (napi_schedule_prep(&sq->napi)) { + __napi_schedule(&sq->napi); + } } static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) @@ -766,6 +791,37 @@ again: return received; } +static int virtnet_poll_tx(struct napi_struct *napi, int budget) +{ + struct send_queue *sq = + contain...
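The interrupt-handler side of that change is fully visible in the hunk: rather than disabling callbacks and waking the subqueue directly, the TX callback (skb_xmit_done() in virtio_net; the name is truncated in the excerpt) now only hands the work to a per-send-queue NAPI context, with napi_schedule_prep() preventing double scheduling:

    static void skb_xmit_done(struct virtqueue *vq)
    {
            struct virtnet_info *vi = vq->vdev->priv;
            struct send_queue *sq = &vi->sq[vq2txq(vq)];

            /* defer TX completion handling to per-queue NAPI */
            if (napi_schedule_prep(&sq->napi))
                    __napi_schedule(&sq->napi);
    }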
2010 Jun 03
0
[PATCH 3/3][STABLE] KVM: add schedule check to napi_enable call
...l buffers were filled by other side before we napi_enabled, we + * won't get another interrupt, so process any outstanding packets + * now. virtnet_poll wants re-enable the queue, so we disable here. + * We synchronize against interrupts via NAPI_STATE_SCHED */ + if (napi_schedule_prep(&vi->napi)) { + vi->rvq->vq_ops->disable_cb(vi->rvq); + __napi_schedule(&vi->napi); + } +} + static void refill_work(struct work_struct *work) { struct virtnet_info *vi; @@ -397,7 +411,7 @@ static void refill_work(struct work_str...
2013 Jul 09
0
[PATCH v2 2/2] virtio_net: fix race in RX VQ processing
...ebug build, this can trigger panic within START_USE. Jason Wang reports that he can trigger the races artificially, by adding udelay() in virtqueue_enable_cb() after virtio_mb(). However, we must call napi_complete to clear NAPI_STATE_SCHED before polling the virtqueue for used buffers, otherwise napi_schedule_prep in a callback will fail, causing us to lose RX events. To fix, call virtqueue_enable_cb_prepare with NAPI_STATE_SCHED set (under napi lock), later call virtqueue_poll with NAPI_STATE_SCHED clear (outside the lock). Reported-by: Jason Wang <jasowang at redhat.com> Tested-by: Jason Wang <j...
2013 Jul 09
0
[PATCH 2/2] virtio_net: fix race in RX VQ processing
...panic within START_USE. > > Jason Wang reports that he can trigger the races artificially, > by adding udelay() in virtqueue_enable_cb() after virtio_mb(). > > However, we must call napi_complete to clear NAPI_STATE_SCHED before > polling the virtqueue for used buffers, otherwise napi_schedule_prep in > a callback will fail, causing us to lose RX events. > > To fix, call virtqueue_enable_cb_prepare with NAPI_STATE_SCHED > set (under napi lock), later call virtqueue_poll with > NAPI_STATE_SCHED clear (outside the lock). > > Reported-by: Jason Wang <jasowang at redhat.co...
2014 Oct 15
2
[RFC PATCH net-next 5/6] virtio-net: enable tx interrupt
...rtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; + struct send_queue *sq = &vi->sq[vq2txq(vq)]; - /* Suppress further interrupts. */ - virtqueue_disable_cb(vq); - - /* We were probably waiting for more output buffers. */ - netif_wake_subqueue(vi->dev, vq2txq(vq)); + if (napi_schedule_prep(&sq->napi)) { + __napi_schedule(&sq->napi); + } } static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) @@ -774,7 +801,39 @@ again: return received; } +static int virtnet_poll_tx(struct napi_struct *napi, int budget) +{ + struct send_queue *sq = + contain...
2011 Feb 09
1
[PATCH] virtio-net: add schedule check to napi_enable call in refill_work
...->napi); + + /* If all buffers were filled by other side before we napi_enabled, we + * won't get another interrupt, so process any outstanding packets + * now. virtnet_poll wants re-enable the queue, so we disable here. + * We synchronize against interrupts via NAPI_STATE_SCHED */ + if (napi_schedule_prep(&vi->napi)) { + virtqueue_disable_cb(vi->rvq); + __napi_schedule(&vi->napi); + } +} + static void refill_work(struct work_struct *work) { struct virtnet_info *vi; @@ -454,7 +468,7 @@ vi = container_of(work, struct virtnet_info, refill.work); napi_disable(&vi->napi)...
2014 Oct 15
1
[PATCH RFC v2 1/3] virtio_net: enable tx interrupt
...rtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; + struct send_queue *sq = &vi->sq[vq2txq(vq)]; - /* Suppress further interrupts. */ - virtqueue_disable_cb(vq); - - /* We were probably waiting for more output buffers. */ - netif_wake_subqueue(vi->dev, vq2txq(vq)); + if (napi_schedule_prep(&sq->napi)) + __napi_schedule(&sq->napi); } static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) @@ -774,6 +798,37 @@ again: return received; } +static int virtnet_poll_tx(struct napi_struct *napi, int budget) +{ + struct send_queue *sq = + container_of(...
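The virtnet_poll_tx() body is cut off in these excerpts. Independent of the exact RFC code, the general shape of such a TX NAPI handler is: reclaim completed buffers under the tx queue lock, wake the stopped subqueue, then complete NAPI and re-arm the virtqueue callback. A rough sketch under those assumptions follows; free_old_xmit_skbs() stands in for whatever completion routine the series uses, and the re-arm ordering borrows the enable_cb_prepare/poll pattern from the 2013 RX fix above rather than the RFC's own code:

    static int virtnet_poll_tx(struct napi_struct *napi, int budget)
    {
            struct send_queue *sq = container_of(napi, struct send_queue, napi);
            struct virtnet_info *vi = sq->vq->vdev->priv;
            struct netdev_queue *txq =
                    netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
            unsigned r;

            __netif_tx_lock(txq, smp_processor_id());
            free_old_xmit_skbs(sq);                  /* reclaim used TX buffers */
            __netif_tx_unlock(txq);

            netif_wake_subqueue(vi->dev, vq2txq(sq->vq));

            /* done for now: re-arm the callback, clear NAPI_STATE_SCHED,
             * and reschedule if completions raced in meanwhile */
            r = virtqueue_enable_cb_prepare(sq->vq);
            napi_complete(napi);
            if (unlikely(virtqueue_poll(sq->vq, r)) &&
                napi_schedule_prep(napi)) {
                    virtqueue_disable_cb(sq->vq);
                    __napi_schedule(napi);
            }

            return 0;
    }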