Displaying 20 results from an estimated 277 matches for "schedule_delayed_work".
2011 Feb 03
0
[PATCH R3 4/7] xen/balloon: Migration from mod_timer() to schedule_delayed_work()
Migration from mod_timer() to schedule_delayed_work().
Signed-off-by: Daniel Kiper <dkiper@net-space.pl>
---
drivers/xen/balloon.c | 16 +++-------------
1 files changed, 3 insertions(+), 13 deletions(-)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 952cfe2..4223f64 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/b...
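The diff is cut off above, but the conversion the subject line describes follows a common pattern: replace a struct timer_list plus mod_timer() re-arming with a delayed work item. A minimal sketch of that pattern (placeholder names, not the actual balloon.c symbols):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void balloon_step_sketch(struct work_struct *work);
/* replaces: static struct timer_list balloon_timer; plus its setup */
static DECLARE_DELAYED_WORK(balloon_worker_sketch, balloon_step_sketch);

static void balloon_step_sketch(struct work_struct *work)
{
	/* ... perform one ballooning pass ... */

	/* re-arm; replaces mod_timer(&balloon_timer, jiffies + HZ) */
	schedule_delayed_work(&balloon_worker_sketch, HZ);
}

static void balloon_kick_sketch(void)
{
	/* run as soon as possible; replaces mod_timer(&balloon_timer, jiffies) */
	schedule_delayed_work(&balloon_worker_sketch, 0);
}

The work item runs in process context on the system workqueue, which is usually the point of such a migration: the handler may sleep, unlike a timer callback.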
2013 Dec 26
2
[PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...le :
	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
What if the workqueue is scheduled _before_ the call to virtnet_napi_enable(&vi->rq[i]) ?
refill_work() will happily conflict with another cpu, two cpus could
call try_fill_recv() at the same time, or worse n...
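The worry above is about refill_work() running concurrently with the napi path. For context, a rough sketch of what that handler does, reconstructed from this snippet and the replies further down this listing; field names such as rq->napi are assumptions, not quoted code:

static void refill_work_sketch(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		/* per the replies below, napi_disable() busy-waits here until
		 * napi has been enabled, which serializes the refill against
		 * the napi receive path */
		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* if memory is still tight, retry later */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ / 2);
	}
}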
2023 May 22
1
[PATCH] virtio-fs: Improved request latencies when Virtio queue is full
...ex 4d8d4f16c727..8af9d3dc61d3 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -347,6 +347,8 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
 		}
 	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
 	spin_unlock(&fsvq->lock);
+
+	schedule_delayed_work(&fsvq->dispatch_work, 0);
 }
 static void virtio_fs_request_dispatch_work(struct work_struct *work)
@@ -388,8 +390,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
 		if (ret == -ENOMEM || ret == -ENOSPC) {
 			spin_lock(&fsvq->lock);
 			list_add_tail(&...
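The intent of the hunk above: once the completion handler has reaped requests and freed descriptors, kick the dispatch worker immediately (delay 0) rather than leaving queued requests to a 1 ms polling retry in the submit path. A simplified sketch of the resulting flow (the reaping loop is trimmed; only the scheduling point matters here):

static void hiprio_done_work_sketch(struct virtio_fs_vq *fsvq,
				    struct virtqueue *vq)
{
	void *req;
	unsigned int len;

	spin_lock(&fsvq->lock);
	/* reap completed requests; each one frees ring space */
	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
		kfree(req);
	spin_unlock(&fsvq->lock);

	/* ring space is available again: retry queued requests right away */
	schedule_delayed_work(&fsvq->dispatch_work, 0);
}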
2023 May 31
1
[PATCH V2] virtio-fs: Improved request latencies when Virtio queue is full
...bug("virtio-fs: worker %s called.\n", __func__);
@@ -388,8 +391,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
 		if (ret == -ENOMEM || ret == -ENOSPC) {
 			spin_lock(&fsvq->lock);
 			list_add_tail(&req->list, &fsvq->queued_reqs);
-			schedule_delayed_work(&fsvq->dispatch_work,
-					      msecs_to_jiffies(1));
 			spin_unlock(&fsvq->lock);
 			return;
 		}
@@ -436,8 +437,6 @@ static int send_forget_request(struct virtio_fs_vq *fsvq,
 		pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
 			 ret);...
2023 Jul 03
2
[PATCH V4] virtio-fs: Improved request latencies when Virtio queue is full
...bug("virtio-fs: worker %s called.\n", __func__);
@@ -388,8 +391,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
 		if (ret == -ENOMEM || ret == -ENOSPC) {
 			spin_lock(&fsvq->lock);
 			list_add_tail(&req->list, &fsvq->queued_reqs);
-			schedule_delayed_work(&fsvq->dispatch_work,
-					      msecs_to_jiffies(1));
 			spin_unlock(&fsvq->lock);
 			return;
 		}
@@ -436,8 +437,6 @@ static int send_forget_request(struct virtio_fs_vq *fsvq,
 		pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
 			 ret);...
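Both V2 and V4 carry the same two hunks: with the completion-side kick in place, the -ENOMEM/-ENOSPC paths no longer need their own 1 ms timed retry. The error path reduces to parking the request; a sketch of the result (context and the surrounding loop are trimmed, and the wrapper name is hypothetical):

static void park_request_sketch(struct virtio_fs_vq *fsvq,
				struct fuse_req *req)
{
	/* virtqueue is full: queue the request and rely on the done_work
	 * handler to schedule dispatch_work once space frees up */
	spin_lock(&fsvq->lock);
	list_add_tail(&req->list, &fsvq->queued_reqs);
	spin_unlock(&fsvq->lock);
}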
2013 Jul 03
4
[PATCH net] virtio-net: fix the race between channels setting and refill
...dev, VIRTIO_NET_F_MQ))
return 0;
@@ -915,10 +914,8 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 			 queue_pairs);
 		return -EINVAL;
 	} else {
-		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-				schedule_delayed_work(&vi->refill, 0);
 		vi->curr_queue_pairs = queue_pairs;
+		schedule_delayed_work(&vi->refill, 0);
 	}
 	return 0;
--
1.7.1
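The race being fixed: the old code walked the new queues and scheduled the refill before curr_queue_pairs was updated, so refill_work() could run against a stale queue count. After the hunk, the count is published first and the refill work (which iterates curr_queue_pairs itself) is kicked once. A sketch of the resulting shape, with the control-virtqueue command reduced to a hypothetical helper:

/* hypothetical: issues the VIRTIO_NET_CTRL_MQ command to the device */
static bool send_mq_command_sketch(struct virtnet_info *vi, u16 queue_pairs);

static int virtnet_set_queues_sketch(struct virtnet_info *vi, u16 queue_pairs)
{
	if (!send_mq_command_sketch(vi, queue_pairs))
		return -EINVAL;

	/* publish the new count first ... */
	vi->curr_queue_pairs = queue_pairs;
	/* ... then let refill_work() fill every current queue */
	schedule_delayed_work(&vi->refill, 0);
	return 0;
}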
2014 Jul 14
2
[RFC PATCH 1/3] hw_random: allow RNG devices to give early randomness after a delay
...work);
> +
> + get_early_randomness(rng);
> +}
> +
> +static void add_early_randomness(struct hwrng *rng)
The add/get naming seems awkward in the above hunks.
> +{
> + if (!(rng->flags & HWRNG_DELAY_READ_AT_INIT))
> + return get_early_randomness(rng);
> +
> + schedule_delayed_work(&rng->dwork, msecs_to_jiffies(500));
> +}
> +
Perhaps instead of rng->flags and a hardcoded delay, we could have
rng->seed_delay = msecs_to_jiffies(500) in virtio-rng? Then you can
just call unconditionally:
schedule_delayed_work(&rng->dwork, rng->seed_delay);
I th...
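The suggestion above would replace the flag plus hard-coded constant with a per-device delay stored on the hwrng itself. A sketch of what that could look like (seed_delay and the dwork member come from this RFC discussion, not from the hwrng API as merged):

/* per the suggestion, struct hwrng would gain: unsigned long seed_delay; */

static void add_early_randomness_sketch(struct hwrng *rng)
{
	/* a delay of 0 queues the work immediately, so drivers that can
	 * read at init time simply leave seed_delay at its default of 0 */
	schedule_delayed_work(&rng->dwork, rng->seed_delay);
}

/* virtio-rng probe would then just set:
 *	rng->seed_delay = msecs_to_jiffies(500);
 */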
2013 Dec 27
1
[PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...max_queue_pairs; i++) {
> > if (i < vi->curr_queue_pairs)
> > /* Make sure we have some buffers: if oom use wq. */
> > if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
> > schedule_delayed_work(&vi->refill, 0);
> > virtnet_napi_enable(&vi->rq[i]);
> >
> >
> > What if the workqueue is scheduled _before_ the call to virtnet_napi_enable(&vi->rq[i]) ?
>
> Then napi_disable() in refill_work() will busy wait until napi is
>...
2020 Jun 05
2
[PATCH] virtio_net: Unregister and re-register xdp_rxq across freeze/restore
...f (err < 0)
+		xdp_rxq_info_unreg(xdp_rxq);
+	return err;
+}
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -1480,17 +1495,10 @@ static int virtnet_open(struct net_device *dev)
 		if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
-		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+		err = virtnet_reg_xdp(&vi->rq[i].xdp_rxq, dev, i);
 		if (err < 0)
 			return err;
-		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
-						 MEM_TYPE_PAGE_SHARED, NULL);
-		if (err <...
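Reconstructed from the truncated diff, the new helper appears to fold the two-step registration into one call with cleanup on failure, roughly:

static int virtnet_reg_xdp(struct xdp_rxq_info *xdp_rxq,
			   struct net_device *dev, int queue_index)
{
	int err;

	err = xdp_rxq_info_reg(xdp_rxq, dev, queue_index);
	if (err < 0)
		return err;

	/* undo the registration if the memory model cannot be set */
	err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
	if (err < 0)
		xdp_rxq_info_unreg(xdp_rxq);

	return err;
}

This is a sketch assembled from the visible fragments; the posted patch may differ in detail, for example in how the freeze/restore path reuses it.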
2013 Dec 27
0
[PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...= 0; i < vi->max_queue_pairs; i++) {
> if (i < vi->curr_queue_pairs)
> /* Make sure we have some buffers: if oom use wq. */
> if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
> schedule_delayed_work(&vi->refill, 0);
> virtnet_napi_enable(&vi->rq[i]);
>
>
> What if the workqueue is scheduled _before_ the call to virtnet_napi_enable(&vi->rq[i]) ?
Then napi_disable() in refill_work() will busy wait until napi is
enabled by virtnet_napi_enable() wh...
2013 Oct 14
4
[PATCH net 1/2] virtio-net: don't respond to cpu hotplug notifier if we're not ready
We're trying to re-configure the affinity unconditionally in the cpu hotplug
callback. This may lead to issues when resuming from s3/s4 since
- virt queues haven't been allocated at that time.
- it's unnecessary since the thaw method will re-configure the affinity.
Fix this issue by checking config_enable and doing nothing if we're not ready.
The bug was introduced by commit
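No code is shown for this entry, so purely as an illustration of the described fix (field and helper names are assumptions in the style of the 2013 driver, not quoted from the patch), the guard would sit at the top of the hotplug callback:

static int virtnet_cpu_callback_sketch(struct notifier_block *nfb,
				       unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	/* not ready (e.g. resuming from s3/s4): queues may not exist yet,
	 * and the thaw path will re-configure the affinity anyway */
	if (!vi->config_enable)
		return NOTIFY_DONE;

	virtnet_set_affinity(vi);
	return NOTIFY_OK;
}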
2012 Nov 27
4
[net-next rfc v7 0/3] Multiqueue virtio-net
Hi all:
This series is an updated version of the multiqueue virtio-net driver based on
Krishna Kumar's work, letting virtio-net use multiple rx/tx queues for packet
reception and transmission. Please review and comment.
A prototype implementation of qemu-kvm support can be found in
git://github.com/jasowang/qemu-kvm-mq.git. To start a guest with two queues, you
could specify the queues
2019 Oct 30
0
[PATCH 1/3] virtiofs: Use a common function to send forget
...queue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
+	if (ret < 0) {
+		if (ret == -ENOMEM || ret == -ENOSPC) {
+			pr_debug("virtio-fs: Could not queue FORGET: err=%d."
+				 " Will try later\n", ret);
+			list_add_tail(&forget->list, &fsvq->queued_reqs);
+			schedule_delayed_work(&fsvq->dispatch_work,
+					      msecs_to_jiffies(1));
+			if (!in_flight)
+				inc_in_flight_req(fsvq);
+			/* Queue is full */
+			ret = 1;
+		} else {
+			pr_debug("virtio-fs: Could not queue FORGET: err=%d."
+				 " Dropping it.\n", ret);
+			kfree(forget);
+			if (i...
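For completeness, the worker this queuing relies on would drain queued_reqs and resubmit, re-arming itself with the same 1 ms delay while the ring stays full. A simplified sketch (send_one_forget_sketch is a hypothetical stand-in for the resubmit logic; the real dispatch work also handles in-flight accounting):

static void hiprio_dispatch_work_sketch(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq =
		container_of(work, struct virtio_fs_vq, dispatch_work.work);
	struct virtio_fs_forget *forget;

	for (;;) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
					struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del(&forget->list);
		spin_unlock(&fsvq->lock);

		if (send_one_forget_sketch(fsvq, forget) < 0) {
			/* still no room: put it back and retry in 1 ms */
			spin_lock(&fsvq->lock);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			spin_unlock(&fsvq->lock);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			return;
		}
	}
}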