search for: __set_current_state

Displaying 20 results from an estimated 45 matches for "__set_current_state".

2016 Apr 26
2
[PATCH 1/2] vhost: simplify work flushing
We used to implement work flushing by tracking a queued seq, a done seq, and the number of flushes in progress. This patch simplifies this by implementing work flushing as just another kind of vhost work with a completion. This will be used by the lockless enqueuing patch. Signed-off-by: Jason Wang <jasowang at redhat.com> --- drivers/vhost/vhost.c | 53
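The idea is roughly the following (a minimal sketch built on the vhost_work helpers declared in drivers/vhost/vhost.h; struct and function names here are illustrative, not necessarily those in the patch): a flush is queued like any other work item, and since the worker runs items in queueing order, everything queued before it has finished by the time the completion fires.

        #include <linux/completion.h>
        #include "vhost.h"      /* struct vhost_work, vhost_work_init(), vhost_work_queue() */

        struct vhost_flush_struct {
                struct vhost_work work;
                struct completion wait_event;
        };

        static void vhost_flush_work(struct vhost_work *work)
        {
                struct vhost_flush_struct *s;

                s = container_of(work, struct vhost_flush_struct, work);
                complete(&s->wait_event);
        }

        /* Queue the flush as an ordinary work item and wait for it; no
         * queued/done seq counters or flushing count are needed. */
        static void vhost_dev_flush_sketch(struct vhost_dev *dev)
        {
                struct vhost_flush_struct flush;

                init_completion(&flush.wait_event);
                vhost_work_init(&flush.work, vhost_flush_work);
                vhost_work_queue(dev, &flush.work);
                wait_for_completion(&flush.wait_event);
        }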
2009 Sep 03
2
[PATCH] Don't exit from cleaner_kthread and transaction_kthread until kthread_should_stop is true
...e { smp_mb(); if (root->fs_info->closing) - break; + continue; set_current_state(TASK_INTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); @@ -1468,7 +1468,7 @@ do { smp_mb(); if (root->fs_info->closing) - break; + continue; delay = HZ * 30; vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE)...
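An illustrative sketch of the loop shape this fix aims for (not the btrfs code; fs_is_closing() and do_cleanup_pass() are hypothetical helpers): when the filesystem is closing, the work is skipped but the loop keeps going, so the thread only returns once kthread_should_stop() is true, as the subject says. The task state is set before kthread_should_stop() is tested so the wakeup issued by kthread_stop() cannot be lost between the test and schedule().

        #include <linux/kthread.h>
        #include <linux/sched.h>

        static bool fs_is_closing(void *arg);           /* hypothetical */
        static void do_cleanup_pass(void *arg);         /* hypothetical */

        static int cleaner_like_kthread(void *arg)
        {
                do {
                        if (!fs_is_closing(arg))
                                do_cleanup_pass(arg);

                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule();
                        __set_current_state(TASK_RUNNING);
                } while (!kthread_should_stop());

                return 0;
        }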
2016 Apr 26
0
[PATCH 2/2] vhost: lockless enqueuing
...ldfs = get_fs(); set_fs(USER_DS); @@ -315,29 +317,25 @@ static int vhost_worker(void *data) /* mb paired w/ kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); - spin_lock_irq(&dev->work_lock); - if (kthread_should_stop()) { - spin_unlock_irq(&dev->work_lock); __set_current_state(TASK_RUNNING); break; } - if (!list_empty(&dev->work_list)) { - work = list_first_entry(&dev->work_list, - struct vhost_work, node); - list_del_init(&work->node); - } else - work = NULL; - spin_unlock_irq(&dev->work_lock); - if (work) { + node =...
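The resulting worker loop drains the queue with the lock-free llist API instead of taking work_lock; a simplified sketch of that pattern (assuming the struct vhost_work and VHOST_WORK_QUEUED definitions from drivers/vhost/vhost.h):

        #include <linux/bitops.h>
        #include <linux/llist.h>
        #include <linux/sched.h>
        #include "vhost.h"      /* struct vhost_work, VHOST_WORK_QUEUED */

        /* Producers push with llist_add(); the worker steals the whole list
         * in one atomic operation and replays it in FIFO order. The caller
         * has already set TASK_INTERRUPTIBLE, which is why TASK_RUNNING is
         * restored before running the first item. */
        static void vhost_run_pending_sketch(struct llist_head *work_list)
        {
                struct vhost_work *work, *work_next;
                struct llist_node *node;

                node = llist_del_all(work_list);        /* grab everything queued so far */
                if (!node)
                        return;

                node = llist_reverse_order(node);       /* llist is LIFO; restore FIFO */
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);
                        work->fn(work);
                        if (need_resched())
                                schedule();
                }
        }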
2023 May 23
4
[PATCH 3/3] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
...smp_wmb() can't help and should be removed, instead we need llist_for_each_entry_safe(...) { smp_mb__before_atomic(); clear_bit(VHOST_WORK_QUEUED, &work->flags); Also, if the work->fn pointer is not stable, we should read it before smp_mb__before_atomic() as well. No? __set_current_state(TASK_RUNNING); Why do we set TASK_RUNNING inside the loop? Does this mean that work->fn() can return with current->state != RUNNING ? work->fn(work); Now the main question. Whatever we do, SIGKILL/SIGSTOP/etc can come right before we call work->fn(). Is it "safe" to run...
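A sketch of the ordering being proposed in this review (the reviewer's suggestion, not the committed code; it assumes the vhost_work definitions from drivers/vhost/vhost.h):

        #include <linux/atomic.h>
        #include <linux/llist.h>
        #include <linux/sched.h>
        #include "vhost.h"      /* struct vhost_work, vhost_work_fn_t, VHOST_WORK_QUEUED */

        static void run_work_list_sketch(struct llist_node *node)
        {
                struct vhost_work *work, *work_next;

                llist_for_each_entry_safe(work, work_next, node, node) {
                        /* Read fn before clearing the flag: once
                         * VHOST_WORK_QUEUED is cleared the entry may be
                         * re-queued and rewritten on another CPU. */
                        vhost_work_fn_t fn = work->fn;

                        /* Per-entry full barrier instead of one smp_wmb():
                         * order the reads of this entry against the
                         * clear_bit() that publishes it as free. */
                        smp_mb__before_atomic();
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);

                        __set_current_state(TASK_RUNNING);
                        fn(work);
                }
        }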
2010 Jul 29
1
[PATCH] vhost: locking/rcu cleanup
...BLE); + n = dev->nvqs; +repeat: + set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ - spin_lock_irq(&dev->work_lock); - if (work) { - work->done_seq = seq; - if (work->flushing) - wake_up_all(&work->done); - } + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + return 0; + } - if (kthread_should_stop()) { - spin_unlock_irq(&dev->work_lock); - __set_current_state(TASK_RUNNING); - return 0; + for (i = 0; i < n; ++i) { + work = &dev->vqs[(vq + i) % n].work; + seq = atomic_read(&work->queue_seq); + if (seq...
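A very rough sketch of the per-virtqueue polling scheme in this RFC (struct layout assumed from the hunk, not the full patch): enqueuers bump queue_seq, the worker owns done_seq, and an item is pending while the two differ.

        #include <linux/atomic.h>

        struct vq_work {
                void (*fn)(struct vq_work *work);
                atomic_t queue_seq;     /* incremented by whoever queues work */
                int done_seq;           /* advanced only by the worker thread */
        };

        static bool vq_work_run_pending(struct vq_work *work)
        {
                int seq = atomic_read(&work->queue_seq);

                if (seq == work->done_seq)
                        return false;           /* nothing queued since the last pass */

                work->fn(work);
                work->done_seq = seq;           /* everything up to seq is now done */
                return true;
        }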
2014 May 14
0
[RFC PATCH v1 07/16] drm/nouveau: rework to new fence interface
...rm->fence; - struct nouveau_eventh *handler; - int ret = 0; + t = jiffies; - ret = nouveau_event_new(pfifo->uevent, 0, - nouveau_fence_wait_uevent_handler, - priv, &handler); - if (ret) - return ret; + if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) { + __set_current_state(TASK_RUNNING); + return 0; + } - nouveau_event_get(handler); + __set_current_state(intr ? TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); - if (fence->timeout) { - unsigned long timeout = fence->timeout - jiffies; - - if (time_before(jiffies, fence->timeout)) { - if (intr)...
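A generic sketch of the wait-loop shape in the hunk above (not the nouveau code; fence_done() is a hypothetical predicate): the deadline is checked each pass, and the sleep state is picked from the intr flag so the wait is only interruptible when the caller allows it.

        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/sched.h>
        #include <linux/sched/signal.h>

        static long fence_wait_sketch(bool (*fence_done)(void *), void *fence,
                                      long wait, bool intr)
        {
                unsigned long timeout = jiffies + wait;

                while (!fence_done(fence)) {
                        if (wait != MAX_SCHEDULE_TIMEOUT &&
                            time_after_eq(jiffies, timeout)) {
                                __set_current_state(TASK_RUNNING);
                                return 0;               /* timed out */
                        }

                        __set_current_state(intr ? TASK_INTERRUPTIBLE
                                                 : TASK_UNINTERRUPTIBLE);
                        if (intr && signal_pending(current)) {
                                __set_current_state(TASK_RUNNING);
                                return -ERESTARTSYS;    /* interrupted */
                        }

                        /* Poll coarsely here; the real code sleeps until the
                         * fence interrupt fires. */
                        schedule_timeout(1);
                }

                __set_current_state(TASK_RUNNING);
                return 1;                               /* fence signalled */
        }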
2016 Apr 26
2
[PATCH 2/2] vhost: lockless enqueuing
...-315,29 +317,25 @@ static int vhost_worker(void *data) > /* mb paired w/ kthread_stop */ > set_current_state(TASK_INTERRUPTIBLE); > > - spin_lock_irq(&dev->work_lock); > - > if (kthread_should_stop()) { > - spin_unlock_irq(&dev->work_lock); > __set_current_state(TASK_RUNNING); > break; > } > - if (!list_empty(&dev->work_list)) { > - work = list_first_entry(&dev->work_list, > - struct vhost_work, node); > - list_del_init(&work->node); > - } else > - work = NULL; > - spin_unlock_irq(&de...
2023 Jun 01
4
[PATCH 1/1] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
...*data) +static bool vhost_worker(void *data) { struct vhost_worker *worker = data; struct vhost_work *work, *work_next; struct llist_node *node; - for (;;) { - /* mb paired w/ kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - - if (vhost_task_should_stop(worker->vtsk)) { - __set_current_state(TASK_RUNNING); - break; - } - - node = llist_del_all(&worker->work_list); - if (!node) - schedule(); - + node = llist_del_all(&worker->work_list); + if (node) { node = llist_reverse_order(node); /* make sure flag is seen after deletion */ smp_wmb(); llist_for_each_e...
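In this version the callback drains the list once and reports whether it found anything, leaving the sleeping and stop/SIGKILL handling to the vhost_task loop; roughly (simplified sketch, assuming the vhost_worker/vhost_work definitions from drivers/vhost):

        #include <linux/llist.h>
        #include <linux/sched.h>
        #include "vhost.h"      /* struct vhost_worker, struct vhost_work, VHOST_WORK_QUEUED */

        static bool vhost_worker_sketch(void *data)
        {
                struct vhost_worker *worker = data;
                struct vhost_work *work, *work_next;
                struct llist_node *node;

                node = llist_del_all(&worker->work_list);
                if (!node)
                        return false;           /* nothing queued; caller may sleep */

                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                smp_wmb();
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        work->fn(work);
                        cond_resched();
                }

                return true;                    /* ran something; caller loops again */
        }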
2019 Oct 17
0
[PATCH RFC 3/3] vhost, kcov: collect coverage from vhost_worker
...dex 36ca2cf419bf..71a349f6b352 100644 > --- a/drivers/vhost/vhost.c > +++ b/drivers/vhost/vhost.c > @@ -357,7 +357,13 @@ static int vhost_worker(void *data) > llist_for_each_entry_safe(work, work_next, node, node) { > clear_bit(VHOST_WORK_QUEUED, &work->flags); > __set_current_state(TASK_RUNNING); > +#ifdef CONFIG_KCOV > + kcov_remote_start(dev->kcov_handle); > +#endif Shouldn't you hide these #ifdefs in a .h file? This is not a "normal" kernel coding style at all. > work->fn(work); > +#ifdef CONFIG_KCOV > + kcov_remote_stop();...
2019 Oct 17
0
[PATCH RFC 3/3] vhost, kcov: collect coverage from vhost_worker
...rs/vhost/vhost.c > > > @@ -357,7 +357,13 @@ static int vhost_worker(void *data) > > > llist_for_each_entry_safe(work, work_next, node, node) { > > > clear_bit(VHOST_WORK_QUEUED, &work->flags); > > > __set_current_state(TASK_RUNNING); > > > +#ifdef CONFIG_KCOV > > > + kcov_remote_start(dev->kcov_handle); > > > +#endif > > > > Shouldn't you hide these #ifdefs in a .h file? This is not a "normal" > > kernel coding style at all. >...
2019 Oct 23
0
[PATCH 3/3] vhost, kcov: collect coverage from vhost_worker
....h> > > #include "vhost.h" > > @@ -357,7 +358,9 @@ static int vhost_worker(void *data) > llist_for_each_entry_safe(work, work_next, node, node) { > clear_bit(VHOST_WORK_QUEUED, &work->flags); > __set_current_state(TASK_RUNNING); > + kcov_remote_start(dev->kcov_handle); > work->fn(work); > + kcov_remote_stop(); > if (need_resched()) > schedule(); >...
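Written out, the hunk above brackets each work item like this (dev->kcov_handle comes from the patch; the helper name here is illustrative). The <linux/kcov.h> helpers compile to no-ops when CONFIG_KCOV is off, which is why no #ifdefs are needed at the call site:

        #include <linux/kcov.h>
        #include <linux/llist.h>
        #include <linux/sched.h>
        #include "vhost.h"      /* struct vhost_dev, struct vhost_work, VHOST_WORK_QUEUED */

        static void vhost_run_work_list_sketch(struct vhost_dev *dev,
                                               struct llist_node *node)
        {
                struct vhost_work *work, *work_next;

                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);

                        /* Attribute coverage collected while this item runs
                         * to the process that registered dev->kcov_handle. */
                        kcov_remote_start(dev->kcov_handle);
                        work->fn(work);
                        kcov_remote_stop();

                        if (need_resched())
                                schedule();
                }
        }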
2019 Oct 23
0
[PATCH 3/3] vhost, kcov: collect coverage from vhost_worker
...; > > > > > @@ -357,7 +358,9 @@ static int vhost_worker(void *data) > > > llist_for_each_entry_safe(work, work_next, node, node) { > > > clear_bit(VHOST_WORK_QUEUED, &work->flags); > > > __set_current_state(TASK_RUNNING); > > > + kcov_remote_start(dev->kcov_handle); > > > work->fn(work); > > > + kcov_remote_stop(); > > > if (need_resched()) > > >...
2023 May 22
1
[PATCH 3/3] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
On 05/22, Mike Christie wrote: > > On 5/22/23 7:30 AM, Oleg Nesterov wrote: > >> + /* > >> + * When we get a SIGKILL our release function will > >> + * be called. That will stop new IOs from being queued > >> + * and check for outstanding cmd responses. It will then > >> + * call vhost_task_stop to tell us to return and exit. >
2020 Apr 04
0
[PATCH 6/6] kernel: set USER_DS in kthread_use_mm
...ead_unuse_mm(dev->mm); - set_fs(oldfs); return 0; } diff --git a/fs/io-wq.c b/fs/io-wq.c index 83c2868eff2a..75cc2f31816d 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -168,7 +168,6 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker) dropped_lock = true; } __set_current_state(TASK_RUNNING); - set_fs(KERNEL_DS); kthread_unuse_mm(worker->mm); mmput(worker->mm); worker->mm = NULL; @@ -420,14 +419,11 @@ static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work) mmput(worker->mm); worker->mm = NULL; } - if (!work->mm) {...
2020 Apr 16
0
[PATCH 3/3] kernel: set USER_DS in kthread_use_mm
...ead_unuse_mm(dev->mm); - set_fs(oldfs); return 0; } diff --git a/fs/io-wq.c b/fs/io-wq.c index 748621f7391e..a5e90ac39e4d 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -169,7 +169,6 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker) dropped_lock = true; } __set_current_state(TASK_RUNNING); - set_fs(KERNEL_DS); kthread_unuse_mm(worker->mm); mmput(worker->mm); worker->mm = NULL; @@ -421,14 +420,11 @@ static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work) mmput(worker->mm); worker->mm = NULL; } - if (!work->mm) {...
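After this series the pattern at the call sites reduces to the following (hypothetical worker; per the series title, kthread_use_mm() now takes care of USER_DS, so the set_fs(USER_DS)/set_fs(oldfs) pair around it goes away):

        #include <linux/kthread.h>
        #include <linux/mm_types.h>
        #include <linux/sched/mm.h>

        /* Hypothetical worker: service requests against a user process's
         * address space; adopting the mm is enough, no set_fs() needed. */
        static int mm_worker_sketch(void *data)
        {
                struct mm_struct *mm = data;    /* mm of the process being served */

                kthread_use_mm(mm);

                /* ... copy_from_user()/copy_to_user() against that mm here ... */

                kthread_unuse_mm(mm);
                return 0;
        }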
2010 Apr 19
0
[PATCH 08/12] Btrfs: Introduce global metadata reservation
...p; @@ -1488,11 +1484,9 @@ static int cleaner_kthread(void *arg) if (freezing(current)) { refrigerator(); } else { - smp_mb(); - if (root->fs_info->closing) - break; set_current_state(TASK_INTERRUPTIBLE); - schedule(); + if (!kthread_should_stop()) + schedule(); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); @@ -1504,36 +1498,39 @@ static int transaction_kthread(void *arg struct btrfs_root *root = arg; struct btrfs_trans_handle *trans; struct btrfs_transaction *cur; + u64 transid; unsigned long now; unsigned long delay; int ret; do...
2023 Jun 02
2
[PATCH 1/1] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
Hi Mike, sorry, but somehow I can't understand this patch... I'll try to read it with a fresh head over the weekend, but for example, On 06/01, Mike Christie wrote: > > static int vhost_task_fn(void *data) > { > struct vhost_task *vtsk = data; > - int ret; > + bool dead = false; > + > + for (;;) { > + bool did_work; > + > + /* mb paired w/