Displaying 20 results from an estimated 64 matches for "set_current_st".
2005 Aug 15
3
[-mm PATCH 2/32] fs: fix-up schedule_timeout() usage
Description: Use schedule_timeout_{,un}interruptible() instead of
set_current_state()/schedule_timeout() to reduce kernel size. Also use
helper functions to convert between human time units and jiffies rather
than constant HZ division to avoid rounding errors.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
---
fs/cifs/cifsfs.c | 7 ++-----
fs/cifs...
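The conversion the description refers to looks roughly like this (illustrative timeout value, not the actual cifs hunk):

    /* Before: two steps, plus raw HZ arithmetic that rounds badly
     * whenever HZ doesn't divide evenly */
    set_current_state(TASK_INTERRUPTIBLE);
    schedule_timeout(5 * HZ / 10);

    /* After: one helper sets the task state and sleeps, and
     * msecs_to_jiffies() does the unit conversion */
    schedule_timeout_interruptible(msecs_to_jiffies(500));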
2004 Sep 16
2
Current bristuff error report
Hello,
I just noticed an error in the current version of Klaus-Peter Junghanns'
bristuff package, specifically in the HFC module.
Every time I try to unload the HFC module with "modprobe -r", I get a
kernel panic and the whole server hangs, so I need to do a hard
reset.
Regards,
Julian Pawlowski
2009 Sep 03
2
[PATCH] Don't exit from cleaner_kthread and transaction_kthread until kthread_should_stop is true
...mutex_lock(&root->fs_info->cleaner_mutex);
@@ -1447,7 +1447,7 @@
} else {
smp_mb();
if (root->fs_info->closing)
- break;
+ continue;
set_current_state(TASK_INTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -1468,7 +1468,7 @@
do {
smp_mb();
if (root->fs_info->closing)
- break;
+ continue;...
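The point of turning break into continue: a kthread's function must not return until kthread_should_stop() is true, otherwise kthread_stop() ends up waiting on, or racing with, a thread that has already exited. Condensed from the hunks above, the fixed loop shape is:

    do {
            /* ... periodic btrfs work ... */
            smp_mb();
            if (root->fs_info->closing)
                    continue;   /* was break: spin back to the
                                 * while() check instead of exiting */
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();
            __set_current_state(TASK_RUNNING);
    } while (!kthread_should_stop());

(A later entry below tightens this further by checking kthread_should_stop() after setting the task state.)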
2023 Jun 01
4
[PATCH 1/1] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
...q_reset(struct vhost_dev *dev,
__vhost_vq_meta_reset(vq);
}
-static int vhost_worker(void *data)
+static bool vhost_worker(void *data)
{
struct vhost_worker *worker = data;
struct vhost_work *work, *work_next;
struct llist_node *node;
- for (;;) {
- /* mb paired w/ kthread_stop */
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (vhost_task_should_stop(worker->vtsk)) {
- __set_current_state(TASK_RUNNING);
- break;
- }
-
- node = llist_del_all(&worker->work_list);
- if (!node)
- schedule();
-
+ node = llist_del_all(&worker->work_list);
+ if (node) {
node = llist_r...
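The restructuring turns vhost_worker() from a self-contained loop into a step function driven by the vhost_task core. A condensed sketch of the new shape (body abbreviated; the list handling follows the usual llist pattern seen in the other vhost entries):

    /* One call = one attempt to drain the work list. Returning
     * false tells the caller nothing was queued, so it may sleep;
     * true means work was done and it should call again. */
    static bool vhost_worker(void *data)
    {
            struct vhost_worker *worker = data;
            struct llist_node *node;

            node = llist_del_all(&worker->work_list);
            if (!node)
                    return false;

            node = llist_reverse_order(node);
            /* ... run each queued vhost_work ... */
            return true;
    }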
2016 Apr 26
2
[PATCH 1/2] vhost: simplify work flushing
...v->worker);
} else {
@@ -310,7 +306,6 @@ static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
struct vhost_work *work = NULL;
- unsigned uninitialized_var(seq);
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
@@ -321,11 +316,6 @@ static int vhost_worker(void *data)
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(&dev->work_lock);
- if (work) {
- work->done_seq = seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
if (kthread_should_stop()) {
spin_unlock_irq(&dev->work_lock);
@@ -336,7 +326,6 @@ static int vhost_w...
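What replaces the deleted done_seq bookkeeping is a sentinel-based flush: queue a work item whose only job is to fire a completion, then wait for it. A sketch of that mechanism (names as I read the patch; treat them as illustrative):

    struct vhost_flush_struct {
            struct vhost_work work;
            struct completion wait_event;
    };

    static void vhost_flush_work(struct vhost_work *work)
    {
            struct vhost_flush_struct *s =
                    container_of(work, struct vhost_flush_struct, work);

            complete(&s->wait_event);
    }

    /* Flushing queues the sentinel and blocks on wait_event; since
     * the worker runs items in order, completion implies every
     * earlier work item has finished. */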
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...f);
> + if (ref & 0x1) {
Please document the even/odd trick here too, not just in the commit log.
> + /* When ref change,
changes
> we are sure no reader can see
> + * previous map */
> + while (READ_ONCE(vq->ref) == ref) {
what is the below line in aid of?
> + set_current_state(TASK_RUNNING);
> + schedule();
if (need_resched())
schedule();
?
> + }
On an interruptible kernel, the risk here is that
a task gets preempted with an odd ref.
So I suspect we'll have to disable preemption when we...
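For reference, the even/odd scheme under discussion is a hand-rolled seqcount: the writer makes the counter odd before touching the map and even again afterwards; a reader that sees an odd value waits the update out. A sketch with the reviewer's need_resched() suggestion folded in (barriers omitted; placing them correctly is exactly what the rest of this thread argues about):

    /* writer */
    WRITE_ONCE(vq->ref, vq->ref + 1);       /* even -> odd: updating */
    /* ... swap in the new map ... */
    WRITE_ONCE(vq->ref, vq->ref + 1);       /* odd -> even: stable */

    /* reader */
    ref = READ_ONCE(vq->ref);
    if (ref & 0x1) {
            while (READ_ONCE(vq->ref) == ref) {
                    if (need_resched())
                            schedule();
                    cpu_relax();
            }
    }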
2019 Aug 03
1
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...- if (ref & 0x1) {
> - /* When ref change, we are sure no reader can see
> + ret = raw_read_seqcount(&vq->seq);
> + if (ret & 0x1) {
> + /* When seq changes, we are sure no reader can see
> * previous map */
> - while (READ_ONCE(vq->ref) == ref) {
> - set_current_state(TASK_RUNNING);
> + while (raw_read_seqcount(&vq->seq) == ret)
> schedule();
So why do we set state here? And shouldn't we
check need_resched()?
> - }
> }
> - /* Make sure ref counter was checked before any other
> - * operations that were done on map. */
> +...
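For contrast, the stock seqcount read side that raw_read_seqcount() stands in for looks like this (generic sketch, not the vhost code):

    unsigned seq;

    do {
            seq = read_seqcount_begin(&vq->seq);    /* spins past odd values */
            /* ... read the map ... */
    } while (read_seqcount_retry(&vq->seq, seq));   /* redo if it changed */

The patch presumably open-codes the wait instead because the worker needs to block until the update finishes rather than retry a read.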
2016 Apr 26
0
[PATCH 2/2] vhost: lockless enqueuing
...er(void *data)
{
struct vhost_dev *dev = data;
- struct vhost_work *work = NULL;
+ struct vhost_work *work, *work_next;
+ struct llist_node *node;
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
@@ -315,29 +317,25 @@ static int vhost_worker(void *data)
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&dev->work_lock);
-
if (kthread_should_stop()) {
- spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
- if (!list_empty(&dev->work_list)) {
- work = list_first_entry(&dev->work_list,...
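The structure replacing dev->work_list + work_lock here is the standard llist pattern: producers push with a single cmpxchg, and the worker grabs the entire list at once. Because llist builds LIFO, the consumer reverses it to recover FIFO order. A generic sketch (the real patch also tracks a per-work queued flag):

    /* producer, any context */
    llist_add(&work->node, &dev->work_list);
    wake_up_process(dev->worker);

    /* consumer, worker thread */
    node = llist_del_all(&dev->work_list);          /* lock-free grab */
    node = llist_reverse_order(node);               /* restore FIFO */
    llist_for_each_entry_safe(work, work_next, node, node)
            work->fn(work);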
2010 Jul 29
1
[PATCH] vhost: locking/rcu cleanup
...(struct vhost_dev *dev,
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
- struct vhost_work *work = NULL;
- unsigned uninitialized_var(seq);
+ struct vhost_work *uninitialized_var(work);
+ unsigned n, i, vq = 0;
+ int seq;
- for (;;) {
- /* mb paired w/ kthread_stop */
- set_current_state(TASK_INTERRUPTIBLE);
+ n = dev->nvqs;
+repeat:
+ set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
- spin_lock_irq(&dev->work_lock);
- if (work) {
- work->done_seq = seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
+ if (kthread_s...
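On the recurring "mb paired w/ kthread_stop" comment: set_current_state() implies a full memory barrier, and that is what makes the sleep/stop handshake safe. An annotated sketch of the two sides:

    /* worker                            kthread_stop() caller
     *
     * set_current_state(INTERRUPTIBLE)    set should_stop flag
     *   [implied full barrier]            wake_up_process(worker)
     * if (kthread_should_stop())
     *         break;
     * schedule();
     *
     * Either the worker reads the flag as set and breaks out, or the
     * wakeup finds it already in TASK_INTERRUPTIBLE and flips it back
     * to TASK_RUNNING, so schedule() returns immediately. Without the
     * barrier, the flag load could be reordered before the state store
     * and the wakeup lost. */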
2012 Nov 19
1
[PATCH] vhost-blk: Add vhost-blk support v5
...thread(void *data)
+{
+ mm_segment_t oldfs = get_fs();
+ struct vhost_blk *blk = data;
+ struct vhost_virtqueue *vq;
+ struct llist_node *llnode;
+ struct vhost_blk_req *req;
+ bool added;
+ u8 status;
+ int ret;
+
+ vq = &blk->vq;
+ set_fs(USER_DS);
+ use_mm(blk->dev.mm);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ llnode = llist_del_all(&blk->llhead);
+ if (!llnode) {
+ schedule();
+ if (unlikely(kthread_should_stop()))
+ break;
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+
+ if (need_resched())
+ schedule();
+
+ added = false;
+ while (llnode) {
+...
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ing ref
with smp_store_release then this should be smp_load_acquire() without
an explicit mb.
> + ref = READ_ONCE(vq->ref);
> + if (ref & 0x1) {
> + /* When ref change, we are sure no reader can see
> + * previous map */
> + while (READ_ONCE(vq->ref) == ref) {
> + set_current_state(TASK_RUNNING);
> + schedule();
> + }
> + }
This is basically read_seqcount_begin() with a schedule instead of
cpu_relax.
> + /* Make sure ref counter was checked before any other
> + * operations that were done on map. */
> + smp_mb();
should be in a smp_load_acquire...
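The rework the reviewer is asking for pairs a release store on the writer with an acquire load on the reader, which subsumes the explicit smp_mb(). Sketch:

    /* writer: everything written before the release store (i.e. the
     * map update) is visible to any reader that sees the new value */
    smp_store_release(&vq->ref, ref + 1);

    /* reader: the acquire load orders this check before all the map
     * accesses that follow it; no separate smp_mb() needed */
    ref = smp_load_acquire(&vq->ref);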
2019 Aug 01
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...;
-
- ref = READ_ONCE(vq->ref);
- if (ref & 0x1) {
- /* When ref change, we are sure no reader can see
+ ret = raw_read_seqcount(&vq->seq);
+ if (ret & 0x1) {
+ /* When seq changes, we are sure no reader can see
* previous map */
- while (READ_ONCE(vq->ref) == ref) {
- set_current_state(TASK_RUNNING);
+ while (raw_read_seqcount(&vq->seq) == ret)
schedule();
- }
}
- /* Make sure ref counter was checked before any other
- * operations that were done on map. */
+ /* Make sure seq was checked before any other operations that
+ * were done on map. */
smp_mb();
}...
2010 Apr 19
0
[PATCH 08/12] Btrfs: Introduce global metadata reservation
...t;fs_info->sb, SB_FREEZE_WRITE);
if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
@@ -1488,11 +1484,9 @@ static int cleaner_kthread(void *arg)
if (freezing(current)) {
refrigerator();
} else {
- smp_mb();
- if (root->fs_info->closing)
- break;
set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ if (!kthread_should_stop())
+ schedule();
__set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
@@ -1504,36 +1498,39 @@ static int transaction_kthread(void *arg
struct btrfs_root *root = arg;
struct btrfs_trans_handle *tra...
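The hunk above is the canonical fix for a stop/sleep race: set the task state before testing kthread_should_stop(), so that a kthread_stop() landing between the test and schedule() still gets through (its wake_up_process() puts the task back to TASK_RUNNING, and schedule() returns at once). The resulting shape:

    set_current_state(TASK_INTERRUPTIBLE);
    if (!kthread_should_stop())
            schedule();     /* returns immediately if a stop (or any
                             * other wakeup) raced in after the check */
    __set_current_state(TASK_RUNNING);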
2023 Jun 02
2
[PATCH 1/1] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
Hi Mike,
sorry, but somehow I can't understand this patch...
I'll try to read it with a fresh head over the weekend, but for example,
On 06/01, Mike Christie wrote:
>
> static int vhost_task_fn(void *data)
> {
> struct vhost_task *vtsk = data;
> - int ret;
> + bool dead = false;
> +
> + for (;;) {
> + bool did_work;
> +
> + /* mb paired w/
2016 Apr 26
2
[PATCH 2/2] vhost: lockless enqueuing
...ata;
> - struct vhost_work *work = NULL;
> + struct vhost_work *work, *work_next;
> + struct llist_node *node;
> mm_segment_t oldfs = get_fs();
>
> set_fs(USER_DS);
> @@ -315,29 +317,25 @@ static int vhost_worker(void *data)
> /* mb paired w/ kthread_stop */
> set_current_state(TASK_INTERRUPTIBLE);
>
> - spin_lock_irq(&dev->work_lock);
> -
> if (kthread_should_stop()) {
> - spin_unlock_irq(&dev->work_lock);
> __set_current_state(TASK_RUNNING);
> break;
> }
> - if (!list_empty(&dev->work_list)) {
> -...