Displaying 20 results from an estimated 75 matches for "vhost_poll_wakeup".
2018 Mar 27
4
[PATCH net V2] vhost: correctly remove wait queue during poll failure
...+--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1b3e8d2d..5d5a9d9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -212,8 +212,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & EPOLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}
--
2.7.4
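The fix above relies on vhost_poll_stop() both removing the wait queue entry and clearing poll->wqh, so a later stop or flush cannot touch a stale wait queue head. A minimal sketch of that helper, assuming the poll->wqh bookkeeping used elsewhere in drivers/vhost/vhost.c:

/* Sketch only: stop polling the file and forget the wait queue head
 * so the entry cannot be removed a second time. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}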
2020 Jun 02
1
[PATCH 1/6] vhost: allow device that does not depend on vhost worker
On Fri, May 29, 2020 at 04:02:58PM +0800, Jason Wang wrote:
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index d450e16c5c25..70105e045768 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -166,11 +166,16 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
> void *key)
> {
> struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
> + struct vhost_work *work = &poll->work;
>
> if (!(key_to_poll(key) & poll->mask))
> return 0;
>...
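The quoted hunk is cut off here; the idea of the series (spelled out in the 2020 May 28 reply further down) is to let vhost_poll_wakeup() run the work inline when the device opts out of the worker thread. A hedged sketch of the complete callback under that assumption, where use_worker is the flag proposed by the series rather than something guaranteed to exist in mainline:

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);		/* no worker thread: run the handler in place */
	else
		vhost_poll_queue(poll);	/* otherwise queue it for the worker */

	return 0;
}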
2019 Nov 07
1
[PATCH v5] vhost: introduce mdev based hardware backend
...hing, it looks to me there's no owner check in
>> vhost_vring_ioctl(), and vhost_poll_start() can make sure handle_kick
>> works?
> Yeah, there is no owner check in vhost_vring_ioctl().
> IIUC, vhost_poll_start() will start polling the file. And when an
> event arrives, vhost_poll_wakeup() will be called, and it will
> queue the work to work_list and wake up the worker to finish the work.
> And the worker is created by vhost_dev_set_owner().
>
Right, rethinking about this, it looks to me we need:
- Keep VHOST_SET_OWNER, this could be used for future control vq where
it needs a kthr...
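For the flow described in this reply, a rough sketch of the consumer side: the worker kthread created by vhost_dev_set_owner() repeatedly drains dev->work_list and runs each handler (e.g. handle_kick). This follows the llist-based variant from the 2016 lockless enqueuing entry below and is only a sketch, not the exact mainline code:

static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	for (;;) {
		/* Prepare to sleep before checking the list, the usual pattern */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);		/* e.g. handle_kick */
			cond_resched();
		}
	}
	return 0;
}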
2018 Mar 27
1
[PATCH net] vhost: correctly remove wait queue during poll failure
...ers/vhost/vhost.c b/drivers/vhost/vhost.c
>> index 1b3e8d2d..5d5a9d9 100644
>> --- a/drivers/vhost/vhost.c
>> +++ b/drivers/vhost/vhost.c
>> @@ -212,8 +212,7 @@ int vhost_poll_start(struct vhost_poll *poll,
>> struct file *file)
>> 	if (mask)
>> 		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
>> 	if (mask & EPOLLERR) {
>> -		if (poll->wqh)
>> -			remove_wait_queue(poll->wqh, &poll->wait);
>> +		vhost_poll_stop(poll);
>> 		ret = -EINVAL;
>> 	}
>>
>> ...
2010 Sep 05
0
[PATCH] vhost: fix attach to cgroups regression
.../vhost/vhost.c | 79 +++++++++++++++++++++++++++++++++++-------------
1 files changed, 57 insertions(+), 22 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117..7c75dce 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+ INIT_LIST_HEAD(&work->node);
+ work->fn = fn;
+ init_waitqueue_head(&work->done);
+ work->flushing = 0;
+ work->queue_seq = work->do...
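The hunk above is cut off mid-line. Judging from the fields it touches and from the "queued seq, done seq" wording in the 2016 flush-simplification entry below, the initializer presumably ends by zeroing both sequence counters; a sketch of the complete helper under that assumption:

static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	/* assumed: the queue/done sequence pair used by seq-based flushing */
	work->queue_seq = work->done_seq = 0;
}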
2013 Mar 07
3
[PATCH] vhost_net: remove tx polling state
...cdb8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -89,6 +89,9 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
unsigned long mask;
int ret = 0;
+ if (poll->wqh)
+ return 0;
+
mask = file->f_op->poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
--
1.7.1
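For context on why the early "if (poll->wqh) return 0;" is enough here: poll->wqh is only set by the poll-table callback when the wait entry is first added, so a second vhost_poll_start() on an already-started poll would otherwise register the same wait entry twice. A sketch of what that callback does (roughly; the real body lives in drivers/vhost/vhost.c):

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	/* Remember the wait queue head and hook up our wakeup callback */
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}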
2016 Apr 26
2
[PATCH 1/2] vhost: simplify work flushing
We used to implement work flushing by tracking queued seq, done seq,
and the number of flushes in progress. This patch simplifies this by
implementing work flushing through another kind of vhost work with a
completion. This will be used by the lockless enqueuing patch.
Signed-off-by: Jason Wang <jasowang at redhat.com>
---
drivers/vhost/vhost.c | 53
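The diffstat is truncated above, but the commit message describes the approach: instead of comparing queued/done sequence counters, a flush queues one extra work item whose handler fires a completion, and the flusher simply waits for it. A hedged sketch of that scheme:

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

/* Flush pending work: once our marker work runs, everything queued
 * before it has already been executed by the worker. */
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);
		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}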
2018 Mar 27
0
[PATCH net] vhost: correctly remove wait queue during poll failure
...+--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1b3e8d2d..5d5a9d9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -212,8 +212,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & EPOLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}
--
2.7.4
2018 Mar 29
0
[PATCH net V2] vhost: correctly remove wait queue during poll failure
...deletions(-)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 1b3e8d2d..5d5a9d9 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -212,8 +212,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
> if (mask)
> vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
> if (mask & EPOLLERR) {
> - if (poll->wqh)
> - remove_wait_queue(poll->wqh, &poll->wait);
> + vhost_poll_stop(poll);
> ret = -EINVAL;
> }
>
> --
> 2.7.4
2013 Apr 11
1
[PATCH] vhost_net: remove tx polling state
...cdb8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -89,6 +89,9 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
unsigned long mask;
int ret = 0;
+ if (poll->wqh)
+ return 0;
+
mask = file->f_op->poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
--
1.7.1
2016 Apr 26
0
[PATCH 2/2] vhost: lockless enqueuing
...++++++++--------------------------
drivers/vhost/vhost.h | 7 ++++---
2 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 73dd16d..0061a7b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -168,7 +168,7 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- INIT_LIST_HEAD(&work->node);
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
init_waitqueue_head(&work->done);
}
@@ -246,15 +246,16 @@ EXP...
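The hunk above replaces the list-head initialization with clearing a VHOST_WORK_QUEUED bit; the enqueue side this pairs with (per the commit title) presumably becomes a lock-free llist push guarded by that bit, along these lines:

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* The bit guarantees the work sits on the list at most once;
		 * the worker clears it again before running work->fn. */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}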
2010 Jul 29
1
[PATCH] vhost: locking/rcu cleanup
...* since jobs can re-queue themselves. */
vhost_net_flush(n);
+ vhost_dev_free(&n->dev);
kfree(n);
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d..daa95c8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,27 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
+/* Must be called for each vq before vhost_dev_init. */
+void vhost_work_set_fn(struct vhost_work *work, vhost_work_fn_t fn)
+{
+ work->fn = fn;
+}
+
+static void vhost_work_init(struct vhost_work *work)
+{
+ atomic_set(&work->...
2020 May 28
0
[PATCH] vdpa: bypass waking up vhost_woker for vdpa vq kick
...+
> dev->iotlb = vhost_iotlb_alloc(0, 0);
> if (!dev->iotlb) {
> r = -ENOMEM;
So my feeling here is that you want to reuse the infrastructure in
vhost.c as much as possible.
If this is true, let's just avoid duplicating the code. How about
adding something like this in vhost_poll_wakeup():
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);
	else
		vhost_poll...
2018 Sep 09
0
[PATCH net-next v8 5/7] net: vhost: introduce bitmap for vhost_poll
...t; > void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
> > - __poll_t mask, struct vhost_dev *dev)
> > + __u8 poll_id, __poll_t mask, struct vhost_dev *dev)
> > {
> > init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
> > init_poll_funcptr(&poll->table, vhost_poll_func);
> > @@ -194,6 +194,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
> > poll->dev = dev;
> > poll->wqh = NULL;
> >
> > + poll->poll_id = poll_id;
> > ...
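Pieced together from the quoted hunks, the proposed vhost_poll_init() would look roughly as follows; the poll->mask assignment and the trailing vhost_work_init() call are assumed from the surrounding function rather than shown in the quote, and poll_id itself is specific to this net-next v8 proposal:

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __u8 poll_id, __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;
	poll->poll_id = poll_id;	/* new: identifies this poll in the bitmap */

	vhost_work_init(&poll->work, fn);
}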