Displaying 20 results from an estimated 90 matches for "vhost_work_flush".
2020 Sep 22
0
[PATCH 7/8] vhost: remove work arg from vhost_work_flush
On 2020/9/22 2:23 PM, Mike Christie wrote:
> vhost_work_flush doesn't do anything with the work arg. This patch drops
> it and then renames vhost_work_flush to vhost_work_dev_flush to reflect
> that the function flushes all the works in the dev and not just a
> specific queue or work item.
>
> Signed-off-by: Mike Christie <michael.christ...
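As a quick illustration of the change described above (a sketch inferred from the patch description only, not from the patch body), the flush entry point loses its unused argument and picks up a name that matches its device-wide scope:

/* before: the work argument is accepted but never used */
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);

/* after: argument dropped, renamed to reflect that the whole device is flushed */
void vhost_work_dev_flush(struct vhost_dev *dev);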
2016 Aug 02
0
[vhost:vhost 6/17] ERROR: "vhost_work_flush" [drivers/vhost/vhost_scsi.ko] undefined!
...g-n0-08020724 (attached as .config)
compiler: gcc-4.8 (Debian 4.8.4-1) 4.8.4
reproduce:
git checkout 6190efb08c16dcd68c64b096a28f47ab33f017d7
# save the attached .config to linux build tree
make ARCH=x86_64
All errors (new ones prefixed by >>):
>> ERROR: "vhost_work_flush" [drivers/vhost/vhost_scsi.ko] undefined!
>> ERROR: "vhost_dev_cleanup" [drivers/vhost/vhost_scsi.ko] undefined!
>> ERROR: "vhost_log_access_ok" [drivers/vhost/vhost_scsi.ko] undefined!
>> ERROR: "vhost_enable_notify" [drivers/vhost/vhost_scsi.ko...
2013 Jul 07
2
[PATCH v2 03/11] vhost: Make vhost a separate module
...e(struct vhost_dev *dev, struct vhost_work *work,
> > > unsigned seq)
> > > @@ -123,7 +128,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
> > > return left <= 0;
> > > }
> > >
> > > -static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > +void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > {
> > > unsigned seq;
> > > int flushing;
> > > @@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev...
2019 Aug 06
1
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...t; >
> > > I start with synchronize_rcu() but both you and Michael raise some
> > > concern.
> > I've also idly wondered if calling synchronize_rcu() under the various
> > mm locks is a deadlock situation.
>
>
> Maybe, that's why I suggest to use vhost_work_flush(), which is much
> more lightweight and can achieve the same result. It can guarantee that all
> previous work has been processed after vhost_work_flush() returns.
If things are already running in a work, then yes, you can piggyback
on the existing spinlocks inside the workqueue and be OK.
However, if...
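A minimal sketch of the barrier idea discussed above: because the vhost worker thread runs queued items in FIFO order, queuing a no-op work item and flushing it guarantees that everything queued before it has already run. The helper names vhost_barrier and vhost_noop_fn are hypothetical; vhost_work_init(), vhost_work_queue() and vhost_work_flush() are the existing vhost primitives of that era.

static void vhost_noop_fn(struct vhost_work *work)
{
	/* no-op: only its completion matters */
}

/* hypothetical helper: wait for all work queued before this call to finish */
static void vhost_barrier(struct vhost_dev *dev)
{
	struct vhost_work barrier;

	vhost_work_init(&barrier, vhost_noop_fn);
	vhost_work_queue(dev, &barrier);
	vhost_work_flush(dev, &barrier);
}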
2013 Jul 07
0
[PATCH v2 03/11] vhost: Make vhost a separate module
...t vhost_work *work,
> > > > unsigned seq)
> > > > @@ -123,7 +128,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
> > > > return left <= 0;
> > > > }
> > > >
> > > > -static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > > +void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
> > > > {
> > > > unsigned seq;
> > > > int flushing;
> > > > @@ -138,6 +143,7 @@ static void vhost_work_fl...
2013 May 03
4
[PATCH 0/3] vhost cleanups and separate module
Asias He (3):
vhost: Remove vhost_enable_zcopy in vhost.h
vhost: Move VHOST_NET_FEATURES to net.c
vhost: Make vhost a separate module
drivers/vhost/Kconfig | 8 ++++++++
drivers/vhost/Makefile | 3 ++-
drivers/vhost/net.c | 6 ++++++
drivers/vhost/scsi.c | 1 -
drivers/vhost/vhost.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++-
drivers/vhost/vhost.h | 8 ++------
6
2010 Sep 05
0
[PATCH] vhost: fix attach to cgroups regression
...98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unloc...
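For context, a sketch of the flush mechanism the hunk above reworks, reconstructed from the variables visible in the excerpt (queue_seq, flushing, the device work_lock, and vhost_work_seq_done()); treat it as an approximation of the code of that era rather than a verbatim copy:

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int flushing;

	/* snapshot the sequence number of the last queueing of this work */
	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);

	/* sleep until the worker has completed at least that instance */
	wait_event(work->done, vhost_work_seq_done(dev, work, seq));

	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}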
2019 Aug 02
5
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
On Fri, Aug 02, 2019 at 05:40:07PM +0800, Jason Wang wrote:
> > This must be a proper barrier, like a spinlock, mutex, or
> > synchronize_rcu.
>
>
> I start with synchronize_rcu() but both you and Michael raise some
> concern.
I've also idly wondered if calling synchronize_rcu() under the various
mm locks is a deadlock situation.
> Then I try spinlock and mutex:
2013 Mar 22
4
[PATCH V2 0/3] tcm_vhost pending requests flush
Changes in v2:
- Increase/Decrease inflight requests in
vhost_scsi_{allocate,free}_cmd and tcm_vhost_{allocate,free}_evt
Asias He (3):
tcm_vhost: Wait for pending requests in vhost_scsi_flush()
tcm_vhost: Wait for pending requests in vhost_scsi_clear_endpoint()
tcm_vhost: Fix tv_cmd leak in vhost_scsi_handle_vq
drivers/vhost/tcm_vhost.c | 131
2019 Aug 05
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ex, or
>>> synchronize_rcu.
>>
>> I start with synchronize_rcu() but both you and Michael raise some
>> concern.
> I've also idly wondered if calling synchronize_rcu() under the various
> mm locks is a deadlock situation.
Maybe, that's why I suggest to use vhost_work_flush(), which is much more
lightweight and can achieve the same result. It can guarantee that all
previous work has been processed after vhost_work_flush() returns.
>
>> Then I try spinlock and mutex:
>>
>> 1) spinlock: add lots of overhead on datapath, this leads 0 performance
>> i...
2013 Apr 25
6
[PATCH v10 0/4] tcm_vhost hotplug
Asias He (4):
tcm_vhost: Refactor the lock nesting rule
tcm_vhost: Add hotplug/hotunplug support
tcm_vhost: Add ioctl to get and set events missed flag
tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG
drivers/vhost/tcm_vhost.c | 262 +++++++++++++++++++++++++++++++++++++++++++---
drivers/vhost/tcm_vhost.h | 13 +++
2 files changed, 259 insertions(+), 16 deletions(-)
--
1.8.1.4
2013 Mar 11
4
[PATCH] tcm_vhost: Wait for pending requests in vhost_scsi_flush()
...int index)
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
int i;
+ struct vhost_scsi_inflight *old_inflight;
+
+ old_inflight = ACCESS_ONCE(vs->vs_inflight);
+ if (!tcm_vhost_alloc_inflight(vs))
+ return;
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
vhost_scsi_flush_vq(vs, i);
vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+ vhost_work_flush(&vs->dev, &vs->vs_event_work);
+
+ /* Wait until all requests issued before the flush to be finished */
+ wait_event(old_inflight->wait, tcm_vhost_done_inflight(old_inflight));
+
+ kfree(old_inflight);
}
/*
@@...
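The struct and helper used by the diff above are not shown in this excerpt; below is a hedged sketch of the inflight bookkeeping it implies. The names vhost_scsi_inflight, tcm_vhost_done_inflight and the wait field come from the excerpt, while the atomic counter is an assumption about how the pending-request count might be tracked:

struct vhost_scsi_inflight {
	wait_queue_head_t wait;	/* woken when the last pending request completes */
	atomic_t count;		/* assumed: requests still outstanding in this window */
};

static bool tcm_vhost_done_inflight(struct vhost_scsi_inflight *inflight)
{
	/* true once every request issued before the flush has completed */
	return atomic_read(&inflight->count) == 0;
}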