search for: vhost_dev_lock_vq

Displaying 20 results from an estimated 50 matches for "vhost_dev_lock_vq".

2018 Jan 23
5
[PATCH net 1/2] vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
We used to call mutex_lock() in vhost_dev_lock_vqs(), which tries to hold the mutexes of all virtqueues. This may confuse lockdep into reporting a possible deadlock, because it sees an attempt to hold several locks belonging to the same class. Switch to mutex_lock_nested() to avoid the false positive. Fixes: 6b1e6cc7855b0 ("vhost: new device IOTLB API") Reported-by: syzb...
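For context, the hunks quoted further down this page show what the patched helpers look like: each virtqueue mutex is taken with a distinct lockdep subclass (the loop index), so lockdep no longer treats the sequence as repeated acquisitions of a single lock class. A minimal sketch reconstructed from those hunks (drivers/vhost/vhost.c):

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;

	/* Use the vq index as the lockdep subclass, so holding all vq
	 * mutexes at once is not reported as a self-deadlock. */
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}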
2018 Jan 24
1
[PATCH net 1/2] vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
On Wed, Jan 24, 2018 at 04:38:30PM -0500, David Miller wrote: > From: Jason Wang <jasowang at redhat.com> > Date: Tue, 23 Jan 2018 17:27:25 +0800 > > > We used to call mutex_lock() in vhost_dev_lock_vqs() which tries to > > hold mutexes of all virtqueues. This may confuse lockdep to report a > > possible deadlock because of trying to hold locks belong to same > > class. Switch to use mutex_lock_nested() to avoid false positive. > > > > Fixes: 6b1e6cc7855b0 ("vh...
2018 Jan 24
0
[PATCH net 1/2] vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
From: Jason Wang <jasowang at redhat.com> Date: Tue, 23 Jan 2018 17:27:25 +0800 > We used to call mutex_lock() in vhost_dev_lock_vqs() which tries to > hold mutexes of all virtqueues. This may confuse lockdep to report a > possible deadlock because of trying to hold locks belong to same > class. Switch to use mutex_lock_nested() to avoid false positive. > > Fixes: 6b1e6cc7855b0 ("vhost: new device IOTLB API...
2018 Jul 22
2
[PATCH net-next v6 1/4] net: vhost: lock the vqs one by one
...); > + } > } > > static void vhost_vq_reset(struct vhost_dev *dev, > @@ -890,20 +893,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, > #define vhost_get_used(vq, x, ptr) \ > vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) > > -static void vhost_dev_lock_vqs(struct vhost_dev *d) > -{ > - int i = 0; > - for (i = 0; i < d->nvqs; ++i) > - mutex_lock_nested(&d->vqs[i]->mutex, i); > -} > - > -static void vhost_dev_unlock_vqs(struct vhost_dev *d) > -{ > - int i = 0; > - for (i = 0; i < d->nvqs; ++i) >...
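The replacement pattern in this series is visible in the hunk above: vhost_dev_lock_vqs()/vhost_dev_unlock_vqs() are removed, and each vq mutex is instead taken only around the per-vq work. A sketch of the new shape, reconstructed from the quoted context; the snippet truncates the function that precedes vhost_vq_reset(), which is assumed here to be vhost_vq_meta_reset():

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		/* Take only this vq's mutex, and only for the duration
		 * of its own reset, rather than locking all vqs at once. */
		mutex_lock(&d->vqs[i]->mutex);
		__vhost_vq_meta_reset(d->vqs[i]);
		mutex_unlock(&d->vqs[i]->mutex);
	}
}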
2018 Nov 29
2
[REBASE PATCH net-next v9 1/4] net: vhost: lock the vqs one by one
...); > + } > } > > static void vhost_vq_reset(struct vhost_dev *dev, > @@ -891,20 +894,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, > #define vhost_get_used(vq, x, ptr) \ > vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) > > -static void vhost_dev_lock_vqs(struct vhost_dev *d) > -{ > - int i = 0; > - for (i = 0; i < d->nvqs; ++i) > - mutex_lock_nested(&d->vqs[i]->mutex, i); > -} > - > -static void vhost_dev_unlock_vqs(struct vhost_dev *d) > -{ > - int i = 0; > - for (i = 0; i < d->nvqs; ++i) >...
2018 Jun 30
0
[PATCH net-next v3 1/4] net: vhost: lock the vqs one by one
...; + mutex_unlock(&d->vqs[i]->mutex); + } } static void vhost_vq_reset(struct vhost_dev *dev, @@ -887,20 +890,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, #define vhost_get_used(vq, x, ptr) \ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) -static void vhost_dev_lock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_lock_nested(&d->vqs[i]->mutex, i); -} - -static void vhost_dev_unlock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_unlock(&d->vqs[i]->mutex); -} - sta...
2018 Jul 21
0
[PATCH net-next v6 1/4] net: vhost: lock the vqs one by one
...; + mutex_unlock(&d->vqs[i]->mutex); + } } static void vhost_vq_reset(struct vhost_dev *dev, @@ -890,20 +893,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, #define vhost_get_used(vq, x, ptr) \ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) -static void vhost_dev_lock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_lock_nested(&d->vqs[i]->mutex, i); -} - -static void vhost_dev_unlock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_unlock(&d->vqs[i]->mutex); -} - sta...
2018 Sep 25
0
[REBASE PATCH net-next v9 1/4] net: vhost: lock the vqs one by one
...; + mutex_unlock(&d->vqs[i]->mutex); + } } static void vhost_vq_reset(struct vhost_dev *dev, @@ -891,20 +894,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, #define vhost_get_used(vq, x, ptr) \ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) -static void vhost_dev_lock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_lock_nested(&d->vqs[i]->mutex, i); -} - -static void vhost_dev_unlock_vqs(struct vhost_dev *d) -{ - int i = 0; - for (i = 0; i < d->nvqs; ++i) - mutex_unlock(&d->vqs[i]->mutex); -} - sta...
2018 Dec 12
0
[PATCH net V2 3/4] Revert "net: vhost: lock the vqs one by one"
...; - mutex_unlock(&d->vqs[i]->mutex); - } } static void vhost_vq_reset(struct vhost_dev *dev, @@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, #define vhost_get_used(vq, x, ptr) \ vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) +static void vhost_dev_lock_vqs(struct vhost_dev *d) +{ + int i = 0; + for (i = 0; i < d->nvqs; ++i) + mutex_lock_nested(&d->vqs[i]->mutex, i); +} + +static void vhost_dev_unlock_vqs(struct vhost_dev *d) +{ + int i = 0; + for (i = 0; i < d->nvqs; ++i) + mutex_unlock(&d->vqs[i]->mutex); +} + sta...
2018 Jul 25
0
[PATCH net-next v6 1/4] net: vhost: lock the vqs one by one
...> static void vhost_vq_reset(struct vhost_dev *dev, > > @@ -890,20 +893,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, > > #define vhost_get_used(vq, x, ptr) \ > > vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) > > > > -static void vhost_dev_lock_vqs(struct vhost_dev *d) > > -{ > > - int i = 0; > > - for (i = 0; i < d->nvqs; ++i) > > - mutex_lock_nested(&d->vqs[i]->mutex, i); > > -} > > - > > -static void vhost_dev_unlock_vqs(struct vhost_dev *d) > > -{ > &g...
2018 Dec 12
1
[PATCH net V2 3/4] Revert "net: vhost: lock the vqs one by one"
...); > - } > } > > static void vhost_vq_reset(struct vhost_dev *dev, > @@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, > #define vhost_get_used(vq, x, ptr) \ > vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) > > +static void vhost_dev_lock_vqs(struct vhost_dev *d) > +{ > + int i = 0; > + for (i = 0; i < d->nvqs; ++i) > + mutex_lock_nested(&d->vqs[i]->mutex, i); > +} > + > +static void vhost_dev_unlock_vqs(struct vhost_dev *d) > +{ > + int i = 0; > + for (i = 0; i < d->nvqs; ++i) >...
2018 Sep 25
6
[REBASE PATCH net-next v9 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com> These patches improve the guest receive performance. On the handle_tx side, we poll the sock receive queue at the same time; handle_rx does the same. For a detailed performance report, see patch 4. Tonghao Zhang (4): net: vhost: lock the vqs one by one net: vhost: replace magic number of lock annotation net: vhost: factor out busy
2018 May 22
3
[PATCH net] vhost: synchronize IOTLB message with dev cleanup
...ed, 3 insertions(+) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index f3bd8e9..f0be5f3 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, { int ret = 0; + mutex_lock(&dev->mutex); vhost_dev_lock_vqs(dev); switch (msg->type) { case VHOST_IOTLB_UPDATE: @@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, } vhost_dev_unlock_vqs(dev); + mutex_unlock(&dev->mutex); + return ret; } ssize_t vhost_chr_write_iter(struct vhost_dev *dev, -- 2.7.4
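The locking order this patch establishes can be read directly from the hunk: dev->mutex is taken before the per-vq mutexes and released after them, so an IOTLB message can no longer race with device cleanup. A condensed sketch of the resulting function, with the message handling elided as in the snippet:

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	/* Serialize against vhost_dev_cleanup() first ... */
	mutex_lock(&dev->mutex);
	/* ... then take the vq mutexes, as before this patch. */
	vhost_dev_lock_vqs(dev);

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		/* update the device IOTLB (elided) */
		break;
	/* other message types handled here (elided) */
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);
	return ret;
}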
2018 Jul 21
7
[PATCH net-next v6 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com> These patches improve the guest receive performance. On the handle_tx side, we poll the sock receive queue at the same time; handle_rx does the same. For a detailed performance report, see patch 4. v5->v6: rebase the code. Tonghao Zhang (4): net: vhost: lock the vqs one by one net: vhost: replace magic number of lock annotation
2018 Dec 13
5
[PATCH net V3 0/3] Fix various issue of vhost
Hi: This series tries to fix various issues of vhost: - Patch 1 adds a missing write barrier between used idx updating and logging. - Patches 2-3 bring back the protection of the device IOTLB through the vq mutex; this fixes a possible use after free in device IOTLB entries. Please consider them for -stable. Thanks Changes from V2: - drop dirty page fix and make it for net-next Changes from V1: -
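Patch 1's barrier is about ordering: the store that publishes the new used index must be ordered before the write that marks the corresponding dirty-log bit, otherwise a migration reading the log can miss the update. A rough sketch of the pattern described here; the helper names below are placeholders, not taken from the actual patch:

	/* Publish the new used index to the guest. */
	if (put_user(vq->last_used_idx, &vq->used->idx))
		return -EFAULT;

	/* Order the used-index store before the dirty-log write below,
	 * so the log never shows the page clean after the idx update. */
	smp_wmb();

	if (unlikely(vq->log_used))
		log_dirty_page(vq);	/* placeholder for the logging call */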
2018 Jun 30
9
[PATCH net-next v3 0/4] net: vhost: improve performance when enable busyloop
From: Tonghao Zhang <xiangxia.m.yue at gmail.com> These patches improve the guest receive and transmit performance. On the handle_tx side, we poll the sock receive queue at the same time; handle_rx does the same. These patches are split from the previous big patch: http://patchwork.ozlabs.org/patch/934673/ For a detailed performance report, see patch 4. Tonghao Zhang (4): net: vhost:
2018 Dec 12
10
[PATCH net V2 0/4] Fix various issue of vhost
Hi: This series tries to fix various issues of vhost: - Patch 1 adds a missing write barrier between used idx updating and logging. - Patches 2-3 bring back the protection of the device IOTLB through the vq mutex; this fixes a possible use after free in device IOTLB entries. - Patches 4-7 fix the dirty page logging when the device IOTLB is enabled. It should be done through GPA instead of GIOVA; this was