Displaying 20 results from an estimated 98 matches for "vhost_iotlb_miss".
2018 Nov 30
3
[PATCH] vhost: fix IOTLB locking
Commit 78139c94dc8c ("net: vhost: lock the vqs one by one") moved the vq
lock to improve scalability, but introduced a possible deadlock in
vhost-iotlb. vhost_iotlb_notify_vq() now takes vq->mutex while holding
the device's IOTLB spinlock. And on the vhost_iotlb_miss() path, the
spinlock is taken while holding vq->mutex.
As long as we hold dev->mutex to prevent an ioctl from modifying
vq->poll concurrently, we can safely call vhost_poll_queue() without
holding vq->mutex. Since vhost_process_iotlb_msg() holds dev->mutex when
calling vhost_iotlb_n...
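For context, a sketch of the notify path this fix implies, mirroring the hunk quoted further down from "[PATCH 3/3] vhost: device IOTLB API": scan the pending-miss list under the IOTLB spinlock alone, and kick the waiting vq's poll without taking vq->mutex. A reconstruction under those assumptions, not the literal patch:

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                                  struct vhost_iotlb_msg *msg)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&d->iotlb_lock);

        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;

                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        /* Safe without vq->mutex: the caller holds
                         * dev->mutex, which keeps vq->poll stable
                         * against concurrent ioctls. */
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
                        kfree(node);
                }
        }

        spin_unlock(&d->iotlb_lock);
}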
2015 Dec 31
4
[PATCH RFC] vhost: basic device IOTLB support
...eak;
+       default:
+               r = -EINVAL;
+       }
+       spin_unlock(&d->iotlb_lock);
+
+       if (!r && entry.flags.type != VHOST_IOTLB_INVALIDATE) {
+               mutex_lock(&d->iotlb_req_mutex);
+               if (entry.iova == d->pending_request.iova &&
+                   d->pending_request.flags.type ==
+                   VHOST_IOTLB_MISS) {
+                       d->pending_request = entry;
+                       complete(&d->iotlb_completion);
+               }
+               mutex_unlock(&d->iotlb_req_mutex);
+       }
+
+       break;
        default:
                r = -ENOIOCTLCMD;
                break;
@@ -1177,9 +1268,104 @@ int vhost_init_used(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_init_...
2018 Aug 03
4
[PATCH net-next] vhost: switch to use new message format
...case VHOST_IOTLB_MSG:
+               size = sizeof(node->msg);
+               msg = &node->msg.iotlb;
+               break;
+       case VHOST_IOTLB_MSG_V2:
+               size = sizeof(node->msg_v2);
+               msg = &node->msg_v2.iotlb;
+               break;
+       default:
+               BUG();
+               break;
+       }
-       if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+       ret = copy_to_iter(start, size, to);
+       if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                kfree(node);
                return ret;
        }
-
        vhost_enqueue_msg(dev, &dev->pending_list, node);
}
@@ -1126,12 +1154,19 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, in...
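For context, the consumer side this patch serves might look like the sketch below: userspace read()s the device fd and receives either a vhost_msg or a vhost_msg_v2, depending on whether VHOST_BACKEND_F_IOTLB_MSG_V2 was negotiated. The sketch assumes the v2 format and a non-blocking fd; handle_miss is a hypothetical callback:

#include <errno.h>
#include <unistd.h>
#include <linux/vhost.h>

static int drain_iotlb_misses(int vhost_fd,
                              void (*handle_miss)(const struct vhost_iotlb_msg *))
{
        struct vhost_msg_v2 msg;
        ssize_t n;

        /* Each read() yields one pending message; with O_NONBLOCK it
         * stops with EAGAIN once the queue is empty. */
        while ((n = read(vhost_fd, &msg, sizeof(msg))) == (ssize_t)sizeof(msg)) {
                if (msg.type != VHOST_IOTLB_MSG_V2)
                        return -1;              /* unexpected wire format */
                if (msg.iotlb.type == VHOST_IOTLB_MISS)
                        handle_miss(&msg.iotlb);
        }
        return (n < 0 && errno != EAGAIN) ? -1 : 0;
}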
2018 Nov 29
2
[REBASE PATCH net-next v9 1/4] net: vhost: lock the vqs one by one
...ze, u64 end,
>                          u64 userspace_addr, int perm)
> @@ -954,7 +943,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
>               if (msg->iova <= vq_msg->iova &&
>                   msg->iova + msg->size - 1 >= vq_msg->iova &&
>                   vq_msg->type == VHOST_IOTLB_MISS) {
> +                     mutex_lock(&node->vq->mutex);
This seems to introduce a deadlock (and sleep-in-atomic): the vq->mutex
is taken while the IOTLB spinlock is held (taken earlier in
vhost_iotlb_notify_vq()). On the vhost_iotlb_miss() path, the IOTLB
spinlock is taken while the vq->mutex is...
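For readers reproducing this: a self-contained userspace illustration of the reported ABBA inversion, with two pthread mutexes standing in for the IOTLB spinlock and vq->mutex (the sleep-in-atomic half has no userspace analogue). Running it usually deadlocks immediately. Illustration only, not vhost code:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t iotlb_lock = PTHREAD_MUTEX_INITIALIZER;  /* the "spinlock" */
static pthread_mutex_t vq_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *notify_path(void *arg)     /* vhost_iotlb_notify_vq() order */
{
        (void)arg;
        pthread_mutex_lock(&iotlb_lock);        /* A */
        usleep(1000);                           /* widen the race window */
        pthread_mutex_lock(&vq_mutex);          /* then B */
        pthread_mutex_unlock(&vq_mutex);
        pthread_mutex_unlock(&iotlb_lock);
        return NULL;
}

static void *miss_path(void *arg)       /* vhost_iotlb_miss() order */
{
        (void)arg;
        pthread_mutex_lock(&vq_mutex);          /* B */
        usleep(1000);
        pthread_mutex_lock(&iotlb_lock);        /* then A: ABBA, both stall */
        pthread_mutex_unlock(&iotlb_lock);
        pthread_mutex_unlock(&vq_mutex);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, notify_path, NULL);
        pthread_create(&t2, NULL, miss_path, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        puts("lucky interleaving, no deadlock this run");
        return 0;
}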
2018 Aug 03
0
[PATCH net-next] vhost: switch to use new message format
...; +  msg = &node->msg.iotlb;
> +             break;
> +     case VHOST_IOTLB_MSG_V2:
> +             size = sizeof(node->msg_v2);
> +             msg = &node->msg_v2.iotlb;
> +             break;
> +     default:
> +             BUG();
> +             break;
> +     }
>
> -     if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
> +     ret = copy_to_iter(start, size, to);
> +     if (ret != size || msg->type != VHOST_IOTLB_MISS) {
>               kfree(node);
>               return ret;
>       }
> -
>       vhost_enqueue_msg(dev, &dev->pending_list, node);
> }
>
> @@ -1126,12 +1154,19 @@ static int vhost_i...
2015 Dec 31
0
[PATCH RFC] vhost: basic device IOTLB support
...}
> +     spin_unlock(&d->iotlb_lock);
> +
> +     if (!r && entry.flags.type != VHOST_IOTLB_INVALIDATE) {
> +             mutex_lock(&d->iotlb_req_mutex);
> +             if (entry.iova == d->pending_request.iova &&
> +                 d->pending_request.flags.type ==
> +                 VHOST_IOTLB_MISS) {
> +                     d->pending_request = entry;
> +                     complete(&d->iotlb_completion);
> +             }
> +             mutex_unlock(&d->iotlb_req_mutex);
> +     }
> +
> +     break;
>       default:
>               r = -ENOIOCTLCMD;
>               break;
> @@ -1177,9 +1268,104 @@ int vhost_init_used(struct...
2018 Aug 06
1
[PATCH net-next V2] vhost: switch to use new message format
...case VHOST_IOTLB_MSG:
+               size = sizeof(node->msg);
+               msg = &node->msg.iotlb;
+               break;
+       case VHOST_IOTLB_MSG_V2:
+               size = sizeof(node->msg_v2);
+               msg = &node->msg_v2.iotlb;
+               break;
+       default:
+               BUG();
+               break;
+       }
-       if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+       ret = copy_to_iter(start, size, to);
+       if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                kfree(node);
                return ret;
        }
-
        vhost_enqueue_msg(dev, &dev->pending_list, node);
}
@@ -1126,12 +1154,19 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, in...
2018 Nov 30
1
[PATCH v2] vhost: fix IOTLB locking
Commit 78139c94dc8c ("net: vhost: lock the vqs one by one") moved the vq
lock to improve scalability, but introduced a possible deadlock in
vhost-iotlb. vhost_iotlb_notify_vq() now takes vq->mutex while holding
the device's IOTLB spinlock. And on the vhost_iotlb_miss() path, the
spinlock is taken while holding vq->mutex.
Since calling vhost_poll_queue() doesn't require any lock, avoid the
deadlock by not taking vq->mutex.
Fixes: 78139c94dc8c ("net: vhost: lock the vqs one by one")
Acked-by: Jason Wang <jasowang at redhat.com>
Acked-b...
2016 Mar 25
0
[RFC PATCH V2 2/2] vhost: device IOTLB API
...+ int i;
+
+
+       for (i = 0; i < d->nvqs; i++) {
+               vq = d->vqs[i];
+               mutex_lock(&vq->mutex);
+               req = &vq->pending_request;
+               if (entry->iova <= req->iova &&
+                   entry->iova + entry->size - 1 > req->iova &&
+                   req->flags.type == VHOST_IOTLB_MISS) {
+                       *req = *entry;
+                       vhost_poll_queue(&vq->poll);
+               }
+               mutex_unlock(&vq->mutex);
+       }
+}
+
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL;
        struct eventfd_ctx *ctx...
2016 Jun 22
0
[PATCH 3/3] vhost: device IOTLB API
...->iotlb_lock);
+
+       list_for_each_entry_safe(node, n, &d->pending_list, node) {
+               struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+               if (msg->iova <= vq_msg->iova &&
+                   msg->iova + msg->size - 1 > vq_msg->iova &&
+                   vq_msg->type == VHOST_IOTLB_MISS) {
+                       vhost_poll_queue(&node->vq->poll);
+                       list_del(&node->node);
+                       kfree(node);
+               }
+       }
+
+       spin_unlock(&d->iotlb_lock);
+}
+
+static int umem_access_ok(u64 uaddr, u64 size, int access)
+{
+       if ((access & VHOST_ACCESS_RO) &&
+           !access_ok(VERIFY_READ, u...
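The truncated helper above plausibly continues as follows; a reconstruction in the idiom of that era's access_ok(), which still took a VERIFY_* argument, and not a verbatim quote of the patch:

static int umem_access_ok(u64 uaddr, u64 size, int access)
{
        if ((access & VHOST_ACCESS_RO) &&
            !access_ok(VERIFY_READ, (void __user *)uaddr, size))
                return -EFAULT;
        if ((access & VHOST_ACCESS_WO) &&
            !access_ok(VERIFY_WRITE, (void __user *)uaddr, size))
                return -EFAULT;
        return 0;
}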
2016 Apr 27
2
[RFC PATCH V2 2/2] vhost: device IOTLB API
...< d->nvqs; i++) {
> +             vq = d->vqs[i];
> +             mutex_lock(&vq->mutex);
> +             req = &vq->pending_request;
> +             if (entry->iova <= req->iova &&
> +                 entry->iova + entry->size - 1 > req->iova &&
> +                 req->flags.type == VHOST_IOTLB_MISS) {
> +                     *req = *entry;
> +                     vhost_poll_queue(&vq->poll);
> +             }
> +             mutex_unlock(&vq->mutex);
> +     }
> +}
> +
> /* Caller must have device mutex */
> long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
> {
>       struct f...
2020 Feb 05
2
[PATCH] vhost: introduce vDPA based backend
...ment format for
> these ioctls?
>
It's the existing uapi:
/* no alignment requirement */
struct vhost_iotlb_msg {
        __u64 iova;
        __u64 size;
        __u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
        __u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
        __u8 type;
};
#define VHOST_IOTLB_MSG 0x1
#define VHOST_IOTLB_MSG_V2 0x2
struct vhost_msg {
        int type;
        union {
                struct vhost_iotlb_msg iotlb;
                _...
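To make the format concrete: a minimal sketch of a userspace backend pushing one mapping with the v2 format, e.g. while answering a VHOST_IOTLB_MISS. It assumes vhost_fd is a vhost device fd with VHOST_BACKEND_F_IOTLB_MSG_V2 negotiated via VHOST_SET_BACKEND_FEATURES; error handling is minimal:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/vhost.h>

static int iotlb_map(int vhost_fd, uint64_t iova, uint64_t size,
                     uint64_t uaddr)
{
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;          /* wire-format selector */
        msg.iotlb.iova = iova;                  /* guest IOVA to map */
        msg.iotlb.size = size;
        msg.iotlb.uaddr = uaddr;                /* backing address in our VA */
        msg.iotlb.perm = VHOST_ACCESS_RW;
        msg.iotlb.type = VHOST_IOTLB_UPDATE;

        /* IOTLB messages go to the device via write(), not ioctl(). */
        if (write(vhost_fd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
                return -1;
        return 0;
}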
2018 Nov 30
0
[PATCH] vhost: fix IOTLB locking
...the vqs one by one") moved the vq
> lock to improve scalability, but introduced a possible deadlock in
> vhost-iotlb. vhost_iotlb_notify_vq() now takes vq->mutex while holding
> the device's IOTLB spinlock.
Indeed spin_lock is just outside this snippet. Yack.
> And on the vhost_iotlb_miss() path, the
> spinlock is taken while holding vq->mutex.
>
> As long as we hold dev->mutex to prevent an ioctl from modifying
> vq->poll concurrently, we can safely call vhost_poll_queue() without
> holding vq->mutex. Since vhost_process_iotlb_msg() holds dev->mutex w...
2018 Nov 30
0
[REBASE PATCH net-next v9 1/4] net: vhost: lock the vqs one by one
...u64 userspace_addr, int perm)
>> @@ -954,7 +943,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
>>              if (msg->iova <= vq_msg->iova &&
>>                  msg->iova + msg->size - 1 >= vq_msg->iova &&
>>                  vq_msg->type == VHOST_IOTLB_MISS) {
>> +                    mutex_lock(&node->vq->mutex);
> This seems to introduce a deadlock (and sleep-in-atomic): the vq->mutex
> is taken while the IOTLB spinlock is held (taken earlier in
> vhost_iotlb_notify_vq()). On the vhost_iotlb_miss() path, the IOTLB
> spinlock is taken wh...