Displaying results from an estimated 20 matches for "e9ed2722b633".
2019 Oct 03
1
[PATCH 07/11] vhost: convert vhost_umem_interval_tree to half closed intervals
...node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
-							addr, addr + len - 1);
+							   addr, addr + len);
 		if (node == NULL || node->start > addr) {
 			if (umem != dev->iotlb) {
 				ret = -EFAULT;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e9ed2722b633..bb36cb9ed5ec 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -53,13 +53,13 @@ struct vhost_log {
 };
 
 #define START(node) ((node)->start)
-#define LAST(node) ((node)->last)
+#define END(node) ((node)->end)
 
 struct vhost_umem_node {
 	struct rb_node rb;
 	struct list_...
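
For context on the hunk above: the patch moves the umem interval tree from closed intervals [start, last] to half-closed intervals [start, end), so lookups pass addr + len instead of addr + len - 1. A minimal sketch of the two overlap conventions, using a hypothetical struct region rather than the real vhost_umem_node:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical region of `len` bytes starting at `start`. */
struct region {
	uint64_t start;
	uint64_t last;	/* closed convention: start + len - 1 */
	uint64_t end;	/* half-closed convention: start + len */
};

/* Closed intervals: query [addr, addr + len - 1] against [start, last]. */
static bool overlaps_closed(const struct region *r, uint64_t addr, uint64_t len)
{
	return addr <= r->last && addr + len - 1 >= r->start;
}

/* Half-closed intervals: query [addr, addr + len) against [start, end).
 * No "- 1" adjustment, and zero-length ranges behave sanely.
 */
static bool overlaps_half(const struct region *r, uint64_t addr, uint64_t len)
{
	return addr < r->end && addr + len > r->start;
}
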
2019 Oct 04
0
[PATCH 07/11] vhost: convert vhost_umem_interval_tree to half closed intervals
...>> -	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
>> +	u64 s = 0, size, orig_addr = addr, last = addr + len;
>
>maybe "end" or "end_addr" instead of "last".
>
>> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
>> index e9ed2722b633..bb36cb9ed5ec 100644
>> --- a/drivers/vhost/vhost.h
>> +++ b/drivers/vhost/vhost.h
>> @@ -53,13 +53,13 @@ struct vhost_log {
>>  };
>>
>>  #define START(node) ((node)->start)
>> -#define LAST(node) ((node)->last)
>> +#define END(node) ((node)->...
2019 Oct 17
0
[PATCH RFC 3/3] vhost, kcov: collect coverage from vhost_worker
...>  	if (dev->worker) {
>  		kthread_stop(dev->worker);
>  		dev->worker = NULL;
> +#ifdef CONFIG_KCOV
> +		dev->kcov_handle = 0;
> +#endif
>  	}
>  	if (dev->mm)
>  		mmput(dev->mm);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index e9ed2722b633..010ca1ebcbd5 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -173,6 +173,9 @@ struct vhost_dev {
>  	int iov_limit;
>  	int weight;
>  	int byte_weight;
> +#ifdef CONFIG_KCOV
> +	u64 kcov_handle;
> +#endif
Why is this a #ifdef at all here?
thank...
2019 Oct 23
0
[PATCH 3/3] vhost, kcov: collect coverage from vhost_worker
...>                 kthread_stop(dev->worker);
>                 dev->worker = NULL;
> +               dev->kcov_handle = 0;
>         }
>         if (dev->mm)
>                 mmput(dev->mm);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index e9ed2722b633..a123fd70847e 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -173,6 +173,7 @@ struct vhost_dev {
>         int iov_limit;
>         int weight;
>         int byte_weight;
> +       u64 kcov_handle;
>  };
>
>  bool vhost_exceeds_weight(struct vh...
2019 Oct 23
0
[PATCH 3/3] vhost, kcov: collect coverage from vhost_worker
...dev->worker = NULL;
> > > +               dev->kcov_handle = 0;
> > >         }
> > >         if (dev->mm)
> > >                 mmput(dev->mm);
> > > diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> > > index e9ed2722b633..a123fd70847e 100644
> > > --- a/drivers/vhost/vhost.h
> > > +++ b/drivers/vhost/vhost.h
> > > @@ -173,6 +173,7 @@ struct vhost_dev {
> > >         int iov_limit;
> > >         int weight;
> > >         int byte_weight;
> > > +       u6...
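
For context on the two kcov threads above: the handle lets kcov's remote-coverage mode attribute coverage gathered in the vhost worker kthread back to the userspace process that owns the device, and kcov_common_handle() returns 0 when kcov is not attached, which is why the reviewers push back on wrapping the field in #ifdef CONFIG_KCOV. A heavily simplified sketch of the wiring, assuming the kcov remote API (kcov_common_handle(), kcov_remote_start_common(), kcov_remote_stop()) that this series builds on:

#include <linux/kcov.h>
#include "vhost.h"	/* struct vhost_dev, struct vhost_work */

/* In VHOST_SET_OWNER: remember the owner's kcov handle (0 if kcov is off). */
static void vhost_remember_kcov(struct vhost_dev *dev)
{
	dev->kcov_handle = kcov_common_handle();
}

/* In the worker loop: bracket each work item so its coverage is credited
 * to the owning task instead of the kthread.
 */
static void vhost_run_work(struct vhost_dev *dev, struct vhost_work *work)
{
	kcov_remote_start_common(dev->kcov_handle);
	work->fn(work);
	kcov_remote_stop();
}
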
2019 Sep 06
1
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...> -	vhost_setup_vq_uaddr(vq);
> -
> -	if (d->mm)
> -		mmu_notifier_register(&d->mmu_notifier, d->mm);
> -#endif
> -
>  	mutex_unlock(&vq->mutex);
>  
>  	return r;
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 42a8c2a13ab1..e9ed2722b633 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -12,9 +12,6 @@
>  #include <linux/virtio_config.h>
>  #include <linux/virtio_ring.h>
>  #include <linux/atomic.h>
> -#include <linux/pagemap.h>
> -#include <linux/mmu_notifier....
2019 Oct 11
0
[PATCH RFC v1 1/2] vhost: option to fetch descriptors through an independent struct
...q, 1);
+	vq->ndescs = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc_batch);
+
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e9ed2722b633..1724f61b6c2d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -80,6 +80,13 @@ enum vhost_uaddr_type {
 	VHOST_NUM_ADDRS = 3,
 };
 
+struct vhost_desc {
+	u64 addr;
+	u32 len;
+	u16 flags; /* VRING_DESC_F_WRITE, VRING_DESC_F_NEXT */
+	u16 id;
+};
+
 /* The virtqueue structure descr...
2019 Sep 17
0
[RFC v4 3/3] vhost: introduce mdev based hardware backend
...r + u->size >= ring_addr + len) {
+			*addr = ring_addr - u->userspace_addr + u->start;
+			return true;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(vhost_translate_ring_addr);
 
 static int __init vhost_init(void)
 {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e9ed2722b633..294a6bcb6adf 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -189,6 +189,12 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
 bool vhost_vq_access_ok(struct vho...
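
The loop in the snippet above walks the umem regions and translates a userspace ring address into the address space those regions describe. A standalone sketch of the same lookup and arithmetic, using a hypothetical umem_region with the field names visible in the diff:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical mapping region: [userspace_addr, userspace_addr + size) in the
 * caller's address space corresponds to [start, start + size) in the address
 * space the umem describes.
 */
struct umem_region {
	uint64_t start;
	uint64_t size;
	uint64_t userspace_addr;
};

/* Translate a ring of `len` bytes at userspace address `ring_addr`; succeed
 * only if the whole ring fits inside one region, as in the
 * "u->userspace_addr + u->size >= ring_addr + len" check above.
 */
static bool translate_ring_addr(const struct umem_region *regions, size_t n,
				uint64_t ring_addr, uint64_t len, uint64_t *addr)
{
	for (size_t i = 0; i < n; i++) {
		const struct umem_region *u = &regions[i];

		if (ring_addr >= u->userspace_addr &&
		    u->userspace_addr + u->size >= ring_addr + len) {
			*addr = ring_addr - u->userspace_addr + u->start;
			return true;
		}
	}
	return false;
}
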
2019 Sep 05
0
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...*d,
 		BUG();
 	}
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	vhost_setup_vq_uaddr(vq);
-
-	if (d->mm)
-		mmu_notifier_register(&d->mmu_notifier, d->mm);
-#endif
-
 	mutex_unlock(&vq->mutex);
 
 	return r;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42a8c2a13ab1..e9ed2722b633 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,9 +12,6 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
-#include <linux/pagemap.h>
-#include <linux/mmu_notifier.h>
-#include <asm/cacheflush.h>...
2019 Sep 17
1
[RFC v4 3/3] vhost: introduce mdev based hardware backend
...e;
> +		}
> +	}
> +
> +	return false;
> +}
> +EXPORT_SYMBOL_GPL(vhost_translate_ring_addr);
As we've discussed, this is necessary.
Thanks
>   
>   static int __init vhost_init(void)
>   {
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index e9ed2722b633..294a6bcb6adf 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -189,6 +189,12 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
>   long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
>   bool vh...
2019 Oct 12
2
[PATCH RFC v2 0/2] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we convert descriptors to an
independent format first, and convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
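
To make the two-pass idea concrete: descriptors are first pulled into a flat, ring-format-independent record (the struct vhost_desc visible in the vhost.h hunk further up), and only afterwards translated into an iovec. A rough sketch of that split, with hypothetical fetch/convert helpers standing in for the real vhost code:

#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

/* Ring-format-independent descriptor, with field names taken from the
 * struct vhost_desc hunk shown earlier in these results.
 */
struct desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;	/* VRING_DESC_F_WRITE, VRING_DESC_F_NEXT */
	uint16_t id;
};

/* Pass 1 (hypothetical prototype): a tight loop that only walks the ring
 * and fills `descs`, regardless of whether the ring is split or packed.
 */
size_t fetch_descriptors(struct desc *descs, size_t max);

/* Pass 2: turn a fetched batch into an iovec for the actual I/O. */
static size_t descs_to_iov(const struct desc *descs, size_t n,
			   struct iovec *iov, size_t iov_max)
{
	size_t out = 0;

	for (size_t i = 0; i < n && out < iov_max; i++) {
		/* The real code translates guest addresses here. */
		iov[out].iov_base = (void *)(uintptr_t)descs[i].addr;
		iov[out].iov_len = descs[i].len;
		out++;
	}
	return out;
}
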
2019 Sep 05
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...S
+	if (r == 0)
+		vhost_setup_vq_uaddr(vq);
+
+	if (d->mm) {
+		r = mmu_notifier_register(&d->mmu_notifier, d->mm);
+		if (!r)
+			d->has_notifier = true;
+	}
+#endif
+
 	mutex_unlock(&vq->mutex);
 
 	return r;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e9ed2722b633..85e97e0f77f5 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,6 +12,9 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
+#include <linux/pagemap.h>
+#include <linux/mmu_notifier.h>
+#include <asm/ca...
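
For background on the "metadata acceleration" being reverted and reintroduced in these two patches: the vq metadata (descriptor, avail and used rings) is mapped to kernel virtual addresses so the hot path can avoid uaccess copies, and an MMU notifier on the owner's mm tears those mappings down when userspace remaps or frees the pages. A minimal sketch mirroring the registration step in the hunk above, with locking, teardown and the notifier callbacks omitted:

#include <linux/mmu_notifier.h>
#include "vhost.h"	/* struct vhost_dev with mmu_notifier/has_notifier */

/* Once the vq user addresses are prepared, register an MMU notifier on the
 * owner's mm so the cached kernel mappings of the vq metadata can be
 * invalidated when the underlying userspace pages change.
 */
static int vhost_attach_notifier_sketch(struct vhost_dev *d)
{
	int r = 0;

	if (d->mm) {
		r = mmu_notifier_register(&d->mmu_notifier, d->mm);
		if (!r)
			d->has_notifier = true;
	}
	return r;
}
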
2019 Sep 05
8
[PATCH 0/2] Revert and rework on the metadata acceleration
Hi:
Per request from Michael and Jason, the metadata acceleration is
reverted in this version and reworked in the next version.
Please review.
Thanks
Jason Wang (2):
  Revert "vhost: access vq metadata through kernel virtual address"
  vhost: re-introducing metadata acceleration through kernel virtual
    address
 drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------
2019 Sep 17
7
[RFC v4 0/3] vhost: introduce mdev based hardware backend
This RFC is to demonstrate the ideas below:
a) Build vhost-mdev on top of the same abstraction defined in
   the virtio-mdev series [1];
b) Introduce /dev/vhost-mdev to do vhost ioctls and support
   setting an mdev device as the backend;
Now the userspace API looks like this:
- Userspace generates a compatible mdev device;
- Userspace opens this mdev device with the VFIO API (including
  doing IOMMU
2019 Oct 13
4
[PATCH RFC v3 0/4] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we convert descriptors to an
independent format first, and convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
2019 Oct 13
6
[PATCH RFC v4 0/5] vhost: ring format independence
This adds infrastructure required for supporting
multiple ring formats.
The idea is as follows: we convert descriptors to an
independent format first, and convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we convert descriptors to an
independent format first, and convert that to an iov later.
The point is that we have a tight loop that fetches
descriptors, which is good for cache utilization.
This will also allow all kinds of batching tricks -
e.g. it seems possible to keep SMAP disabled while
we are fetching multiple descriptors.
And perhaps more importantly,