Displaying 20 results from an estimated 76 matches for "vhost_copy_from_user".
2019 Dec 18
0
[PATCH 1/1] drivers/vhost : Removes unnecessary 'else' in vhost_copy_from_user
...++----------------------
> 1 file changed, 24 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index f44340b41494..b23d1b74c32f 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -824,34 +824,32 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
>
> if (!vq->iotlb)
> return __copy_from_user(to, from, size);
> - else {
> - /* This function should be called after iotlb
> - * prefetch, which means we're sure that vq
> - * could be access through iotlb. So -EAGAIN...
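The cleanup follows the usual kernel style rule: when the "if" arm ends in a
return, the "else" and its extra indentation level are redundant. A minimal
sketch of the resulting shape (hypothetical helper name, not the full function
from the patch):

	static int copy_helper(struct vhost_virtqueue *vq, void *to,
			       const void __user *from, unsigned size)
	{
		if (!vq->iotlb)
			return __copy_from_user(to, from, size);

		/* iotlb path continues un-indented; the removed else
		 * added nothing, since the branch above already returned */
		return 0;
	}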
2016 Dec 06
0
[PATCH 06/10] vhost: add missing __user annotations
...int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size)
{
int ret;
@@ -749,7 +749,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
- void *from, unsigned size)
+ void __user *from, unsigned size)
{
int ret;
@@ -783,7 +783,7 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
}
static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
- voi...
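The __user annotation marks a pointer as living in the userspace address
space, so sparse ("make C=1") can flag direct dereferences and pointers passed
to the wrong copy routine; outside checker builds it compiles away. A minimal
sketch with a hypothetical helper:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static int fetch_u32(u32 *dst, const u32 __user *src)
	{
		/* "*src" here would draw a sparse warning: __user
		 * pointers may only go through copy_from_user() and
		 * friends, never be dereferenced directly */
		return copy_from_user(dst, src, sizeof(*dst)) ? -EFAULT : 0;
	}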
2019 Mar 06
1
[RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
...ost_get_used_idx(struct vhost_virtqueue *vq,
> + __virtio16 *idx)
> +{
> + return vhost_get_used(vq, *idx, &vq->used->idx);
> +}
> +
> +static inline int vhost_get_desc(struct vhost_virtqueue *vq,
> + struct vring_desc *desc, int idx)
> +{
> + return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
> +}
> +
> static int vhost_new_umem_range(struct vhost_umem *umem,
> u64 start, u64 size, u64 end,
> u64 userspace_addr, int perm)
> @@ -1840,8 +1905,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
&...
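The point of these wrappers is that every vring field gets one typed accessor,
so a later patch can reroute a single accessor (to a cached translation or a
directly mapped address, say) without touching its callers. A hedged usage
sketch for the accessor quoted above, reusing the existing vq_err() helper:

	__virtio16 idx;

	if (vhost_get_used_idx(vq, &idx)) {
		vq_err(vq, "Failed to access used idx at %p\n",
		       &vq->used->idx);
		return -EFAULT;
	}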
2019 Oct 18
2
read_barrier_depends() usage in vhost.c
...to find one...
> >
>
> I guess it was because we will read from the addresses stored in the iov, like:
>
> 1) translate_desc(), which stores the userspace buffer pointer in the iov
>
> 2) copy_from_iter(), which reads from those pointers
Isn't that exactly the same flow as vhost_copy_from_user(), which doesn't
have the barrier? Staring at the code some more, my best bet at the moment
is that the load of 'indirect->addr' is probably the one to worry about,
since it's part of the vring and can be updated concurrently.
> So we need a data dependency barrier in the mid...
2019 Oct 21
0
read_barrier_depends() usage in vhost.c
...;>>
>> I guess it was because we will read from the addresses stored in the iov, like:
>>
>> 1) translate_desc(), which stores the userspace buffer pointer in the iov
>>
>> 2) copy_from_iter(), which reads from those pointers
> Isn't that exactly the same flow as vhost_copy_from_user(), which doesn't
> have the barrier?
There's an rmb before the vhost_copy_from_user() call (in vhost_get_desc()).
> Staring at the code some more, my best bet at the moment
> is that the load of 'indirect->addr' is probably the one to worry about,
> since it's pa...
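The ordering in question: the descriptor must not be read before the avail
index that publishes it, which is what the rmb above provides; whether the
later data-dependent load through indirect->addr additionally needs
read_barrier_depends() (i.e. on Alpha) is the open point of this thread. A
minimal sketch of the pattern, not the exact vhost code; vhost_get_avail_idx()
is assumed from the same fine-grain accessor series quoted in these results:

	static int fetch_one_desc(struct vhost_virtqueue *vq,
				  struct vring_desc *desc, int head)
	{
		__virtio16 avail_idx;

		if (vhost_get_avail_idx(vq, &avail_idx))  /* load 1: index */
			return -EFAULT;

		smp_rmb();	/* pairs with the guest's wmb: don't read
				 * the descriptor before the index that
				 * publishes it */

		return vhost_get_desc(vq, desc, head);	  /* load 2: data */
	}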
2019 Mar 06
0
[RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
...t_used_event(vq));
+}
+
+static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
+ __virtio16 *idx)
+{
+ return vhost_get_used(vq, *idx, &vq->used->idx);
+}
+
+static inline int vhost_get_desc(struct vhost_virtqueue *vq,
+ struct vring_desc *desc, int idx)
+{
+ return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
+}
+
static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
@@ -1840,8 +1905,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
static int vhost_update_use...
2019 Mar 07
0
[RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
...eue *vq,
>> + __virtio16 *idx)
>> +{
>> + return vhost_get_used(vq, *idx, &vq->used->idx);
>> +}
>> +
>> +static inline int vhost_get_desc(struct vhost_virtqueue *vq,
>> + struct vring_desc *desc, int idx)
>> +{
>> + return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
>> +}
>> +
>> static int vhost_new_umem_range(struct vhost_umem *umem,
>> u64 start, u64 size, u64 end,
>> u64 userspace_addr, int perm)
>> @@ -1840,8 +1905,7 @@ int vhost_log_write(struct vhost_virtqueue *vq,...
2018 Feb 14
0
[PATCH RFC 2/2] vhost: packed ring support
...ss, i;
+ u16 avail_idx = vq->last_avail_idx;
+
+ /* When we start there are none of either input nor output. */
+ *out_num = *in_num = 0;
+ if (unlikely(log))
+ *log_num = 0;
+
+ do {
+ unsigned int iov_count = *in_num + *out_num;
+
+ i = vq->last_avail_idx & (vq->num - 1);
+ ret = vhost_copy_from_user(vq, &desc, vq->desc_packed + i,
+ sizeof(desc));
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
+ i, vq->desc_packed + i);
+ return -EFAULT;
+ }
+
+ if (!desc_is_avail(vq, &desc)) {
+ /* If there's nothing new since l...
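desc_is_avail() is the packed-ring availability test: per the virtio 1.1 draft
linked from the cover letter, a descriptor is available when its AVAIL flag
equals the driver's wrap counter and its USED flag differs from it. A sketch
using the spec's bit positions (macro and function names are local to the
sketch, not necessarily this RFC's):

	#define DESC_F_AVAIL	(1 << 7)
	#define DESC_F_USED	(1 << 15)

	static bool desc_is_avail_sketch(u16 flags, bool wrap_counter)
	{
		bool avail = flags & DESC_F_AVAIL;
		bool used  = flags & DESC_F_USED;

		/* available: driver set AVAIL to its wrap counter and
		 * USED to the inverse; once the device consumes it,
		 * USED flips to match and the test fails */
		return avail == wrap_counter && used != wrap_counter;
	}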
2016 Dec 14
1
[PATCH V2] vhost: introduce O(1) vq metadata cache
...+ (u64)(uintptr_t)to, size,
+ VHOST_ADDR_DESC);
+
+ if (uaddr)
+ return __copy_to_user(uaddr, from, size);
+
ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
ARRAY_SIZE(vq->iotlb_iov),
VHOST_ACCESS_WO);
@@ -761,8 +796,14 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
* could be access through iotlb. So -EAGAIN should
* not happen in this case.
*/
- /* TODO: more fast path */
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)from, size,
+ VHOST_ADDR_DESC);
struct iov_iter f;
+...
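The cached entry is the userspace address produced by the last IOTLB
translation of the descriptor ring, so the hot path can copy directly and skip
the iov walk. The shape of the added fast path, condensed from the hunk above:

	void __user *uaddr = vhost_vq_meta_fetch(vq, (u64)(uintptr_t)from,
						 size, VHOST_ADDR_DESC);
	if (uaddr)	/* cache hit: one bounds-checked copy, no iov walk */
		return __copy_from_user(to, uaddr, size);

	/* cache miss: fall through to translate_desc() + copy_from_iter()
	 * as before */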
2018 Dec 28
4
[RFC PATCH V2 0/3] vhost: accelerate metadata access through vmap()
Hi:
This series tries to access virtqueue metadata through kernel virtual
addresses instead of the copy_user() friends, since those carry too much
overhead: access checks, speculation barriers, or even hardware feature
toggling.
Tests show about a 24% improvement on TX PPS. It should benefit other
cases as well.
Changes from V1:
- instead of pinning pages, use an MMU notifier to invalidate vmaps and
remap during
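A hedged sketch of that invalidation scheme (callback signature as in kernels
of that era; the vhost_dev member and the unmap helper are assumptions for
illustration, not this series' exact code):

	#include <linux/mmu_notifier.h>

	static void vhost_invalidate_range(struct mmu_notifier *mn,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
	{
		struct vhost_dev *dev = container_of(mn, struct vhost_dev,
						     mmu_notifier);

		/* drop any vmap()ed metadata alias overlapping
		 * [start, end); readers fall back to copy_user()
		 * until the range is remapped */
		vhost_uninit_vq_maps(dev, start, end);
	}

	static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
		.invalidate_range = vhost_invalidate_range,
	};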
2016 Dec 14
2
[PATCH] vhost: introduce O(1) vq metadata cache
...+ (u64)(uintptr_t)to, size,
+ VHOST_ADDR_DESC);
+
+ if (uaddr)
+ return __copy_to_user(uaddr, from, size);
+
ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
ARRAY_SIZE(vq->iotlb_iov),
VHOST_ACCESS_WO);
@@ -761,8 +796,14 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
* could be access through iotlb. So -EAGAIN should
* not happen in this case.
*/
- /* TODO: more fast path */
+ void __user *uaddr = vhost_vq_meta_fetch(vq,
+ (u64)(uintptr_t)from, size,
+ VHOST_ADDR_DESC);
struct iov_iter f;
+...
2018 Feb 14
6
[PATCH RFC 0/2] Packed ring for vhost
Hi all:
This RFC implements a subset of the packed ring described at
https://github.com/oasis-tcs/virtio-docs/blob/master/virtio-v1.1-packed-wd07.pdf.
The code was tested with the pmd implemented by Jens at
http://dpdk.org/ml/archives/dev/2018-January/089417.html. A minor
change was needed in the pmd code to kick the virtqueue, since it
assumes a busy-polling backend.
Tests were done between localhost
2019 Mar 06
12
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
This series tries to access virtqueue metadata through kernel virtual
addresses instead of the copy_user() friends, since those carry too much
overhead: access checks, speculation barriers, or even hardware feature
toggling. This is done by setting up kernel addresses through vmap()
and registering an MMU notifier for invalidation.
Tests show about a 24% improvement on TX PPS. TCP_STREAM doesn't see
an obvious improvement.
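The mapping side can be sketched with long-standing kernel APIs (hypothetical
helper, using the gup_flags variant of get_user_pages_fast() from recent
kernels; the series pairs this with the MMU notifier above rather than holding
the pages pinned long-term):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void *map_ring_sketch(unsigned long uaddr, int npages,
				     struct page **pages)
	{
		/* fault in and grab the pages backing the ring */
		if (get_user_pages_fast(uaddr, npages, FOLL_WRITE,
					pages) != npages)
			return NULL;	/* real code must put partial pins */

		/* build a contiguous kernel alias so the hot path can
		 * use plain loads/stores instead of copy_user() */
		return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	}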
2018 Dec 13
11
[PATCH net-next 0/3] vhost: accelerate metadata access through vmap()
Hi:
This series tries to access virtqueue metadata through kernel virtual
addresses instead of the copy_user() friends, since those carry too much
overhead: access checks, speculation barriers, or even hardware feature
toggling.
Tests show about a 24% improvement on TX PPS. It should benefit other
cases as well.
Please review
Jason Wang (3):
vhost: generalize adding used elem
vhost: fine grain userspace memory