Displaying 20 results from an estimated 113 matches for "vhost_put_us".
2019 Mar 06
1
[RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
...709e7 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -869,6 +869,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
> ret; \
> })
>
> +static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
> +{
> + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
> + vhost_avail_event(vq));
> +}
> +
> +static inline int vhost_put_used(struct vhost_virtqueue *vq,
> + struct vring_used_elem *head, int idx,
> + int count)
> +{
> + return vhost_copy_to_user(vq, vq->used->r...
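The hunk above (cut off by the search index) introduces the accessor pattern the patch is named for: every write of ring metadata goes through a small typed helper such as vhost_put_avail_event() or vhost_put_used(), so a later patch can change how the underlying access is performed in exactly one place. Below is a minimal user-space sketch of that wrapper idea; all types and names (mock_vq, mock_copy_to_ring, put_used, ...) are illustrative stand-ins, not the kernel's definitions.

/* Illustrative only: the "fine grain accessor" idea in plain C, with
 * mock types standing in for the vring structures. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vring_used_elem { uint32_t id; uint32_t len; };

struct mock_vq {
	struct vring_used_elem *used_ring; /* stands in for vq->used->ring */
	uint16_t avail_event;              /* stands in for vhost_avail_event(vq) */
	uint16_t avail_idx;
};

/* Low-level accessor; in the kernel this role is played by vhost_copy_to_user(). */
static int mock_copy_to_ring(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* Typed helpers mirroring vhost_put_avail_event()/vhost_put_used(). */
static int put_avail_event(struct mock_vq *vq)
{
	return mock_copy_to_ring(&vq->avail_event, &vq->avail_idx,
				 sizeof(vq->avail_idx));
}

static int put_used(struct mock_vq *vq, struct vring_used_elem *heads,
		    int idx, int count)
{
	return mock_copy_to_ring(vq->used_ring + idx, heads,
				 count * sizeof(*heads));
}

int main(void)
{
	struct vring_used_elem ring[4] = { 0 };
	struct vring_used_elem done = { .id = 1, .len = 128 };
	struct mock_vq vq = { .used_ring = ring, .avail_idx = 3 };

	put_used(&vq, &done, 0, 1);
	put_avail_event(&vq);
	printf("used[0] = {id=%u, len=%u}, avail_event=%u\n",
	       (unsigned)ring[0].id, (unsigned)ring[0].len,
	       (unsigned)vq.avail_event);
	return 0;
}

The point of the indirection is that swapping mock_copy_to_ring() for a direct pointer dereference later does not have to touch any caller.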
2019 Mar 06
0
[RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
...b/drivers/vhost/vhost.c
index 400aa78..29709e7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -869,6 +869,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
ret; \
})
+static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
+{
+ return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+ vhost_avail_event(vq));
+}
+
+static inline int vhost_put_used(struct vhost_virtqueue *vq,
+ struct vring_used_elem *head, int idx,
+ int count)
+{
+ return vhost_copy_to_user(vq, vq->used->ring + idx, head,
+ count * sizeof(*...
2019 Mar 07
0
[RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
.../vhost/vhost.c
>> +++ b/drivers/vhost/vhost.c
>> @@ -869,6 +869,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
>> ret; \
>> })
>>
>> +static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
>> +{
>> + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
>> + vhost_avail_event(vq));
>> +}
>> +
>> +static inline int vhost_put_used(struct vhost_virtqueue *vq,
>> + struct vring_used_elem *head, int idx,
>> + int count)
>> +{
>> + return vhost_co...
2018 Dec 28
4
[RFC PATCH V2 0/3] vhost: accelerate metadata access through vmap()
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of the copy_user() friends, since those have too much
overhead: checks, spec barriers, or even hardware feature
toggling.
Test shows about 24% improvement on TX PPS. It should benefit other
cases as well.
Changes from V1:
- instead of pinning pages, use MMU notifier to invalidate vmaps and
remap during
2018 Dec 13
11
[PATCH net-next 0/3] vhost: accelerate metadata access through vmap()
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of the copy_user() friends, since those have too much
overhead: checks, spec barriers, or even hardware feature
toggling.
Test shows about 24% improvement on TX PPS. It should benefit other
cases as well.
Please review
Jason Wang (3):
vhost: generalize adding used elem
vhost: fine grain userspace memory
2018 Dec 29
12
[RFC PATCH V3 0/5]
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of the copy_user() friends, since those have too much
overhead: checks, spec barriers, or even hardware feature
toggling.
Test shows about 24% improvement on TX PPS. It should benefit other
cases as well.
Changes from V2:
- fix buggy range overlapping check
- tear down MMU notifier during vhost ioctl to make sure
2019 Mar 06
12
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
This series tries to access virtqueue metadata through a kernel virtual
address instead of the copy_user() friends, since those have too much
overhead: checks, spec barriers, or even hardware feature toggling.
This is done by setting up a kernel address through vmap() and
registering an MMU notifier for invalidation.
Test shows about 24% improvement on TX PPS. TCP_STREAM doesn't see
obvious improvement.
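In other words, the V2 approach has two halves: a vmap() of the guest's ring pages so vhost can use plain loads and stores, and an MMU notifier so that when the guest memory mapping changes, the kernel mapping is torn down and rebuilt before the next access. A rough user-space sketch of that lifecycle follows; meta_map, map_metadata() and the other names are invented for illustration, and malloc()/free() merely stand in for vmap()/vunmap().

/* Illustrative sketch of the map / invalidate / remap lifecycle.
 * Not the patch's code; mock names throughout. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct meta_map {
	void *addr;   /* stands in for the vmap()'d kernel address */
	bool  valid;
};

static void map_metadata(struct meta_map *m, size_t size)
{
	m->addr = malloc(size);          /* kernel: vmap() of the guest pages */
	m->valid = (m->addr != NULL);
}

/* What the MMU notifier's invalidate callback would do. */
static void invalidate_metadata(struct meta_map *m)
{
	free(m->addr);                   /* kernel: vunmap() */
	m->addr = NULL;
	m->valid = false;
}

/* Accessors re-validate (remap) lazily before touching the mapping. */
static void *metadata_addr(struct meta_map *m, size_t size)
{
	if (!m->valid)
		map_metadata(m, size);
	return m->addr;
}

int main(void)
{
	struct meta_map m = { 0 };

	printf("first access maps:  %p\n", metadata_addr(&m, 4096));
	invalidate_metadata(&m);         /* e.g. the guest layout changed */
	printf("after invalidation: %p\n", metadata_addr(&m, 4096));
	return 0;
}

The essential property is that the fast path only dereferences the mapping after checking it is still valid, which is exactly what the notifier callback invalidates.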
2018 Dec 13
1
[PATCH net-next 1/3] vhost: generalize adding used elem
...> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -2164,16 +2164,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
>
> start = vq->last_used_idx & (vq->num - 1);
> used = vq->used->ring + start;
> - if (count == 1) {
> - if (vhost_put_user(vq, heads[0].id, &used->id)) {
> - vq_err(vq, "Failed to write used id");
> - return -EFAULT;
> - }
> - if (vhost_put_user(vq, heads[0].len, &used->len)) {
> - vq_err(vq, "Failed to write used len");
> - return -EFAULT;
> - }
>...
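The removed lines are the old count == 1 special case, which wrote the id and len fields with two separate vhost_put_user() calls; the generalized path copies count elements in one go, which covers the single-element case as well. A trivial user-space check of that equivalence, using a mock struct rather than the kernel's:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct vring_used_elem { uint32_t id; uint32_t len; };

int main(void)
{
	struct vring_used_elem head = { .id = 7, .len = 256 };
	struct vring_used_elem a = { 0 }, b = { 0 };

	/* Old special case: write the two fields individually. */
	a.id  = head.id;
	a.len = head.len;

	/* Generalized path: one copy of count (here 1) elements. */
	memcpy(&b, &head, 1 * sizeof(head));

	assert(memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}

Folding the special case into the generic copy is what lets the later patches intercept a single helper, vhost_put_used(), instead of several call sites.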
2019 Apr 23
7
[RFC PATCH V3 0/6] vhost: accelerate metadata access
This series tries to access virtqueue metadata through a kernel virtual
address instead of the copy_user() friends, since those have too much
overhead: checks, spec barriers, or even hardware feature
toggling. This is done by setting up kernel addresses through the direct
mapping and co-operating with VM management via MMU notifiers.
Test shows about 23% improvement on TX PPS. TCP_STREAM doesn't see
obvious
2018 Dec 13
0
[PATCH net-next 3/3] vhost: access vq metadata through kernel virtual address
...ruct vhost_virtqueue *vq,
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
+ if (!vq->iotlb) {
+ struct vring_used *used = vq->used_ring.addr;
+
+ *((__virtio16 *)&used->ring[vq->num]) =
+ cpu_to_vhost16(vq, vq->avail_idx);
+ return 0;
+ }
+
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
vhost_avail_event(vq));
}
@@ -881,6 +989,13 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
struct vring_used_elem *head, int idx,
int count)
{
+ if (!vq->iotlb) {
+ struct vring_used *used = vq->used_ring.ad...
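The shape of the change is a two-way dispatch: when there is no iotlb (i.e. no device-IOTLB translation to respect) and a kernel-visible mapping of the used ring has been set up, the helpers write through it directly; otherwise they fall back to the existing uaccess-based path. A hedged user-space sketch of that dispatch, with mock names (mock_vq, direct_ring, slow_copy_to_user) in place of the real structures:

/* Illustrative only: direct-write fast path with a copy-based fallback. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vring_used_elem { uint32_t id; uint32_t len; };

struct mock_vq {
	struct vring_used_elem *direct_ring;  /* kernel-visible mapping, or NULL */
	struct vring_used_elem  user_ring[4]; /* stands in for the __user ring */
};

/* Fallback; in the kernel this role is played by vhost_copy_to_user(). */
static int slow_copy_to_user(struct mock_vq *vq, int idx,
			     const struct vring_used_elem *head, int count)
{
	memcpy(&vq->user_ring[idx], head, count * sizeof(*head));
	return 0;
}

static int put_used(struct mock_vq *vq, struct vring_used_elem *head,
		    int idx, int count)
{
	if (vq->direct_ring) {            /* fast path: plain stores */
		memcpy(&vq->direct_ring[idx], head, count * sizeof(*head));
		return 0;
	}
	return slow_copy_to_user(vq, idx, head, count);
}

int main(void)
{
	struct mock_vq vq = { 0 };
	struct vring_used_elem e = { .id = 2, .len = 64 };

	put_used(&vq, &e, 0, 1);          /* no mapping yet: fallback path */
	vq.direct_ring = vq.user_ring;    /* pretend a mapping was set up */
	put_used(&vq, &e, 1, 1);          /* direct-write fast path */
	printf("ring[1].id = %u\n", (unsigned)vq.user_ring[1].id);
	return 0;
}

The gain the cover letters quote comes from the fast path skipping the per-access checks and barriers of the uaccess helpers.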
2019 Jan 04
1
[RFC PATCH V3 5/5] vhost: access vq metadata through kernel virtual address
...addr;
> +
> + if (likely(used)) {
> + *((__virtio16 *)&used->ring[vq->num]) =
> + cpu_to_vhost16(vq, vq->avail_idx);
So here we are modifying userspace memory without marking it dirty.
Is this OK? And why?
> + return 0;
> + }
> + }
> +
> return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
> vhost_avail_event(vq));
> }
> @@ -899,6 +1056,16 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
> struct vring_used_elem *head, int idx,
> int count)
> {
> + if (!vq->iotlb) {
> + stru...
2019 May 24
10
[PATCH net-next 0/6] vhost: accelerate metadata access
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of the copy_user() friends, since those have too much
overhead: checks, spec barriers, or even hardware feature toggling
like SMAP. This is done by setting up kernel addresses through the
direct mapping and co-operating with VM management via MMU notifiers.
Test shows about 23% improvement on TX PPS. TCP_STREAM
2019 Sep 06
1
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...;
> - if (likely(map)) {
> - used = map->addr;
> - *((__virtio16 *)&used->ring[vq->num]) =
> - cpu_to_vhost16(vq, vq->avail_idx);
> - rcu_read_unlock();
> - return 0;
> - }
> -
> - rcu_read_unlock();
> - }
> -#endif
> -
> return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
> vhost_avail_event(vq));
> }
> @@ -1223,27 +922,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
> struct vring_used_elem *head, int idx,
> int count)
> {
> -#if VHOST_ARCH_CAN_ACCEL_UACCESS
&...
2018 Dec 29
0
[RFC PATCH V3 5/5] vhost: access vq metadata through kernel virtual address
...atic inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
+ if (!vq->iotlb) {
+ struct vring_used *used = vq->used_ring.addr;
+
+ if (likely(used)) {
+ *((__virtio16 *)&used->ring[vq->num]) =
+ cpu_to_vhost16(vq, vq->avail_idx);
+ return 0;
+ }
+ }
+
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
vhost_avail_event(vq));
}
@@ -899,6 +1056,16 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
struct vring_used_elem *head, int idx,
int count)
{
+ if (!vq->iotlb) {
+ struct vring_used *used = vq->used_ring.a...
2019 Sep 05
8
[PATCH 0/2] Revert and rework on the metadata acceleration
Hi:
Per request from Michael and Jason, the metadata acceleration is
reverted in this version and reworked in the next version.
Please review.
Thanks
Jason Wang (2):
Revert "vhost: access vq metadata through kernel virtual address"
vhost: re-introducing metadata acceleration through kernel virtual
address
drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------