search for: vhost_get_desc_size

Displaying 20 results from an estimated 48 matches for "vhost_get_desc_size".

2019 Mar 06
0
[RFC PATCH V2 4/5] vhost: introduce helpers to get the size of metadata area
...*vq->avail->ring) * num + event; +} + +static size_t vhost_get_used_size(struct vhost_virtqueue *vq, int num) +{ + size_t event = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; + + return sizeof(*vq->used) + + sizeof(*vq->used->ring) * num + event; +} + +static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) +{ + return sizeof(*vq->desc) * num; +} + void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs, int iov_limit) { @@ -1253,13 +1274,9 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, struct vr...
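
The flattened diff above is hard to read; reconstructed from the snippet, the three helpers look like this (the body of vhost_get_avail_size is partly elided in the excerpt and is inferred by symmetry with the used-ring helper):

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, int num)
{
	/* a trailing 2-byte event slot exists when EVENT_IDX is negotiated */
	size_t event = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->avail) +
	       sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq, int num)
{
	size_t event = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->used) +
	       sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num)
{
	/* the descriptor table has no header, just num fixed-size entries */
	return sizeof(*vq->desc) * num;
}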
2019 Mar 07
0
[RFC PATCH V2 4/5] vhost: introduce helpers to get the size of metadata area
...t_virtqueue *vq, int num) >> +{ >> + size_t event = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; >> + >> + return sizeof(*vq->used) + >> + sizeof(*vq->used->ring) * num + event; >> +} >> + >> +static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) >> +{ >> + return sizeof(*vq->desc) * num; >> +} >> + >> void vhost_dev_init(struct vhost_dev *dev, >> struct vhost_virtqueue **vqs, int nvqs, int iov_limit) >> { >> @@ -1253,13 +1...
2019 Mar 06
1
[RFC PATCH V2 4/5] vhost: introduce helpers to get the size of metadata area
...> +static size_t vhost_get_used_size(struct vhost_virtqueue *vq, int num) > +{ > + size_t event = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; > + > + return sizeof(*vq->used) + > + sizeof(*vq->used->ring) * num + event; > +} > + > +static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) > +{ > + return sizeof(*vq->desc) * num; > +} > + > void vhost_dev_init(struct vhost_dev *dev, > struct vhost_virtqueue **vqs, int nvqs, int iov_limit) > { > @@ -1253,13 +1274,9 @@ static bool vq_access_ok(struct vhost_virtqueue...
2019 Mar 06
12
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
This series tries to access virtqueue metadata through kernel virtual addresses instead of the copy_user() friends, since those have too much overhead: checks, spec barriers, or even hardware feature toggling. This is done by setting up kernel addresses through vmap() and registering an MMU notifier for invalidation. Tests show about a 24% improvement in TX PPS; TCP_STREAM doesn't see an obvious improvement.
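
The cover letter describes the mechanism only in prose. A minimal sketch of the vmap()-based mapping it refers to, with hypothetical helper names and without the MMU-notifier wiring the real series adds on top, might look like:

/* Pin the userspace pages backing a ring and map them contiguously into
 * kernel virtual address space, so the fast path can dereference the ring
 * directly instead of going through copy_user(). */
static void *ring_map_kva(unsigned long uaddr, size_t size,
			  struct page ***pagesp, int *npagesp)
{
	int npages = DIV_ROUND_UP((uaddr & ~PAGE_MASK) + size, PAGE_SIZE);
	struct page **pages;
	void *vaddr;
	int pinned;

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	pinned = get_user_pages_fast(uaddr & PAGE_MASK, npages,
				     FOLL_WRITE, pages);
	if (pinned != npages)
		goto err;

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto err;

	*pagesp = pages;
	*npagesp = npages;
	return vaddr + (uaddr & ~PAGE_MASK);

err:
	while (pinned > 0)
		put_page(pages[--pinned]);
	kfree(pages);
	return NULL;
}

The invalidation half is the interesting part: because the pages are pinned and remapped, the series must register an MMU notifier so the mapping is torn down whenever the userspace memory layout changes, which is exactly where the later review discussion (and the RCU stall report below) concentrates.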
2019 Sep 17
1
[RFC v4 3/3] vhost: introduce mdev based hardware backend
...s; queue_id++) { > + vq = &m->vqs[queue_id]; > + > + if (!vq->desc || !vq->avail || !vq->used) > + break; > + > + virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_NUM, vq->num); > + > + if (!vhost_translate_ring_addr(vq, (u64)vq->desc, > + vhost_get_desc_size(vq, vq->num), > + &addr)) > + return -EINVAL; Interesting, any reason for doing this kind of translation to HVA? I believe the addr should already be an IOVA that has been mapped by VFIO. > + > + virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_DESC_LOW, addr); > + vir...
2019 Jan 04
1
[RFC PATCH V3 5/5] vhost: access vq metadata through kernel virtual address
...q->avail_ring)); > + memset(&vq->used_ring, 0, sizeof(vq->used_ring)); > + memset(&vq->desc_ring, 0, sizeof(vq->desc_ring)); > mutex_init(&vq->mutex); > vhost_vq_reset(dev, vq); > if (vq->handle_kick) > @@ -510,6 +513,73 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) > return sizeof(*vq->desc) * num; > } > > +static void vhost_uninit_vmap(struct vhost_vmap *map) > +{ > + if (map->addr) > + vunmap(map->unmap_addr); > + > + map->addr = NULL; > + map->unmap_addr = NULL; > +}...
2019 Sep 17
0
[RFC v4 3/3] vhost: introduce mdev based hardware backend
...+ for (queue_id = 0; queue_id < m->nvqs; queue_id++) { + vq = &m->vqs[queue_id]; + + if (!vq->desc || !vq->avail || !vq->used) + break; + + virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_NUM, vq->num); + + if (!vhost_translate_ring_addr(vq, (u64)vq->desc, + vhost_get_desc_size(vq, vq->num), + &addr)) + return -EINVAL; + + virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_DESC_LOW, addr); + virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_DESC_HIGH, + (addr >> 32)); + + if (!vhost_translate_ring_addr(vq, (u64)vq->avail, + vhost_get_avai...
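
Unflattened, the queue-programming loop in the snippet reads roughly as below; the DESC register names are taken verbatim from the excerpt, while the avail/used halves are cut off and assumed to follow the same pattern:

for (queue_id = 0; queue_id < m->nvqs; queue_id++) {
	vq = &m->vqs[queue_id];

	if (!vq->desc || !vq->avail || !vq->used)
		break;

	virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_NUM, vq->num);

	if (!vhost_translate_ring_addr(vq, (u64)vq->desc,
				       vhost_get_desc_size(vq, vq->num),
				       &addr))
		return -EINVAL;

	/* a 64-bit ring address is exposed as a low/high 32-bit register pair */
	virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_DESC_LOW, addr);
	virtio_mdev_writel(mdev, VIRTIO_MDEV_QUEUE_DESC_HIGH, addr >> 32);

	/* ... the avail ring (sized via vhost_get_avail_size()) and the used
	 * ring are presumably translated and programmed the same way ... */
}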
2018 Dec 29
0
[RFC PATCH V3 5/5] vhost: access vq metadata through kernel virtual address
...vq->avail_ring, 0, sizeof(vq->avail_ring)); + memset(&vq->used_ring, 0, sizeof(vq->used_ring)); + memset(&vq->desc_ring, 0, sizeof(vq->desc_ring)); mutex_init(&vq->mutex); vhost_vq_reset(dev, vq); if (vq->handle_kick) @@ -510,6 +513,73 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) return sizeof(*vq->desc) * num; } +static void vhost_uninit_vmap(struct vhost_vmap *map) +{ + if (map->addr) + vunmap(map->unmap_addr); + + map->addr = NULL; + map->unmap_addr = NULL; +} + +static int vhost_invalidate_vmap(struct vhost_virtq...
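
The flattened hunk above actually contains the complete teardown helper; unflattened it reads:

static void vhost_uninit_vmap(struct vhost_vmap *map)
{
	if (map->addr)
		vunmap(map->unmap_addr);

	map->addr = NULL;
	map->unmap_addr = NULL;
}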
2018 Dec 29
12
[RFC PATCH V3 0/5] Hi:
This series tries to access virtqueue metadata through kernel virtual addresses instead of the copy_user() friends, since those have too much overhead: checks, spec barriers, or even hardware feature toggling. Tests show about a 24% improvement in TX PPS; it should benefit other cases as well. Changes from V2: - fix buggy range overlapping check - tear down MMU notifier during vhost ioctl to make sure
2019 Jul 27
2
INFO: rcu detected stall in vhost_worker
...vhost_setup_uaddr(struct vho static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq) { spin_lock(&vq->mmu_lock); - + /* + * deadlock if managing to take mmu_lock again while + * setting up uaddr + */ vhost_setup_uaddr(vq, VHOST_ADDR_DESC, (unsigned long)vq->desc, vhost_get_desc_size(vq, vq->num), --
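
The point of the added comment is a lock-ordering rule: the uaddr setup runs under mmu_lock, so nothing reached from it may take mmu_lock again. Unflattened, with the truncated call left open (the excerpt cuts off its trailing arguments):

static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
{
	spin_lock(&vq->mmu_lock);
	/*
	 * deadlock if managing to take mmu_lock again while
	 * setting up uaddr
	 */
	vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
			  (unsigned long)vq->desc,
			  vhost_get_desc_size(vq, vq->num)
			  /* remaining arguments elided in the excerpt */);
	/* ... presumably followed by the avail/used uaddrs and the unlock ... */
}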
2019 Mar 06
0
[RFC PATCH V2 5/5] vhost: access vq metadata through kernel virtual address
...e_vq(net, vq); out: vhost_net_signal_used(nvq); + vq_meta_prefetch_done(vq); mutex_unlock(&vq->mutex); } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 1015464..36ccf7c 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -434,6 +434,74 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) return sizeof(*vq->desc) * num; } +static void vhost_uninit_vmap(struct vhost_vmap *map) +{ + if (map->addr) { + vunmap(map->unmap_addr); + kfree(map->pages); + map->pages = NULL; + map->npages = 0; + } + + map->addr = NULL; + map->...
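
This version of the teardown also frees the pinned page array. Reconstructed from the snippet (the excerpt cuts off mid-statement; by symmetry with the Dec 2018 version above, the final line presumably clears unmap_addr):

static void vhost_uninit_vmap(struct vhost_vmap *map)
{
	if (map->addr) {
		vunmap(map->unmap_addr);
		/* unlike the earlier version, the pinned pages are tracked
		 * and released together with the mapping */
		kfree(map->pages);
		map->pages = NULL;
		map->npages = 0;
	}

	map->addr = NULL;
	map->unmap_addr = NULL;
}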
2019 Sep 17
7
[RFC v4 0/3] vhost: introduce mdev based hardware backend
This RFC demonstrates the following ideas: a) Build vhost-mdev on top of the same abstraction defined in the virtio-mdev series [1]; b) Introduce /dev/vhost-mdev to do vhost ioctls and support setting an mdev device as the backend; Now the userspace API looks like this: - Userspace generates a compatible mdev device; - Userspace opens this mdev device with the VFIO API (including doing IOMMU
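
The cover letter truncates mid-list, but the flow it describes can be sketched from userspace. Everything here is illustrative: the VFIO container/group ioctls are the standard ones, the group number and memory parameters are made up, and the /dev/vhost-mdev path plus the reuse of regular vhost ioctls come from the RFC's prose, not its code (error handling omitted):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <linux/vhost.h>

static int setup_vhost_mdev(void *guest_mem, uint64_t mem_size)
{
	uint64_t features;

	/* 1. Standard VFIO setup: container plus (hypothetical) group 12,
	 *    then map the guest memory into the device's IOMMU domain. */
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/12", O_RDWR);

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	struct vfio_iommu_type1_dma_map dma_map = {
		.argsz = sizeof(dma_map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uint64_t)guest_mem,
		.iova  = 0,
		.size  = mem_size,
	};
	ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);

	/* 2. Open the new char device and drive it with vhost ioctls.
	 *    The RFC also adds a way to attach the mdev as the backend;
	 *    that ioctl is omitted here since its name isn't in the excerpt. */
	int vhost = open("/dev/vhost-mdev", O_RDWR);
	ioctl(vhost, VHOST_SET_OWNER);
	ioctl(vhost, VHOST_GET_FEATURES, &features);
	/* ... VHOST_SET_VRING_NUM / VHOST_SET_VRING_ADDR etc. follow ... */
	return vhost;
}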
2019 Mar 06
2
[RFC PATCH V2 5/5] vhost: access vq metadata through kernel virtual address
...nvq); > + vq_meta_prefetch_done(vq); > mutex_unlock(&vq->mutex); > } > > diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c > index 1015464..36ccf7c 100644 > --- a/drivers/vhost/vhost.c > +++ b/drivers/vhost/vhost.c > @@ -434,6 +434,74 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, int num) > return sizeof(*vq->desc) * num; > } > > +static void vhost_uninit_vmap(struct vhost_vmap *map) > +{ > + if (map->addr) { > + vunmap(map->unmap_addr); > + kfree(map->pages); > + map->pages = NULL; > + map->...
2019 Apr 23
7
[RFC PATCH V3 0/6] vhost: accelerate metadata access
This series tries to access virtqueue metadata through kernel virtual addresses instead of the copy_user() friends, since those have too much overhead: checks, spec barriers, or even hardware feature toggling. This is done by setting up kernel addresses through direct mapping and co-operating with VM management via MMU notifiers. Tests show about a 23% improvement in TX PPS. TCP_STREAM doesn't see obvious
2020 Apr 20
2
[PATCH v3] virtio: force spec specified alignment on types
...vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used) + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used) { return access_ok(desc, vhost_get_desc_size(vq, num)) && @@ -2301,7 +2301,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { - struct vring_used_elem __user *used; + vring_used_elem_t __user *used; u16 old, new; int start; diff --git a/drivers/vhost/v...
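
The hunk shown is the vhost side of the change: plain struct vring_desc/avail/used pointers in signatures become typedef'd variants. In the patch, those typedefs exist to force the alignment the virtio spec mandates on each ring component, along these lines (alignment values per the spec):

/* Alignment requirements, in bytes, from the virtio spec */
#define VRING_AVAIL_ALIGN_SIZE	2
#define VRING_USED_ALIGN_SIZE	4
#define VRING_DESC_ALIGN_SIZE	16

/* Typedefs that attach the spec-mandated alignment to the ring structs,
 * so code using them (like the vq_access_ok() signature in the hunk)
 * sees the layout the spec guarantees. */
typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
	vring_desc_t;
typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
	vring_avail_t;
typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_t;
typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_elem_t;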