Displaying 20 results from an estimated 30 matches for "vhost_init_vmap".
Did you mean: vhost_uninit_vmap
2018 Dec 13
0
[PATCH net-next 3/3] vhost: access vq metadata through kernel virtual address
...->used_ring));
+ memset(&vq->desc_ring, 0, sizeof(vq->desc_ring));
mutex_init(&vq->mutex);
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
@@ -614,6 +617,102 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
+ size_t size, int write)
+{
+ struct page **pages;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ int npinned;
+ void *vaddr;
+
+ pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ npinned = get_...
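The excerpts above are cut off by the search index right where the pinning happens. For orientation, a minimal sketch of the pin-and-vmap pattern a helper like vhost_init_vmap() follows is shown below; the struct layout, the example_* names and the error handling are invented for illustration rather than taken from the patch, and get_user_pages_fast() is used with its ~4.20-era (start, nr_pages, write, pages) signature.

/*
 * Illustrative sketch only, NOT the vhost_init_vmap() from the patches
 * above.  Struct layout and names are invented for the example.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct example_vmap {
	struct page **pages;	/* pinned user pages backing the ring */
	int npages;
	void *addr;		/* contiguous kernel mapping from vmap() */
};

static int example_vmap_init(struct example_vmap *map, unsigned long uaddr,
			     size_t size, int write)
{
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct page **pages;
	int npinned;
	void *vaddr;
	int err;

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Pin the userspace ring so its pages stay resident. */
	npinned = get_user_pages_fast(uaddr, npages, write, pages);
	if (npinned != npages) {
		err = -EFAULT;
		goto err_gup;
	}

	/* Give the pinned pages one contiguous kernel virtual address. */
	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	if (!vaddr) {
		err = -ENOMEM;
		goto err_gup;
	}

	map->pages = pages;
	map->npages = npages;
	map->addr = vaddr;
	return 0;

err_gup:
	while (npinned > 0)		/* also covers npinned < 0 */
		put_page(pages[--npinned]);
	kfree(pages);
	return err;
}

Teardown is the reverse: vunmap() the kernel address, then put_page() each pinned page and kfree() the array, which is presumably what the vhost_uninit_vmap() suggested by the search engine does.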
2019 Jan 04
1
[RFC PATCH V3 5/5] vhost: access vq metadata through kernel virtual address
...return 0;
> +err_mmu_notifier:
> + vhost_dev_free_iovecs(dev);
> err_cgroup:
> kthread_stop(worker);
> dev->worker = NULL;
> @@ -632,6 +709,72 @@ static void vhost_clear_msg(struct vhost_dev *dev)
> spin_unlock(&dev->iotlb_lock);
> }
>
> +static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
> + size_t size, int write)
> +{
> + struct page **pages;
> + int npages = DIV_ROUND_UP(size, PAGE_SIZE);
> + int npinned;
> + void *vaddr;
> + int err = 0;
> +
> + pages = kmalloc_array(npages, sizeof(struct page *), GFP_...
2018 Dec 13
5
[PATCH net-next 3/3] vhost: access vq metadata through kernel virtual address
...desc_ring, 0, sizeof(vq->desc_ring));
> mutex_init(&vq->mutex);
> vhost_vq_reset(dev, vq);
> if (vq->handle_kick)
> @@ -614,6 +617,102 @@ static void vhost_clear_msg(struct vhost_dev *dev)
> spin_unlock(&dev->iotlb_lock);
> }
>
> +static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
> + size_t size, int write)
> +{
> + struct page **pages;
> + int npages = DIV_ROUND_UP(size, PAGE_SIZE);
> + int npinned;
> + void *vaddr;
> +
> + pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> + if (...
2018 Dec 13
11
[PATCH net-next 0/3] vhost: accelerate metadata access through vmap()
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of copy_user() and friends, since they have too much
overhead, like checks, spec barriers or even hardware feature
toggling.
Test shows about 24% improvement on TX PPS. It should benefit other
cases as well.
Please review
Jason Wang (3):
vhost: generalize adding used elem
vhost: fine grain userspace memory
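The cover letter's argument is that each metadata access through copy_from_user()/copy_to_user() pays for access_ok() checks, SMAP toggling and speculation hardening, while a vmap()ed ring is read like ordinary kernel memory. A hedged illustration of the contrast, with invented names and endianness handling omitted, not code from the series:

#include <linux/uaccess.h>
#include <linux/virtio_ring.h>

/* The path the series wants to avoid: full uaccess machinery per read. */
static int read_avail_idx_copy(struct vring_avail __user *uavail,
			       __virtio16 *idx)
{
	return copy_from_user(idx, &uavail->idx, sizeof(*idx)) ? -EFAULT : 0;
}

/* The vmap() path: 'kavail' points into the kernel mapping of the ring. */
static __virtio16 read_avail_idx_vmap(struct vring_avail *kavail)
{
	return READ_ONCE(kavail->idx);
}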
2018 Dec 29
0
[RFC PATCH V3 5/5] vhost: access vq metadata through kernel virtual address
...>mm);
+ if (err)
+ goto err_mmu_notifier;
+
return 0;
+err_mmu_notifier:
+ vhost_dev_free_iovecs(dev);
err_cgroup:
kthread_stop(worker);
dev->worker = NULL;
@@ -632,6 +709,72 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
+ size_t size, int write)
+{
+ struct page **pages;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ int npinned;
+ void *vaddr;
+ int err = 0;
+
+ pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+...
2018 Dec 28
0
[RFC PATCH V2 3/3] vhost: access vq metadata through kernel virtual address
...>mm);
+ if (err)
+ goto err_mmu_notifier;
+
return 0;
+err_mmu_notifier:
+ vhost_dev_free_iovecs(dev);
err_cgroup:
kthread_stop(worker);
dev->worker = NULL;
@@ -611,6 +676,87 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
+ size_t size, int write)
+{
+ struct page **pages;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ int npinned;
+ void *vaddr;
+ int err = 0;
+
+ pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+...
2019 Mar 06
0
[RFC PATCH V2 5/5] vhost: access vq metadata through kernel virtual address
...>mm);
+ if (err)
+ goto err_mmu_notifier;
+
return 0;
+err_mmu_notifier:
+ vhost_dev_free_iovecs(dev);
err_cgroup:
kthread_stop(worker);
dev->worker = NULL;
@@ -633,6 +711,81 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+static int vhost_init_vmap(struct vhost_dev *dev,
+ struct vhost_vmap *map, unsigned long uaddr,
+ size_t size, int write)
+{
+ struct page **pages;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ int npinned;
+ void *vaddr;
+ int err = -EFAULT;
+
+ err = -ENOMEM;
+ pages = kmalloc_array(npages, sizeof(struct page...
2019 Mar 06
2
[RFC PATCH V2 5/5] vhost: access vq metadata through kernel virtual address
...return 0;
> +err_mmu_notifier:
> + vhost_dev_free_iovecs(dev);
> err_cgroup:
> kthread_stop(worker);
> dev->worker = NULL;
> @@ -633,6 +711,81 @@ static void vhost_clear_msg(struct vhost_dev *dev)
> spin_unlock(&dev->iotlb_lock);
> }
>
> +static int vhost_init_vmap(struct vhost_dev *dev,
> + struct vhost_vmap *map, unsigned long uaddr,
> + size_t size, int write)
> +{
> + struct page **pages;
> + int npages = DIV_ROUND_UP(size, PAGE_SIZE);
> + int npinned;
> + void *vaddr;
> + int err = -EFAULT;
> +
> + err = -ENOMEM;
&...
2018 Dec 29
12
[RFC PATCH V3 0/5]
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of copy_user() and friends, since they have too much
overhead, like checks, spec barriers or even hardware feature
toggling.
Test shows about 24% improvement on TX PPS. It should benefit other
cases as well.
Changes from V2:
- fix buggy range overlapping check
- tear down MMU notifier during vhost ioctl to make sure
2018 Dec 28
4
[RFC PATCH V2 0/3] vhost: accelerate metadata access through vmap()
Hi:
This series tries to access virtqueue metadata through a kernel virtual
address instead of copy_user() and friends, since they have too much
overhead, like checks, spec barriers or even hardware feature
toggling.
Test shows about 24% improvement on TX PPS. It should benefit other
cases as well.
Changes from V1:
- instead of pinning pages, use MMU notifier to invalidate vmaps and
remap during
2018 Dec 14
2
[PATCH net-next 3/3] vhost: access vq metadata through kernel virtual address
...it(&vq->mutex);
> > > vhost_vq_reset(dev, vq);
> > > if (vq->handle_kick)
> > > @@ -614,6 +617,102 @@ static void vhost_clear_msg(struct vhost_dev *dev)
> > > spin_unlock(&dev->iotlb_lock);
> > > }
> > > +static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
> > > + size_t size, int write)
> > > +{
> > > + struct page **pages;
> > > + int npages = DIV_ROUND_UP(size, PAGE_SIZE);
> > > + int npinned;
> > > + void *vaddr;
> > > +
> > >...
2019 Mar 06
12
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
This series tries to access virtqueue metadata through a kernel virtual
address instead of copy_user() and friends, since they have too much
overhead, like checks, spec barriers or even hardware feature
toggling. This is done by setting up a kernel address through vmap() and
registering an MMU notifier for invalidation.
Test shows about 24% improvement on TX PPS. TCP_STREAM doesn't see
obvious improvement.
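The invalidation side mentioned here (register an MMU notifier, invalidate the vmap when the user mappings change) roughly takes the shape sketched below. The example_* names and per-device layout are invented, and the long-stable invalidate_range() hook is used for brevity; the actual patches hook invalidate_range_start()/end().

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Invented per-device state for illustration only. */
struct example_vmap_dev {
	struct mm_struct *mm;		/* owner's address space */
	struct mmu_notifier mn;		/* registered against ->mm */
	unsigned long ring_uaddr;	/* user address of the mapped ring */
	size_t ring_size;
	bool ring_map_stale;		/* datapath re-pins/re-vmaps when set */
};

static void example_vmap_invalidate(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct example_vmap_dev *d =
		container_of(mn, struct example_vmap_dev, mn);

	/* Ignore invalidations that do not touch the mapped ring. */
	if (end <= d->ring_uaddr || start >= d->ring_uaddr + d->ring_size)
		return;

	/*
	 * Only flag the mapping here; the vunmap()/unpin and any fallback
	 * to copy_user() happen outside the notifier callback.
	 */
	WRITE_ONCE(d->ring_map_stale, true);
}

static const struct mmu_notifier_ops example_vmap_mn_ops = {
	.invalidate_range = example_vmap_invalidate,
};

static int example_vmap_register_notifier(struct example_vmap_dev *d)
{
	d->mn.ops = &example_vmap_mn_ops;
	return mmu_notifier_register(&d->mn, d->mm);
}

Flagging the mapping stale instead of calling vunmap() directly keeps the callback free of operations that may sleep; the actual teardown, re-pinning and any fallback to copy_user() would then happen outside the notifier, under the virtqueue mutex.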
2018 Dec 24
2
[PATCH net-next 3/3] vhost: access vq metadata through kernel virtual address
...set(dev, vq);
> > > > > if (vq->handle_kick)
> > > > > @@ -614,6 +617,102 @@ static void vhost_clear_msg(struct vhost_dev *dev)
> > > > > spin_unlock(&dev->iotlb_lock);
> > > > > }
> > > > > +static int vhost_init_vmap(struct vhost_vmap *map, unsigned long uaddr,
> > > > > + size_t size, int write)
> > > > > +{
> > > > > + struct page **pages;
> > > > > + int npages = DIV_ROUND_UP(size, PAGE_SIZE);
> > > > > + int npinned;
> > &...