search for: vhost_vdpa_map

Displaying 20 results from an estimated 23 matches for "vhost_vdpa_map".

2020 Oct 01
0
[PATCH] vhost-vdpa: fix page pinning leakage in error path
...vers/vhost/vdpa.c | 121 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 73 insertions(+), 48 deletions(-) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 796fe97..abc4aa2 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -565,6 +565,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, perm_to_iommu_flags(perm)); } + if (r) + vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1); return r; } @@ -592,21 +594,19 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, struct vhost_dev *dev = &v->vdev; struct vhos...
2020 Oct 01
0
[PATCH v2] vhost-vdpa: fix page pinning leakage in error path
...vers/vhost/vdpa.c | 121 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 73 insertions(+), 48 deletions(-) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 796fe97..abc4aa2 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -565,6 +565,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, perm_to_iommu_flags(perm)); } + if (r) + vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1); return r; } @@ -592,21 +594,19 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, struct vhost_dev *dev = &v->vdev; struct vhos...
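For readers skimming the truncated diff above, here is a minimal standalone C sketch of the error-path pattern both versions of the fix add: the IOTLB entry is inserted first, and if the subsequent map operation fails the entry is deleted again so no stale translation survives. All types and helper names below are illustrative stand-ins, not the kernel's vhost_iotlb or vDPA API.

/*
 * Standalone sketch of the error-path fix: add the IOTLB entry, try the
 * mapping, and roll the entry back if the mapping fails.  The types and
 * helpers are stand-ins, not the kernel API.
 */
#include <stdio.h>
#include <errno.h>

struct iotlb_entry { unsigned long long start, last; int used; };
struct iotlb { struct iotlb_entry slot; };

static int iotlb_add_range(struct iotlb *tlb, unsigned long long iova,
                           unsigned long long last)
{
    tlb->slot.start = iova;
    tlb->slot.last  = last;
    tlb->slot.used  = 1;
    return 0;
}

static void iotlb_del_range(struct iotlb *tlb, unsigned long long iova,
                            unsigned long long last)
{
    if (tlb->slot.used && tlb->slot.start == iova && tlb->slot.last == last)
        tlb->slot.used = 0;
}

/* Pretend hardware mapping that can fail. */
static int hw_map(unsigned long long iova, unsigned long long size)
{
    (void)iova;
    return size > (1ULL << 30) ? -ENOMEM : 0; /* reject absurdly large maps */
}

static int sketch_map(struct iotlb *tlb, unsigned long long iova,
                      unsigned long long size)
{
    int r = iotlb_add_range(tlb, iova, iova + size - 1);

    if (r)
        return r;

    r = hw_map(iova, size);
    if (r)                                    /* the fix: undo on failure */
        iotlb_del_range(tlb, iova, iova + size - 1);
    return r;
}

int main(void)
{
    struct iotlb tlb = { 0 };
    int r;

    r = sketch_map(&tlb, 0x1000, 0x1000);
    printf("small map: %d, entry left in iotlb: %d\n", r, tlb.slot.used);

    r = sketch_map(&tlb, 0x1000, 1ULL << 31);
    printf("huge map:  %d, entry left in iotlb: %d\n", r, tlb.slot.used);
    return 0;
}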
2020 Nov 03
0
[PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path"
...no valid PFN next to it */ > - this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL; > - > - if (last_pfn && (this_pfn == -1UL || > - this_pfn != last_pfn + 1)) { > - /* Pin a contiguous chunk of memory */ > - csize = last_pfn - map_pfn + 1; > - ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT, > - map_pfn << PAGE_SHIFT, > - msg->perm); > - if (ret) { > - /* > - * Unpin the rest chunks of memory on the > - * flight with no corresponding vdpa_map() > - * calls having been made yet. On the o...
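The hunk being reverted groups pinned pages into runs of consecutive PFNs and issues one map call per run. A hedged standalone sketch of that chunking loop, with stand-in names instead of the kernel helpers (page_to_pfn, vhost_vdpa_map, and so on), looks roughly like this:

/*
 * Sketch of the "pin a contiguous chunk" loop quoted above: walk an
 * array of page-frame numbers and emit one map call per run of
 * consecutive PFNs.  PAGE_SHIFT_SK and do_map() are local stand-ins.
 */
#include <stdio.h>

#define PAGE_SHIFT_SK 12

static void do_map(unsigned long long iova, unsigned long long pfn,
                   unsigned long long npages)
{
    printf("map iova 0x%llx -> pa 0x%llx, %llu page(s)\n",
           iova, pfn << PAGE_SHIFT_SK, npages);
}

int main(void)
{
    /* Pretend these came back from pinning user pages. */
    unsigned long long pfns[] = { 100, 101, 102, 200, 300, 301 };
    unsigned long long npfns = sizeof(pfns) / sizeof(pfns[0]);
    unsigned long long iova = 0x10000;
    unsigned long long map_pfn = pfns[0], last_pfn = pfns[0];

    for (unsigned long long i = 1; i <= npfns; i++) {
        /* -1 marks "no further PFN", forcing the final chunk out. */
        unsigned long long this_pfn =
            i < npfns ? pfns[i] : (unsigned long long)-1;

        if (this_pfn != last_pfn + 1) {
            unsigned long long csize = last_pfn - map_pfn + 1;

            do_map(iova, map_pfn, csize);
            iova += csize << PAGE_SHIFT_SK;
            map_pfn = this_pfn;
        }
        last_pfn = this_pfn;
    }
    return 0;
}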
2020 Jun 28
2
[PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
...st_vdpa_reset(struct vhost_vdpa *v) > const struct vdpa_config_ops *ops = vdpa->config; > > ops->set_status(vdpa, 0); > + v->in_batch = 0; > } > > static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) > @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, > > if (ops->dma_map) > r = ops->dma_map(vdpa, iova, size, pa, perm); > - else if (ops->set_map) > - r = ops->set_map(vdpa, dev->iotlb); > - else > + else if (ops->set_map) { > + if (!v->in_batch) > + r = ops->set_m...
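The batching hint in this RFC boils down to: while a batch is in flight, skip the per-update set_map() and issue a single set_map() when the batch ends. A minimal standalone sketch of that idea, using stand-in names rather than the real vdpa_config_ops or in_batch field, is:

/*
 * Sketch of the batching hint: per-update calls skip the expensive
 * set_map() while a batch is open, and one set_map() flushes the whole
 * batch at the end.  Everything here is a stand-in for the real code.
 */
#include <stdio.h>
#include <stdbool.h>

struct dev_sketch {
    bool in_batch;
    int  set_map_calls;
};

static int set_map(struct dev_sketch *d)
{
    d->set_map_calls++;
    return 0;
}

static int map_one(struct dev_sketch *d)
{
    /* Only push the full mapping table if nobody is batching updates. */
    return d->in_batch ? 0 : set_map(d);
}

static void batch_begin(struct dev_sketch *d) { d->in_batch = true; }

static int batch_end(struct dev_sketch *d)
{
    d->in_batch = false;
    return set_map(d);            /* flush the whole batch once */
}

int main(void)
{
    struct dev_sketch d = { 0 };

    batch_begin(&d);
    for (int i = 0; i < 100; i++)
        map_one(&d);
    batch_end(&d);

    printf("set_map() called %d time(s) for 100 updates\n", d.set_map_calls);
    return 0;
}

Note how the hunk above also clears v->in_batch in vhost_vdpa_reset(), so a device reset never leaves a batch dangling.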
2020 Sep 24
30
[RFC PATCH 00/24] Control VQ support in vDPA
Hi All: This series tries to add support for the control virtqueue in vDPA. The control virtqueue is used by networking devices to accept various commands from the driver, and it is required for multiqueue and other configurations. When the vhost-vDPA bus driver is used for a VM, the control virtqueue should be shadowed by the userspace VMM (QEMU) instead of being assigned directly to the guest. This is
2023 Jan 24
0
[RFC PATCH 02/19] drivers/vhost: Convert to use vm_account
On Tue, Jan 24, 2023 at 04:42:31PM +1100, Alistair Popple wrote: > diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c > index ec32f78..a31dd53 100644 > --- a/drivers/vhost/vdpa.c > +++ b/drivers/vhost/vdpa.c ... > @@ -780,6 +780,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, > u32 asid = iotlb_to_asid(iotlb); > int r = 0; > > + if (!vdpa->use_va) > + if (vm_account_pinned(&dev->vm_account, PFN_DOWN(size))) > + return -ENOMEM; > + > r = vhost_iotlb_add_range_ctx(iotlb, iova, io...
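The added hunk charges the pinned page count before the mapping is created and bails out with -ENOMEM if the charge fails. A small standalone sketch of that accounting pattern, with a stand-in structure rather than the proposed vm_account API, is:

/*
 * Sketch of the accounting pattern: charge the pages to be pinned up
 * front, refuse the mapping when the limit would be exceeded, and give
 * the charge back at unmap time.  Names and the limit are stand-ins.
 */
#include <stdio.h>
#include <errno.h>

struct vm_account_sketch {
    unsigned long pinned;   /* pages currently charged */
    unsigned long limit;    /* e.g. a locked-memory limit in pages */
};

static int account_pinned(struct vm_account_sketch *a, unsigned long npages)
{
    if (a->pinned + npages > a->limit)
        return -ENOMEM;
    a->pinned += npages;
    return 0;
}

static void unaccount_pinned(struct vm_account_sketch *a, unsigned long npages)
{
    a->pinned -= npages;
}

static int map_sketch(struct vm_account_sketch *a, unsigned long npages)
{
    int r = account_pinned(a, npages);   /* charge before pinning/mapping */

    if (r)
        return r;
    /* ... pin the pages and program the device/IOMMU here ... */
    return 0;
}

static void unmap_sketch(struct vm_account_sketch *a, unsigned long npages)
{
    /* ... unpin the pages here ... */
    unaccount_pinned(a, npages);
}

int main(void)
{
    struct vm_account_sketch a = { .pinned = 0, .limit = 1024 };
    int r;

    r = map_sketch(&a, 512);
    printf("map 512 pages:  %d, pinned now %lu\n", r, a.pinned);

    r = map_sketch(&a, 1024);
    printf("map 1024 pages: %d (would exceed the 1024-page limit)\n", r);

    unmap_sketch(&a, 512);
    printf("after unmap, pinned = %lu\n", a.pinned);
    return 0;
}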
2020 Jun 29
1
[PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
...const struct vdpa_config_ops *ops = vdpa->config; > > > ops->set_status(vdpa, 0); > > > + v->in_batch = 0; > > > } > > > static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) > > > @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, > > > if (ops->dma_map) > > > r = ops->dma_map(vdpa, iova, size, pa, perm); > > > - else if (ops->set_map) > > > - r = ops->set_map(vdpa, dev->iotlb); > > > - else > > > + else if (ops->set_map) {...
2023 Mar 10
0
[PATCH] vhost-vdpa: cleanup memory maps when closing vdpa fds
... > > > > > > Please consider the following lifecycle of the vdpa device: > > > > > > 1. vhost_vdpa_open > > > vhost_vdpa_alloc_domain > > > > > > 2. vhost_vdpa_pa_map > > > pin_user_pages > > > vhost_vdpa_map > > > iommu_map > > > > > > 3. kill QEMU > > > > > > 4. vhost_vdpa_release > > > vhost_vdpa_free_domain > > > > > > In this case, we have no opportunity to invoke unpin_user_pages or > > > iommu_unmap...
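The lifecycle quoted above shows the problem: mappings created through vhost_vdpa_map() while the fd is open are never undone if QEMU is killed, so the release path must walk them and unpin/unmap each one. A hedged standalone sketch of that release-time walk, using illustrative stand-ins rather than the kernel data structures, is:

/*
 * Sketch of the fix being discussed: every mapping recorded while the
 * fd is open is walked at release time so its pages are unpinned and
 * its IOMMU entries removed.  The list and helpers are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct map_rec {
    unsigned long long iova, size;
    struct map_rec *next;
};

struct vdpa_fd_sketch { struct map_rec *maps; };

static void record_map(struct vdpa_fd_sketch *f, unsigned long long iova,
                       unsigned long long size)
{
    struct map_rec *m = malloc(sizeof(*m));

    if (!m)
        return;
    m->iova = iova;
    m->size = size;
    m->next = f->maps;
    f->maps = m;                 /* pretend pin + iommu_map happened here */
}

static void release_fd(struct vdpa_fd_sketch *f)
{
    while (f->maps) {
        struct map_rec *m = f->maps;

        f->maps = m->next;
        /* the missing step: unpin the pages and unmap the IOMMU range */
        printf("unpin + unmap iova 0x%llx size 0x%llx\n", m->iova, m->size);
        free(m);
    }
}

int main(void)
{
    struct vdpa_fd_sketch f = { 0 };

    record_map(&f, 0x1000, 0x2000);
    record_map(&f, 0x10000, 0x1000);
    release_fd(&f);              /* e.g. QEMU was killed and the fd closed */
    return 0;
}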
2020 Jun 18
0
[PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
...da); @@ -125,6 +128,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v) const struct vdpa_config_ops *ops = vdpa->config; ops->set_status(vdpa, 0); + v->in_batch = 0; } static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, if (ops->dma_map) r = ops->dma_map(vdpa, iova, size, pa, perm); - else if (ops->set_map) - r = ops->set_map(vdpa, dev->iotlb); - else + else if (ops->set_map) { + if (!v->in_batch) + r = ops->set_map(vdpa, dev->iotlb); + } else r = iomm...
2020 Jun 29
0
[PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
... >> const struct vdpa_config_ops *ops = vdpa->config; >> >> ops->set_status(vdpa, 0); >> + v->in_batch = 0; >> } >> >> static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) >> @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, >> >> if (ops->dma_map) >> r = ops->dma_map(vdpa, iova, size, pa, perm); >> - else if (ops->set_map) >> - r = ops->set_map(vdpa, dev->iotlb); >> - else >> + else if (ops->set_map) { >> + if (!v->i...
2020 Jun 18
6
[PATCH RFC 0/5] support batched IOTLB updating in vhost-vdpa
Hi all: This series tries to support batched IOTLB updating in vhost-vdpa. Currently vhost-vdpa accepts userspace mappings via the IOTLB API, and it can only forward one mapping at a time to the IOMMU or device through the IOMMU API or dma_map(). Though set_map() is designed to have the capability to pass an rbtree-based mapping to the vDPA device, it is still called at least once for each VHOST_IOTLB_UPDATE or
2020 Jul 01
5
[PATCH 0/5]
Hi all: This series tries to support batched IOTLB updating in vhost-vdpa. Currently vhost-vdpa accepts userspace mappings via the IOTLB API, and it can only forward one mapping at a time to the IOMMU or device through the IOMMU API or dma_map(). Though set_map() is designed to have the capability to pass an rbtree-based mapping to the vDPA device, it is still called at least once for each VHOST_IOTLB_UPDATE or
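From userspace, the benefit of this batching is that a burst of IOTLB updates can be bracketed by begin/end markers so the backend applies them with one set_map() instead of one per VHOST_IOTLB_UPDATE. The sketch below models that message flow with made-up message types and a stub send routine; it is not the vhost UAPI.

/*
 * Userspace-side sketch of batching: wrap a burst of updates in
 * begin/end marker messages so the backend flushes the table once.
 * The struct and type values are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

enum msg_type { MSG_BATCH_BEGIN, MSG_UPDATE, MSG_BATCH_END };

struct iotlb_msg {
    enum msg_type type;
    uint64_t iova, size, uaddr;
};

/* Stand-in for writing the message to the vhost-vdpa fd. */
static void send_msg(const struct iotlb_msg *m)
{
    printf("send type=%d iova=0x%llx size=0x%llx\n",
           m->type, (unsigned long long)m->iova,
           (unsigned long long)m->size);
}

int main(void)
{
    struct iotlb_msg begin = { .type = MSG_BATCH_BEGIN };
    struct iotlb_msg end   = { .type = MSG_BATCH_END };

    send_msg(&begin);
    for (uint64_t i = 0; i < 4; i++) {
        struct iotlb_msg m = {
            .type = MSG_UPDATE,
            .iova = 0x100000 + i * 0x1000,
            .size = 0x1000,
            .uaddr = 0x7f0000000000ull + i * 0x1000,
        };
        send_msg(&m);
    }
    send_msg(&end);   /* backend flushes the whole table once here */
    return 0;
}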
2023 Jan 06
3
[PATCH 1/8] iommu: Add a gfp parameter to iommu_map()
...TE | IOMMU_CACHE, GFP_KERNEL); if (!ret) { size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 166044642fd5cc..e555c3bd1c030b 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -777,7 +777,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, r = ops->set_map(vdpa, asid, iotlb); } else { r = iommu_map(v->domain, iova, pa, size, - perm_to_iommu_flags(perm)); + perm_to_iommu_flags(perm), GFP_KERNEL); } if (r) { vhost_iotlb_del_range(iotlb, iova, iova +...
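The one-line change above threads a gfp argument through to iommu_map() so the caller chooses how page-table memory may be allocated (and, later in the series, whether it is charged to the memory cgroup). A standalone sketch of the new call shape, with stand-in names and flag values rather than the real IOMMU API, is:

/*
 * Sketch of the signature change: the mapping helper now takes an
 * explicit gfp argument.  Everything below is a stand-in, not the
 * real iommu_map().
 */
#include <stdio.h>

typedef unsigned int gfp_sketch_t;
#define GFP_KERNEL_SK          0x1u
#define GFP_KERNEL_ACCOUNT_SK  (GFP_KERNEL_SK | 0x2u)  /* charged to memcg */
#define PROT_RW_SK             0x3                     /* read|write stand-in */

static int iommu_map_sketch(unsigned long iova, unsigned long pa,
                            unsigned long size, int prot, gfp_sketch_t gfp)
{
    /* A real implementation would allocate IOPTEs with this gfp. */
    printf("map iova=0x%lx pa=0x%lx size=0x%lx prot=%d gfp=0x%x\n",
           iova, pa, size, prot, gfp);
    return 0;
}

int main(void)
{
    /* Sleepable context, uncharged allocation (the vhost-vdpa call above). */
    iommu_map_sketch(0x1000, 0x40000, 0x1000, PROT_RW_SK, GFP_KERNEL_SK);

    /* Same call shape, but asking for cgroup-charged allocations. */
    iommu_map_sketch(0x2000, 0x41000, 0x1000, PROT_RW_SK,
                     GFP_KERNEL_ACCOUNT_SK);
    return 0;
}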
2023 Jan 06
8
[PATCH 0/8] Let iommufd charge IOPTE allocations to the memory cgroup
iommufd follows the same design as KVM and uses memory cgroups to limit the amount of kernel memory an iommufd file descriptor can pin down. The various internal data structures already use GFP_KERNEL_ACCOUNT to charge their own memory. However, one of the biggest consumers of kernel memory is the IOPTEs stored under the iommu_domain, and these allocations are not tracked. This series is the first