On 2021/3/15 1:37 PM, Xie Yongji wrote:
> The upcoming patch is going to support VA mapping. So let's
> factor out the logic of PA mapping first to make the code
> more readable.
>
> Suggested-by: Jason Wang <jasowang at redhat.com>
> Signed-off-by: Xie Yongji <xieyongji at bytedance.com>
Acked-by: Jason Wang <jasowang at redhat.com>
While at it, I think it's better to also factor out the unmap() part,
since the unpinning and page dirty tracking are not needed for VA
devices.
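
Something like the following sketch, just to illustrate the idea
(completely untested, and vhost_vdpa_pa_unmap()/vhost_vdpa_va_unmap()
are only suggested names, not existing helpers). The PA variant keeps
the unpin and dirty page tracking, the VA variant only needs to drop
the iotlb entries:

/* Sketch only: suggested split of the unmap path, untested */
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, iova,
					      iova + size - 1)) != NULL) {
		/* Dirty and unpin the pages backing the PA mapping */
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;

	/* Nothing is pinned for a VA mapping, just drop the entries */
	while ((map = vhost_iotlb_itree_first(iotlb, iova,
					      iova + size - 1)) != NULL)
		vhost_iotlb_map_free(iotlb, map);
}

Then vhost_vdpa_unmap() could simply call one or the other depending on
whether the device uses VA.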
Thanks
> ---
> drivers/vhost/vdpa.c | 46 ++++++++++++++++++++++++++++------------------
> 1 file changed, 28 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index b24ec69a374b..7c83fbf3edac 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -579,37 +579,28 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
> }
> }
>
> -static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> - struct vhost_iotlb_msg *msg)
> +static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
> + u64 iova, u64 size, u64 uaddr, u32 perm)
> {
> struct vhost_dev *dev = &v->vdev;
> - struct vhost_iotlb *iotlb = dev->iotlb;
> struct page **page_list;
> unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
> unsigned int gup_flags = FOLL_LONGTERM;
> unsigned long npages, cur_base, map_pfn, last_pfn = 0;
> unsigned long lock_limit, sz2pin, nchunks, i;
> - u64 iova = msg->iova;
> + u64 start = iova;
> long pinned;
> int ret = 0;
>
> - if (msg->iova < v->range.first ||
> - msg->iova + msg->size - 1 > v->range.last)
> - return -EINVAL;
> -
> - if (vhost_iotlb_itree_first(iotlb, msg->iova,
> - msg->iova + msg->size - 1))
> - return -EEXIST;
> -
> /* Limit the use of memory for bookkeeping */
> page_list = (struct page **) __get_free_page(GFP_KERNEL);
> if (!page_list)
> return -ENOMEM;
>
> - if (msg->perm & VHOST_ACCESS_WO)
> + if (perm & VHOST_ACCESS_WO)
> gup_flags |= FOLL_WRITE;
>
> - npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> + npages = PAGE_ALIGN(size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
> if (!npages) {
> ret = -EINVAL;
> goto free;
> @@ -623,7 +614,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> goto unlock;
> }
>
> - cur_base = msg->uaddr & PAGE_MASK;
> + cur_base = uaddr & PAGE_MASK;
> iova &= PAGE_MASK;
> nchunks = 0;
>
> @@ -654,7 +645,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
> ret = vhost_vdpa_map(v, iova, csize,
> map_pfn << PAGE_SHIFT,
> - msg->perm);
> + perm);
> if (ret) {
> /*
> * Unpin the pages that are left unmapped
> @@ -683,7 +674,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>
> /* Pin the rest chunk */
> ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) <<
PAGE_SHIFT,
> - map_pfn << PAGE_SHIFT, msg->perm);
> + map_pfn << PAGE_SHIFT, perm);
> out:
> if (ret) {
> if (nchunks) {
> @@ -702,13 +693,32 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> for (pfn = map_pfn; pfn <= last_pfn; pfn++)
> unpin_user_page(pfn_to_page(pfn));
> }
> - vhost_vdpa_unmap(v, msg->iova, msg->size);
> + vhost_vdpa_unmap(v, start, size);
> }
> unlock:
> mmap_read_unlock(dev->mm);
> free:
> free_page((unsigned long)page_list);
> return ret;
> +
> +}
> +
> +static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
> + struct vhost_iotlb_msg *msg)
> +{
> + struct vhost_dev *dev = &v->vdev;
> + struct vhost_iotlb *iotlb = dev->iotlb;
> +
> + if (msg->iova < v->range.first ||
> + msg->iova + msg->size - 1 > v->range.last)
> + return -EINVAL;
> +
> + if (vhost_iotlb_itree_first(iotlb, msg->iova,
> + msg->iova + msg->size - 1))
> + return -EEXIST;
> +
> + return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
> + msg->perm);
> }
>
> static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,