Thomas Zimmermann
2021-Apr-16 13:31 UTC
[Nouveau] [PATCH v3 5/7] drm/vmwgfx: Inline ttm_bo_mmap() into vmwgfx driver
The vmwgfx driver is the only remaining user of ttm_bo_mmap(). Inline
the code. The internal helper ttm_bo_vm_lookup() is now also part of
vmwgfx as vmw_bo_vm_lookup().

v2:
	* replace pr_err() with drm_err() (Zack)

Signed-off-by: Thomas Zimmermann <tzimmermann at suse.de>
Reviewed-by: Zack Rusin <zackr at vmware.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 56 ++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index cb9975889e2f..c8b6543b4e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,6 +27,32 @@
 
 #include "vmwgfx_drv.h"
 
+static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
+						  unsigned long offset,
+						  unsigned long pages)
+{
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+	struct drm_device *drm = &dev_priv->drm;
+	struct drm_vma_offset_node *node;
+	struct ttm_buffer_object *bo = NULL;
+
+	drm_vma_offset_lock_lookup(bdev->vma_manager);
+
+	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
+	if (likely(node)) {
+		bo = container_of(node, struct ttm_buffer_object,
+				  base.vma_node);
+		bo = ttm_bo_get_unless_zero(bo);
+	}
+
+	drm_vma_offset_unlock_lookup(bdev->vma_manager);
+
+	if (!bo)
+		drm_err(drm, "Could not find buffer object to map\n");
+
+	return bo;
+}
+
 int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	static const struct vm_operations_struct vmw_vm_ops = {
@@ -41,10 +67,28 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	};
 	struct drm_file *file_priv = filp->private_data;
 	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
-	int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+	struct ttm_device *bdev = &dev_priv->bdev;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+		return -EINVAL;
+
+	bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+	if (unlikely(!bo))
+		return -EINVAL;
 
-	if (ret)
-		return ret;
+	if (unlikely(!bo->bdev->funcs->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+	ret = bo->bdev->funcs->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	ret = ttm_bo_mmap_obj(vma, bo);
+	if (unlikely(ret != 0))
+		goto out_unref;
 
 	vma->vm_ops = &vmw_vm_ops;
 
@@ -52,7 +96,13 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (!is_cow_mapping(vma->vm_flags))
 		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
 
+	ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
+
 	return 0;
+
+out_unref:
+	ttm_bo_put(bo);
+	return ret;
 }
 
 /* struct vmw_validation_mem callback */
-- 
2.31.1
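
For readers skimming the hunks above, this is the resulting vmw_mmap() assembled
from the new-side lines of the diff, with comments added for orientation. It is a
sketch only: the vmw_vm_ops initializer body is elided because it is untouched by
the patch and not shown in the hunks.

int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	static const struct vm_operations_struct vmw_vm_ops = {
		/* fault/open/close handlers unchanged; not shown in the hunks */
	};
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
	struct ttm_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo;
	int ret;

	/* Offsets below DRM_FILE_PAGE_OFFSET_START do not refer to mappable BOs. */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	/* Look up the BO in the VMA offset manager; takes a reference on success. */
	bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	/* Access check, still routed through the TTM verify_access callback here. */
	if (unlikely(!bo->bdev->funcs->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = bo->bdev->funcs->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	/* Set up the mapping; ttm_bo_mmap_obj() takes its own BO reference. */
	ret = ttm_bo_mmap_obj(vma, bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &vmw_vm_ops;

	if (!is_cow_mapping(vma->vm_flags))
		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;

	ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */

	return 0;

out_unref:
	ttm_bo_put(bo);
	return ret;
}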
Christian König
2021-Apr-16 13:46 UTC
[Nouveau] [PATCH v3 5/7] drm/vmwgfx: Inline ttm_bo_mmap() into vmwgfx driver
On 16.04.21 at 15:31, Thomas Zimmermann wrote:
> The vmwgfx driver is the only remaining user of ttm_bo_mmap(). Inline
> the code. The internal helper ttm_bo_vm_lookup() is now also part of
> vmwgfx as vmw_bo_vm_lookup().
>
> v2:
> 	* replace pr_err() with drm_err() (Zack)
>
> Signed-off-by: Thomas Zimmermann <tzimmermann at suse.de>
> Reviewed-by: Zack Rusin <zackr at vmware.com>
> ---
>  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 56 ++++++++++++++++++++++--
>  1 file changed, 53 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
> index cb9975889e2f..c8b6543b4e39 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
> @@ -27,6 +27,32 @@
>  
>  #include "vmwgfx_drv.h"
>  
> +static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
> +						  unsigned long offset,
> +						  unsigned long pages)
> +{
> +	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
> +	struct drm_device *drm = &dev_priv->drm;
> +	struct drm_vma_offset_node *node;
> +	struct ttm_buffer_object *bo = NULL;
> +
> +	drm_vma_offset_lock_lookup(bdev->vma_manager);
> +
> +	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
> +	if (likely(node)) {
> +		bo = container_of(node, struct ttm_buffer_object,
> +				  base.vma_node);
> +		bo = ttm_bo_get_unless_zero(bo);
> +	}
> +
> +	drm_vma_offset_unlock_lookup(bdev->vma_manager);
> +
> +	if (!bo)
> +		drm_err(drm, "Could not find buffer object to map\n");
> +
> +	return bo;
> +}
> +
>  int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
>  {
>  	static const struct vm_operations_struct vmw_vm_ops = {
> @@ -41,10 +67,28 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
>  	};
>  	struct drm_file *file_priv = filp->private_data;
>  	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
> -	int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
> +	struct ttm_device *bdev = &dev_priv->bdev;
> +	struct ttm_buffer_object *bo;
> +	int ret;
> +
> +	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
> +		return -EINVAL;
> +
> +	bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
> +	if (unlikely(!bo))
> +		return -EINVAL;
>  
> -	if (ret)
> -		return ret;
> +	if (unlikely(!bo->bdev->funcs->verify_access)) {
> +		ret = -EPERM;
> +		goto out_unref;
> +	}
> +	ret = bo->bdev->funcs->verify_access(bo, filp);

Is there any reason we can't call vmw_verify_access() directly here?

That would allow us to completely nuke the verify_access callback as
well, as far as I can see.

Regards,
Christian.

> +	if (unlikely(ret != 0))
> +		goto out_unref;
> +
> +	ret = ttm_bo_mmap_obj(vma, bo);
> +	if (unlikely(ret != 0))
> +		goto out_unref;
>  
>  	vma->vm_ops = &vmw_vm_ops;
>  
> @@ -52,7 +96,13 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
>  	if (!is_cow_mapping(vma->vm_flags))
>  		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
>  
> +	ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
> +
>  	return 0;
> +
> +out_unref:
> +	ttm_bo_put(bo);
> +	return ret;
>  }
>  
>  /* struct vmw_validation_mem callback */
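
A rough sketch of the suggestion, for illustration only: assuming vmw_verify_access(),
the driver's own verify_access implementation (currently static inside vmwgfx), were
made callable from vmw_mmap(), for example by dropping its static qualifier or by
open-coding its body, the callback round-trip above could reduce to a direct call.
The availability of that function here is an assumption, not part of the posted patch.

	/* Illustrative sketch, not part of the posted patch: call the
	 * driver's own access check directly instead of going through
	 * bo->bdev->funcs->verify_access. The NULL check on the callback
	 * and the TTM indirection then become unnecessary.
	 */
	ret = vmw_verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;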