Danilo Krummrich
2023-Nov-03 13:14 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
On Fri, Nov 03, 2023 at 08:18:35AM +0100, Christian König wrote:
> On 02.11.23 at 00:31, Danilo Krummrich wrote:
> > Implement reference counting for struct drm_gpuvm.
>
> From the design point of view what is that good for?

It was discussed in this thread [1].

Essentially, the idea is to make sure that vm_bo->vm is always valid without
the driver having to take extra care. It also ensures that GPUVM can't be
freed with mappings still held.

> Background is that the most common use case I see is that this object is
> embedded into something else and a reference count is then not really a good
> idea.

Do you have a specific use-case in mind where this would interfere?

> Thanks,
> Christian.

[1] https://lore.kernel.org/dri-devel/6fa058a4-20d3-44b9-af58-755cfb375d75 at redhat.com/

> >
> > Signed-off-by: Danilo Krummrich <dakr at redhat.com>
> > ---
> >  drivers/gpu/drm/drm_gpuvm.c            | 44 +++++++++++++++++++-------
> >  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 20 +++++++++---
> >  include/drm/drm_gpuvm.h                | 31 +++++++++++++++++-
> >  3 files changed, 78 insertions(+), 17 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> > index 53e2c406fb04..6a88eafc5229 100644
> > --- a/drivers/gpu/drm/drm_gpuvm.c
> > +++ b/drivers/gpu/drm/drm_gpuvm.c
> > @@ -746,6 +746,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
> >  	gpuvm->rb.tree = RB_ROOT_CACHED;
> >  	INIT_LIST_HEAD(&gpuvm->rb.list);
> >
> > +	kref_init(&gpuvm->kref);
> > +
> >  	gpuvm->name = name ? name : "unknown";
> >  	gpuvm->flags = flags;
> >  	gpuvm->ops = ops;
> > @@ -770,15 +772,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
> >  }
> >  EXPORT_SYMBOL_GPL(drm_gpuvm_init);
> >
> > -/**
> > - * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
> > - * @gpuvm: pointer to the &drm_gpuvm to clean up
> > - *
> > - * Note that it is a bug to call this function on a manager that still
> > - * holds GPU VA mappings.
> > - */
> > -void
> > -drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
> > +static void
> > +drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
> >  {
> >  	gpuvm->name = NULL;
> >
> > @@ -790,7 +785,33 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
> >
> >  	drm_gem_object_put(gpuvm->r_obj);
> >  }
> > -EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
> > +
> > +static void
> > +drm_gpuvm_free(struct kref *kref)
> > +{
> > +	struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
> > +
> > +	if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
> > +		return;
> > +
> > +	drm_gpuvm_fini(gpuvm);
> > +
> > +	gpuvm->ops->vm_free(gpuvm);
> > +}
> > +
> > +/**
> > + * drm_gpuvm_put() - drop a struct drm_gpuvm reference
> > + * @gpuvm: the &drm_gpuvm to release the reference of
> > + *
> > + * This releases a reference to @gpuvm.
> > + */
> > +void
> > +drm_gpuvm_put(struct drm_gpuvm *gpuvm)
> > +{
> > +	if (gpuvm)
> > +		kref_put(&gpuvm->kref, drm_gpuvm_free);
> > +}
> > +EXPORT_SYMBOL_GPL(drm_gpuvm_put);
> >
> >  static int
> >  __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
> > @@ -843,7 +864,7 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm,
> >  	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
> >  		return -EINVAL;
> >
> > -	return __drm_gpuva_insert(gpuvm, va);
> > +	return __drm_gpuva_insert(drm_gpuvm_get(gpuvm), va);
> >  }
> >  EXPORT_SYMBOL_GPL(drm_gpuva_insert);
> >
> > @@ -876,6 +897,7 @@ drm_gpuva_remove(struct drm_gpuva *va)
> >  	}
> >
> >  	__drm_gpuva_remove(va);
> > +	drm_gpuvm_put(va->vm);
> >  }
> >  EXPORT_SYMBOL_GPL(drm_gpuva_remove);
> >
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> > index 54be12c1272f..cb2f06565c46 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> > @@ -1780,6 +1780,18 @@ nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
> >  	}
> >  }
> >
> > +static void
> > +nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
> > +{
> > +	struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
> > +
> > +	kfree(uvmm);
> > +}
> > +
> > +static const struct drm_gpuvm_ops gpuvm_ops = {
> > +	.vm_free = nouveau_uvmm_free,
> > +};
> > +
> >  int
> >  nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
> >  			   void *data,
> > @@ -1830,7 +1842,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
> >  		       NOUVEAU_VA_SPACE_END,
> >  		       init->kernel_managed_addr,
> >  		       init->kernel_managed_size,
> > -		       NULL);
> > +		       &gpuvm_ops);
> >  	/* GPUVM takes care from here on. */
> >  	drm_gem_object_put(r_obj);
> >
> > @@ -1849,8 +1861,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
> >  	return 0;
> >
> >  out_gpuvm_fini:
> > -	drm_gpuvm_destroy(&uvmm->base);
> > -	kfree(uvmm);
> > +	drm_gpuvm_put(&uvmm->base);
> >  out_unlock:
> >  	mutex_unlock(&cli->mutex);
> >  	return ret;
> > @@ -1902,7 +1913,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
> >  	mutex_lock(&cli->mutex);
> >  	nouveau_vmm_fini(&uvmm->vmm);
> > -	drm_gpuvm_destroy(&uvmm->base);
> > -	kfree(uvmm);
> > +	drm_gpuvm_put(&uvmm->base);
> >  	mutex_unlock(&cli->mutex);
> >  }
> >
> > diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> > index 0c2e24155a93..4e6e1fd3485a 100644
> > --- a/include/drm/drm_gpuvm.h
> > +++ b/include/drm/drm_gpuvm.h
> > @@ -247,6 +247,11 @@ struct drm_gpuvm {
> >  		struct list_head list;
> >  	} rb;
> >
> > +	/**
> > +	 * @kref: reference count of this object
> > +	 */
> > +	struct kref kref;
> > +
> >  	/**
> >  	 * @kernel_alloc_node:
> >  	 *
> > @@ -273,7 +278,23 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
> >  		    u64 start_offset, u64 range,
> >  		    u64 reserve_offset, u64 reserve_range,
> >  		    const struct drm_gpuvm_ops *ops);
> > -void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
> > +
> > +/**
> > + * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
> > + * @gpuvm: the &drm_gpuvm to acquire the reference of
> > + *
> > + * This function acquires an additional reference to @gpuvm. It is illegal to
> > + * call this without already holding a reference. No locks required.
> > + */
> > +static inline struct drm_gpuvm *
> > +drm_gpuvm_get(struct drm_gpuvm *gpuvm)
> > +{
> > +	kref_get(&gpuvm->kref);
> > +
> > +	return gpuvm;
> > +}
> > +
> > +void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
> >
> >  bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
> >  bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
> > @@ -673,6 +694,14 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
> >   * operations to drivers.
> >   */
> >  struct drm_gpuvm_ops {
> > +	/**
> > +	 * @vm_free: called when the last reference of a struct drm_gpuvm is
> > +	 * dropped
> > +	 *
> > +	 * This callback is mandatory.
> > +	 */
> > +	void (*vm_free)(struct drm_gpuvm *gpuvm);
> > +
> >  	/**
> >  	 * @op_alloc: called when the &drm_gpuvm allocates
> >  	 * a struct drm_gpuva_op
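As a reading aid, here is a minimal, hypothetical driver-side sketch of the lifecycle this patch establishes; the names my_vm, my_vm_free and my_vm_fini are made up for illustration, and the pattern simply mirrors the nouveau_uvmm changes quoted above:

#include <linux/container_of.h>
#include <linux/slab.h>
#include <drm/drm_gpuvm.h>

/* Hypothetical driver object embedding the GPUVM. */
struct my_vm {
	struct drm_gpuvm base;		/* embedded; freed via .vm_free */
	/* driver-private state ... */
};

/* Called by GPUVM once the last reference is dropped. */
static void my_vm_free(struct drm_gpuvm *gpuvm)
{
	kfree(container_of(gpuvm, struct my_vm, base));
}

static const struct drm_gpuvm_ops my_vm_ops = {
	.vm_free = my_vm_free,		/* mandatory with this patch */
};

/*
 * Creation: drm_gpuvm_init() starts the refcount at 1 (kref_init()).
 * Every drm_gpuva_insert() takes an extra reference on the VM and
 * drm_gpuva_remove() drops it again, so the VM cannot be freed while
 * mappings still exist.
 *
 * Teardown: instead of drm_gpuvm_destroy() + kfree(), the driver drops
 * its initial reference; my_vm_free() runs once the count hits zero.
 */
static void my_vm_fini(struct my_vm *vm)
{
	drm_gpuvm_put(&vm->base);
}

The key difference from the previous drm_gpuvm_destroy() flow is that freeing is driven by the reference count rather than by an explicit destroy call.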
Christian König
2023-Nov-03 14:04 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
On 03.11.23 at 14:14, Danilo Krummrich wrote:
> On Fri, Nov 03, 2023 at 08:18:35AM +0100, Christian König wrote:
>> On 02.11.23 at 00:31, Danilo Krummrich wrote:
>>> Implement reference counting for struct drm_gpuvm.
>> From the design point of view what is that good for?
> It was discussed in this thread [1].
>
> Essentially, the idea is to make sure that vm_bo->vm is always valid without
> the driver having to take extra care. It also ensures that GPUVM can't be
> freed with mappings still held.

Well, in this case I have some objections to this.

The lifetime of the VM is driver and use case specific. In particular, we most
likely don't want the VM to live longer than the application which originally
used it.

If you make the GPUVM an independent object, you actually open up abuse of its
lifetime by drivers.

In addition to that, see below for a quite real problem with this.

>> Background is that the most common use case I see is that this object is
>> embedded into something else and a reference count is then not really a good
>> idea.
> Do you have a specific use-case in mind where this would interfere?

Yes, absolutely. For an example see amdgpu_mes_self_test(): here we initialize
a temporary amdgpu VM for an in-kernel unit test which runs during driver
load. When the function returns, I need to guarantee that the VM is destroyed,
or otherwise I will mess up normal operation.

Reference counting is nice when you don't know who else is referring to your
VM, but the cost is that you also don't know when the object will actually be
destroyed.

I can trivially work around this by saying that the generic GPUVM object has a
different lifetime than the amdgpu specific object, but that opens up doors
for use-after-free again.

Regards,
Christian.
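To make the lifetime concern above concrete, here is a hedged sketch; temporary_vm_teardown() is a hypothetical helper, not actual amdgpu code:

#include <drm/drm_gpuvm.h>

/*
 * Illustration of the concern: with a reference-counted VM, dropping
 * "our" reference does not guarantee the object is gone by the time
 * this function returns. If any other reference is still held (for
 * instance by a mapping that has not been removed yet, since
 * drm_gpuva_insert() takes a reference per mapping), the ->vm_free()
 * callback runs later, not here.
 */
static void temporary_vm_teardown(struct drm_gpuvm *gpuvm)
{
	drm_gpuvm_put(gpuvm);
	/*
	 * No guarantee the VM is destroyed at this point -- unlike the
	 * previous explicit drm_gpuvm_destroy() call, which tore the
	 * object down immediately (and was simply a bug to call with
	 * mappings still held).
	 */
}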