Danilo Krummrich
2023-Nov-01 23:31 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
Implement reference counting for struct drm_gpuvm.
Signed-off-by: Danilo Krummrich <dakr at redhat.com>
---
drivers/gpu/drm/drm_gpuvm.c | 44 +++++++++++++++++++-------
drivers/gpu/drm/nouveau/nouveau_uvmm.c | 20 +++++++++---
include/drm/drm_gpuvm.h | 31 +++++++++++++++++-
3 files changed, 78 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 53e2c406fb04..6a88eafc5229 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -746,6 +746,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
gpuvm->rb.tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpuvm->rb.list);
+ kref_init(&gpuvm->kref);
+
gpuvm->name = name ? name : "unknown";
gpuvm->flags = flags;
gpuvm->ops = ops;
@@ -770,15 +772,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_init);
-/**
- * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
- * @gpuvm: pointer to the &drm_gpuvm to clean up
- *
- * Note that it is a bug to call this function on a manager that still
- * holds GPU VA mappings.
- */
-void
-drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
+static void
+drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
{
gpuvm->name = NULL;
@@ -790,7 +785,33 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
drm_gem_object_put(gpuvm->r_obj);
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
+
+static void
+drm_gpuvm_free(struct kref *kref)
+{
+ struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
+
+ if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
+ return;
+
+ drm_gpuvm_fini(gpuvm);
+
+ gpuvm->ops->vm_free(gpuvm);
+}
+
+/**
+ * drm_gpuvm_bo_put() - drop a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to release the reference of
+ *
+ * This releases a reference to @gpuvm.
+ */
+void
+drm_gpuvm_put(struct drm_gpuvm *gpuvm)
+{
+ if (gpuvm)
+ kref_put(&gpuvm->kref, drm_gpuvm_free);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_put);
static int
__drm_gpuva_insert(struct drm_gpuvm *gpuvm,
@@ -843,7 +864,7 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm,
if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
return -EINVAL;
- return __drm_gpuva_insert(gpuvm, va);
+ return __drm_gpuva_insert(drm_gpuvm_get(gpuvm), va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);
@@ -876,6 +897,7 @@ drm_gpuva_remove(struct drm_gpuva *va)
}
__drm_gpuva_remove(va);
+ drm_gpuvm_put(va->vm);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 54be12c1272f..cb2f06565c46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1780,6 +1780,18 @@ nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
}
}
+static void
+nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
+{
+ struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
+
+ kfree(uvmm);
+}
+
+static const struct drm_gpuvm_ops gpuvm_ops = {
+ .vm_free = nouveau_uvmm_free,
+};
+
int
nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
void *data,
@@ -1830,7 +1842,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
NOUVEAU_VA_SPACE_END,
init->kernel_managed_addr,
init->kernel_managed_size,
- NULL);
+ &gpuvm_ops);
/* GPUVM takes care from here on. */
drm_gem_object_put(r_obj);
@@ -1849,8 +1861,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
return 0;
out_gpuvm_fini:
- drm_gpuvm_destroy(&uvmm->base);
- kfree(uvmm);
+ drm_gpuvm_put(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
@@ -1902,7 +1913,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuvm_destroy(&uvmm->base);
- kfree(uvmm);
+ drm_gpuvm_put(&uvmm->base);
mutex_unlock(&cli->mutex);
}
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 0c2e24155a93..4e6e1fd3485a 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -247,6 +247,11 @@ struct drm_gpuvm {
struct list_head list;
} rb;
+ /**
+ * @kref: reference count of this object
+ */
+ struct kref kref;
+
/**
* @kernel_alloc_node:
*
@@ -273,7 +278,23 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
const struct drm_gpuvm_ops *ops);
-void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
+
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+ kref_get(&gpuvm->kref);
+
+ return gpuvm;
+}
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
@@ -673,6 +694,14 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
* operations to drivers.
*/
struct drm_gpuvm_ops {
+ /**
+ * @vm_free: called when the last reference of a struct drm_gpuvm is
+ * dropped
+ *
+ * This callback is mandatory.
+ */
+ void (*vm_free)(struct drm_gpuvm *gpuvm);
+
/**
* @op_alloc: called when the &drm_gpuvm allocates
* a struct drm_gpuva_op
--
2.41.0
kernel test robot
2023-Nov-02 10:46 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
Hi Danilo,

kernel test robot noticed the following build warnings:

[auto build test WARNING on 3c6c7ca4508b6cb1a033ac954c50a1b2c97af883]

url:    https://github.com/intel-lab-lkp/linux/commits/Danilo-Krummrich/drm-gpuvm-convert-WARN-to-drm_WARN-variants/20231102-073332
base:   3c6c7ca4508b6cb1a033ac954c50a1b2c97af883
patch link:    https://lore.kernel.org/r/20231101233113.8059-10-dakr%40redhat.com
patch subject: [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
config: arc-allmodconfig (https://download.01.org/0day-ci/archive/20231102/202311021833.q8aYDJnr-lkp at intel.com/config)
compiler: arceb-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231102/202311021833.q8aYDJnr-lkp at intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp at intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202311021833.q8aYDJnr-lkp at intel.com/

All warnings (new ones prefixed by >>):

>> drivers/gpu/drm/drm_gpuvm.c:810: warning: expecting prototype for drm_gpuvm_bo_put(). Prototype was for drm_gpuvm_put() instead

vim +810 drivers/gpu/drm/drm_gpuvm.c

   801
   802  /**
   803   * drm_gpuvm_bo_put() - drop a struct drm_gpuvm reference
   804   * @gpuvm: the &drm_gpuvm to release the reference of
   805   *
   806   * This releases a reference to @gpuvm.
   807   */
   808  void
   809  drm_gpuvm_put(struct drm_gpuvm *gpuvm)
 > 810  {
   811          if (gpuvm)
   812                  kref_put(&gpuvm->kref, drm_gpuvm_free);
   813  }
   814  EXPORT_SYMBOL_GPL(drm_gpuvm_put);
   815

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
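The warning is the kerneldoc name mismatch introduced by the patch: the comment above drm_gpuvm_put() still carries the drm_gpuvm_bo_put() name. The corresponding fix would presumably be as small as updating the comment header:

/**
 * drm_gpuvm_put() - drop a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to release the reference of
 *
 * This releases a reference to @gpuvm.
 */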
Thomas Hellström
2023-Nov-02 13:21 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
On Thu, 2023-11-02 at 00:31 +0100, Danilo Krummrich wrote:
> Implement reference counting for struct drm_gpuvm.
>
> Signed-off-by: Danilo Krummrich <dakr at redhat.com>

Will port the Xe series over to check that it works properly and get back
with review on this one.

> [...]
Thomas Hellström
2023-Nov-02 17:09 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
On Thu, 2023-11-02 at 00:31 +0100, Danilo Krummrich wrote:
> Implement reference counting for struct drm_gpuvm.
> [...]
> +/**
> + * drm_gpuvm_bo_put() - drop a struct drm_gpuvm reference

Copy-paste error in the function name.

Also, it appears like xe might put a vm from irq context, so we should
document the context in which this function call is allowable and, if
applicable, add a might_sleep(). If this function needs to sleep, we can
work around that in Xe by keeping an xe-private refcount for the xe vm
container, but I'd like to avoid that if possible and piggy-back on the
refcount introduced here.
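If the outcome is that drm_gpuvm_put() is allowed to sleep (because ->vm_free()
implementations may), one way to document that, as suggested above, could look
roughly like the following. This is only a sketch, not part of the patch:

void
drm_gpuvm_put(struct drm_gpuvm *gpuvm)
{
	/* Dropping the last reference ends up calling ->vm_free(), which
	 * may sleep, so references must not be dropped from atomic or IRQ
	 * context.
	 */
	might_sleep();

	if (gpuvm)
		kref_put(&gpuvm->kref, drm_gpuvm_free);
}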
> [...]
> -	return __drm_gpuva_insert(gpuvm, va);
> +	return __drm_gpuva_insert(drm_gpuvm_get(gpuvm), va);

Here we leak a reference if __drm_gpuva_insert() fails, and IMO the
reference should be taken where the pointer holding the reference is
assigned (in this case in __drm_gpuva_insert()), or the reference transfer
from the argument should be documented close to the assignment.

But since a va itself is not refcounted, it clearly can't outlive the vm,
so is a reference really needed here? I'd suggest using an accessor that
uses va->vm_bo->vm instead of va->vm, to avoid needing to worry about the
vm refcount altogether.

Thanks,
Thomas
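One possible way to address the error-path leak pointed out above would be to
take the reference only once the insert has actually succeeded. A sketch,
assuming the body of drm_gpuva_insert() otherwise stays as in the patch:

int
drm_gpuva_insert(struct drm_gpuvm *gpuvm,
		 struct drm_gpuva *va)
{
	u64 addr = va->va.addr;
	u64 range = va->va.range;
	int ret;

	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
		return -EINVAL;

	ret = __drm_gpuva_insert(gpuvm, va);
	if (likely(!ret))
		/* Hold a vm reference only while the va is actually inserted. */
		drm_gpuvm_get(gpuvm);

	return ret;
}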
Christian König
2023-Nov-03 07:18 UTC
[Nouveau] [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count drm_gpuvm structures
On 02.11.23 00:31, Danilo Krummrich wrote:
> Implement reference counting for struct drm_gpuvm.

From the design point of view, what is that good for?

Background is that the most common use case I see is that this object is
embedded into something else, and a reference count is then not really a
good idea.

Thanks,
Christian.

> [...]
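The embedding case is what the mandatory ->vm_free() callback in this patch is
meant to cover: the driver embeds the &drm_gpuvm and frees the containing
object only once the last reference is dropped, which is exactly what the
nouveau hunk above does. A minimal sketch with purely illustrative names
(struct my_driver_vm and my_driver_vm_free() are not from the series):

#include <linux/slab.h>
#include <drm/drm_gpuvm.h>

/* Hypothetical driver VM object embedding a struct drm_gpuvm. */
struct my_driver_vm {
	struct drm_gpuvm base;
	/* driver-private state ... */
};

static void
my_driver_vm_free(struct drm_gpuvm *gpuvm)
{
	struct my_driver_vm *vm = container_of(gpuvm, struct my_driver_vm, base);

	/* Called only when the last drm_gpuvm_put() drops the kref to zero;
	 * the GPUVM core has already run its internal cleanup at this point.
	 */
	kfree(vm);
}

static const struct drm_gpuvm_ops my_driver_vm_ops = {
	.vm_free = my_driver_vm_free,	/* mandatory with this patch */
};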