Displaying 7 results from an estimated 7 matches for "mn_list".
2019 Oct 28
2
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...truct amdgpu_bo *bo, *next_bo;
-
- mutex_lock(&adev->mn_lock);
- down_write(&amn->lock);
- hash_del(&amn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node,
- &amn->objects.rb_root, it.rb) {
- list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
- }
- kfree(node);
- }
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-
- hmm_mirror_unregister(&amn->mirror);
- kfree(amn);
-}
-
-/**
- * amdgpu_hmm_mirror_release - callback to notify about mm destruc...
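For context, the hunk above deletes the device-wide teardown: a hash entry, an interval rbtree of nodes and every BO's mn_list, all walked under adev->mn_lock. With a range notifier embedded in each buffer object, which is where this patch is heading, teardown reduces to per-object removal. The following is only a sketch of that shape, assuming a bo->notifier field and a mmu_range_notifier_remove() helper named after this series; it is not quoted from the patch.

/*
 * Illustrative sketch, not taken from the patch: once the notifier is
 * embedded in the buffer object, teardown is per-BO instead of a walk
 * over the device-wide hash/rbtree/mn_list.  The ->notifier.mm check
 * and mmu_range_notifier_remove() follow the naming in this series and
 * are assumptions.
 */
static void amdgpu_bo_notifier_release(struct amdgpu_bo *bo)
{
	if (!bo->notifier.mm)		/* nothing was ever registered */
		return;
	mmu_range_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;		/* assumed: marks it unregistered */
}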
2019 Oct 29
0
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...-
> - mutex_lock(&adev->mn_lock);
> - down_write(&amn->lock);
> - hash_del(&amn->node);
> - rbtree_postorder_for_each_entry_safe(node, next_node,
> - &amn->objects.rb_root, it.rb) {
> - list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
> - bo->mn = NULL;
> - list_del_init(&bo->mn_list);
> - }
> - kfree(node);
> - }
> - up_write(&amn->lock);
> - mutex_unlock(&adev->mn_lock);
> -
> - hmm_mirror_unregister(&amn->mirror);
> - kfree(amn);
> -}
> -
> -/**
>...
2019 Oct 28
0
[PATCH v2 07/15] drm/radeon: use mmu_range_notifier_insert
...linux/mmu_notifier.h>
+#endif
+
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -509,8 +513,9 @@ struct radeon_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
- struct radeon_mn *mn;
- struct list_head mn_list;
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_range_notifier notifier;
+#endif
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index dbab9a3a969b9e..d3d41e20a64922 100644
--- a/dr...
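The struct change above drops the radeon_mn pointer and the mn_list linkage in favour of an embedded mmu_range_notifier. A rough sketch of the matching registration side is shown below; the callback signature, the ops layout and the argument order of mmu_range_notifier_insert() are assumptions based on the naming in this series rather than text taken from the patch.

/*
 * Sketch only: registering the embedded notifier for a userptr BO.
 * Needs radeon.h, <linux/sched.h> and <linux/mmu_notifier.h> in a
 * real build; names and signatures are assumed from this series.
 */
static bool radeon_bo_range_invalidate(struct mmu_range_notifier *mrn,
				       const struct mmu_notifier_range *range,
				       unsigned long cur_seq)
{
	/* driver-specific: stop GPU access to the mirrored pages here */
	return true;
}

static const struct mmu_range_notifier_ops radeon_bo_range_ops = {
	.invalidate = radeon_bo_range_invalidate,
};

static int radeon_bo_register_range(struct radeon_bo *bo,
				    unsigned long addr, unsigned long size)
{
	bo->notifier.ops = &radeon_bo_range_ops;	/* assumed field */
	return mmu_range_notifier_insert(&bo->notifier, addr, size,
					 current->mm);
}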
2019 Oct 29
0
[PATCH v2 07/15] drm/radeon: use mmu_range_notifier_insert
...<drm/ttm/ttm_bo_api.h>
> #include <drm/ttm/ttm_bo_driver.h>
> #include <drm/ttm/ttm_placement.h>
> @@ -509,8 +513,9 @@ struct radeon_bo {
> struct ttm_bo_kmap_obj dma_buf_vmap;
> pid_t pid;
>
> - struct radeon_mn *mn;
> - struct list_head mn_list;
> +#ifdef CONFIG_MMU_NOTIFIER
> + struct mmu_range_notifier notifier;
> +#endif
> };
> #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
>
> diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
> index dba...
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
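A minimal sketch of the pattern described here, with invented drv_* names: the driver implements only invalidate_range_start and tests the invalidated span against its own list of mirrored ranges. Locking against the driver's fault path is left out.

#include <linux/list.h>
#include <linux/mmu_notifier.h>

/*
 * Sketch of the pre-consolidation pattern: the driver keeps its own
 * list of mirrored ranges and the only notifier callback it implements
 * is invalidate_range_start, which just tests for overlap.  All drv_*
 * names are invented for illustration; locking is omitted.
 */
struct drv_range {
	struct list_head node;
	unsigned long start, last;	/* inclusive span being mirrored */
	bool valid;
};

struct drv_device {
	struct mmu_notifier mn;
	struct list_head ranges;	/* list of struct drv_range */
};

static int drv_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	struct drv_device *ddev = container_of(mn, struct drv_device, mn);
	struct drv_range *r;

	list_for_each_entry(r, &ddev->ranges, node)
		if (r->start < range->end && range->start <= r->last)
			r->valid = false;	/* driver-specific response */
	return 0;
}

static const struct mmu_notifier_ops drv_mn_ops = {
	.invalidate_range_start = drv_invalidate_range_start,
	/* invalidate_range_end is typically a no-op in this pattern */
};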
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
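The locking half of the consolidation replaces each driver's private scheme with a collision-retry sequence: the fault path samples a sequence count for the range, builds its mapping, then re-checks the count under the driver lock and retries if an invalidation ran in between. The sketch below assumes mmu_range_read_begin()/mmu_range_read_retry() helpers following the mmu_range_* naming of these postings, plus invented drv_* names; none of it is quoted from the series.

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/*
 * Collision-retry sketch.  The mmu_range_read_begin() and
 * mmu_range_read_retry() helpers and all drv_* names are assumptions,
 * not quoted from the series.
 */
struct drv_userptr {
	struct mmu_range_notifier notifier;	/* one notifier per range */
	struct mutex lock;			/* serializes vs. invalidate */
};

static int drv_populate(struct drv_userptr *u)
{
	unsigned long seq;

	for (;;) {
		seq = mmu_range_read_begin(&u->notifier);	/* assumed helper */

		/* fault in / look up the pages for the range here,
		 * without holding u->lock */

		mutex_lock(&u->lock);
		if (mmu_range_read_retry(&u->notifier, seq)) {	/* assumed helper */
			mutex_unlock(&u->lock);
			continue;	/* an invalidation raced with us */
		}
		/* program the GPU mapping while the check still holds */
		mutex_unlock(&u->lock);
		return 0;
	}
}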