Search for: bo_count

Displaying 20 results from an estimated 22 matches for "bo_count".

2019 Aug 05 | 2 | [PATCH v6 08/17] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 05 | 2 | [PATCH v6 08/17] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 05 | 2 | [PATCH v6 08/17] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 14 | 2 | [Intel-gfx] [PATCH v6 08/17] drm/ttm: use gem vma_node
.../ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 10a861a1690c..83b389fc117e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -160,7 +160,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
     ttm_tt_destroy(bo->ttm);
     atomic_dec(&bo->bdev->glob->bo_count);
     dma_fence_put(bo->moving);
-    if (!ttm_bo_uses_embedded_gem_object(bo))
+    if (bo->base.ttm_init)
         reservation_object_fini(&bo->base._resv);
     mutex_destroy(&bo->wu_mutex);
     bo->destroy(bo);
@@ -1344,6 +1344,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, *...

2019 Jun 20 | 0 | [PATCH 5/6] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 02 | 0 | [PATCH v4 08/17] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1341,9 +1341,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1351,7 +1351,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 02 | 0 | [PATCH v4 08/17] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1341,9 +1341,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1351,7 +1351,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 05 | 0 | [PATCH v5 08/18] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 05 | 0 | [PATCH v5 08/18] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 02 | 0 | [PATCH v4 08/17] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1341,9 +1341,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1351,7 +1351,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 05 | 0 | [PATCH v5 08/18] drm/ttm: use gem vma_node
...tm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * struct elements we want use regardless.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Jun 21 | 0 | [PATCH v2 08/18] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Jun 21 | 0 | [PATCH v2 08/18] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Jun 21 | 0 | [PATCH v2 08/18] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Jun 28 | 0 | [PATCH v3 08/18] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Jun 28 | 0 | [PATCH v3 08/18] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Jun 28 | 0 | [PATCH v3 08/18] drm/ttm: use gem vma_node
...ttm_mem_io_unlock(man);
@@ -1342,9 +1342,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
          * away once all users are switched over.
          */
         reservation_object_init(&bo->base._resv);
+        drm_vma_node_reset(&bo->base.vma_node);
     }
     atomic_inc(&bo->bdev->glob->bo_count);
-    drm_vma_node_reset(&bo->vma_node);

     /*
      * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
      */
     if (bo->type == ttm_bo_type_device ||
         bo->type == ttm_bo_type_sg)
-        ret = drm_vma_offset_add(&bdev->...

2019 Aug 13 | 0 | [Intel-gfx] [PATCH v6 08/17] drm/ttm: use gem vma_node
...3,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>           * struct elements we want use regardless.
>           */
>          reservation_object_init(&bo->base._resv);
> +        drm_vma_node_reset(&bo->base.vma_node);
>      }
>      atomic_inc(&bo->bdev->glob->bo_count);
> -    drm_vma_node_reset(&bo->vma_node);
>
>      /*
>       * For ttm_bo_type_device buffers, allocate
> @@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>       */
>      if (bo->type == ttm_bo_type_device ||
>          bo->type == ttm_bo_type_sg)...

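All of the 2019 hits above are the same hunk of one series: struct ttm_buffer_object gains an embedded struct drm_gem_object as bo->base, so the mmap offset node moves from bo->vma_node into the embedded GEM object and must be reset inside the branch that initializes it, which is exactly what the +/- pair around atomic_inc(&bo->bdev->glob->bo_count) shows. A minimal sketch of the layout change, trimmed to the members these hunks touch (the real structs in include/drm/drm_gem.h and include/drm/ttm/ttm_bo_api.h carry many more fields):

struct drm_gem_object {
        struct drm_vma_offset_node vma_node;  /* mmap offset node now lives here */
        struct reservation_object _resv;      /* likewise the embedded reservation object */
        /* ... */
};

struct ttm_buffer_object {
        struct drm_gem_object base;           /* embedded GEM object */
        /*
         * Before this series the TTM object carried its own
         *         struct drm_vma_offset_node vma_node;
         * which the patch removes in favor of base.vma_node.
         */
        /* ... */
};
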
2016 Mar 18 | 4 | [PATCH] gpu/drm: Use u64_to_user_pointer
...rm/vc4/vc4_gem.c
@@ -120,7 +120,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
         bo_state[i].size = vc4_bo->base.base.size;
     }

-    if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
+    if (copy_to_user(u64_to_user_ptr(get_state->bo),
              bo_state,
              state->bo_count * sizeof(*bo_state)))
         ret = -EFAULT;
@@ -550,7 +550,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
     }

     ret = copy_from_user(handles,
-                  (void __user *)(uintptr_t)args->bo_handles,
+                  u64_to_user_ptr(args->bo_handles),
                   exec->bo_count * sizeof(uint32_t));
     if (re...

2016 Mar 18 | 4 | [PATCH] gpu/drm: Use u64_to_user_pointer
...rm/vc4/vc4_gem.c
@@ -120,7 +120,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
         bo_state[i].size = vc4_bo->base.base.size;
     }

-    if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
+    if (copy_to_user(u64_to_user_ptr(get_state->bo),
              bo_state,
              state->bo_count * sizeof(*bo_state)))
         ret = -EFAULT;
@@ -550,7 +550,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
     }

     ret = copy_from_user(handles,
-                  (void __user *)(uintptr_t)args->bo_handles,
+                  u64_to_user_ptr(args->bo_handles),
                   exec->bo_count * sizeof(uint32_t));
     if (re...

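Both 2016 hits replace the open-coded (void __user *)(uintptr_t) double cast with the u64_to_user_ptr() helper. For reference, the helper is roughly the following (paraphrased from include/linux/kernel.h; the typecheck() additionally rejects callers that pass a plain pointer instead of a u64):

static inline void __user *u64_to_user_ptr(u64 address)
{
        typecheck(u64, address);
        return (void __user *)(uintptr_t)address;
}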