search for: ttm_bo_type_device

Displaying 20 results from an estimated 70 matches for "ttm_bo_type_device".

2019 Jun 17
2
[PATCH 1/4] drm/virtio: pass gem reservation object to ttm init
...44
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -132,7 +132,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	virtio_gpu_init_ttm_placement(bo);
 	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
 			  ttm_bo_type_device, &bo->placement, 0,
-			  true, acc_size, NULL, NULL,
+			  true, acc_size, NULL,
+			  bo->gem_base.resv,
 			  &virtio_gpu_ttm_bo_destroy);
 	/* ttm_bo_init failure will call the destroy */
 	if (ret != 0)
--
2.18.1
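For readers skimming these hits: the change above swaps the NULL resv argument for the reservation object embedded in the GEM base, so TTM and GEM synchronize on one lock and one fence container instead of two. A minimal sketch of the resulting call site, assuming the 2019-era ttm_bo_init() prototype (the argument before the destroy callback is the optional reservation object; passing NULL made TTM fall back to its own embedded one):

	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
			  ttm_bo_type_device, &bo->placement, 0,
			  true, acc_size,
			  NULL,               /* no sg table */
			  bo->gem_base.resv,  /* was NULL: TTM allocated its own resv */
			  &virtio_gpu_ttm_bo_destroy);
	if (ret != 0)
		return ret;   /* ttm_bo_init() failure already invoked the destroy callback */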
2019 Jun 17
2
[PATCH 1/4] drm/virtio: pass gem reservation object to ttm init
...44
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -132,7 +132,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	virtio_gpu_init_ttm_placement(bo);
 	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
 			  ttm_bo_type_device, &bo->placement, 0,
-			  true, acc_size, NULL, NULL,
+			  true, acc_size, NULL,
+			  bo->gem_base.resv,
 			  &virtio_gpu_ttm_bo_destroy);
 	/* ttm_bo_init failure will call the destroy */
 	if (ret != 0)
--
2.18.1
2018 Dec 19
0
[PATCH 05/10] drm/virtio: use struct to pass params to virtio_gpu_object_create()
...unsigned long size, bool kernel, bool pinned,
+			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr)
 {
 	struct virtio_gpu_object *bo;
-	enum ttm_bo_type type;
 	size_t acc_size;
 	int ret;
-	if (kernel)
-		type = ttm_bo_type_kernel;
-	else
-		type = ttm_bo_type_device;
 	*bo_ptr = NULL;
-	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
 				       sizeof(struct virtio_gpu_object));
 	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
@@ -104,19 +99,20 @@ int v...
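This patch replaces the growing positional argument list of virtio_gpu_object_create() with a single parameter struct. A hedged sketch of the pattern; the field list below is illustrative rather than the exact upstream struct, and the caller is hypothetical:

	/* Illustrative params struct: new creation attributes become new fields
	 * instead of new positional arguments at every call site. */
	struct virtio_gpu_object_params {
		unsigned long size;
		bool dumb;
		bool pinned;
		/* ... format/size and 3D fields in the real struct ... */
	};

	/* hypothetical caller */
	struct virtio_gpu_object_params params = {
		.size   = args->size,
		.dumb   = true,
		.pinned = false,
	};
	ret = virtio_gpu_object_create(vgdev, &params, &bo);

Note that the removed if (kernel) branch drops the kernel/device distinction; judging from the 2019 hits elsewhere in this list, ttm_bo_type_device ends up hard-coded in the ttm_bo_init() call.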
2019 Jun 20
0
[PATCH 5/6] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
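In plain terms, this series moves the mmap offset node from TTM's private vma_node member into the embedded GEM object (bo->base.vma_node). A short sketch of the resulting logic in ttm_bo_init_reserved(); the final, truncated argument of drm_vma_offset_add() is assumed to be the usual page count:

	/* Abridged sketch, post-patch: the offset node now lives in the GEM base. */
	drm_vma_node_reset(&bo->base.vma_node);          /* was: &bo->vma_node */

	/* Only device-mappable and sg buffers get an mmap offset. */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager,
					 &bo->base.vma_node, /* was: &bo->vma_node */
					 bo->mem.num_pages); /* assumed page-count argument */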
2019 Mar 18
1
[PATCH v3 2/5] drm/virtio: use struct to pass params to virtio_gpu_object_create()
...unsigned long size, bool kernel, bool pinned,
+			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr)
 {
 	struct virtio_gpu_object *bo;
-	enum ttm_bo_type type;
 	size_t acc_size;
 	int ret;
-	if (kernel)
-		type = ttm_bo_type_kernel;
-	else
-		type = ttm_bo_type_device;
 	*bo_ptr = NULL;
-	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
 				       sizeof(struct virtio_gpu_object));
 	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
@@ -117,19 +111,20 @@ int...
2019 Jun 28
1
[PATCH v5 09/12] drm/virtio: rework virtio_gpu_object_create fencing
...+		virtio_gpu_cmd_create_resource(vgdev, bo, params,
+					       objs, fence);
 	}
+	if (fence)
+		drm_gem_unlock_reservations(objs->objs, objs->nents, &ticket);
+
 	virtio_gpu_init_ttm_placement(bo);
 	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
 			  ttm_bo_type_device, &bo->placement, 0,
@@ -139,38 +158,6 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	if (ret != 0)
 		return ret;
-	if (fence) {
-		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
-		struct list_head validate_list;
-		struct ttm_validate_buffer mainbuf;...
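This rework drops the hand-rolled ttm_validate_buffer/validate_list fencing (the removed block at the end of the hit) in favour of the generic GEM reservation helpers. A sketch of the new flow, assuming a matching drm_gem_lock_reservations() call earlier in the function that the truncated hit does not show:

	struct ww_acquire_ctx ticket;
	int ret;

	if (fence) {
		/* assumed counterpart of the unlock visible in the hunk above */
		ret = drm_gem_lock_reservations(objs->objs, objs->nents, &ticket);
		if (ret)
			return ret;
	}

	/* queue the fenced create command while the reservations are held */
	virtio_gpu_cmd_create_resource(vgdev, bo, params, objs, fence);

	if (fence)
		drm_gem_unlock_reservations(objs->objs, objs->nents, &ticket);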
2019 Aug 02
0
[PATCH v4 08/17] drm/ttm: use gem vma_node
...truct ttm_bo_device *bdev,
 	 * struct elements we want use regardless.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1351,7 +1351,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Aug 02
0
[PATCH v4 08/17] drm/ttm: use gem vma_node
...truct ttm_bo_device *bdev,
 	 * struct elements we want use regardless.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1351,7 +1351,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Aug 05
0
[PATCH v5 08/18] drm/ttm: use gem vma_node
...truct ttm_bo_device *bdev,
 	 * struct elements we want use regardless.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Aug 05
0
[PATCH v5 08/18] drm/ttm: use gem vma_node
...truct ttm_bo_device *bdev,
 	 * struct elements we want use regardless.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Aug 02
0
[PATCH v4 08/17] drm/ttm: use gem vma_node
...truct ttm_bo_device *bdev,
 	 * struct elements we want use regardless.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1351,7 +1351,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Aug 05
0
[PATCH v5 08/18] drm/ttm: use gem vma_node
...truct ttm_bo_device *bdev,
 	 * struct elements we want use regardless.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 21
0
[PATCH v2 08/18] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 21
0
[PATCH v2 08/18] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 21
0
[PATCH v2 08/18] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 28
0
[PATCH v3 08/18] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 28
0
[PATCH v3 08/18] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 28
0
[PATCH v3 08/18] drm/ttm: use gem vma_node
...struct ttm_bo_device *bdev,
 	 * away once all users are switched over.
 	 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1352,7 +1352,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->v...
2019 Jun 17
0
[PATCH 1/4] drm/virtio: pass gem reservation object to ttm init
...virtio/virtgpu_object.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
> @@ -132,7 +132,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
>  	virtio_gpu_init_ttm_placement(bo);
>  	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
>  			  ttm_bo_type_device, &bo->placement, 0,
> -			  true, acc_size, NULL, NULL,
> +			  true, acc_size, NULL,
> +			  bo->gem_base.resv,
>  			  &virtio_gpu_ttm_bo_destroy);
>  	/* ttm_bo_init failure will call the destroy */
>  	if (ret != 0)
> --
> 2.18.1
>
--
Daniel Vetter S...
2019 Jun 18
0
[PATCH v2 01/12] drm/virtio: pass gem reservation object to ttm init
...44
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -132,7 +132,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	virtio_gpu_init_ttm_placement(bo);
 	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
 			  ttm_bo_type_device, &bo->placement, 0,
-			  true, acc_size, NULL, NULL,
+			  true, acc_size, NULL,
+			  bo->gem_base.resv,
 			  &virtio_gpu_ttm_bo_destroy);
 	/* ttm_bo_init failure will call the destroy */
 	if (ret != 0)
--
2.18.1