Displaying 10 results from an estimated 10 matches for "dma_resv_wait_timeout_rcu".
2019 Oct 28
0
[PATCH v2 07/15] drm/radeon: use mmu_range_notifier_insert
...n true;
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
- continue;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- r = radeon_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (...
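(The archive cuts the hunk off; pieced together from the visible fragments, the per-bo invalidate callback this v2 patch converges on looks roughly like the sketch below. The removed reserve/wait/validate loop is re-homed into a single mmu_range_notifier callback; the exact v2 signature is an assumption, not verbatim from the patch.)

static bool radeon_mn_invalidate(struct mmu_range_notifier *mrn,
				 const struct mmu_notifier_range *range)
{
	struct radeon_bo *bo = container_of(mrn, struct radeon_bo, notifier);
	struct ttm_operation_ctx ctx = { false, false };
	long r;

	if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
		return true;

	/* Cannot sleep in a non-blockable invalidation; ask the core to retry */
	if (!mmu_notifier_range_blockable(range))
		return false;

	r = radeon_bo_reserve(bo, true);
	if (r) {
		DRM_ERROR("(%ld) failed to reserve user bo\n", r);
		return true;
	}

	/* Wait for all fences, shared and exclusive, then evict to system RAM */
	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		DRM_ERROR("(%ld) failed to validate user bo\n", r);

	radeon_bo_unreserve(bo);
	return true;
}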
2019 Oct 29
0
[PATCH v2 07/15] drm/radeon: use mmu_range_notifier_insert
...state != tt_bound)
> - continue;
> + if (!mmu_notifier_range_blockable(range))
> + return false;
>
> - r = radeon_bo_reserve(bo, true);
> - if (r) {
> - DRM_ERROR("(%ld) failed to reserve user bo\n", r);
> - continue;
> - }
> -
> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> - true, false, MAX_SCHEDULE_TIMEOUT);
> - if (r <= 0)
> - DRM_ERROR("(%ld) failed to wait for user bo\n", r);
> -
> - radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
> - r = ttm_bo_validate(&bo->tbo, &bo->...
2019 Oct 28
2
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...bo;
+ struct amdgpu_bo *bo = container_of(mrn, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
- continue;
-
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- }
+ /* FIXME: Is this necessary? */
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, range->start,
+ range->end))
+ return true;
+
+ if...
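(The "+ if..." the archive truncates is, by analogy with the radeon patch in this same series and the v3 hunks further down, most plausibly the blockable check followed by the fence wait under adev->notifier_lock; a sketch with the exact v2 details assumed:)

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);
	/* Wait for all fences on the bo before allowing the unmap to proceed */
	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
				      MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&adev->notifier_lock);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);

	return true;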
2019 Dec 10
0
[PATCH AUTOSEL 5.4 002/350] drm/virtio: switch virtio_gpu_wait_ioctl() to gem helper.
...return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (args->flags & VIRTGPU_WAIT_NOWAIT)
- nowait = true;
- ret = virtio_gpu_object_wait(qobj, nowait);
+ if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+ ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ } else {
+ ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+ timeout);
+ }
+ if (ret == 0)
+ ret = -EBUSY;
+ else if (ret > 0)
+ ret = 0;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
--
2.20.1
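(The ret remapping at the end of this hunk follows directly from the return conventions of the two dma_resv helpers the gem path relies on:)

	/* dma_resv_test_signaled_rcu(resv, true) returns true (> 0) iff
	 * every fence, shared and exclusive, has already signaled.
	 *
	 * dma_resv_wait_timeout_rcu(resv, wait_all, intr, timeout):
	 *   < 0  error, e.g. -ERESTARTSYS from an interrupted wait
	 *   == 0 timed out, fences still pending
	 *   > 0  jiffies remaining, all requested fences signaled
	 *
	 * Both results therefore map onto the ioctl return the same way: */
	if (ret == 0)
		ret = -EBUSY;	/* not yet signaled / timed out */
	else if (ret > 0)
		ret = 0;	/* signaled within the timeout */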
2019 Oct 29
0
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...struct amdgpu_bo, notifier);
> + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> long r;
>
> - list_for_each_entry(bo, &node->bos, mn_list) {
> -
> - if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
> - continue;
> -
> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> - true, false, MAX_SCHEDULE_TIMEOUT);
> - if (r <= 0)
> - DRM_ERROR("(%ld) failed to wait for user bo\n", r);
> - }
> + /* FIXME: Is this necessary? */
> + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, range->start,
> + ran...
2019 Oct 29
0
[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
...struct amdgpu_bo, notifier);
> + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> long r;
>
> - list_for_each_entry(bo, &node->bos, mn_list) {
> -
> - if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
> - continue;
> -
> - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
> - true, false, MAX_SCHEDULE_TIMEOUT);
> - if (r <= 0)
> - DRM_ERROR("(%ld) failed to wait for user bo\n", r);
> - }
> + /* FIXME: Is this necessary? */
Most likely not.
Christian.
> + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, ra...
2019 Nov 12
0
[PATCH v3 12/14] drm/amdgpu: Use mmu_interval_notifier instead of hmm_mirror
...(&mn->lock);
-}
-
/**
* amdgpu_mn_invalidate_gfx - callback to notify about mm change
*
@@ -94,6 +72,9 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
return false;
mutex_lock(&adev->notifier_lock);
+
+ mmu_interval_set_seq(mni, cur_seq);
+
r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
@@ -127,6 +108,9 @@ static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
return false;
mutex_lock(&adev->notifier_lock);
+
+ mmu_interval_set_seq(mni, c...
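(The mmu_interval_set_seq() calls being added under adev->notifier_lock are the invalidation half of the interval-notifier collision-retry scheme. The page-fault half pairs with it roughly as below; a generic sketch using the v3 API names, with driver_lock standing in for adev->notifier_lock and the fault path simplified:)

	/* Invalidate callback: publish the new sequence under the driver
	 * lock, then wait out any device access covered by the range. */
	mutex_lock(&driver_lock);
	mmu_interval_set_seq(mni, cur_seq);
	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
				      MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&driver_lock);

	/* Fault/mapping side: sample the sequence, fault the pages, and
	 * retry if an invalidation raced in between. */
again:
	seq = mmu_interval_read_begin(mni);
	/* ... hmm_range_fault() or similar to collect the pages ... */
	mutex_lock(&driver_lock);
	if (mmu_interval_read_retry(mni, seq)) {
		mutex_unlock(&driver_lock);
		goto again;
	}
	/* ... commit the new mapping while still holding the lock ... */
	mutex_unlock(&driver_lock);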
2019 Oct 28
32
[PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell whether the
driver is interested. Half of them use an interval_tree, the others
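(Concretely, the duplicated pattern reads something like the following in each driver; a hypothetical composite with example_* names, not any single driver's code:)

static int example_invalidate_range_start(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
{
	struct example_dev *dev = container_of(mn, struct example_dev, mn);
	struct interval_tree_node *node;

	/* Each driver re-implements this range-vs-tracked-object
	 * intersection test; dev->objects is an rb_root_cached here,
	 * other drivers walk a plain list instead. Note interval_tree
	 * uses inclusive end points, hence range->end - 1. */
	for (node = interval_tree_iter_first(&dev->objects, range->start,
					     range->end - 1);
	     node;
	     node = interval_tree_iter_next(node, range->start,
					    range->end - 1))
		example_stop_device_access(dev, node);	/* hypothetical */

	return 0;
}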
2019 Oct 29
4
[PATCH v2 14/15] drm/amdgpu: Use mmu_range_notifier instead of hmm_mirror
On Tue, Oct 29, 2019 at 07:22:37PM +0000, Yang, Philip wrote:
> Hi Jason,
>
> I did a quick test after merging amd-staging-drm-next with the
> mmu_notifier branch, which includes this change set. The test results
> show various failures: apps stuck intermittently, no GUI display, etc. I
> am working through the changes and will try to figure out the cause.
Thanks! I'm not
2019 Nov 12
20
[PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg at mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) use a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell whether the
driver is interested. Half of them use an interval_tree, the others
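(The series replaces that per-driver boilerplate with a core implementation; a consumer of the consolidated API then looks roughly like this sketch, written against the interface as merged, with hypothetical example_* names:)

static bool example_invalidate(struct mmu_interval_notifier *mni,
			       const struct mmu_notifier_range *range,
			       unsigned long cur_seq)
{
	/* The core only calls this for ranges overlapping the interval
	 * registered below, so no per-driver intersection test is needed. */
	if (!mmu_notifier_range_blockable(range))
		return false;	/* core will retry in blockable context */

	mmu_interval_set_seq(mni, cur_seq);	/* under the driver lock in real code */
	/* ... stop device access to the range ... */
	return true;
}

static const struct mmu_interval_notifier_ops example_ops = {
	.invalidate = example_invalidate,
};

/* Registration: the open-coded interval tree goes away entirely. */
int example_register(struct example_obj *obj, unsigned long start,
		     unsigned long length)
{
	return mmu_interval_notifier_insert(&obj->notifier, current->mm,
					    start, length, &example_ops);
}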