Displaying 20 results from an estimated 30 matches for "nouveau_bo_sync_for_cpu".
2014 May 19
2
[PATCH 3/4] drm/nouveau: hook up cache sync functions
...form-friendly]
> Signed-off-by: Alexandre Courbot <acourbot at nvidia.com>
Perhaps having a proper commit message here would be good.
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
[...]
> +#ifdef NOUVEAU_NEED_CACHE_SYNC
> +void
> +nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
> +{
> +        struct nouveau_device *device;
> +        struct ttm_tt *ttm = nvbo->bo.ttm;
> +
> +        device = nouveau_dev(nouveau_bdev(ttm->bdev)->dev);
> +
> +        if (nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
> +                ttm_dma_tt...
2014 May 19
0
[PATCH 3/4] drm/nouveau: hook up cache sync functions
...veau_bo_sync_for_device(nvbo);
+
        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                              interruptible, no_wait_gpu);
        if (ret)
@@ -487,6 +489,36 @@ nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
        return 0;
}
+#ifdef NOUVEAU_NEED_CACHE_SYNC
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+        struct nouveau_device *device;
+        struct ttm_tt *ttm = nvbo->bo.ttm;
+
+        device = nouveau_dev(nouveau_bdev(ttm->bdev)->dev);
+
+        if (nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
+                ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *...
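For readers following along, here is a plausible completion of the truncated hook, reconstructed from the visible fragment. ttm_dma_tt_cache_sync_for_cpu() is the TTM helper proposed in patch 1/4 of this series, not a mainline API, and its second argument is assumed here; the sketch also moves the NULL check ahead of the ttm->bdev dereference, which the quoted fragment performs in the opposite order:

/* Sketch only: reconstructed from the fragment above. The helper
 * ttm_dma_tt_cache_sync_for_cpu() comes from patch 1/4 of this series
 * (never merged mainline); its struct device * argument is assumed. */
void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
        struct ttm_tt *ttm = nvbo->bo.ttm;
        struct nouveau_device *device;

        /* Check the TT before dereferencing it; the quoted fragment
         * reads ttm->bdev before its NULL test on nvbo->bo.ttm. */
        if (!ttm || ttm->caching_state != tt_cached)
                return;

        device = nouveau_dev(nouveau_bdev(ttm->bdev)->dev);
        ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *)ttm,
                                      nv_device_base(device));
}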
2014 May 19
8
[PATCH 0/4] drm/ttm: nouveau: memory coherency fixes for ARM
This small series introduces TTM helper functions as well as Nouveau hooks that
are needed to ensure buffer coherency on ARM. Most of this series is a
forward-port of some patches Lucas Stach sent last year and that are also
needed for Nouveau GK20A support:
http://lists.freedesktop.org/archives/nouveau/2013-August/014026.html
Another patch takes care of flushing the CPU write-buffer when
2014 Jul 08
0
[PATCH v4 4/6] drm/nouveau: synchronize BOs when required
...le_for_device(device->pdev,
+                                            ttm_dma->dma_address[i], PAGE_SIZE,
+                                            PCI_DMA_TODEVICE);
+        } else {
+                for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+                        dma_sync_single_for_device(nv_device_base(device),
+                                        ttm_dma->dma_address[i], PAGE_SIZE,
+                                        DMA_TO_DEVICE);
+        }
+}
+
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+        struct nouveau_device *device = nouveau_dev(drm->dev);
+        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+        int i;
+
+        if (!ttm_dma)
+                return;
+
+        if (nv_device_is_cpu_coherent(device) ||...
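The elided tail almost certainly mirrors nouveau_bo_sync_for_device() with the opposite transfer direction; the io_reserve_lru hunks further down quote "PAGE_SIZE, DMA_FROM_DEVICE" from inside this very function. A hedged reconstruction, leaving the second operand of the truncated condition unfilled:

        /* Reconstruction, not the literal patch: coherent devices need
         * no manual cache maintenance, so bail out early. */
        if (nv_device_is_cpu_coherent(device) /* || <elided condition> */)
                return;

        if (nv_device_is_pci(device)) {
                for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                        pci_dma_sync_single_for_cpu(device->pdev,
                                        ttm_dma->dma_address[i], PAGE_SIZE,
                                        PCI_DMA_FROMDEVICE);
        } else {
                for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                        dma_sync_single_for_cpu(nv_device_base(device),
                                        ttm_dma->dma_address[i], PAGE_SIZE,
                                        DMA_FROM_DEVICE);
        }
}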
2014 Jul 10
2
[PATCH v4 4/6] drm/nouveau: synchronize BOs when required
...ress[i], PAGE_SIZE,
> +                                            PCI_DMA_TODEVICE);
> +        } else {
> +                for (i = 0; i < ttm_dma->ttm.num_pages; i++)
> +                        dma_sync_single_for_device(nv_device_base(device),
> +                                        ttm_dma->dma_address[i], PAGE_SIZE,
> +                                        DMA_TO_DEVICE);
> +        }
> +}
> +
> +void
> +nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
> +{
> +        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
> +        struct nouveau_device *device = nouveau_dev(drm->dev);
> +        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
> +        int i;
> +
> +        if (!ttm_dma)
> +                return;
>...
2014 Jul 08
8
[PATCH v4 0/6] drm: nouveau: memory coherency on ARM
Another revision of this patchset, which is critical for GK20A to operate.
Previous attempts were exclusively using either TTM's regular page allocator or
the DMA API one. Both have their advantages and drawbacks: the page allocator is
fast but requires explicit synchronization on non-coherent architectures,
whereas the DMA allocator always returns coherent memory, but is also slower,
creates a
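The tradeoff the cover letter describes can be illustrated in generic DMA-API terms (a minimal sketch, not TTM's actual pool code; example_allocators() and its dev argument are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void example_allocators(struct device *dev)
{
        dma_addr_t handle;
        struct page *page;
        void *cpu_addr;

        /* DMA allocator: coherent by construction, no explicit syncs,
         * but slower and typically mapped uncached on ARM. */
        cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
        if (cpu_addr)
                dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);

        /* Page allocator: fast, cached memory, but every CPU/GPU
         * handover then needs dma_sync_single_for_{cpu,device}() on
         * non-coherent architectures. */
        page = alloc_page(GFP_KERNEL);
        if (page) {
                dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
                                               DMA_BIDIRECTIONAL);
                if (!dma_mapping_error(dev, addr))
                        dma_unmap_page(dev, addr, PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
                __free_page(page);
        }
}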
2020 Jan 24
1
[PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver v2
...>> PAGE_SHIFT;
>         nouveau_bo_placement_set(nvbo, flags, 0);
> +        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
>
>         ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
>                           &nvbo->placement, align >> PAGE_SHIFT, false,
> @@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
>                                 PAGE_SIZE, DMA_FROM_DEVICE);
> }
>
> +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
> +{
> +        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
> +        struct nouveau_bo *nvbo = nouveau_bo(bo);
> +
> +        mutex_lock(&...
2020 Jan 28
1
[PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver v2
...ouveau_bo_placement_set(nvbo, flags, 0);
> +        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
>
>         ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
>                           &nvbo->placement, align >> PAGE_SHIFT, false,
> @@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
>                                 PAGE_SIZE, DMA_FROM_DEVICE);
> }
>
> +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
> +{
> +        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
> +        struct nouveau_bo *nvbo = nou...
2014 Oct 27
4
[PATCH v5 0/4] drm: nouveau: memory coherency on ARM
It has been a couple of months since v4 - apologies for this. v4 has not
received many comments, but this version addresses them and makes a new
attempt at pushing the critical bit for GK20A and Nouveau on ARM in
general.
As a reminder, this series addresses the memory coherency issue that we
are seeing on ARM platforms. Contrary to x86 which invalidates the PCI
caches whenever a write is made by
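Concretely, on such non-coherent ARM systems every CPU access window to a system-memory BO has to be bracketed by the two hooks this series adds. A usage sketch under that assumption; nouveau_bo_wr32() is nouveau's existing kmap-based accessor, while example_cpu_fill() is hypothetical:

/* Usage sketch: bracket CPU writes between the two sync hooks so the
 * GPU observes them on non-coherent ARM. */
static void example_cpu_fill(struct nouveau_bo *nvbo, u32 value, int dwords)
{
        int i;

        nouveau_bo_sync_for_cpu(nvbo);          /* make the CPU view current */
        for (i = 0; i < dwords; i++)
                nouveau_bo_wr32(nvbo, i, value); /* plain CPU stores */
        nouveau_bo_sync_for_device(nvbo);       /* flush before GPU access */
}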
2020 Jan 24
4
TTM/Nouveau cleanups
Hi guys,
I've already sent this out in September last year, but only got a response from Daniel.
Could you guys please test this and tell me what you think about it?
Basically I'm trying to remove all driver-specific features from TTM which don't need to be inside the framework.
Thanks,
Christian.
2020 Jan 24
0
[PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver v2
...bo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
+        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
        ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
                          &nvbo->placement, align >> PAGE_SHIFT, false,
@@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
                                PAGE_SIZE, DMA_FROM_DEVICE);
}
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+        mutex_lock(&drm->ttm.io_reserve_mutex);
+        list_move_tai...
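From the visible fragment, the helper pair most likely reads as below; a reconstruction, with the del counterpart's list_del_init() assumed from the symmetric naming:

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        mutex_lock(&drm->ttm.io_reserve_mutex);
        /* Most recently used BOs move to the tail, so eviction of
         * stale io mappings can start from the head. */
        list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
        mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        mutex_lock(&drm->ttm.io_reserve_mutex);
        list_del_init(&nvbo->io_reserve_lru);
        mutex_unlock(&drm->ttm.io_reserve_mutex);
}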
2020 Aug 21
0
[PATCH 2/3] drm/nouveau: move io_reserve_lru handling into the driver v4
...bo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
+        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
        ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
                          &nvbo->placement, align >> PAGE_SHIFT, false,
@@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
                                PAGE_SIZE, DMA_FROM_DEVICE);
}
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+        mutex_lock(&drm->ttm.io_reserve_mutex);
+        list_move_tai...
2019 Oct 09
0
[PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver
...>> PAGE_SHIFT;
>         nouveau_bo_placement_set(nvbo, flags, 0);
> +        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
>
>         ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
>                           &nvbo->placement, align >> PAGE_SHIFT, false,
> @@ -566,6 +568,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
>                                 PAGE_SIZE, DMA_FROM_DEVICE);
> }
>
> +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
> +{
> +        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
> +        struct nouveau_bo *nvbo = nouveau_bo(bo);
> +
> +        mutex_lock(&d...
2019 Sep 30
3
[PATCH 1/2] drm/nouveau: move io_reserve_lru handling into the driver
...bo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);
+        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
        ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
                          &nvbo->placement, align >> PAGE_SHIFT, false,
@@ -566,6 +568,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
                                PAGE_SIZE, DMA_FROM_DEVICE);
}
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+        mutex_lock(&drm->ttm.io_reserve_mutex);
+        list_move_tai...
2020 Aug 20
3
Moving LRU handling into Nouveau v2
Hi guys,
I already tried this a few months ago, but since I don't have NVidia hardware it's rather hard for me to test (I need to get some ordered).
Dave brought up the topic that we should probably try to move the handling into Nouveau once more, so I tried to fix the problem Ben reported and rebased on top of current drm-misc-next.
Dave, can you test this? At least in theory the approach
2014 Jun 24
4
[PATCH v2 0/3] drm/ttm: nouveau: memory coherency for ARM
For this v2 I have fixed the patches that are non-controversial (all Lucas' :))
and am resubmitting them in the hope that they will get merged. This will
just leave the issue of Nouveau system-memory buffers mapping to be solved.
This issue is quite complex, so let me summarize the situation and the data
I have at hand. ARM caching is like a quantum world where Murphy's law
constantly
2020 Aug 21
5
Moving LRU handling into Nouveau v3
Hi guys,
so I got some hardware and tested this, and after hammering out tons of typos it now seems to work fine.
Could you give it more testing?
Thanks in advance,
Christian
2018 Jun 18
0
[PATCH 3/4] drm/nouveau: Replace drm_gem_object_unreference_unlocked with put function
...f buffer %d on "
"validation list\n", b->handle);
-                drm_gem_object_unreference_unlocked(gem);
+                drm_gem_object_put_unlocked(gem);
                ret = -EINVAL;
                break;
        }
@@ -894,7 +894,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                ret = lret;
        nouveau_bo_sync_for_cpu(nvbo);
-        drm_gem_object_unreference_unlocked(gem);
+        drm_gem_object_put_unlocked(gem);
        return ret;
}
@@ -913,7 +913,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
        nvbo = nouveau_gem_object(gem);
        nouveau_bo_sync_for_device(nvbo);
-        drm_gem_object_unreference_unlocked(...
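The rename is purely mechanical: drm_gem_object_unreference_unlocked() became drm_gem_object_put_unlocked() with unchanged semantics. A minimal sketch of the lookup/sync/put pattern these hunks touch, where example_cpu_prep() is a hypothetical stand-in for the ioctl body:

/* Hypothetical helper showing the reference pattern the rename touches. */
static int example_cpu_prep(struct drm_file *file_priv, u32 handle)
{
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, handle); /* takes a ref */
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_cpu(nvbo);

        /* Was drm_gem_object_unreference_unlocked(gem); same semantics. */
        drm_gem_object_put_unlocked(gem);
        return 0;
}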
2018 Jan 11
3
[PATCH 0/3] drm/nouveau: Add support for fence FDs
From: Thierry Reding <treding at nvidia.com>
This small series of patches implements support for waiting on and
emitting fence FDs on kickoff. This enables explicit fencing and can be
used for example to synchronize buffer accesses between the display
engine and the GPU on Tegra.
The first patch lays the groundwork by splitting up nouveau_fence_sync()
to allow reuse. Patch 2 is where the
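In-kernel, waiting on and emitting fence FDs typically goes through the sync_file API; a generic sketch of both halves under that assumption (not the literal patches, which are truncated here; both example_* functions are hypothetical):

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Wait half: resolve an FD passed in on kickoff to a dma_fence. */
static int example_wait_prefence(int fd)
{
        struct dma_fence *fence = sync_file_get_fence(fd);
        long ret;

        if (!fence)
                return -EINVAL;
        ret = dma_fence_wait(fence, true);      /* interruptible wait */
        dma_fence_put(fence);
        return ret < 0 ? ret : 0;
}

/* Emit half: wrap the job's fence in a sync_file and hand back an FD. */
static int example_emit_fence_fd(struct dma_fence *fence)
{
        struct sync_file *sync = sync_file_create(fence);
        int fd;

        if (!sync)
                return -ENOMEM;
        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                fput(sync->file);
                return fd;
        }
        fd_install(fd, sync->file);
        return fd;
}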
2017 Aug 03
0
[PATCH 17/29] drm/nouveau: switch to drm_*{get, put} helpers
...f buffer %d on "
"validation list\n", b->handle);
-                drm_gem_object_unreference_unlocked(gem);
+                drm_gem_object_put_unlocked(gem);
                ret = -EINVAL;
                break;
        }
@@ -877,7 +877,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                ret = lret;
        nouveau_bo_sync_for_cpu(nvbo);
-        drm_gem_object_unreference_unlocked(gem);
+        drm_gem_object_put_unlocked(gem);
        return ret;
}
@@ -896,7 +896,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
        nvbo = nouveau_gem_object(gem);
        nouveau_bo_sync_for_device(nvbo);
-        drm_gem_object_unreference_unlocked(...