Displaying 10 results from an estimated 10 matches for "ttm_dma_tt_cache_sync_for_cpu".
2014 May 19 · 0 replies · [PATCH 2/4] drm/ttm: introduce dma cache sync helpers
..._device(struct ttm_dma_tt *ttm_dma,
+                                  struct device *dev)
+{
+        int i;
+
+        for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+                dma_sync_single_for_device(dev, ttm_dma->dma_address[i],
+                                           PAGE_SIZE, DMA_TO_DEVICE);
+        }
+}
+EXPORT_SYMBOL(ttm_dma_tt_cache_sync_for_device);
+
+void ttm_dma_tt_cache_sync_for_cpu(struct ttm_dma_tt *ttm_dma,
+                                   struct device *dev)
+{
+        int i;
+
+        for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+                dma_sync_single_for_cpu(dev, ttm_dma->dma_address[i],
+                                        PAGE_SIZE, DMA_FROM_DEVICE);
+        }
+}
+EXPORT_SYMBOL(ttm_dma_tt_cache_sync_for_cpu);
+
void ttm_tt_unbind(str...
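For reference, a minimal sketch of how a driver might bracket CPU access with the two helpers above (illustrative only; the function name and placement are assumptions, not part of the patch):

/* Illustrative sketch: pages are assumed already populated and DMA-mapped. */
static void example_cpu_access(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
        /* give the CPU ownership so it sees what the device last wrote */
        ttm_dma_tt_cache_sync_for_cpu(ttm_dma, dev);

        /* ... CPU reads and/or updates the buffer contents here ... */

        /* hand ownership back before the device accesses the buffer again */
        ttm_dma_tt_cache_sync_for_device(ttm_dma, dev);
}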
2014 Jun 24 · 0 replies · [PATCH v2 2/3] drm/ttm: introduce dma cache sync helpers
...ruct ttm_dma_tt *ttm_dma,
+                                  struct device *dev)
+{
+        unsigned long i;
+
+        for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+                dma_sync_single_for_device(dev, ttm_dma->dma_address[i],
+                                           PAGE_SIZE, DMA_TO_DEVICE);
+        }
+}
+EXPORT_SYMBOL(ttm_dma_tt_cache_sync_for_device);
+
+void ttm_dma_tt_cache_sync_for_cpu(struct ttm_dma_tt *ttm_dma,
+                                   struct device *dev)
+{
+        unsigned long i;
+
+        for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+                dma_sync_single_for_cpu(dev, ttm_dma->dma_address[i],
+                                        PAGE_SIZE, DMA_FROM_DEVICE);
+        }
+}
+EXPORT_SYMBOL(ttm_dma_tt_cache_sync_for_cpu);
+
void ttm_tt_...
2014 Jun 24 · 4 replies · [PATCH v2 0/3] drm/ttm: nouveau: memory coherency for ARM
For this v2 I have fixed the non-controversial patches (all Lucas' :)) and am
resubmitting them in the hope that they will get merged. This leaves only the
issue of mapping Nouveau system-memory buffers to be solved. That issue is
quite complex, so let me summarize the situation and the data I have at hand.
ARM caching is like a quantum world where Murphy's law constantly
2014 May 19 · 8 replies · [PATCH 0/4] drm/ttm: nouveau: memory coherency fixes for ARM
This small series introduces the TTM helper functions and Nouveau hooks needed
to ensure buffer coherency on ARM. Most of the series is a forward-port of
patches Lucas Stach sent last year, which are also needed for Nouveau GK20A
support:
http://lists.freedesktop.org/archives/nouveau/2013-August/014026.html
Another patch takes care of flushing the CPU write-buffer when
2013 Aug 28 · 2 replies · [PATCH 3/6] drm/nouveau: hook up cache sync functions
...,
>         ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
>         spin_unlock(&nvbo->bo.bdev->fence_lock);
>         drm_gem_object_unreference_unlocked(gem);
> +
> +        if (!ret && nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
Ditto?
> +                ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *)nvbo->bo.ttm,
> +                                              &dev->pdev->dev);
> +
>         return ret;
> }
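The hunk quoted above follows the standard streaming-DMA ownership model that the new TTM helpers wrap. A generic sketch of that model using only the plain DMA API (dev, buf and size are placeholders; error handling is trimmed):

static void dma_ownership_example(struct device *dev, void *buf, size_t size)
{
        dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return;

        /* ... device may DMA from buf here ... */

        /* reclaim ownership for the CPU before touching the data again */
        dma_sync_single_for_cpu(dev, handle, size, DMA_TO_DEVICE);
        /* ... CPU updates buf ... */

        /* return ownership to the device before the next transfer */
        dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);

        dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
}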
2013 Aug 28 · 11 replies · [PATCH 0/6] Nouveau on ARM fixes
This is the first set of patches to make Nouveau work on Tegra. These are only
the obvious correctness fixes; a lot of optimization work remains to be done,
but it is enough to get accel working and let the machine survive a piglit run.
A new BO flag is introduced to let userspace give the kernel hints about
possible optimizations.
Lucas Stach (6):
drm/ttm: recognize ARM arch in
2014 May 19 · 2 replies · [PATCH 3/4] drm/nouveau: hook up cache sync functions
...nc_for_cpu(struct nouveau_bo *nvbo)
> +{
> +        struct nouveau_device *device;
> +        struct ttm_tt *ttm = nvbo->bo.ttm;
> +
> +        device = nouveau_dev(nouveau_bdev(ttm->bdev)->dev);
> +
> +        if (nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
> +                ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *)nvbo->bo.ttm,
> +                                              nv_device_base(device));
Can we be certain at this point that the struct ttm_tt is in fact a
struct ttm_dma_tt?
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
[...]
> +#if IS_ENABLED(CONFIG_AR...
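To illustrate the reviewer's concern: the cast from struct ttm_tt to struct ttm_dma_tt is only safe if every ttm_tt reaching this path was actually allocated as part of a ttm_dma_tt. A hedged sketch of that layout assumption (the backend struct name is made up for illustration):

/* Illustrative only: the cast in the patch silently assumes a layout like this. */
struct example_dma_backend {
        struct ttm_dma_tt dma_ttm;      /* ttm_dma_tt has "struct ttm_tt ttm" as its
                                         * first member, so a ttm_tt pointer also
                                         * points at the enclosing ttm_dma_tt */
        /* driver-private fields follow */
};

/* container_of() makes the embedding assumption explicit at the call site */
static inline struct ttm_dma_tt *to_ttm_dma_tt(struct ttm_tt *ttm)
{
        return container_of(ttm, struct ttm_dma_tt, ttm);
}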
2013 Aug 28 · 0 replies · [PATCH 3/6] drm/nouveau: hook up cache sync functions
...> +
> > +        if (!ret && nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
>
> Ditto?
cpu_prep is used to make the kernel aware of a following userspace read.
Writecombined mappings are essentially uncached from the read perspective.
>
> > +                ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *)nvbo->bo.ttm,
> > +                                              &dev->pdev->dev);
> > +
> >         return ret;
> > }
2013 Aug 28 · 0 replies · [PATCH 3/6] drm/nouveau: hook up cache sync functions
...prep(struct drm_device *dev, void *data,
         ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
         spin_unlock(&nvbo->bo.bdev->fence_lock);
         drm_gem_object_unreference_unlocked(gem);
+
+        if (!ret && nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
+                ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *)nvbo->bo.ttm,
+                                              &dev->pdev->dev);
+
         return ret;
 }
2014 May 19 · 0 replies · [PATCH 3/4] drm/nouveau: hook up cache sync functions
...EAU_NEED_CACHE_SYNC
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+        struct nouveau_device *device;
+        struct ttm_tt *ttm = nvbo->bo.ttm;
+
+        device = nouveau_dev(nouveau_bdev(ttm->bdev)->dev);
+
+        if (nvbo->bo.ttm && nvbo->bo.ttm->caching_state == tt_cached)
+                ttm_dma_tt_cache_sync_for_cpu((struct ttm_dma_tt *)nvbo->bo.ttm,
+                                              nv_device_base(device));
+}
+
+void
+nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+{
+        struct ttm_tt *ttm = nvbo->bo.ttm;
+
+        if (ttm && ttm->caching_state == tt_cached) {
+                struct nouveau_device *device;
+
+                device = nouveau_...
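Going by the rest of the series, these hooks are meant to be called around CPU access and command submission. A rough, illustrative sketch of the intended call sites (function names and placement are assumptions, not quoted from the patch):

/* Illustrative call sites only, not actual patch hunks. */

/* before userspace reads a buffer the GPU may have written */
static int example_cpu_prep(struct nouveau_bo *nvbo)
{
        nouveau_bo_sync_for_cpu(nvbo);
        return 0;
}

/* before handing a buffer back to the GPU, e.g. on validation/submission */
static void example_submit(struct nouveau_bo *nvbo)
{
        nouveau_bo_sync_for_device(nvbo);
}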