search for: dma_fence

Displaying 20 results from an estimated 95 matches for "dma_fence".

2018 Jan 11
0
[PATCH 1/3] gpu: host1x: Add support for DMA fences
...fence.h> +#include <linux/dma-fence-array.h> +#include <linux/slab.h> + +#include "fence.h" +#include "intr.h" +#include "syncpt.h" +#include "cdma.h" +#include "channel.h" +#include "dev.h" + +struct host1x_fence { + struct dma_fence base; + spinlock_t lock; + + struct host1x_syncpt *syncpt; + u32 threshold; + + struct host1x *host; + void *waiter; + + char timeline_name[10]; +}; + +static inline struct host1x_fence *to_host1x_fence(struct dma_fence *fence) +{ + return (struct host1x_fence *)fence; +} + +static const char *host...
2018 May 03
1
[PATCH 14/15] drm/virtio: Remove unnecessary dma_fence_ops
dma_fence_default_wait is the default now, same for the trivial enable_signaling implementation. Reviewed-by: Eric Anholt <eric at anholt.net> Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch> Cc: David Airlie <airlied at linux.ie> Cc: Gerd Hoffmann <kraxel at redhat.com> Cc:...
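For context, since dma_fence_default_wait() is used whenever a driver leaves .wait unset and enable_signaling is optional, an ops table can shrink to just the name callbacks. A minimal sketch, assuming a hypothetical "example" driver (not any of the drivers in this series):

    #include <linux/dma-fence.h>

    /* Sketch of a dma_fence_ops table after this kind of cleanup: .wait and
     * .enable_signaling are omitted, so the core falls back to
     * dma_fence_default_wait() and treats signaling as trivially enabled. */
    static const char *example_get_driver_name(struct dma_fence *fence)
    {
    	return "example";
    }

    static const char *example_get_timeline_name(struct dma_fence *fence)
    {
    	return "example-timeline";
    }

    static const struct dma_fence_ops example_fence_ops = {
    	.get_driver_name   = example_get_driver_name,
    	.get_timeline_name = example_get_timeline_name,
    };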
2018 Jan 11
6
[PATCH 0/3] drm/tegra: Add support for fence FDs
From: Thierry Reding <treding at nvidia.com> This set of patches adds support for fences to Tegra DRM and complements the fence FD support for Nouveau. Technically this isn't necessary for a fence-based synchronization loop with Nouveau because the KMS core takes care of all that, but engines behind host1x can use the IOCTL extensions provided here to emit fence FDs that in turn can be
2023 Mar 22
0
[PATCH v2 1/2] drm/virtio: Refactor job submission code path
...ev; > + struct drm_file *file; > + uint64_t fence_ctx; > + uint32_t ring_idx; > + int out_fence_fd; > + void *buf; > +}; > + > +static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit, > + struct dma_fence *dma_fence) > +{ > + uint32_t context = submit->fence_ctx + submit->ring_idx; > + > + if (dma_fence_match_context(dma_fence, context)) > + return 0; > + > + return dma_fence_wait(dma_fence, true); > +} > + > +static int virtio_gpu_...
2018 Jan 11
3
[PATCH 0/3] drm/nouveau: Add support for fence FDs
From: Thierry Reding <treding at nvidia.com> This small series of patches implements support for waiting on and emitting fence FDs on kickoff. This enables explicit fencing and can be used for example to synchronize buffer accesses between the display engine and the GPU on Tegra. The first patch lays the groundwork by splitting up nouveau_fence_sync() to allow reuse. Patch 2 is where the
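For context, the wait side of explicit fencing amounts to resolving a fence FD from userspace into a struct dma_fence and waiting on it before kickoff. A rough sketch using the sync_file API (function and variable names here are assumptions, not the actual nouveau code):

    #include <linux/sync_file.h>
    #include <linux/dma-fence.h>

    /* Sketch: resolve a fence FD passed in from userspace and wait for the
     * producer to signal it before submitting our own work. */
    static int example_wait_fence_fd(int fd)
    {
    	struct dma_fence *fence;
    	signed long ret;

    	fence = sync_file_get_fence(fd);
    	if (!fence)
    		return -EINVAL;

    	ret = dma_fence_wait(fence, true); /* interruptible wait */
    	dma_fence_put(fence);

    	return ret < 0 ? ret : 0;
    }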
2017 Mar 28
5
[Bug 100431] New: nv50: memory corruption due to use-after-free of dma_fence
https://bugs.freedesktop.org/show_bug.cgi?id=100431 Bug ID: 100431 Summary: nv50: memory corruption due to use-after-free of dma_fence Product: xorg Version: unspecified Hardware: ARM OS: Linux (All) Status: NEW Severity: major Priority: medium Component: Driver/nouveau Assignee: nouveau at lists.freedesktop.org Reporter: a...
2018 May 02
0
[PATCH] drm/qxl: Remove unnecessary dma_fence_ops
...file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 7cb214577275..e37f0097f744 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence) return "release"; } -static bool qxl_nop_signaling(struct dma_fence *fence) -{ - /* fences are always automatically signaled, so just pretend we did this.. */ - return true; -} - static long qxl_fence_wait(struct dma_fence *fence, bool intr, signed long timeout) { @@...
2018 May 03
0
[PATCH 11/15] drm/qxl: Remove unnecessary dma_fence_ops
...file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 7cb214577275..e37f0097f744 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence) return "release"; } -static bool qxl_nop_signaling(struct dma_fence *fence) -{ - /* fences are always automatically signaled, so just pretend we did this.. */ - return true; -} - static long qxl_fence_wait(struct dma_fence *fence, bool intr, signed long timeout) { @@...
2017 Oct 05
2
Missing file in current kernel-devel package
...tos.plus.x86_64/include/trace/events/fence.h > /usr/src/kernels/3.10.0-693.2.2.el7.centos.plus.x86_64/include/linux/dma-fence.h > /usr/src/kernels/3.10.0-693.2.2.el7.centos.plus.x86_64/include/linux/seqno-fence.h > /usr/src/kernels/3.10.0-693.2.2.el7.centos.plus.x86_64/include/trace/events/dma_fence.h > > Looks like upstream renamed it for some reason. Not good - I did a diff of fence.h and dma_fence.h, it *appears* to be the same structures, but all with different names. That's not going to compile. Sorry, but I really don't believe it is good, much less best practice to do so...
2019 Jul 19
0
[PATCH AUTOSEL 5.2 005/171] drm/virtio: set seqno for dma-fence
From: Chia-I Wu <olvaffe at gmail.com> [ Upstream commit efe2bf965522bf0796d413b47a2abbf81d471d6f ] This is motivated by having meaningful ftrace events, but it also fixes use cases where dma_fence_is_later is called, such as in sync_file_merge. In other drivers, fence creation and cmdbuf submission normally happen atomically, mutex_lock(); fence = dma_fence_create(..., ++timeline->seqno); submit_cmdbuf(); mutex_unlock(); and have no such issue. But in our driver, because most...
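For context, dma_fence_is_later() decides ordering purely by comparing the seqno values of two fences on the same context, which is why a meaningless seqno breaks helpers such as sync_file_merge(). A rough sketch of that ordering check (helper name assumed):

    #include <linux/dma-fence.h>

    /* Sketch: when two fences share a timeline (context), only the later one
     * needs to be kept; "later" is decided solely by the seqno. */
    static struct dma_fence *example_pick_later(struct dma_fence *a,
    					    struct dma_fence *b)
    {
    	if (a->context != b->context)
    		return NULL; /* different timelines: both must be kept */

    	return dma_fence_is_later(a, b) ? a : b;
    }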
2019 Jul 19
0
[PATCH AUTOSEL 5.1 004/141] drm/virtio: set seqno for dma-fence
From: Chia-I Wu <olvaffe at gmail.com> [ Upstream commit efe2bf965522bf0796d413b47a2abbf81d471d6f ] This is motivated by having meaningful ftrace events, but it also fixes use cases where dma_fence_is_later is called, such as in sync_file_merge. In other drivers, fence creation and cmdbuf submission normally happen atomically, mutex_lock(); fence = dma_fence_create(..., ++timeline->seqno); submit_cmdbuf(); mutex_unlock(); and have no such issue. But in our driver, because most...
2018 Jan 11
1
[PATCH 1/2] drm/nouveau: Remove redundant _get
...ence.c index 503fa94dc06d..9c8f3a154d55 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -426,7 +426,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, return ret; } -static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence) +static const char *nouveau_fence_get_driver_name(struct dma_fence *fence) { return "nouveau"; } @@ -496,7 +496,7 @@ static void nouveau_fence_release(struct dma_fence *f) } static const struct dma_fence_ops nouveau_fence_ops_legacy = { - .get_driver_name = nouveau_fence_g...
2019 Jul 02
3
[PATCH v6 07/18] drm/virtio: add virtio_gpu_object_array & helpers
...t virtio_gpu_object_array *objs, + struct drm_gem_object *obj); +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence); +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs); + /* virtio vg */ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/v...
2020 Aug 28
8
[PATCH 0/6] drm/nouveau: Support sync FDs and sync objects
From: Thierry Reding <treding at nvidia.com> Hi, This series implements a new IOCTL to submit push buffers that can optionally return a sync FD or sync object to userspace. This is useful in cases where userspace wants to synchronize operations between the GPU and another driver (such as KMS for display). Among other things this allows extensions such as eglDupNativeFenceFDANDROID to be
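For context, returning a sync object to userspace typically means installing the job's completion fence into a DRM syncobj. A hedged sketch of that step (names assumed, not the actual nouveau IOCTL code):

    #include <drm/drm_syncobj.h>
    #include <linux/dma-fence.h>

    /* Sketch: look up the syncobj handle supplied by userspace and replace
     * its fence with the fence of the job that was just submitted. */
    static int example_attach_out_syncobj(struct drm_file *file, u32 handle,
    				      struct dma_fence *job_fence)
    {
    	struct drm_syncobj *syncobj = drm_syncobj_find(file, handle);

    	if (!syncobj)
    		return -ENOENT;

    	drm_syncobj_replace_fence(syncobj, job_fence);
    	drm_syncobj_put(syncobj);

    	return 0;
    }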
2019 Jun 28
0
[PATCH v3 15/18] drm/nouveau: switch driver from bo->resv to bo->base.resv
...m); - nvbo->bo.base.resv = nvbo->bo.resv; if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ @@ -1325,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct dma_fence *fence = reservation_object_get_excl(bo->resv); + struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; @@ -1656,7 +1655,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) void nouveau_bo_fence(struc...
2019 Aug 02
0
[PATCH v4 14/17] drm/nouveau: switch driver from bo->resv to bo->base.resv
...m); - nvbo->bo.base.resv = nvbo->bo.resv; if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ @@ -1325,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct dma_fence *fence = reservation_object_get_excl(bo->resv); + struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; @@ -1656,7 +1655,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) void nouveau_bo_fence(struc...
2019 Aug 05
0
[PATCH v5 15/18] drm/nouveau: switch driver from bo->resv to bo->base.resv
...m); - nvbo->bo.base.resv = nvbo->bo.resv; if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ @@ -1325,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct dma_fence *fence = reservation_object_get_excl(bo->resv); + struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; @@ -1656,7 +1655,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) void nouveau_bo_fence(struc...
2019 Aug 05
0
[PATCH v6 14/17] drm/nouveau: switch driver from bo->resv to bo->base.resv
...m); - nvbo->bo.base.resv = nvbo->bo.resv; if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ @@ -1325,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct dma_fence *fence = reservation_object_get_excl(bo->resv); + struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; @@ -1656,7 +1655,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) void nouveau_bo_fence(struc...