Displaying 20 results from an estimated 30 matches for "virtio_gpu_array_lock_resv".
2019 Jul 02
3
[PATCH v6 07/18] drm/virtio: add virtio_gpu_object_array & helpers
...+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+
/* virtio...
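A minimal usage sketch for the helpers declared above (not from the patch itself; the function name, its parameters, and the assumption that virtio_gpu_array_put_free() drops the array together with the object references it holds are illustrative):

/*
 * Hypothetical usage sketch: build an object array either from GEM
 * handles or object by object, then drop it again.
 */
static void array_usage_sketch(struct drm_file *file, u32 *handles,
                               u32 nents, struct drm_gem_object *obj)
{
        struct virtio_gpu_object_array *objs;

        /* Variant 1: look up GEM handles and collect them in one go. */
        objs = virtio_gpu_array_from_handles(file, handles, nents);
        if (objs)
                virtio_gpu_array_put_free(objs);  /* drop array + held refs */

        /* Variant 2: allocate an empty array and add objects explicitly. */
        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return;
        virtio_gpu_array_add_obj(objs, obj);
        virtio_gpu_array_put_free(objs);
}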
2019 Jul 03
1
[PATCH v6 07/18] drm/virtio: add virtio_gpu_object_array & helpers
...> > +struct virtio_gpu_object_array*
> > +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
> > +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
> > + struct drm_gem_object *obj);
> > +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
> > +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
> > +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
> > + struct dma_fence *fence);
> > +void virtio_gpu_...
2020 Feb 11
0
[PATCH 2/2] drm/virtio: fix virtio_gpu_cursor_plane_update().
Add missing virtio_gpu_array_lock_resv() call.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
drivers/gpu/drm/virtio/virtgpu_plane.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ac42c84d2d7f..d1c3f5fbfee4 100644
--- a/drivers/gp...
2020 Feb 11
0
[PATCH 1/2] drm/virtio: fix virtio_gpu_execbuffer_ioctl locking
...io/virtgpu_ioctl.c
index 205ec4abae2b..0477d1250f2d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -126,22 +126,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
bo_handles = NULL;
}
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_unused_fd;
- }
-
buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
- goto out_unresv;
+ goto out_unused_fd;
+ }
+
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ g...
2019 Jul 02
2
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...g with virtio_gpu_execbuffer_ioctl.
Stop using ttm helpers, use the virtio_gpu_array_* helpers (which work
on the reservation objects directly) instead.
New workflow:
(1) All gem objects needed by a command are added to a
virtio_gpu_object_array.
(2) All reservation objects will be locked (virtio_gpu_array_lock_resv).
(3) virtio_gpu_fence_emit() completes fence initialization.
(4) fence gets added to the objects, reservation objects are unlocked
(virtio_gpu_array_add_fence, virtio_gpu_array_unlock_resv).
(5) virtio command is submitted to the host.
(6) The completion callback (virtio_gpu_dequeue_ctrl_...
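A hedged sketch of steps (2)-(5) above, reusing the err_unlock/err_put_free unwind pattern from the later patches in this series; the function name, the struct virtio_gpu_device parameter, the embedded dma_fence field ->f, and the omitted command-queueing call are assumptions, not the driver's actual code:

static int fence_objs_sketch(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_fence *fence;
        int ret;

        ret = virtio_gpu_array_lock_resv(objs);         /* step (2) */
        if (ret)
                goto err_put_free;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        /* step (3): virtio_gpu_fence_emit() completes fence initialization
         * while the command is built (call site omitted here). */

        virtio_gpu_array_add_fence(objs, &fence->f);    /* step (4); assumes
                                                         * the dma_fence is
                                                         * embedded as ->f */
        virtio_gpu_array_unlock_resv(objs);

        /* step (5): queue the virtio command; step (6): the completion
         * callback drops objs, so no put_free on the success path. */
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}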
2019 Sep 05
1
[PATCH] drm/virtio: fix command submission with objects but without fence.
...}
> This leaks when fence == NULL and vbuf->objs != NULL (which can really
> happen IIRC... not at my desk to check).
Yes, it can happen, for example when flushing dumb buffers.
But I don't think we leak in this case. The code paths which don't need
a fence also do not call virtio_gpu_array_lock_resv(), so things are
balanced. The actual release of the objs happens in
virtio_gpu_dequeue_ctrl_func() via virtio_gpu_array_put_free_delayed().
cheers,
Gerd
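A rough sketch of the balance described above, as a fragment of the command-queueing path (only the helper names and the delayed release come from the messages; the vbuf layout, the fence variable, and the embedded ->f field are assumptions):

        /* Sketch only: fenced and unfenced submissions stay balanced. */
        if (vbuf->objs) {
                if (fence) {
                        /* caller locked the reservations before submitting */
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
                /*
                 * Without a fence the caller never took the reservation
                 * locks, so there is nothing to unlock here; the object
                 * references are still dropped later, in
                 * virtio_gpu_dequeue_ctrl_func() via
                 * virtio_gpu_array_put_free_delayed().
                 */
        }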
2019 Jul 03
0
[PATCH v6 07/18] drm/virtio: add virtio_gpu_object_array & helpers
..._array_alloc(u32 nents);
> +struct virtio_gpu_object_array*
> +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
> +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
> + struct drm_gem_object *obj);
> +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
> +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
> +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
> + struct dma_fence *fence);
> +void virtio_gpu_array_put_free(struc...
2019 Aug 02
0
[PATCH v7 07/18] drm/virtio: add virtio_gpu_object_array & helpers
...+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+
/* virtio...
2019 Aug 02
0
[PATCH v7 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...explicitly keep a reference of all buffers used instead of depending
on ttm_bo_put() checking whenever the object is actually idle before
releasing it.
New workflow:
(1) All gem objects needed by a command are added to a
virtio_gpu_object_array.
(2) All reservation objects will be locked (virtio_gpu_array_lock_resv).
(3) virtio_gpu_fence_emit() completes fence initialization.
(4) fence gets added to the objects, reservation objects are unlocked
(virtio_gpu_array_add_fence, virtio_gpu_array_unlock_resv).
(5) virtio command is submitted to the host.
(6) The completion callback (virtio_gpu_dequeue_ctrl_...
2019 Jul 03
0
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...> Stop using ttm helpers, use the virtio_gpu_array_* helpers (which work
> on the reservation objects directly) instead.
>
> New workflow:
>
> (1) All gem objects needed by a command are added to a
> virtio_gpu_object_array.
> (2) All reservation objects will be locked (virtio_gpu_array_lock_resv).
> (3) virtio_gpu_fence_emit() completes fence initialization.
> (4) fence gets added to the objects, reservation objects are unlocked
> (virtio_gpu_array_add_fence, virtio_gpu_array_unlock_resv).
> (5) virtio command is submitted to the host.
> (6) The completion callback...
2019 Jul 02
2
[PATCH v6 14/18] drm/virtio: rework virtio_gpu_transfer_from_host_ioctl fencing
...= drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj);
- if (ret)
- goto out;
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
convert_to_hw_box(&box, &args->box);
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
- goto out_unres;
+ goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, qobj->hw_res_handle,
- vfpr...
2019 Jul 02
2
[PATCH v6 15/18] drm/virtio: rework virtio_gpu_transfer_to_host_ioctl fencing
...nvert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, qobj, offset,
+ (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
box.w, box.h, box.x, box.y, NULL);
+ virtio_gpu_array_put_free(objs);
} else {
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
+
+ ret = -ENOMEM;
fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unres;
- }
+ if (!fence)
+ goto err_unlock;
+
virtio_gpu_cmd_transfer_to_host_3d
- (vgdev, qobj,
+ (vgdev,
vfpriv ? vfpriv->...
2019 Aug 02
0
[PATCH v7 10/18] drm/virtio: rework virtio_gpu_transfer_from_host_ioctl fencing
...&args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
convert_to_hw_box(&box, &args->box);
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
- goto out_unres;
+ goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, qobj->hw_res_handle,
- vfpr...
2019 Sep 04
2
[PATCH] drm/virtio: fix command submission with objects but without fence.
Only call virtio_gpu_array_add_fence if we actually have a fence.
Fixes: da758d51968a ("drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing")
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
drivers/gpu/drm/virtio/virtgpu_vq.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c
2019 Jul 03
0
[PATCH v6 15/18] drm/virtio: rework virtio_gpu_transfer_to_host_ioctl fencing
...> + (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
> box.w, box.h, box.x, box.y, NULL);
> + virtio_gpu_array_put_free(objs);
Don't we need this in non-3D case as well?
> } else {
> + ret = virtio_gpu_array_lock_resv(objs);
> + if (ret != 0)
> + goto err_put_free;
> +
> + ret = -ENOMEM;
> fence = virtio_gpu_fence_alloc(vgdev);
> - if (!fence) {
> - ret = -ENOMEM;
> -...