search for: fence_id

Displaying 20 results from an estimated 41 matches for "fence_id".

2019 Jun 28
1
[PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...t we need if (buflist) for drm_gem_unlock_reservations too.  Fixed.

> > -
> > -        list_del(&entry->list);
> > -        free_vbuf(vgdev, entry);
> >  }
> >  wake_up(&vgdev->ctrlq.ack_queue);
> >
> >  if (fence_id)
> >          virtio_gpu_fence_event_process(vgdev, fence_id);
> > +
> > +        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> > +                if (entry->objs)
> > +                        virtio_gpu_array_put_free(entry->objs);
> &...
2014 Sep 11
3
[Qemu-devel] [PATCH 1/2] virtio-gpu/2d: add hardware spec include file
...PLAY_INFO and friends. It makes it MUCH nicer for application software to
probe for later extensions if every member of the enum is also associated
with a preprocessor macro.

> +
> +struct virtgpu_ctrl_hdr {
> +        uint32_t type;
> +        uint32_t flags;
> +        uint64_t fence_id;
> +        uint32_t ctx_id;
> +        uint32_t padding;
> +};
> +

Is the padding to ensure that this is aligned regardless of 32-bit or 64-bit
hosts? Is it worth adding a compile-time assertion about the size of the
struct to ensure the compiler doesn't add any additional paddin...
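The struct quoted in this thread is the guest/host ABI header that carries fence_id. A minimal, self-contained sketch of that layout, together with the kind of compile-time check the reviewer is asking about (the assertions are an illustration, not part of the posted patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct virtgpu_ctrl_hdr {
        uint32_t type;
        uint32_t flags;
        uint64_t fence_id;      /* 8-byte field, starts at offset 8 */
        uint32_t ctx_id;
        uint32_t padding;       /* keeps the size at 24 bytes on all hosts */
};

/* Illustrative C11 compile-time checks: fail the build if the compiler
 * ever inserts padding of its own. */
static_assert(offsetof(struct virtgpu_ctrl_hdr, fence_id) == 8,
              "fence_id must start at offset 8");
static_assert(sizeof(struct virtgpu_ctrl_hdr) == 24,
              "struct must be exactly 24 bytes");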
2019 Jun 20
2
[PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...nse(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                 }
                 if (entry->resp_cb)
                         entry->resp_cb(vgdev, entry);
-
-                list_del(&entry->list);
-                free_vbuf(vgdev, entry);
         }
         wake_up(&vgdev->ctrlq.ack_queue);
         if (fence_id)
                 virtio_gpu_fence_event_process(vgdev, fence_id);
+
+        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+                if (entry->objs)
+                        virtio_gpu_array_put_free(entry->objs);
+                list_del(&entry->list);
+                free_vbuf(vgdev, entry);
+        }
 }
 void virtio_gpu_dequeue_cursor_func(...
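The hunk above is the heart of the rework: vbuffers are no longer freed inside the response loop; they are collected on a private reclaim_list, waiters are woken and the fence processed first, and only then are the attached object arrays released and the buffers freed. A simplified, self-contained sketch of that ordering in plain C (stand-in names and a toy list; no kernel locking or list_head primitives):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for virtio_gpu_vbuffer: a completed command, possibly with an
 * object array attached and possibly carrying a fence id. */
struct vbuf {
        struct vbuf *next;
        void *objs;             /* plays the role of entry->objs */
        unsigned fence_id;      /* 0 means no fence requested */
};

static void put_objs(void *objs)      { printf("  put objs %p\n", objs); }
static void wake_ack_queue(void)      { printf("wake ctrlq.ack_queue\n"); }
static void process_fence(unsigned i) { printf("signal fences up to %u\n", i); }

static void dequeue_ctrl(struct vbuf **queue)
{
        struct vbuf *reclaim = *queue, *entry, *tmp;
        unsigned fence_id = 0;

        /* 1. Detach completed entries (done under the queue lock in the
         *    driver) and note the highest fence id seen. */
        *queue = NULL;
        for (entry = reclaim; entry; entry = entry->next)
                if (entry->fence_id > fence_id)
                        fence_id = entry->fence_id;

        /* 2. Wake waiters and signal fences before any freeing. */
        wake_ack_queue();
        if (fence_id)
                process_fence(fence_id);

        /* 3. Reclaim last: drop object references, then free -- the job of
         *    the list_for_each_entry_safe() loop added by the patch. */
        for (entry = reclaim; entry; entry = tmp) {
                tmp = entry->next;
                if (entry->objs)
                        put_objs(entry->objs);
                free(entry);
        }
}

int main(void)
{
        struct vbuf *q = NULL;

        for (unsigned i = 1; i <= 2; i++) {
                struct vbuf *v = calloc(1, sizeof(*v));
                v->objs = v;            /* dummy payload */
                v->fence_id = i;
                v->next = q;
                q = v;
        }
        dequeue_ctrl(&q);
        return 0;
}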
2019 Jun 28
2
[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...nse(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                 }
                 if (entry->resp_cb)
                         entry->resp_cb(vgdev, entry);
-
-                list_del(&entry->list);
-                free_vbuf(vgdev, entry);
         }
         wake_up(&vgdev->ctrlq.ack_queue);
         if (fence_id)
                 virtio_gpu_fence_event_process(vgdev, fence_id);
+
+        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+                if (entry->objs)
+                        virtio_gpu_array_put_free(entry->objs);
+                list_del(&entry->list);
+                free_vbuf(vgdev, entry);
+        }
 }
 void virtio_gpu_dequeue_cursor_func(...
2019 Jul 02
2
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...nse(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                 }
                 if (entry->resp_cb)
                         entry->resp_cb(vgdev, entry);
-
-                list_del(&entry->list);
-                free_vbuf(vgdev, entry);
         }
         wake_up(&vgdev->ctrlq.ack_queue);
         if (fence_id)
                 virtio_gpu_fence_event_process(vgdev, fence_id);
+
+        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+                if (entry->objs)
+                        virtio_gpu_array_put_free(entry->objs);
+                list_del(&entry->list);
+                free_vbuf(vgdev, entry);
+        }
 }
 void virtio_gpu_dequeue_cursor_func(...
2019 Jul 19
0
[PATCH AUTOSEL 5.2 005/171] drm/virtio: set seqno for dma-fence
...ce->seq = ++drv->sync_seq;
+        fence->f.seqno = ++drv->sync_seq;
         dma_fence_get(&fence->f);
         list_add_tail(&fence->node, &drv->fences);
         spin_unlock_irqrestore(&drv->lock, irq_flags);
         cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-        cmd_hdr->fence_id = cpu_to_le64(fence->seq);
+        cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
         return 0;
 }
@@ -109,7 +112,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
         spin_lock_irqsave(&drv->lock, irq_flags);
         atomic64_set(&vgdev->fence_drv.last_seq, last_...
2019 Jul 19
0
[PATCH AUTOSEL 5.1 004/141] drm/virtio: set seqno for dma-fence
...ce->seq = ++drv->sync_seq;
+        fence->f.seqno = ++drv->sync_seq;
         dma_fence_get(&fence->f);
         list_add_tail(&fence->node, &drv->fences);
         spin_unlock_irqrestore(&drv->lock, irq_flags);
         cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-        cmd_hdr->fence_id = cpu_to_le64(fence->seq);
+        cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
         return 0;
 }
@@ -109,7 +112,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
         spin_lock_irqsave(&drv->lock, irq_flags);
         atomic64_set(&vgdev->fence_drv.last_seq, last_...
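Both AUTOSEL backports above carry the same fix: the driver stops keeping a private fence->seq and instead stores the sequence number in the dma_fence itself (f.seqno), and that same value is what gets written into cmd_hdr->fence_id. The completion side then only has to compare sequence numbers. A rough, self-contained illustration of that scheme (a toy model with placeholder names, not the driver's code):

#include <stdint.h>
#include <stdio.h>

/* Fences are identified by a monotonically increasing 64-bit sequence
 * number; the device reports the last id it has completed, and every
 * fence with seqno <= last_seq can be signalled. */
struct toy_fence {
        uint64_t seqno;
        int signalled;
};

static uint64_t sync_seq;               /* plays the role of drv->sync_seq */

static void emit(struct toy_fence *f, uint64_t *cmd_fence_id)
{
        f->seqno = ++sync_seq;          /* fence->f.seqno in the patch */
        f->signalled = 0;
        *cmd_fence_id = f->seqno;       /* cmd_hdr->fence_id in the patch */
}

static void event_process(struct toy_fence *fences, int n, uint64_t last_seq)
{
        for (int i = 0; i < n; i++)
                if (!fences[i].signalled && fences[i].seqno <= last_seq)
                        fences[i].signalled = 1;
}

int main(void)
{
        struct toy_fence f[3];
        uint64_t ids[3];

        for (int i = 0; i < 3; i++)
                emit(&f[i], &ids[i]);

        event_process(f, 3, ids[1]);    /* device finished the second fence */
        for (int i = 0; i < 3; i++)
                printf("fence %llu signalled=%d\n",
                       (unsigned long long)f[i].seqno, f[i].signalled);
        return 0;
}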
2019 Jun 19
2
[PATCH v3 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...nse(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                 }
                 if (entry->resp_cb)
                         entry->resp_cb(vgdev, entry);
-
-                list_del(&entry->list);
-                free_vbuf(vgdev, entry);
         }
         wake_up(&vgdev->ctrlq.ack_queue);
         if (fence_id)
                 virtio_gpu_fence_event_process(vgdev, fence_id);
+
+        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+                if (entry->objs)
+                        drm_gem_array_put_free(entry->objs);
+                list_del(&entry->list);
+                free_vbuf(vgdev, entry);
+        }
 }
 void virtio_gpu_dequeue_cursor_func(str...
2019 May 03
2
[PATCH] drm/virtio: Remove redundant return type
...v,
+void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_ctrl_hdr *cmd_hdr,
                            struct virtio_gpu_fence *fence)
 {
@@ -96,7 +96,6 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
         cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
         cmd_hdr->fence_id = cpu_to_le64(fence->seq);
-        return 0;
 }
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
--
2.20.1
2014 Sep 12
2
[Qemu-devel] [PATCH 1/2] virtio-gpu/2d: add hardware spec include file
...'t think this will ever be shipped as library header for external
> users ...

Fair enough. I wasn't sure, so it didn't hurt to ask.

>
>>> +struct virtgpu_ctrl_hdr {
>>> +        uint32_t type;
>>> +        uint32_t flags;
>>> +        uint64_t fence_id;
>>> +        uint32_t ctx_id;
>>> +        uint32_t padding;
>>> +};
>>> +
>>
>> Is the padding to ensure that this is aligned regardless of 32-bit or
>> 64-bit hosts?
>
> Yes.
>
>> Is it worth adding a compile-time assertion a...
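The answer above is easy to demonstrate: the explicit padding member is what keeps the header the same size regardless of how the host ABI aligns uint64_t. A small, self-contained sketch (illustrative only, not from the posted patch) that prints the difference:

#include <stdint.h>
#include <stdio.h>

/* Without the trailing padding, the size depends on the ABI: 24 bytes where
 * uint64_t forces 8-byte struct alignment (x86-64, 32-bit ARM EABI) but
 * only 20 bytes under the classic i386 ABI, which aligns it to 4. */
struct hdr_unpadded {
        uint32_t type;
        uint32_t flags;
        uint64_t fence_id;
        uint32_t ctx_id;
};

/* With the explicit padding, the layout is 24 bytes everywhere. */
struct hdr_padded {
        uint32_t type;
        uint32_t flags;
        uint64_t fence_id;
        uint32_t ctx_id;
        uint32_t padding;
};

int main(void)
{
        printf("unpadded: %zu bytes, padded: %zu bytes\n",
               sizeof(struct hdr_unpadded), sizeof(struct hdr_padded));
        return 0;
}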
2019 Jun 30
0
[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
... if (entry->resp_cb)
>                 entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list);
> -        free_vbuf(vgdev, entry);
>  }
>  wake_up(&vgdev->ctrlq.ack_queue);
>
>  if (fence_id)
>          virtio_gpu_fence_event_process(vgdev, fence_id);
> +
> +        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +                if (entry->objs)
> +                        virtio_gpu_array_put_free(entry->objs);
> +                list_del(&...
2019 Aug 02
0
[PATCH v7 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...nse(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                 }
                 if (entry->resp_cb)
                         entry->resp_cb(vgdev, entry);
-
-                list_del(&entry->list);
-                free_vbuf(vgdev, entry);
         }
         wake_up(&vgdev->ctrlq.ack_queue);
         if (fence_id)
                 virtio_gpu_fence_event_process(vgdev, fence_id);
+
+        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+                if (entry->objs)
+                        virtio_gpu_array_put_free(entry->objs);
+                list_del(&entry->list);
+                free_vbuf(vgdev, entry);
+        }
 }
 void virtio_gpu_dequeue_cursor_func(...
2019 Jul 03
0
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
... if (entry->resp_cb)
>                 entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list);
> -        free_vbuf(vgdev, entry);
>  }
>  wake_up(&vgdev->ctrlq.ack_queue);
>
>  if (fence_id)
>          virtio_gpu_fence_event_process(vgdev, fence_id);
> +
> +        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +                if (entry->objs)
> +                        virtio_gpu_array_put_free(entry->objs);
> +                list_del(&...
2019 Jun 27
0
[PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
... if (entry->resp_cb)
>                 entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list);
> -        free_vbuf(vgdev, entry);
>  }
>  wake_up(&vgdev->ctrlq.ack_queue);
>
>  if (fence_id)
>          virtio_gpu_fence_event_process(vgdev, fence_id);
> +
> +        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +                if (entry->objs)
> +                        virtio_gpu_array_put_free(entry->objs);
> +                list_del(&...