search for: trace_virtio_gpu_cmd_response

Displaying 14 results from an estimated 14 matches for "trace_virtio_gpu_cmd_response".

2020 Feb 14
1
[PATCH] drm/virtio: fix error check
...n(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index cfe9c54f87a3..67caecde623e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -222,7 +222,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
         if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
-            if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+            if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                 struct virtio_gpu_ctrl_hdr *cmd;
                 cmd = virtio_gpu_vb...
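The fix above is an endianness issue: resp->type is a little-endian (__le32) field, and while testing it for equality against cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) is fine, an ordered comparison (>=) only makes sense in CPU byte order, because byte-swapping does not preserve numeric ordering on big-endian hosts. A minimal user-space sketch of the same pitfall (the swab32() helper and the forced-big-endian macros are hypothetical stand-ins for the kernel's cpu_to_le32()/le32_to_cpu(); the response codes use the values from the virtio-gpu spec):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel byte-swap helpers: on little-endian
 * hosts cpu_to_le32()/le32_to_cpu() are no-ops, so force the big-endian
 * behaviour here to make the bug visible. */
static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}
#define cpu_to_le32(x) swab32(x)
#define le32_to_cpu(x) swab32(x)

#define VIRTIO_GPU_RESP_OK_DISPLAY_INFO 0x1101
#define VIRTIO_GPU_RESP_ERR_UNSPEC      0x1200

int main(void)
{
        /* A successful (non-error) response, stored little-endian as the device writes it. */
        uint32_t resp_type = cpu_to_le32(VIRTIO_GPU_RESP_OK_DISPLAY_INFO);

        /* Old check: compares byte-swapped values, wrongly reports an error (prints 1). */
        printf("buggy: %d\n", resp_type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC));

        /* Fixed check: convert to CPU order first, then compare (prints 0). */
        printf("fixed: %d\n", le32_to_cpu(resp_type) >= VIRTIO_GPU_RESP_ERR_UNSPEC);
        return 0;
}

On a little-endian machine the old check happens to give the same result as the new one, which is presumably why it went unnoticed; converting with le32_to_cpu() before the ordered comparison is correct on both byte orders.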
2019 Jun 28
2
[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
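For context on this recurring hunk: list_for_each_entry_safe() exists so that the current entry can be unlinked and freed while walking the list, because it caches the next element in the extra tmp cursor. Once the rework stops calling list_del()/free_vbuf() inside this loop (where the unlink and free move to is cut off by the snippet), the plain list_for_each_entry() iterator is enough. A small stand-alone sketch of the underlying rule, using an ordinary singly linked list instead of the kernel's struct list_head (struct node and push() are made up for the example):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

static struct node *push(struct node *head, int val)
{
        struct node *n = malloc(sizeof(*n));
        n->val = val;
        n->next = head;
        return n;
}

int main(void)
{
        struct node *head = NULL, *cur, *tmp;

        for (int i = 0; i < 3; i++)
                head = push(head, i);

        /* "Safe" traversal: save ->next before freeing the current node,
         * which is exactly what the tmp argument of list_for_each_entry_safe()
         * does.  Writing cur = cur->next after free(cur) would read freed memory. */
        for (cur = head; cur; cur = tmp) {
                tmp = cur->next;
                printf("reclaiming %d\n", cur->val);
                free(cur);
        }
        return 0;
}

Conversely, when nothing in the loop body frees or unlinks entries, the extra cursor buys nothing, which is why the patch can drop the _safe variant here.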
2019 Jun 28
2
[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jul 02
2
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jul 02
2
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jun 20
2
[PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jun 20
2
[PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jun 19
2
[PATCH v3 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jun 19
2
[PATCH v3 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jun 30
0
[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...spin_unlock(&vgdev->ctrlq.qlock);
>
> -    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +    list_for_each_entry(entry, &reclaim_list, list) {
>          resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
>
>          trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
> @@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
>          }
>          if (entry->resp_cb)
>              entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list...
2019 Aug 02
0
[PATCH v7 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...struct *work)
     } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
     spin_unlock(&vgdev->ctrlq.qlock);
-    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+    list_for_each_entry(entry, &reclaim_list, list) {
         resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
         trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
         }
         if (entry->resp_cb)
             entry->resp_cb(vgdev, entry);
-
-        list_del(&entry->list);
-        free_vbuf(vgdev, entry);
     }
     wake_up(&vgdev->ctrlq.ack_queue);
     if (fen...
2019 Jul 03
0
[PATCH v6 08/18] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...spin_unlock(&vgdev->ctrlq.qlock);
>
> -    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +    list_for_each_entry(entry, &reclaim_list, list) {
>          resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
>
>          trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
> @@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
>          }
>          if (entry->resp_cb)
>              entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list...
2019 Jun 27
0
[PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...spin_unlock(&vgdev->ctrlq.qlock);
>
> -    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +    list_for_each_entry(entry, &reclaim_list, list) {
>          resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
>
>          trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
> @@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
>          }
>          if (entry->resp_cb)
>              entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list...
2019 Jun 19
0
[PATCH v3 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
..._enable_cb(vgdev->ctrlq.vq));
>     spin_unlock(&vgdev->ctrlq.qlock);
>
> -    list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> +    list_for_each_entry(entry, &reclaim_list, list) {
>          resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
>
>          trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
> @@ -218,14 +218,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
>          }
>          if (entry->resp_cb)
>              entry->resp_cb(vgdev, entry);
> -
> -        list_del(&entry->list);
> -        free_vbuf(vgdev, entry);
>      }
>      wake_up...