search for: data_buf

Displaying 20 results from an estimated 72 matches for "data_buf".

2019 Sep 05
2
[PATCH v2] drm/virtio: Use vmalloc for command buffer allocations.
...index 981ee16e3ee9..3ec89ae8478c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -154,7 +154,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -251,13 +251,70 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) wake_up(&vgdev->cursorq.ack_queue); } +/* How many bytes left in this page. */ +static unsigned int rest_of_page(void *data)...
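
The change in this patch swaps kfree() for kvfree() so the command buffer may come from either kmalloc() or vmalloc(). A minimal sketch of that allocation/free pairing outside the driver (the wrapper functions are illustrative, not from the patch):

#include <linux/mm.h>      /* kvfree(), is_vmalloc_addr() */
#include <linux/slab.h>    /* kvmalloc() */

/*
 * Sketch: allocate a command payload that may be too large for a
 * physically contiguous kmalloc(). kvmalloc() falls back to vmalloc()
 * for large sizes, and kvfree() frees either kind of pointer correctly.
 */
static void *alloc_cmd_payload(size_t size)
{
	return kvmalloc(size, GFP_KERNEL);
}

static void free_cmd_payload(void *buf)
{
	/* kvfree() checks is_vmalloc_addr() internally. */
	kvfree(buf);
}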
2019 Sep 02
1
[PATCH] drm/virtio: Use vmalloc for command buffer allocations.
On Fri, Aug 30, 2019 at 10:49:25AM -0700, David Riley wrote: > Hi Gerd, > > On Fri, Aug 30, 2019 at 4:16 AM Gerd Hoffmann <kraxel at redhat.com> wrote: > > > > Hi, > > > > > > > - kfree(vbuf->data_buf); > > > > > + kvfree(vbuf->data_buf); > > > > > > > > if (is_vmalloc_addr(vbuf->data_buf)) ... > > > > > > > > needed here I gues? > > > > > > > > > > kvfree() handles vmalloc/kmalloc/kvmalloc int...
2019 Sep 10
0
[PATCH v3 2/2] drm/virtio: Use vmalloc for command buffer allocations.
...index bf5a4a50b002..76cf2b9d5d1d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -154,7 +154,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -251,13 +251,54 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) wake_up(&vgdev->cursorq.ack_queue); } +/* Create sg_table from a vmalloc'd buffer. */ +static struct sg_table *vmalloc_to...
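
The v3/v4 patches add a vmalloc_to_sgt() helper that walks the vmalloc'd buffer page by page, since such a buffer is only virtually contiguous. A rough sketch of the idea under that assumption (error handling trimmed; buf_to_sgt() is an illustrative stand-in, not the patch's exact code):

#include <linux/kernel.h>
#include <linux/mm.h>          /* vmalloc_to_page(), offset_in_page() */
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: map a vmalloc'd region into an sg_table, one page per entry. */
static struct sg_table *buf_to_sgt(void *data, unsigned int size,
				   unsigned int *sg_ents)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	int nents, i;

	nents = DIV_ROUND_UP(offset_in_page(data) + size, PAGE_SIZE);
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt || sg_alloc_table(sgt, nents, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	*sg_ents = nents;
	for_each_sg(sgt->sgl, sg, nents, i) {
		unsigned int len = min_t(unsigned int, size,
					 PAGE_SIZE - offset_in_page(data));

		/* vmalloc memory is not physically contiguous, so
		 * translate each virtual page individually. */
		sg_set_page(sg, vmalloc_to_page(data), len,
			    offset_in_page(data));
		data += len;
		size -= len;
	}
	return sgt;
}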
2019 Sep 11
1
[PATCH v4 2/2] drm/virtio: Use vmalloc for command buffer allocations.
...index 5a64c776138d..9f9b782dd332 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -155,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -256,13 +256,54 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) wake_up(&vgdev->cursorq.ack_queue); } +/* Create sg_table from a vmalloc'd buffer. */ +static struct sg_table *vmalloc_to...
2019 Sep 12
1
[PATCH] drm/virtio: Fix warning in virtio_gpu_queue_fenced_ctrl_buffer.
....c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 9f9b782dd332..80176f379ad5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -358,7 +358,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, &outcnt); if (!sgt) - return -ENOMEM; + return; vout = sgt->sgl; } else { sg_init_one(&sg, vbuf->data_buf, vbuf->data_size); -- 2.23.0.162.g0b9fbb3734-goog
2020 Mar 02
0
[virtio-dev] [PATCH v2 4/4] drm/virtio: Support virtgpu exported resources
...struct virtio_gpu_vbuffer *vbuf) > +{ > + struct virtio_gpu_resp_resource_uuid *resp = > + (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf; > + struct virtio_gpu_object *obj = > + (struct virtio_gpu_object *)vbuf->data_buf; > + uint32_t resp_type = le32_to_cpu(resp->hdr.type); > + > + /* > + * Keeps the data_buf, which points to this virtio_gpu_object, from > + * getting kfree'd after this cb returns. > + */ > + vbuf->data_buf = NULL; > + > +...
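
The callback quoted here keeps the virtio_gpu_object alive by clearing vbuf->data_buf before the generic completion path frees it. The underlying pattern, with purely illustrative names and types, looks roughly like this:

#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative request carrying a payload the cleanup path normally frees. */
struct request {
	void *data_buf;                    /* freed by cleanup unless NULL */
	void (*done)(struct request *req); /* per-command completion hook */
};

static void response_done(struct request *req)
{
	void *obj = req->data_buf;

	/* Take ownership: generic cleanup skips a NULL data_buf. */
	req->data_buf = NULL;

	/* ... use obj beyond the lifetime of the request ... */
	(void)obj;
}

static void complete_request(struct request *req)
{
	if (req->done)
		req->done(req);
	kvfree(req->data_buf);  /* no-op if the callback stole the pointer */
	kfree(req);
}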
2019 Sep 06
0
[PATCH v2] drm/virtio: Use vmalloc for command buffer allocations.
...; struct virtio_gpu_fence *fence) > { > struct virtqueue *vq = vgdev->ctrlq.vq; > + struct scatterlist *vout = NULL, sg; > + struct sg_table *sgt = NULL; > int rc; > + int outcnt = 0; > + > + if (vbuf->data_size) { > + if (is_vmalloc_addr(vbuf->data_buf)) { > + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, > + &outcnt); > + if (!sgt) > + return -ENOMEM; > + vout = sgt->sgl; > + } else { > + sg_init_one(&sg, vbuf->data_buf, vbuf->data_size); > + vout = &sg; > + o...
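
The queueing path under review picks the scatterlist form based on where the payload lives: one entry for a kmalloc'd buffer, a page-by-page sg_table for a vmalloc'd one. Schematically (describe_payload() and the reuse of the buf_to_sgt() sketch above are assumptions, not the driver's API):

#include <linux/errno.h>
#include <linux/mm.h>           /* is_vmalloc_addr() */
#include <linux/scatterlist.h>

/* buf_to_sgt() is the page-walking helper sketched earlier. */
struct sg_table *buf_to_sgt(void *data, unsigned int size,
			    unsigned int *sg_ents);

static int describe_payload(void *buf, unsigned int size,
			    struct scatterlist *one_sg,
			    struct scatterlist **out_sg,
			    struct sg_table **out_sgt)
{
	if (is_vmalloc_addr(buf)) {
		unsigned int ents;
		struct sg_table *sgt = buf_to_sgt(buf, size, &ents);

		if (!sgt)
			return -ENOMEM;
		*out_sgt = sgt;
		*out_sg = sgt->sgl;
	} else {
		/* Physically contiguous: one scatterlist entry is enough. */
		sg_init_one(one_sg, buf, size);
		*out_sg = one_sg;
	}
	return 0;
}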
2017 Mar 01
2
[PATCH] drm: virtio: use kmem_cache
...EL); + if (IS_ERR(vbuf)) + return ERR_CAST(vbuf); memset(vbuf, 0, VBUFFER_SIZE); BUG_ON(size > MAX_INLINE_CMD_SIZE); @@ -208,9 +173,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); kfree(vbuf->data_buf); - spin_lock(&vgdev->free_vbufs_lock); - list_add(&vbuf->list, &vgdev->free_vbufs); - spin_unlock(&vgdev->free_vbufs_lock); + kmem_cache_free(vgdev->vbufs, vbuf); } static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list) -- 1.8.3.1
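
This patch drops the driver's hand-rolled free list in favour of a kmem_cache for the fixed-size vbuffer structs. The general slab-cache pattern it relies on, with generic names in place of the driver's:

#include <linux/slab.h>

struct vbuffer_like {
	void *data_buf;
	char inline_cmd[96];   /* illustrative inline command space */
};

static struct kmem_cache *vbuf_cache;

static int cache_init(void)
{
	/* One slab cache sized for the fixed part of every buffer. */
	vbuf_cache = kmem_cache_create("example_vbufs",
				       sizeof(struct vbuffer_like),
				       0, 0, NULL);
	return vbuf_cache ? 0 : -ENOMEM;
}

static struct vbuffer_like *vbuf_get(void)
{
	return kmem_cache_zalloc(vbuf_cache, GFP_KERNEL);
}

static void vbuf_put(struct vbuffer_like *vbuf)
{
	kmem_cache_free(vbuf_cache, vbuf);
}

static void cache_exit(void)
{
	kmem_cache_destroy(vbuf_cache);
}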
2017 Dec 28
3
[PATCH] drm/virtio: Add window server support
...uct virtio_gpu_vbuffer *vbuf) +{ + struct virtqueue *vq = vgdev->winsrv_txq.vq; + struct scatterlist *sgs[2], vcmd, vout; + int ret; + + if (!vgdev->vqs_ready) + return -ENODEV; + + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + sgs[0] = &vcmd; + + sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); + sgs[1] = &vout; + + spin_lock(&vgdev->winsrv_txq.qlock); +retry: + ret = virtqueue_add_sgs(vq, sgs, 2, 0, vbuf, GFP_ATOMIC); + if (ret == -ENOSPC) { + spin_unlock(&vgdev->winsrv_txq.qlock); + wait_event(vgdev->winsrv_txq.ack_queue, vq->num_free); +...
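
The transmit path in this excerpt retries virtqueue_add_sgs() when the ring reports -ENOSPC, dropping the queue lock while it waits for the host to free descriptors. A condensed sketch of that retry loop (helper name and parameters are illustrative):

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/wait.h>

static int queue_two_sgs(struct virtqueue *vq, spinlock_t *qlock,
			 wait_queue_head_t *ack_queue,
			 struct scatterlist *cmd_sg,
			 struct scatterlist *data_sg, void *token)
{
	struct scatterlist *sgs[] = { cmd_sg, data_sg };
	int ret;

	spin_lock(qlock);
retry:
	/* Two outgoing sg lists, nothing to read back. */
	ret = virtqueue_add_sgs(vq, sgs, 2, 0, token, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* Ring full: wait for the host to consume descriptors. */
		spin_unlock(qlock);
		wait_event(*ack_queue, vq->num_free);
		spin_lock(qlock);
		goto retry;
	}
	virtqueue_kick(vq);
	spin_unlock(qlock);
	return ret;
}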
2018 Jan 26
0
[PATCH v3 1/2] drm/virtio: Add window server support
...uct virtio_gpu_vbuffer *vbuf) +{ + struct virtqueue *vq = vgdev->winsrv_txq.vq; + struct scatterlist *sgs[2], vcmd, vout; + int ret; + + if (!vgdev->vqs_ready) + return -ENODEV; + + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + sgs[0] = &vcmd; + + sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); + sgs[1] = &vout; + + spin_lock(&vgdev->winsrv_txq.qlock); +retry: + ret = virtqueue_add_sgs(vq, sgs, 2, 0, vbuf, GFP_ATOMIC); + if (ret == -ENOSPC) { + spin_unlock(&vgdev->winsrv_txq.qlock); + wait_event(vgdev->winsrv_txq.ack_queue, vq->num_free); +...
2018 Jan 26
3
[PATCH v3 0/2] drm/virtio: Add window server support
Hi, this work is based on the virtio_wl driver in the ChromeOS kernel by Zach Reizner, currently at: https://chromium.googlesource.com/chromiumos/third_party/kernel/+/chromeos-4.4/drivers/virtio/virtio_wl.c There's one feature missing currently, which is letting clients write directly to the host part of a resource, so the extra copy in TRANSFER_TO_HOST isn't needed. Have pushed the...
2017 Dec 14
2
[PATCH] drm/virtio: Add window server support
...uct virtio_gpu_vbuffer *vbuf) +{ + struct virtqueue *vq = vgdev->winsrv_txq.vq; + struct scatterlist *sgs[2], vcmd, vout; + int ret; + + if (!vgdev->vqs_ready) + return -ENODEV; + + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + sgs[0] = &vcmd; + + sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); + sgs[1] = &vout; + + spin_lock(&vgdev->winsrv_txq.qlock); +retry: + ret = virtqueue_add_sgs(vq, sgs, 2, 0, vbuf, GFP_ATOMIC); + if (ret == -ENOSPC) { + spin_unlock(&vgdev->winsrv_txq.qlock); + wait_event(vgdev->winsrv_txq.ack_queue, vq->num_free); +...
2019 Jun 28
2
[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
...tio_gpu_fence *fence) + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_cmd_submit *cmd_p; struct virtio_gpu_vbuffer *vbuf; @@ -949,6 +955,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, vbuf->data_buf = data; vbuf->data_size = data_size; + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); -- 2.18.1
2015 Jun 16
0
[PATCH 3/3] virtio-gpu: add locking for vbuf pool
...t); + spin_unlock(&vgdev->free_vbufs_lock); memset(vbuf, 0, VBUFFER_SIZE); BUG_ON(size > MAX_INLINE_CMD_SIZE); @@ -201,7 +206,9 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); kfree(vbuf->data_buf); + spin_lock(&vgdev->free_vbufs_lock); list_add(&vbuf->list, &vgdev->free_vbufs); + spin_unlock(&vgdev->free_vbufs_lock); } static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list) -- 1.8.3.1
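
The fix here is the standard spinlock-around-a-free-list pattern for the vbuf pool. In isolation, with generic names rather than the driver's:

#include <linux/list.h>
#include <linux/spinlock.h>

struct pooled_buf {
	struct list_head list;
	/* ... payload fields ... */
};

static LIST_HEAD(free_bufs);
static DEFINE_SPINLOCK(free_bufs_lock);

/* Return a buffer to the pool; callers may race, so take the lock. */
static void buf_release(struct pooled_buf *buf)
{
	spin_lock(&free_bufs_lock);
	list_add(&buf->list, &free_bufs);
	spin_unlock(&free_bufs_lock);
}

/* Grab a buffer from the pool, or NULL if it is empty. */
static struct pooled_buf *buf_acquire(void)
{
	struct pooled_buf *buf;

	spin_lock(&free_bufs_lock);
	buf = list_first_entry_or_null(&free_bufs, struct pooled_buf, list);
	if (buf)
		list_del(&buf->list);
	spin_unlock(&free_bufs_lock);
	return buf;
}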
2015 Sep 09
0
[PATCH 2/5] virtio-gpu: add & use virtio_gpu_queue_fenced_ctrl_buffer
...rtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); } static void @@ -524,9 +554,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev, vbuf->data_buf = ents; vbuf->data_size = sizeof(*ents) * nents; - if (fence) - virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); } static void virtio_gpu_cmd_get_display_i...
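
The refactor folds "emit a fence if one was passed, then queue" into a single virtio_gpu_queue_fenced_ctrl_buffer() helper instead of repeating it at each call site. A schematic of that shape only; every type and the flag value below are illustrative stand-ins, not virtio-gpu's:

#include <linux/types.h>

struct cmd_hdr   { __u64 fence_id; __u32 flags; };
struct fence_ctx { __u64 next_seq; };

static void queue_ctrl_buffer(void *vbuf)
{
	/* ... plain ring submission, as before the refactor ... */
}

static void queue_fenced_ctrl_buffer(void *vbuf, struct cmd_hdr *hdr,
				     struct fence_ctx *fence)
{
	if (fence) {
		/* Stamp the header so the host signals this command. */
		hdr->fence_id = fence->next_seq++;
		hdr->flags   |= 1; /* "fence present" flag, illustrative */
	}
	queue_ctrl_buffer(vbuf);
}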