Displaying 20 results from an estimated 38 matches for "cmd_size".
2013 Mar 14
4
[PATCH] virtio-spec: add field for scsi command size
...al I/O alignment.
\change_inserted 1531152142 1341302349
VIRTIO_BLK_F_CONFIG_WCE (11) Device can toggle its cache between writeback
and writethrough modes.
+\change_inserted 1986246365 1363257418
+
+\end_layout
+
+\begin_layout Description
+
+\change_inserted 1986246365 1363258629
+VIRTIO_BLK_F_CMD_SIZE (12) cmd_size field is valid.
+\change_inserted 1531152142 1341302349
+
\end_layout
\end_deeper
@@ -5994,6 +6004,30 @@ struct virtio_blk_config {
\change_inserted 1531152142 1341301918
u8 writeback;
+\change_inserted 1986246365 1363257385
+
+\end_layout
+
+\begin_layout Plain Layout
+
+...
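Read together, the two hunks above advertise a new feature bit and a new config field: VIRTIO_BLK_F_CMD_SIZE (12) declares that a cmd_size field, placed after writeback in virtio_blk_config, is valid. A minimal sketch of the implied guest-visible layout, assuming a 32-bit field (the excerpt names the field but not its width):

    /* Sketch only: layout implied by the hunks above; the width of
     * cmd_size is an assumption, the excerpt does not show it. */
    #define VIRTIO_BLK_F_CONFIG_WCE  11   /* writeback/writethrough toggle */
    #define VIRTIO_BLK_F_CMD_SIZE    12   /* cmd_size field is valid */

    struct virtio_blk_config {
            /* ... earlier fields elided ... */
            u8  writeback;
            u32 cmd_size;  /* assumed u32: SCSI command size when the bit is set */
    };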
2017 Apr 12
0
Re: [PATCH 1/2] daemon: run 'udevadm settle' with --exit-if-exists option
...2ad..dccfa15bc 100644
> --- a/daemon/guestfsd.c
> +++ b/daemon/guestfsd.c
> @@ -1213,13 +1213,18 @@ random_name (char *template)
> * fussed if it fails.
> */
> void
> -udev_settle (void)
> +udev_settle_file (const char *file)
> {
> - char cmd[80];
> + size_t cmd_size = strlen (str_udevadm) +
> + sizeof (" settle") +
> + sizeof (" --debug") +
> + (file ? sizeof (" --exit-if-exists=") + strlen (file) : 0);
> + char *cmd = malloc (cmd_size);
> int r;
>...
2014 Sep 06
5
[PATCH] virtio_blk: merge S/G list entries by default
...io_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = virtblk_queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
--
1.9.1
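For context on the field being sized here: tag_set.cmd_size tells blk-mq how many bytes of driver-private data to allocate behind every struct request, which is why it is the virtblk_req header plus a scatterlist array of sg_elems entries. A minimal sketch of how a driver reaches that area (generic blk-mq usage, not part of this patch):

    #include <linux/blk-mq.h>

    /* The cmd_size bytes live directly after each request; blk-mq hands
     * them back as the request's PDU. */
    static struct virtblk_req *virtblk_req_of(struct request *req)
    {
            return blk_mq_rq_to_pdu(req);   /* header + trailing scatterlist array */
    }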
2014 Mar 14
4
[PATCH] virtio-blk: Initialize blkqueue depth from virtqueue size
...k_mq_ops virtio_mq_ops = {
static struct blk_mq_reg virtio_mq_reg = {
.ops = &virtio_mq_ops,
.nr_hw_queues = 1,
- .queue_depth = 64,
.numa_node = NUMA_NO_NODE,
.flags = BLK_MQ_F_SHOULD_MERGE,
};
@@ -555,6 +554,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
+ virtio_mq_reg.queue_depth = vblk->vq->num_free / 2;
q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
if (!q) {
--
1.9.0.279.gdc9e3eb
2014 Mar 14
2
[PATCH] virtio-blk: make the queue depth configurable
...= 64;
+module_param(queue_depth, int, 444);
+
static struct blk_mq_reg virtio_mq_reg = {
.ops = &virtio_mq_ops,
.nr_hw_queues = 1,
@@ -551,6 +554,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vq;
}
+ virtio_mq_reg.queue_depth = queue_depth;
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
--
1.9.0
2014 Mar 15
0
[PATCH] virtio-blk: make the queue depth the max supportable by the hypervisor
...ct blk_mq_reg virtio_mq_reg = {
.ops = &virtio_mq_ops,
.nr_hw_queues = 1,
@@ -551,9 +554,14 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vq;
}
+ virtio_mq_reg.queue_depth = queue_depth > 0 ? queue_depth :
+ (vblk->vq->num_free / 2);
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+ pr_info("%s: using queue depth %d\n", vblk->disk->disk_name,
+ virtio_mq_reg.queue_depth);
 q = vblk->disk->...
2014 Sep 07
0
[PATCH] virtio_blk: merge S/G list entries by default
...>tag_set.ops = &virtio_mq_ops;
> vblk->tag_set.queue_depth = virtblk_queue_depth;
> vblk->tag_set.numa_node = NUMA_NO_NODE;
> - vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
> + vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
> vblk->tag_set.cmd_size =
> sizeof(struct virtblk_req) +
> sizeof(struct scatterlist) * sg_elems;
> --
> 1.9.1
2014 Mar 15
1
[PATCH] virtio-blk: make the queue depth the max supportable by the hypervisor
...ps = &virtio_mq_ops,
> .nr_hw_queues = 1,
>@@ -551,9 +554,14 @@ static int virtblk_probe(struct virtio_device
>*vdev)
> goto out_free_vq;
> }
>
>+ virtio_mq_reg.queue_depth = queue_depth > 0 ? queue_depth :
>+ (vblk->vq->num_free / 2);
> virtio_mq_reg.cmd_size =
> sizeof(struct virtblk_req) +
> sizeof(struct scatterlist) * sg_elems;
>+ virtblk_name_format("vd", index, vblk->disk->disk_name,
>DISK_NAME_LEN);
>+ pr_info("%s: using queue depth %d\n", vblk->disk->disk_name,
>+ virtio_mq_reg.queue_depth);...
2014 Mar 19
2
[PATCH] virtio-blk: make the queue depth the max supportable by the hypervisor
...is to fill the ring. */
+ if (!virtio_mq_reg.queue_depth) {
+ virtio_mq_reg.queue_depth = vblk->vq->num_free;
+ /* ... but without indirect descs, we use 2 descs per req */
+ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+ virtio_mq_reg.queue_depth /= 2;
+ }
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
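A worked instance of the comment above (numbers chosen for illustration, not taken from the patch): a virtqueue with 256 free descriptors and no VIRTIO_RING_F_INDIRECT_DESC yields a queue depth of 256 / 2 = 128, since the driver budgets two ring descriptors per request; with indirect descriptors each request occupies a single ring slot, so the full 256 can be used.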
2014 Mar 17
2
[PATCH] virtio-blk: make the queue depth the max supportable by the hypervisor
Theodore Ts'o <tytso at mit.edu> writes:
> The current virtio block sets a queue depth of 64, which is
> insufficient for very fast devices. It has been demonstrated that
> with a high IOPS device, using a queue depth of 256 can double the
> IOPS which can be sustained.
>
> As suggested by Venkatash Srinivas, set the queue depth by default to
> be one half the
2015 Sep 10
6
[RFC PATCH 0/2] virtio nvme
Hi all,
These 2 patches added virtio-nvme to kernel and qemu,
basically modified from virtio-blk and nvme code.
As title said, request for your comments.
Play it in Qemu with:
-drive file=disk.img,format=raw,if=none,id=D22 \
-device virtio-nvme-pci,drive=D22,serial=1234,num_queues=4
The goal is to have a full NVMe stack from VM guest(virtio-nvme)
to host(vhost_nvme) to LIO NVMe-over-fabrics
2015 Mar 24
10
[PATCH] Add virtio gpu driver.
..._p = NULL;
+ return ERR_CAST(vbuf);
+ }
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_update_cursor *)vbuf->buf;
+}
+
+static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ virtio_gpu_resp_cb cb,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size, int resp_size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_allocate_vbuf(vgdev, cmd_size, resp_size, cb);
+ if (IS_ERR(vbuf)) {
+ *vbuffer_p = NULL;
+ return ERR_CAST(vbuf);
+ }
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
+static void free_vbuf(s...
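A hedged usage sketch for the second helper above; only virtio_gpu_alloc_cmd_resp and its signature come from the excerpt, while the callback, the response size, and the queuing step are placeholders:

    /* Hypothetical caller sketch; names marked as placeholders are not
     * from the excerpt. */
    static int example_send_cmd(struct virtio_gpu_device *vgdev,
                                int resp_size /* placeholder */)
    {
            struct virtio_gpu_vbuffer *vbuf;
            struct virtio_gpu_command *cmd;

            cmd = virtio_gpu_alloc_cmd_resp(vgdev, my_resp_cb /* placeholder */,
                                            &vbuf,
                                            sizeof(struct virtio_gpu_command),
                                            resp_size);
            if (IS_ERR(cmd))
                    return PTR_ERR(cmd);
            /* fill *cmd, then queue vbuf on the control virtqueue (not shown). */
            return 0;
    }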