Displaying 20 results from an estimated 21 matches for "blk_mq_init_queue".
2017 Jan 09
4
[PATCH] virtio_blk: fix panic in initialization error path
From: Omar Sandoval <osandov at fb.com>
If blk_mq_init_queue() returns an error, it gets assigned to
vblk->disk->queue. Then, when we call put_disk(), we end up calling
blk_put_queue() with the ERR_PTR, causing a bad dereference. Fix it by
only assigning to vblk->disk->queue on success.
Signed-off-by: Omar Sandoval <osandov at fb.com>
---...
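A minimal sketch of the fix described above, shaped like the virtblk_probe() error path: keep the blk_mq_init_queue() return value in a local, check it with IS_ERR(), and store it in vblk->disk->queue only on success, so the put_disk() error path never hands blk_put_queue() an error pointer. The err variable, the &vblk->tag_set argument and the out_put_disk label are assumptions about the surrounding probe code, not quoted from the patch.

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);	/* propagate the error code... */
		goto out_put_disk;	/* ...without ever storing the ERR_PTR */
	}
	vblk->disk->queue = q;		/* assign only once we know it is valid */
	q->queuedata = vblk;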
2015 Jan 08
0
[PATCH] virtio_blk: fix blk_mq_init_queue() error handling
blk_mq_init_queue() returns ERR_PTR() on failure, not NULL.
Signed-off-by: Jörg Billeter <j at bitron.ch>
---
drivers/block/virtio_blk.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ef7c09..b1b5c75 100644
--- a/drivers/...
2017 Jan 10
0
[PATCH] virtio_blk: fix panic in initialization error path
On Mon, Jan 09, 2017 at 11:44:12AM -0800, Omar Sandoval wrote:
> From: Omar Sandoval <osandov at fb.com>
>
> If blk_mq_init_queue() returns an error, it gets assigned to
> vblk->disk->queue. Then, when we call put_disk(), we end up calling
> blk_put_queue() with the ERR_PTR, causing a bad dereference. Fix it by
> only assigning to vblk->disk->queue on success.
>
> Signed-off-by: Omar Sandoval <o...
2014 Mar 14
4
[PATCH] virtio-blk: Initialize blkqueue depth from virtqueue size
...s = BLK_MQ_F_SHOULD_MERGE,
};
@@ -555,6 +554,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
+ virtio_mq_reg.queue_depth = vblk->vq->num_free / 2;
q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
if (!q) {
--
1.9.0.279.gdc9e3eb
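For reference, a minimal sketch of the depth calculation proposed above: the virtqueue is idle at probe time, so vq->num_free is effectively the ring size, and without indirect descriptors each request chains at least two descriptors, hence the division by two. The VIRTIO_RING_F_INDIRECT_DESC check below is an assumption beyond the quoted diff, which divides unconditionally.

	/* Sketch only; the feature check goes beyond the quoted patch. */
	static unsigned int virtblk_default_queue_depth(struct virtio_device *vdev,
							struct virtqueue *vq)
	{
		unsigned int depth = vq->num_free;	/* ring is idle at probe */

		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			depth /= 2;	/* at least two descriptors per request */

		return depth;
	}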
2014 Mar 15
0
[PATCH] virtio-blk: make the queue depth the max supportable by the hypervisor
...f(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+ pr_info("%s: using queue depth %d\n", vblk->disk->disk_name,
+ virtio_mq_reg.queue_depth);
q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
if (!q) {
@@ -565,8 +573,6 @@ static int virtblk_probe(struct virtio_device *vdev)
q->queuedata = vblk;
- virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
-
vblk->disk->major = major;
vblk->disk->first_mino...
2014 Mar 15
1
[PATCH] virtio-blk: make the queue depth the max supportable by the hypervisor
...lems;
>+ virtblk_name_format("vd", index, vblk->disk->disk_name,
>DISK_NAME_LEN);
>+ pr_info("%s: using queue depth %d\n", vblk->disk->disk_name,
>+ virtio_mq_reg.queue_depth);
Isn't that visible from sysfs?
>
> q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
> if (!q) {
>@@ -565,8 +573,6 @@ static int virtblk_probe(struct virtio_device
>*vdev)
>
> q->queuedata = vblk;
>
>- virtblk_name_format("vd", index, vblk->disk->disk_name,
>DISK_NAME_LEN);
>-
> vblk->disk->major...
2017 Nov 21
0
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
...eue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
+ mutex_lock(&all_q_mutex);
+ list_del_init(&q->all_q_node);
+ mutex_unlock(&all_q_mutex);
+
blk_mq_del_queue_tag_set(q);
+
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}
/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+ const struct cpumask *online_mask)
{
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
@@ -2539,12 +2559,76 @@ static void blk_mq_queue_reinit(str...
2017 Nov 21
2
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
On 11/21/2017 07:39 PM, Jens Axboe wrote:
> On 11/21/2017 11:27 AM, Jens Axboe wrote:
>> On 11/21/2017 11:12 AM, Christian Borntraeger wrote:
>>>
>>>
>>> On 11/21/2017 07:09 PM, Jens Axboe wrote:
>>>> On 11/21/2017 10:27 AM, Jens Axboe wrote:
>>>>> On 11/21/2017 03:14 AM, Christian Borntraeger wrote:
>>>>>> Bisect
2013 Jun 03
0
[virtio_blk] BUG: unable to handle kernel paging request at ff7f5784
...upport
Pretend we have 4 issue queues.
Signed-off-by: Jens Axboe <axboe at kernel.dk>
[ 277.857736] blk-mq: CPU -> queue map
[ 277.871142] CPU 0 -> Queue 0
[ 277.934080] BUG: unable to handle kernel paging request at ff7f5784
[ 277.934080] IP: [<812fe4c7>] blk_mq_init_queue+0x5fa/0x6f7
[ 277.934080] *pde = 06814067 *pte = 00000000
[ 277.934080] Oops: 0002 [#1] SMP DEBUG_PAGEALLOC
[ 277.934080] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.10.0-rc3-00011-g5754ab5 #98
[ 277.934080] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007
[ 277.934080] task: 8d448000 ti: 8d...
2015 Sep 10
6
[RFC PATCH 0/2] virtio nvme
Hi all,
These two patches add virtio-nvme to the kernel and QEMU,
basically adapted from the virtio-blk and nvme code.
As the title says, this is a request for your comments.
Play it in Qemu with:
-drive file=disk.img,format=raw,if=none,id=D22 \
-device virtio-nvme-pci,drive=D22,serial=1234,num_queues=4
The goal is to have a full NVMe stack from the VM guest (virtio-nvme)
to the host (vhost_nvme) to LIO NVMe-over-fabrics.
2017 Nov 21
2
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
...et *set = q->tag_set;
>
> + mutex_lock(&all_q_mutex);
> + list_del_init(&q->all_q_node);
> + mutex_unlock(&all_q_mutex);
> +
> blk_mq_del_queue_tag_set(q);
> +
> blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
> }
>
> /* Basically redo blk_mq_init_queue with queue frozen */
> -static void blk_mq_queue_reinit(struct request_queue *q)
> +static void blk_mq_queue_reinit(struct request_queue *q,
> + const struct cpumask *online_mask)
> {
> WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
>
> @@ -2539,12 +2559,76 @@ s...
2013 Nov 04
2
[virtio_blk] WARNING: CPU: 0 PID: 1 at fs/sysfs/dir.c:526 sysfs_add_one()
...001b546158 ffffffff822c8cd0 ffffe8ffffa005c0 ffff88001df55cf8
[ 15.272332] ffffffff814e6322 00000000001cf538 00000000001cf538 ffff88001b5b1400
[ 15.272332] 0000000400000000 000060ffe18005c0 ffff88001b8bd800 ffff88001b5b1400
[ 15.272332] Call Trace:
[ 15.272332] [<ffffffff814e6322>] blk_mq_init_queue+0x2ff/0x3a5
[ 15.272332] [<ffffffff815c197b>] virtblk_probe+0x1d0/0x5cc
[ 15.272332] [<ffffffff8157bcc3>] virtio_dev_probe+0xbc/0xfa
[ 15.272332] [<ffffffff815aea3a>] driver_probe_device+0x11a/0x2f2
[ 15.272332] [<ffffffff815aecb0>] __driver_attach+0x61/0x83
[...