2015 Nov 18
0
[PATCH -qemu] nvme: support Google vendor extension
...}
@@ -350,6 +361,8 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
}
sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
+ sq->db_addr = 0;
+ sq->eventidx_addr = 0;
assert(n->cq[cqid]);
cq = n->cq[cqid];
@@ -430,6 +443,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
cq->head = cq->tail = 0;
QTAILQ_INIT(&cq->req_list);
QTAILQ_INIT(&cq->sq_list);
+ cq->db_addr = 0;
+...
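
For reference, a minimal standalone sketch of the per-queue shadow-doorbell state implied by the hunks above; the field names db_addr and eventidx_addr come from the patch, while the struct and helper around them are illustrative only:

#include <stdint.h>

/* Illustrative only: per-queue shadow-doorbell state suggested by the diff.
 * db_addr / eventidx_addr match the patch; everything else is assumed. */
struct sq_shadow {
    uint64_t db_addr;        /* guest address where the driver writes the SQ tail */
    uint64_t eventidx_addr;  /* guest address where the device publishes the tail
                                it has consumed, so the driver can skip MMIO */
};

static void sq_shadow_init(struct sq_shadow *s)
{
    /* Zero means "not configured": the guest has not yet issued the vendor
     * admin command that maps the doorbell/eventidx memory. */
    s->db_addr = 0;
    s->eventidx_addr = 0;
}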
2015 Nov 19
2
[PATCH -qemu] nvme: support Google vendor extension
On 18/11/2015 06:47, Ming Lin wrote:
> @@ -726,7 +798,11 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
> }
>
> start_sqs = nvme_cq_full(cq) ? 1 : 0;
> - cq->head = new_head;
> + /* When the mapped pointer memory area is setup, we don't rely on
> + * the MMIO written values to update the head pointer. */
>
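
A standalone model of the control flow in this hunk, with a plain host pointer standing in for the guest page that QEMU would read via DMA; the names and the DMA stand-in are assumptions, not the patch's code:

#include <stdint.h>

/* Standalone model of the head-update logic quoted above. In QEMU the
 * shadow value would be fetched from guest memory with a DMA read; here a
 * plain host pointer stands in for that mapping, so this only illustrates
 * the control flow, not the actual device code. */
struct cq_model {
    uint32_t head;
    uint64_t db_addr;            /* 0 = shadow doorbell not configured */
    const uint32_t *shadow_head; /* stand-in for the guest page at db_addr */
};

static void cq_update_head(struct cq_model *cq, uint32_t mmio_new_head)
{
    if (cq->db_addr) {
        /* Shadow doorbell configured: use the value the driver wrote into
         * guest memory rather than the MMIO payload. */
        cq->head = *cq->shadow_head;
    } else {
        cq->head = mmio_new_head;
    }
}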
2015 Nov 20
0
[PATCH -qemu] nvme: support Google vendor extension
...Cmd *cmd)
{
uint64_t db_addr = le64_to_cpu(cmd->prp1);
@@ -565,6 +603,7 @@ static uint16_t nvme_set_db_memory(NvmeCtrl *n, const NvmeCmd *cmd)
/* Submission queue tail pointer location, 2 * QID * stride. */
sq->db_addr = db_addr + 2 * i * 4;
sq->eventidx_addr = eventidx_addr + 2 * i * 4;
+ nvme_init_sq_eventfd(sq);
}
if (cq != NULL) {
@@ -572,6 +611,7 @@ static uint16_t nvme_set_db_memory(NvmeCtrl *n, const NvmeCmd *cmd)
*/
cq->db_addr = db_addr + (2 * i + 1) * 4;
cq->eventidx_...
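
The offsets in this hunk follow the layout of the real NVMe doorbell registers with a 4-byte stride: queue pair i's submission-queue tail shadow sits at an even 32-bit slot and its completion-queue head shadow at the following odd slot. A small sketch of that arithmetic (the helper names are illustrative, not from the patch):

#include <stdint.h>

/* With a 4-byte doorbell stride, SQ tail shadows occupy the even slots and
 * CQ head shadows the odd slots, mirroring the doorbell registers in BAR0. */
static inline uint64_t sq_shadow_db(uint64_t db_base, unsigned qid)
{
    return db_base + (2 * qid) * 4;
}

static inline uint64_t cq_shadow_db(uint64_t db_base, unsigned qid)
{
    return db_base + (2 * qid + 1) * 4;
}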
2015 Nov 18
3
[RFC PATCH 0/2] Google extension to improve qemu-nvme performance
Hi Rob & Mihai,
I wrote vhost-nvme patches on top of Christoph's NVMe target.
vhost-nvme still uses MMIO, so the guest OS can run an unmodified NVMe
driver. But the tests I have done didn't show competitive performance
compared to virtio-blk/virtio-scsi. The bottleneck is in MMIO. Your NVMe
vendor extension patches greatly reduce the number of MMIO writes.
So I'd like to push it
2015 Nov 20
2
[PATCH -qemu] nvme: support Google vendor extension
...nt64_t db_addr = le64_to_cpu(cmd->prp1);
> @@ -565,6 +603,7 @@ static uint16_t nvme_set_db_memory(NvmeCtrl *n, const NvmeCmd *cmd)
> /* Submission queue tail pointer location, 2 * QID * stride. */
> sq->db_addr = db_addr + 2 * i * 4;
> sq->eventidx_addr = eventidx_addr + 2 * i * 4;
> + nvme_init_sq_eventfd(sq);
> }
>
> if (cq != NULL) {
> @@ -572,6 +611,7 @@ static uint16_t nvme_set_db_memory(NvmeCtrl *n, const NvmeCmd *cmd)
> */
> cq->db_addr = db_addr + (2 * i + 1)...
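
The eventidx_addr wired up here is what lets the guest driver decide when an MMIO doorbell write is still required. A guest-side sketch of that check, borrowing the wrap-safe comparison virtio uses for its event index; this illustrates the idea and is not code from the patch:

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the driver, having advanced the shadow tail from
 * old_tail to new_tail, still needs to ring the MMIO doorbell because the
 * new tail has passed the event index last published by the device. */
static bool need_mmio_doorbell(uint16_t new_tail, uint16_t old_tail,
                               uint16_t event_idx)
{
    return (uint16_t)(new_tail - event_idx - 1) < (uint16_t)(new_tail - old_tail);
}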