2015 Nov 19 - [PATCH -qemu] nvme: support Google vendor extension
On 18/11/2015 06:47, Ming Lin wrote:
> @@ -726,7 +798,11 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
> }
>
> start_sqs = nvme_cq_full(cq) ? 1 : 0;
> - cq->head = new_head;
> + /* When the mapped pointer memory area is setup, we don't rely on
> + * the MMIO written values to update the head pointer. */
>
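The hunk above captures the core of the extension: once the guest registers a shared doorbell buffer in ordinary memory, the device stops trusting the head value written through MMIO and reads it from that buffer instead. A minimal sketch of what the replacement logic might look like, assuming the patch's db_addr field (it appears in a later message in this thread) and hedging on the exact control flow:

    start_sqs = nvme_cq_full(cq) ? 1 : 0;
    /* When the mapped pointer memory area is set up, we don't rely on
     * the MMIO-written values to update the head pointer. */
    if (!cq->db_addr) {
        cq->head = new_head;
    }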
2015 Nov 20 - [PATCH -qemu] nvme: support Google vendor extension
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 3e1c38d..d28690d 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -543,6 +543,44 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
return NVME_SUCCESS;
}
+static void nvme_cq_notifier(EventNotifier *e)
+{
+ NvmeCQueue *cq =
+ container_of(e, NvmeCQueue, notifier);
+
+ nvme_post_cqes(cq);
+}
+
+static void nvme_init_cq_eventfd(NvmeCQueue *cq)
+{
+ NvmeCtrl *n = cq->ctrl;
+ uint16_t offset = (cq->cqid*2+1) * NVME_CAP_DSTRD(n->bar.cap);
+
+ event_notifier_init(&cq->notifier, 0);...
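The preview cuts off mid-function. A plausible completion, assuming the patch uses QEMU's ioeventfd machinery of that era (event_notifier_set_handler, memory_region_add_eventfd) and that CQ doorbells sit at BAR0 offset 0x1000 plus the stride-scaled queue index; treat this as a sketch, not the patch text:

    static void nvme_init_cq_eventfd(NvmeCQueue *cq)
    {
        NvmeCtrl *n = cq->ctrl;
        uint16_t offset = (cq->cqid * 2 + 1) * NVME_CAP_DSTRD(n->bar.cap);

        event_notifier_init(&cq->notifier, 0);
        event_notifier_set_handler(&cq->notifier, nvme_cq_notifier);
        /* Ask KVM to signal the eventfd on guest writes to this CQ's
         * doorbell, so each ring no longer costs a full MMIO exit. */
        memory_region_add_eventfd(&n->iomem, 0x1000 + offset, 4,
                                  false, 0, &cq->notifier);
    }

With this wiring, nvme_cq_notifier() runs from the event loop when the eventfd fires, and the completion path picks up the new head via the shared buffer rather than an MMIO trap.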
2015 Nov 20 - [PATCH -qemu] nvme: support Google vendor extension
> index 3e1c38d..d28690d 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -543,6 +543,44 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> return NVME_SUCCESS;
> }
>
> +static void nvme_cq_notifier(EventNotifier *e)
> +{
> + NvmeCQueue *cq =
> + container_of(e, NvmeCQueue, notifier);
> +
> + nvme_post_cqes(cq);
> +}
> +
> +static void nvme_init_cq_eventfd(NvmeCQueue *cq)
> +{
> + NvmeCtrl *n = cq->ctrl;
> + uint16_t offset = (cq->cqid*2+1) * NVME_CAP_DSTRD(n->bar.cap);
> +
> ...
2015 Nov 18 - [PATCH -qemu] nvme: support Google vendor extension
...al]>
*/
+#include <exec/memory.h>
#include <hw/block/block.h>
#include <hw/hw.h>
#include <hw/pci/msix.h>
@@ -158,6 +159,14 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
return NVME_SUCCESS;
}
+static void nvme_update_cq_head(NvmeCQueue *cq)
+{
+ if (cq->db_addr) {
+ pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr,
+ &cq->head, sizeof(cq->head));
+ }
+}
+
static void nvme_post_cqes(void *opaque)
{
NvmeCQueue *cq = opaque;
@@ -168,6 +177,8 @@ static void nvme_post_c...
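The second hunk's header shows two lines being added inside nvme_post_cqes() before the preview is truncated. Presumably the new nvme_update_cq_head() helper is called ahead of the queue-full check, so the device sees the guest's latest head value without any doorbell MMIO; a hedged sketch of the resulting loop shape (the real nvme_post_cqes() carries more logic than shown):

    static void nvme_post_cqes(void *opaque)
    {
        NvmeCQueue *cq = opaque;
        NvmeRequest *req, *next;

        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            /* Pull the current head from the guest-mapped doorbell
             * buffer (when registered) before testing for a full CQ. */
            nvme_update_cq_head(cq);
            if (nvme_cq_full(cq)) {
                break;
            }
            /* ... fill in and DMA the completion entry as before ... */
        }
    }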
2015 Nov 20 - [PATCH -qemu] nvme: support Google vendor extension
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -543,6 +543,44 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> > return NVME_SUCCESS;
> > }
> >
> > +static void nvme_cq_notifier(EventNotifier *e)
> > +{
> > + NvmeCQueue *cq =
> > + container_of(e, NvmeCQueue, notifier);
> > +
> > + nvme_post_cqes(cq);
> > +}
> > +
> > +static void nvme_init_cq_eventfd(NvmeCQueue *cq)
> > +{
> > + NvmeCtrl *n = cq->ctrl;
> > + uint16_t offset = (cq->cqid*2...
2015 Nov 18 - [RFC PATCH 0/2] Google extension to improve qemu-nvme performance
Hi Rob & Mihai,
I wrote vhost-nvme patches on top of Christoph's NVMe target.
vhost-nvme still uses MMIO, so the guest OS can run an unmodified NVMe
driver. But the tests I have done didn't show competitive performance
compared to virtio-blk/virtio-scsi. The bottleneck is in MMIO. Your nvme
vendor extension patches greatly reduce the number of MMIO writes.
So I'd like to push it
2015 Nov 21 - [PATCH -qemu] nvme: support Google vendor extension
On 21/11/2015 00:05, Ming Lin wrote:
> [ 1.752129] Freeing unused kernel memory: 420K (ffff880001b97000 - ffff880001c00000)
> [ 1.986573] clocksource: tsc: mask: 0xffffffffffffffff max_cycles: 0x30e5c9bbf83, max_idle_ns: 440795378954 ns
> [ 1.988187] clocksource: Switched to clocksource tsc
> [ 3.235423] clocksource: timekeeping watchdog: Marking clocksource 'tsc'