Displaying 12 results from an estimated 12 matches for "nvme_dev".
2020 Aug 19
0
[PATCH 28/28] nvme-pci: use dma_alloc_pages backed dmapools
.../pci.c | 80 ++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a33adab62acbaf..fb34dbcb973673 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -114,8 +114,8 @@ struct nvme_dev {
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct device *dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
+ struct dma_pool prp_page_pool;
+ struct dma_pool prp_small_pool;
unsigned online_queues;
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES...
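For context on the hunk above: the patch turns the two pools from pointers into embedded structures. The long-standing pointer-based pattern being replaced looks roughly like this (a minimal sketch of the classic dmapool API; pool name and sizes are illustrative, error handling trimmed):

    #include <linux/dmapool.h>

    struct dma_pool *pool;
    dma_addr_t dma;
    void *prp_list;

    /* The dmapool core allocates and owns the pool object. */
    pool = dma_pool_create("prp list page", dev, PAGE_SIZE, PAGE_SIZE, 0);

    /* Fixed-size, alignment-guaranteed chunks for PRP lists. */
    prp_list = dma_pool_alloc(pool, GFP_KERNEL, &dma);
    /* ... build the PRP list, hand 'dma' to the controller ... */

    dma_pool_free(pool, prp_list, dma);
    dma_pool_destroy(pool);

Embedding struct dma_pool in struct nvme_dev lets the driver initialize the pools in place rather than own separately allocated objects, which fits pools backed by dma_alloc_pages as the series title says.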
2012 Nov 19
0
[PATCH 247/493] drivers/block: remove use of __devinit
...t cpqarray_eisa_detect(void)
+static int cpqarray_eisa_detect(void)
{
int i=0, j;
__u32 board_id;
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 1f7acff..8a7dc4c 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -975,7 +975,7 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
-static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
int qid, int cq_size, int vector)
{
int result;
@@ -1011,7 +1...
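For context on why these hunks are pure deletions: __devinit only affected section placement, roughly (a sketch from memory of include/linux/init.h, not the exact definition):

    #define __devinit   __section(.devinit.text) __cold

    /* With CONFIG_HOTPLUG=n the linker script could discard
     * .devinit.text after boot; with CONFIG_HOTPLUG=y, which became
     * the only supported configuration, the code has to stay
     * resident anyway. The annotation therefore saved nothing and
     * was removed tree-wide; dropping it changes no runtime
     * behavior. */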
2015 Nov 18
3
[RFC PATCH 0/2] Google extension to improve qemu-nvme performance
Hi Rob & Mihai,
I wrote vhost-nvme patches on top of Christoph's NVMe target.
vhost-nvme still uses mmio, so the guest OS can run an unmodified NVMe
driver. But the tests I have done didn't show competitive performance
compared to virtio-blk/virtio-scsi. The bottleneck is in mmio. Your nvme
vendor extension patches greatly reduce the number of MMIO writes.
So I'd like to push it
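The extension referred to here is the shadow-doorbell scheme that was later standardized as NVMe 1.3's Doorbell Buffer Config. A minimal sketch of the idea (field names as in the eventual mainline driver, logic simplified and endianness ignored; not the actual patch code):

    /* Wrap-aware test, the same shape virtio uses for event indexes. */
    static bool nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
    {
            return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
    }

    static void nvme_write_sq_db(struct nvme_queue *nvmeq, u16 tail)
    {
            u16 old = *nvmeq->dbbuf_sq_db;

            *nvmeq->dbbuf_sq_db = tail;        /* shared page: no VM exit */
            mb();                              /* publish before the check */
            if (nvme_dbbuf_need_event(*nvmeq->dbbuf_sq_ei, tail, old))
                    writel(tail, nvmeq->q_db); /* MMIO only when needed */
    }

Most submissions then complete without a single trapped MMIO write, which is exactly the saving discussed above.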
2015 Sep 27
0
[RFC PATCH 0/2] virtio nvme
> [ASCII architecture diagram trimmed]
For this part, HCH mentioned he is currently working on some code to
pass native NVMe commands + SGL memory via blk-mq struct request into
struct nvme_dev and/or struct nvme_queue.
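A conceptual sketch of what "native NVMe commands via blk-mq struct request" means, modeled on what later landed in mainline (recent-kernel signatures, simplified names, error handling trimmed):

    #include <linux/blk-mq.h>
    #include <linux/nvme.h>

    /* Assumes the tag set was created with cmd_size >=
     * sizeof(struct nvme_command), so the command rides in the
     * request's driver PDU and blk-mq does all queueing/tagging. */
    static int submit_passthru(struct request_queue *q,
                               struct nvme_command *cmd)
    {
            struct request *req;

            req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
            if (IS_ERR(req))
                    return PTR_ERR(req);

            memcpy(blk_mq_rq_to_pdu(req), cmd, sizeof(*cmd));
            blk_execute_rq(req, false);     /* synchronous execution */
            blk_mq_free_request(req);
            return 0;
    }

The driver's queue_rq handler then copies the command verbatim into the hardware submission queue, with the SGL/PRP mapping built from the request's bios.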
2014 Aug 20
1
[RFC PATCH 03/11] PCI/MSI: Refactor pci_dev_msi_enabled()
...&& pcibios_disable_irq)
> pcibios_disable_irq(dev);
> }
>
> diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index
> 02351e2..f96b90f 100644
> --- a/drivers/block/nvme-core.c
> +++ b/drivers/block/nvme-core.c
> @@ -2325,9 +2325,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
>
> static void nvme_dev_unmap(struct nvme_dev *dev) {
> - if (dev->pci_dev->msi_enabled)
> + if (pci_dev_msi_enabled(dev->pci_dev, MSI_TYPE))
> pci_disable_msi(dev->pci_dev);
> - else if (dev->pci_dev->msix_enabled)
> + else if...
2014 Jul 26
0
[RFC PATCH 03/11] PCI/MSI: Refactor pci_dev_msi_enabled()
...enabled(dev, MSI_TYPE | MSIX_TYPE)
+ && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 02351e2..f96b90f 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -2325,9 +2325,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
static void nvme_dev_unmap(struct nvme_dev *dev)
{
- if (dev->pci_dev->msi_enabled)
+ if (pci_dev_msi_enabled(dev->pci_dev, MSI_TYPE))
pci_disable_msi(dev->pci_dev);
- else if (dev->pci_dev->msix_enabled)
+ else if (pci_dev_msi_enabled(dev->pci_...
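From the call sites in these hunks, a boolean taking a pci_dev plus an MSI_TYPE/MSIX_TYPE mask, the helper the RFC introduces plausibly looks like this (a reconstruction from the diff, not the actual patch):

    #define MSI_TYPE        (1 << 0)
    #define MSIX_TYPE       (1 << 1)

    static inline bool pci_dev_msi_enabled(struct pci_dev *dev, int type)
    {
            /* Mask semantics match the MSI_TYPE | MSIX_TYPE caller
             * in the pcibios hunk above. */
            if ((type & MSI_TYPE) && dev->msi_enabled)
                    return true;
            if ((type & MSIX_TYPE) && dev->msix_enabled)
                    return true;
            return false;
    }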
2020 Aug 19
39
a saner API for allocating DMA addressable pages
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages without incurring bounce buffering can finally
be properly supported.
I'm still a
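For reference, the API as it was eventually merged (a sketch; signatures per linux/dma-mapping.h, error handling trimmed):

    #include <linux/dma-mapping.h>

    struct page *page;
    dma_addr_t dma;

    /* Available on all platforms; the memory may be non-coherent,
     * so the caller owns cache maintenance. */
    page = dma_alloc_pages(dev, size, &dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
    if (!page)
            return -ENOMEM;

    /* CPU fills the buffer ... then hand ownership to the device: */
    dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
    /* ... device DMA completes; reclaim ownership before reading: */
    dma_sync_single_for_cpu(dev, dma, size, DMA_BIDIRECTIONAL);

    dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);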
2015 Sep 23
3
[RFC PATCH 0/2] virtio nvme
On Fri, 2015-09-18 at 14:09 -0700, Nicholas A. Bellinger wrote:
> On Fri, 2015-09-18 at 11:12 -0700, Ming Lin wrote:
> > On Thu, 2015-09-17 at 17:55 -0700, Nicholas A. Bellinger wrote:
> > > On Thu, 2015-09-17 at 16:31 -0700, Ming Lin wrote:
> > > > On Wed, 2015-09-16 at 23:10 -0700, Nicholas A. Bellinger wrote:
> > > > > Hi Ming & Co,
2019 Mar 19
3
virtio-blk: should num_vqs be limited by num_possible_cpus()?
Hi Jason,
On 3/18/19 3:47 PM, Jason Wang wrote:
>
> On 2019/3/15 8:41, Cornelia Huck wrote:
>> On Fri, 15 Mar 2019 12:50:11 +0800
>> Jason Wang <jasowang at redhat.com> wrote:
>>
>>> Or something like I proposed several years ago?
>>> https://do-db2.lkml.org/lkml/2014/12/25/169
>>>
>>> Btw, for virtio-net, I think we actually
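For reference, the clamp being debated ended up looking like this in virtio-blk's init_vq() (a sketch of the eventual mainline fix, which bounds by nr_cpu_ids):

    unsigned short num_vqs;
    int err;

    err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                               struct virtio_blk_config, num_queues,
                               &num_vqs);
    if (err)
            num_vqs = 1;    /* feature absent: single queue */

    /* No point allocating more virtqueues than CPU ids that could
     * ever submit I/O. */
    num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);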
2013 Mar 27
0
[PATCH 04/22] block: Convert bio_for_each_segment() to bvec_iter
..._name, req, bvec->bv_len);
+ nbd->disk->disk_name, req, bvec.bv_len);
}
}
return req;
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index bc97493..faddcf3 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -380,7 +380,7 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
nvme_free_iod(dev, iod);
if (status) {
bio_endio(bio, -EIO);
- } else if (bio->bi_vcnt > bio->bi_iter.bi_idx) {
+ } else if (bio->bi_iter.bi_size) {
requeue_bio(dev, bio);
} else {
bio_endio(bio, 0);
@@ -476,33 +476,34 @@ static int nvme_setup_prps(struct...
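For readers of the hunks above, the post-conversion iteration pattern (a sketch; bvec becomes a struct copy, which is why the nbd hunk changes bvec->bv_len to bvec.bv_len):

    struct bio_vec bvec;
    struct bvec_iter iter;

    bio_for_each_segment(bvec, bio, iter) {
            /* 'bvec' is a value copy and 'iter' carries the position,
             * so per-bio bi_idx bookkeeping goes away. */
            pr_debug("seg: page %p len %u off %u\n",
                     bvec.bv_page, bvec.bv_len, bvec.bv_offset);
    }

Completion checks shift from index comparisons to bytes remaining in the iterator, hence the bio->bi_iter.bi_size test in the nvme hunk.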
2015 Sep 10
6
[RFC PATCH 0/2] virtio nvme
Hi all,
These 2 patches add virtio-nvme to the kernel and qemu,
basically modified from the virtio-blk and nvme code.
As the title says, this is a request for your comments.
Play it in Qemu with:
-drive file=disk.img,format=raw,if=none,id=D22 \
-device virtio-nvme-pci,drive=D22,serial=1234,num_queues=4
The goal is to have a full NVMe stack from VM guest(virtio-nvme)
to host(vhost_nvme) to LIO NVMe-over-fabrics
2014 Jul 26
20
[RFC PATCH 00/11] Refactor MSI to support Non-PCI device
Hi all,
The series is a draft of a generic MSI driver that supports PCI
and non-PCI devices which have MSI capability. If you're not interested
in it, sorry for the noise.
The series is based on Linux-3.16-rc1.
MSI was introduced in PCI Spec 2.2. Currently, the kernel MSI
driver code is bound to PCI devices. Because MSI has a lot of
advantages in design, more and more non-PCI devices want to
use