search for: dma_to_device

Displaying 20 results from an estimated 257 matches for "dma_to_device".

2020 Sep 14
2
[PATCH 07/17] 53c700: improve non-coherent DMA handling
...rs/scsi/53c700.c @@ -269,6 +269,20 @@ NCR_700_get_SXFER(struct scsi_device *SDp) spi_period(SDp->sdev_target)); } +static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_cache_sync(h->dev, addr, size, DMA_TO_DEVICE); +} + +static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_cache_sync(h->dev, addr, size, DMA_FROM_DEVICE); +} + struct Scsi_Host * NCR_700_detect(struct scsi_host_template *tpnt, struct NCR_700_Host_P...
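
The helpers added here only touch the cache when the host was set up with a non-coherent allocation; on coherent platforms they do nothing. As a rough illustration of how such a wrapper is typically used (the function name and the noncoherent flag below are made up for the example, and dma_cache_sync() is the old non-coherent sync interface this series ultimately retires):

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Illustrative sketch only: write back a CPU-filled control block so a
 * non-coherent device sees the data before it is told to fetch it. */
static void example_publish_to_device(struct device *dev, bool noncoherent,
                                      void *buf, size_t len)
{
        /* Coherent platforms need no cache maintenance; non-coherent ones
         * must flush the dirty lines in the DMA_TO_DEVICE direction. */
        if (noncoherent)
                dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

        /* ...ring the device's doorbell here... */
}
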
2020 Aug 19
0
[PATCH 07/28] 53c700: improve non-coherent DMA handling
...rs/scsi/53c700.c @@ -269,6 +269,20 @@ NCR_700_get_SXFER(struct scsi_device *SDp) spi_period(SDp->sdev_target)); } +static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_cache_sync(h->dev, addr, size, DMA_TO_DEVICE); +} + +static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_cache_sync(h->dev, addr, size, DMA_FROM_DEVICE); +} + struct Scsi_Host * NCR_700_detect(struct scsi_host_template *tpnt, struct NCR_700_Host_P...
2023 May 17
2
[PATCH vhost v9 01/12] virtio_ring: put mapping error check in vring_map_one_sg
...+ return 0; } static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, @@ -588,8 +593,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { - dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); - if (vring_mapping_error(vq, addr)) + dma_addr_t addr; + + if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr)) goto unmap_release; prev = i; @@ -603,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, } for (; n < (out_sgs + in_sgs); n++) { for...
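
The change here is purely one of calling convention: vring_map_one_sg() used to return a dma_addr_t that the caller checked with vring_mapping_error(), and now it returns an error code and hands the address back through a pointer. A minimal sketch of that pattern against the generic DMA API (the helper name and the bare struct device are assumptions for the example):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only: map one scatterlist entry, report failure via the
 * return value and pass the bus address out through a parameter. */
static int example_map_one_sg(struct device *dev, struct scatterlist *sg,
                              enum dma_data_direction dir, dma_addr_t *addr)
{
        *addr = dma_map_page(dev, sg_page(sg), sg->offset, sg->length, dir);
        if (dma_mapping_error(dev, *addr))
                return -ENOMEM;

        return 0;
}

Returning an int keeps the mapping-error check inside the helper instead of repeating it at every call site.
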
2020 Sep 08
2
[PATCH] drm/virtio: drop quirks handling
...int si, ret; @@ -162,15 +161,11 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, return -EINVAL; } - if (use_dma_api) { - shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, - shmem->pages->nents, - DMA_TO_DEVICE); - *nents = shmem->mapped; - } else { - *nents = shmem->pages->nents; - } + shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, + shmem->pages->sgl, + shmem->pages->nents, + DMA_TO_DEVICE); + *nents = shmem->mapped; *ents = kmalloc_array(*ne...
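
With the quirk gone the object's scatterlist is always pushed through the DMA API. A stripped-down sketch of that dma_map_sg()/dma_unmap_sg() pairing, with invented helper names and a plain sg_table standing in for the virtio-gpu object:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only: map a whole sg_table for device reads and report
 * how many entries the mapping was coalesced into. */
static int example_map_pages(struct device *dev, struct sg_table *sgt,
                             unsigned int *mapped_nents)
{
        int n = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);

        if (!n)                 /* dma_map_sg() returns 0 on failure */
                return -ENOMEM;

        *mapped_nents = n;      /* may be fewer than sgt->nents with an IOMMU */
        return 0;
}

static void example_unmap_pages(struct device *dev, struct sg_table *sgt)
{
        /* unmap with the original nents, not the coalesced count */
        dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
}
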
2020 Sep 15
0
[PATCH 07/18] 53c700: improve non-coherent DMA handling
...rs/scsi/53c700.c @@ -269,6 +269,20 @@ NCR_700_get_SXFER(struct scsi_device *SDp) spi_period(SDp->sdev_target)); } +static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_cache_sync(h->dev, addr, size, DMA_TO_DEVICE); +} + +static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h, + void *addr, size_t size) +{ + if (h->noncoherent) + dma_cache_sync(h->dev, addr, size, DMA_FROM_DEVICE); +} + struct Scsi_Host * NCR_700_detect(struct scsi_host_template *tpnt, struct NCR_700_Host_P...
2017 Feb 08
0
FW: Question about /patch/9251925/
...packet Look forward to your reply. Thanks, Jason BTW: *1. Intel Ethernet Controller XL710(40G) driver:* TX: i40e_lan_xmit_frame ->i40e_xmit_frame_ring() ->i40e_tx_map() { ...... dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); ...... dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); ...... } i40e_clean_tx_irq() { ...... /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf,...
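
The driver path being asked about follows the standard TX mapping pattern: the skb's linear data is mapped with dma_map_single() and each page fragment with skb_frag_dma_map(), both DMA_TO_DEVICE, and the completion path undoes the mappings. A reduced sketch of that pattern, not the actual i40e code, handling just the head and the first fragment:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Illustrative sketch only: map an skb's linear area and its first page
 * fragment for transmit; the real driver loops over all fragments. */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *head_dma, dma_addr_t *frag_dma)
{
        const skb_frag_t *frag;

        *head_dma = dma_map_single(dev, skb->data, skb_headlen(skb),
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *head_dma))
                return -ENOMEM;

        if (!skb_shinfo(skb)->nr_frags)
                return 0;

        frag = &skb_shinfo(skb)->frags[0];
        *frag_dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *frag_dma)) {
                dma_unmap_single(dev, *head_dma, skb_headlen(skb),
                                 DMA_TO_DEVICE);
                return -ENOMEM;
        }

        return 0;
}
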
2013 Oct 25
2
[PATCH] vhost/scsi: Fix incorrect usage of get_user_pages_fast write parameter
From: Nicholas Bellinger <nab at linux-iscsi.org> This patch addresses a long-standing bug where the get_user_pages_fast() write parameter used for setting the underlying page table entry permission bits was incorrectly set to write=1 for data_direction=DMA_TO_DEVICE, and passed into get_user_pages_fast() via vhost_scsi_map_iov_to_sgl(). However, this parameter is intended to signal WRITEs to pinned userspace PTEs for the virtio-scsi DMA_FROM_DEVICE -> READ payload case, and *not* for the virtio-scsi DMA_TO_DEVICE -> WRITE payload case. This bug would m...
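
The rule the fix encodes: the pinned pages only need writable PTEs when the device will store into them, i.e. for DMA_FROM_DEVICE (a virtio-scsi READ payload), not for DMA_TO_DEVICE (a WRITE payload), where the device only reads guest memory. A hedged sketch of that mapping, using the write-as-int form of get_user_pages_fast() that kernels of that era had (newer kernels take FOLL_WRITE in a gup_flags argument):

#include <linux/dma-direction.h>
#include <linux/mm.h>

/* Illustrative sketch only: request writable user PTEs only when the DMA
 * engine will write into the pinned pages (DMA_FROM_DEVICE, i.e. a READ). */
static int example_pin_payload(unsigned long uaddr, int nr_pages,
                               enum dma_data_direction dir,
                               struct page **pages)
{
        int write = (dir == DMA_FROM_DEVICE);

        return get_user_pages_fast(uaddr, nr_pages, write, pages);
}
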
2020 Sep 08
0
[PATCH] drm/virtio: drop quirks handling
...atic int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, > return -EINVAL; > } > > - if (use_dma_api) { > - shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, > - shmem->pages->sgl, > - shmem->pages->nents, > - DMA_TO_DEVICE); > - *nents = shmem->mapped; > - } else { > - *nents = shmem->pages->nents; > - } > + shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, > + shmem->pages->sgl, > + shmem->pages->nents, > + DMA_TO_DEVICE); > + *nents = sh...
2020 Sep 15
0
[PATCH 09/18] sgiwd93: convert to dma_alloc_noncoherent
Use the new non-coherent DMA API including proper ownership transfers. This also means we can allocate the memory as DMA_TO_DEVICE instead of bidirectional. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/scsi/sgiwd93.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index 3bdf0deb8f1529..cf1030c9dda17f 100644 --- a/drivers/...
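
The pattern this converts to, roughly: allocate the buffer with dma_alloc_noncoherent() for a single direction and use the dma_sync_single_*() calls to hand ownership between CPU and device. A minimal sketch with made-up function names (dma_alloc_noncoherent() here is the direction-plus-gfp variant this series introduces):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Illustrative sketch only: allocate a device-readable buffer, fill it on the
 * CPU, then transfer ownership to the device before starting the transfer. */
static void *example_alloc_and_publish(struct device *dev, size_t size,
                                       dma_addr_t *dma, const void *src)
{
        void *buf = dma_alloc_noncoherent(dev, size, dma, DMA_TO_DEVICE,
                                          GFP_KERNEL);
        if (!buf)
                return NULL;

        memcpy(buf, src, size);                 /* CPU owns the buffer here */
        dma_sync_single_for_device(dev, *dma, size, DMA_TO_DEVICE);

        return buf;                             /* device may now read it */
}
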
2020 Aug 19
0
[PATCH 28/28] nvme-pci: use dma_alloc_pages backed dmapools
...->npages = 1; } @@ -630,6 +630,11 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, for (;;) { if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; + + dma_sync_single_for_device(dev->dev, prp_dma, + i * sizeof(*prp_list), + DMA_TO_DEVICE); + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) return BLK_STS_RESOURCE; @@ -653,6 +658,8 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, dma_len = sg_dma_len(sg); } + dma_sync_single_for_device(dev->dev, prp_dma, i * sizeof(*prp...
2023 Mar 02
1
[PATCH vhost v1 02/12] virtio_ring: split: separate DMA codes
...struct scatterlist *sgs[], + unsigned int total_sg, + unsigned int out_sgs, + unsigned int in_sgs) +{ + struct scatterlist *sg; + unsigned int n; + + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); + + if (vring_mapping_error(vq, addr)) + return -ENOMEM; + + sg->dma_address = addr; + } + } + + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); + + if (vring_mapping_error(vq, addr...
2020 Sep 15
0
[PATCH 10/18] hal2: convert to dma_alloc_noncoherent
...codec->buffer_dma = buffer_dma; @@ -470,20 +471,22 @@ static int hal2_alloc_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *codec) desc_dma : desc_dma + (i + 1) * sizeof(struct hal2_desc); desc++; } - dma_cache_sync(dev, codec->desc, count * sizeof(struct hal2_desc), - DMA_TO_DEVICE); + dma_sync_single_for_device(dev, codec->desc_dma, + count * sizeof(struct hal2_desc), + DMA_BIDIRECTIONAL); codec->desc_count = count; return 0; } -static void hal2_free_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *codec) +static void hal2_free_dmabuf(struct snd_hal2...
2020 Sep 14
0
[PATCH 07/17] 53c700: improve non-coherent DMA handling
...meters { > for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); > i++) { \ > __u32 val = > bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \ > (script)[A_##symbol##_used[i]] = bS_to_host(val); \ > - dma_cache_sync((dev), > &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ > + dma_sync_to_dev((dev), > &(script)[A_##symbol##_used[i]], 4); \ > DEBUG((" script, patching %s at %d to %pad\n", \ > #symbol, A_##symbol##_used[i], &da)); \ > } \ > @@ -441,7 +442,7 @@ struct NCR_700_Host_Parameters { > dma_addr_t da...
2020 Sep 02
1
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...->dev.parent, VIRT_TO_DMA(sp, addr), + sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_TO_DEVICE); + struct sgiseeq_private *sp = netdev_priv(dev); + + dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr), + sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } -- Crap can work. Given enough thrust pigs will fly, but it's not necessarily a...
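
The detail worth noting in conversions like this one: dma_cache_sync() took a kernel virtual address, while dma_sync_single_for_device() takes the dma_addr_t of the same buffer, which is why the patch has to translate with the driver's VIRT_TO_DMA() first. Reduced to its core (names assumed for the example):

#include <linux/dma-mapping.h>

/* Illustrative sketch only: give a CPU-written descriptor back to the device;
 * the caller must supply the bus address, not the kernel virtual address. */
static void example_sync_desc_for_device(struct device *dev,
                                         dma_addr_t desc_dma, size_t len)
{
        dma_sync_single_for_device(dev, desc_dma, len, DMA_TO_DEVICE);
}
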
2023 Feb 20
2
[PATCH vhost 01/10] virtio_ring: split: refactor virtqueue_add_split() for premapped
...dx; > - int head; > - bool indirect; > + unsigned int n; > > - START_USE(vq); > + for (n = 0; n < out_sgs; n++) { > + for (sg = sgs[n]; sg; sg = sg_next(sg)) { > + dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); > + > + if (vring_mapping_error(vq, addr)) > + return -ENOMEM; > + > + sg->dma_address = addr; > + } > + } > + for (; n < (out_sgs + in_sgs); n++) { > +...
2018 Dec 07
0
[RFC 3/3] virtio_ring: use new vring flags
...BIT(VRING_SPLIT_DESC_F_INDIRECT)) { dma_unmap_single(vring_dma_dev(vq), virtio64_to_cpu(vq->vq.vdev, desc->addr), virtio32_to_cpu(vq->vq.vdev, desc->len), - (flags & VRING_DESC_F_WRITE) ? + (flags & BIT(VRING_SPLIT_DESC_F_WRITE)) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { dma_unmap_page(vring_dma_dev(vq), virtio64_to_cpu(vq->vq.vdev, desc->addr), virtio32_to_cpu(vq->vq.vdev, desc->len), - (flags & VRING_DESC_F_WRITE) ? + (flags & BIT(VRING_SPLIT_DESC_F_WRITE)) ? DMA_FROM_DEVICE :...
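
Only the flag spelling changes in this RFC; the underlying rule stays the same: a descriptor the device may write into was mapped DMA_FROM_DEVICE, everything else DMA_TO_DEVICE. A sketch of that selection using the existing uapi mask rather than the RFC's BIT(VRING_SPLIT_DESC_F_WRITE) form:

#include <linux/dma-direction.h>
#include <linux/types.h>
#include <linux/virtio_ring.h>

/* Illustrative sketch only: choose the unmap direction from the descriptor
 * flags; VRING_DESC_F_WRITE marks buffers the device writes into. */
static enum dma_data_direction example_desc_dir(u16 flags)
{
        return (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
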
2023 Mar 21
1
[PATCH vhost v3 01/11] virtio_ring: split: separate dma codes
...+{ + struct scatterlist *sg; + unsigned int n; + + if (!vq->use_dma_api) + return; + + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + if (!sg->dma_address) + return; + + dma_unmap_page(vring_dma_dev(vq), sg->dma_address, + sg->length, DMA_TO_DEVICE); + } + } + + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + if (!sg->dma_address) + return; + + dma_unmap_page(vring_dma_dev(vq), sg->dma_address, + sg->length, DMA_FROM_DEVICE); + } + } +} + +static int virtqueue_map_sgs(struct...