search for: dma_bidirectional

Displaying 20 results from an estimated 140 matches for "dma_bidirectional".

2014 Jul 31
2
[PATCH v5] drm/nouveau: map pages using DMA API
...ulate(struct ttm_tt *ttm)
 }

     for (i = 0; i < ttm->num_pages; i++) {
-        ttm_dma->dma_address[i] = nv_device_map_page(device,
-                                                      ttm->pages[i]);
-        if (!ttm_dma->dma_address[i]) {
+        dma_addr_t addr;
+
+        addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+                            DMA_BIDIRECTIONAL);
+
+        if (dma_mapping_error(pdev, addr)) {
             while (--i) {
-                nv_device_unmap_page(device,
-                                     ttm_dma->dma_address[i]);
+                dma_unmap_page(pdev, ttm_dma->dma_address[i],
+                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                 ttm_dma->dma_address[i] = 0;
             }
             ttm_pool_unpopula...
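The pattern quoted here — map each page with dma_map_page(), test the result with dma_mapping_error(), and unwind every earlier mapping on failure — is the standard streaming-DMA idiom. A minimal sketch of the same idiom against a generic struct device (map_pages_bidir() and the pages[]/addrs[] arrays are hypothetical, not nouveau's actual TTM structures; note the i-- unwind form, which also releases index 0):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical helper: map 'count' pages for streaming DMA and
 * undo all earlier mappings if any single dma_map_page() fails. */
static int map_pages_bidir(struct device *dev, struct page **pages,
                           dma_addr_t *addrs, unsigned int count)
{
    unsigned int i;

    for (i = 0; i < count; i++) {
        addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, addrs[i])) {
            while (i--) /* post-decrement also unwinds index 0 */
                dma_unmap_page(dev, addrs[i], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
            return -ENOMEM;
        }
    }
    return 0;
}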
2014 Aug 04
0
[PATCH v5] drm/nouveau: map pages using DMA API
...or (i = 0; i < ttm->num_pages; i++) {
> -        ttm_dma->dma_address[i] = nv_device_map_page(device,
> -                                                      ttm->pages[i]);
> -        if (!ttm_dma->dma_address[i]) {
> +        dma_addr_t addr;
> +
> +        addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
> +                            DMA_BIDIRECTIONAL);
> +
> +        if (dma_mapping_error(pdev, addr)) {
>              while (--i) {
> -                nv_device_unmap_page(device,
> -                                     ttm_dma->dma_address[i]);
> +                dma_unmap_page(pdev, ttm_dma->dma_address[i],
> +                               PAGE_SIZE, DMA_BIDIRECTIONAL);
>              ttm_dma->dma_add...
2020 Sep 15
0
[PATCH 12/18] sgiseeq: convert to dma_alloc_noncoherent
...u(struct net_device *dev, void *addr)
 {
-    dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
-                   DMA_FROM_DEVICE);
+    struct sgiseeq_private *sp = netdev_priv(dev);
+
+    dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+            sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
 }

 static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
 {
-    dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
-                   DMA_TO_DEVICE);
+    struct sgiseeq_private *sp = netdev_priv(dev);
+
+    dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp...
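The conversion pairs dma_sync_single_for_cpu() before CPU accesses with dma_sync_single_for_device() after them, and uses DMA_BIDIRECTIONAL because both sides write the descriptor. A minimal sketch of that pairing for a generic shared descriptor (desc_dma and the helper names are hypothetical):

#include <linux/dma-mapping.h>

/* Hand a shared descriptor back and forth on a non-coherent bus.
 * desc_dma is the DMA handle the descriptor lives at. */
static void desc_sync_for_cpu(struct device *dev, dma_addr_t desc_dma,
                              size_t len)
{
    /* Invalidate stale cache lines before the CPU reads. */
    dma_sync_single_for_cpu(dev, desc_dma, len, DMA_BIDIRECTIONAL);
}

static void desc_sync_for_dev(struct device *dev, dma_addr_t desc_dma,
                              size_t len)
{
    /* Write back CPU stores so the device sees them. */
    dma_sync_single_for_device(dev, desc_dma, len, DMA_BIDIRECTIONAL);
}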
2020 Sep 01
2
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...r)
> {
> -    dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> -                   DMA_FROM_DEVICE);
> +    struct sgiseeq_private *sp = netdev_priv(dev);
> +
> +    dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
> +            sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
> }
>
> static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
> {
> -    dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> -                   DMA_TO_DEVICE);
> +    struct sgiseeq_private *sp = netdev_priv(dev);
> +
> +    dma_sync_single_...
2020 Sep 14
2
[PATCH 11/17] sgiseeq: convert to dma_alloc_noncoherent
...>dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
-
-#define DMA_INV(ndev, addr, len) \
-    do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
-
-#define DMA_WBACK_INV(ndev, addr, len) \
-    do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
-
 #define SYSBUS      0x0000006c

 /* big endian CPU, 82596 "big" endian mode */
 #define SWAP32(x)   (((u32)(x)<<16) | ((((u32)(x)))>>16))
 #define SWAP16(x)   (x)

+#define NONCOHERENT_DMA 1
+
 #include "lib82596.c"

 MODULE_AUTHOR("Richard H...
2020 Aug 19
0
[PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc, free}_pages
....sversion == 0x72 ? OPT_SWAP_PORT : 0;
-    lp->dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
-                              &lp->dma_addr, GFP_KERNEL,
-                              DMA_ATTR_NON_CONSISTENT);
+    lp->dma = dma_alloc_pages(dev->dev.parent, sizeof(struct i596_dma),
+                              &lp->dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
     if (!lp->dma)
         goto out_free_netdev;

@@ -196,8 +195,8 @@ lan_init_chip(struct parisc_device *dev)
     return 0;

 out_free_dma:
-    dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
-                   lp->dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+    dma_free_pages(dev->...
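Note that in the dma_alloc_pages() interface as eventually merged, the function returns a struct page * rather than a kernel virtual address; the excerpt shows an earlier revision of the series. A minimal sketch against the merged signature (alloc_dma_buf()/free_dma_buf() are hypothetical wrappers):

#include <linux/dma-mapping.h>

/* Allocate a CPU-cached DMA buffer; callers must bracket device
 * access with dma_sync_single_for_{cpu,device}(). */
static struct page *alloc_dma_buf(struct device *dev, size_t size,
                                  dma_addr_t *handle)
{
    return dma_alloc_pages(dev, size, handle, DMA_BIDIRECTIONAL,
                           GFP_KERNEL);
}

static void free_dma_buf(struct device *dev, size_t size,
                         struct page *page, dma_addr_t handle)
{
    dma_free_pages(dev, size, page, handle, DMA_BIDIRECTIONAL);
}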
2020 Sep 15
0
[PATCH 13/18] 53c700: convert to dma_alloc_noncoherent
..._t)p - (uintptr_t)h->script);
+}
+
 static inline void
 dma_sync_to_dev(struct NCR_700_Host_Parameters *h, void *addr, size_t size)
 {
     if (h->noncoherent)
-        dma_cache_sync(h->dev, addr, size, DMA_TO_DEVICE);
+        dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
+                                   size, DMA_BIDIRECTIONAL);
 }

 static inline void
 dma_sync_from_dev(struct NCR_700_Host_Parameters *h, void *addr, size_t size)
 {
     if (h->noncoherent)
-        dma_cache_sync(h->dev, addr, size, DMA_FROM_DEVICE);
+        dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), size,
+                                   DMA_BIDIRECTIONAL);
 }...
2016 Jun 21
1
[RFC PATCH v2] drm/nouveau/fb/nv50: set DMA mask before mapping scratch page
...ch may cause
+         * problems on systems with no RAM below the 4 GB mark. So set
+         * the streaming DMA mask here as well.
+         */
+        dma_set_mask(device->dev, DMA_BIT_MASK(device->mmu->dma_bits));
+
+        fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+                                   PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(device->dev, fb->r100c08)) {
+            nvkm_warn(&fb->base.subdev,
+                      "dma_map_page() failed on 100c08 page\n");
+        }
+    }
+
     /* Not a clue what this is exactly. Without pointing it at a
      * scratch page, VRAM->GART blits with M2MF (as in DDX DFS...
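The ordering is the whole point of the patch: dma_set_mask() must run before the first dma_map_page(), or the scratch page may land at an address the device cannot reach. A minimal sketch of the pattern with a hypothetical 40-bit device (map_scratch() and the mask width are illustrative):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Set the streaming DMA mask, then map a scratch page. The 40-bit
 * mask is an assumed example value. */
static dma_addr_t map_scratch(struct device *dev, struct page *scratch)
{
    dma_addr_t addr;

    if (dma_set_mask(dev, DMA_BIT_MASK(40)))
        return DMA_MAPPING_ERROR;

    addr = dma_map_page(dev, scratch, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    return addr;    /* caller checks dma_mapping_error() */
}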
2016 Sep 26
6
[PATCH v4 0/3] drm/nouveau: set DMA mask before mapping scratch page
This v4 is now a three-piece series, after Alexandre pointed out that both GF100 and NV50 are affected by the same issue, and that a related issue has already been solved for Tegra in commit 9d0394c6bed5 ("drm/nouveau/instmem/gk20a: set DMA mask early"). The issue this series addresses is that the Nouveau driver invokes the DMA API before setting the DMA mask. In both cases...
2016 Sep 26
0
[PATCH v4 2/3] drm/nouveau/fb/gf100: defer DMA mapping of scratch page to init() hook
...3,7 +93,18 @@ gf100_fb_init(struct nvkm_fb *base)
     struct gf100_fb *fb = gf100_fb(base);
     struct nvkm_device *device = fb->base.subdev.device;

-    if (fb->r100c10_page)
+    if (!fb->r100c10) {
+        dma_addr_t addr = dma_map_page(device->dev, fb->r100c10_page, 0,
+                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (!dma_mapping_error(device->dev, addr)) {
+            fb->r100c10 = addr;
+        } else {
+            nvkm_warn(&fb->base.subdev,
+                      "dma_map_page() failed on 100c10 page\n");
+        }
+    }
+
+    if (fb->r100c10)
         nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
 }

@@ -103,12 +...
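Because the init() hook can run more than once, the deferred mapping has to be idempotent: map only when the handle is still unset, and leave it unset on failure so a later init can retry. A minimal sketch of that lazy pattern with hypothetical names (init_scratch(), scratch_page, scratch_dma):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Lazily map a scratch page the first time init runs. */
static void init_scratch(struct device *dev, struct page *scratch_page,
                         dma_addr_t *scratch_dma)
{
    if (*scratch_dma)   /* already mapped by an earlier init */
        return;

    *scratch_dma = dma_map_page(dev, scratch_page, 0, PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, *scratch_dma))
        *scratch_dma = 0;   /* leave unset; retry on next init */
}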
2017 Aug 18
0
[PATCH] drm/nouveau: use new TTM populate/DMA map function
...!= tt_unpopulated)
@@ -1480,30 +1478,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
     }
 #endif

-    r = ttm_pool_populate(ttm);
-    if (r) {
-        return r;
-    }
-
-    for (i = 0; i < ttm->num_pages; i++) {
-        dma_addr_t addr;
-
-        addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
-                            DMA_BIDIRECTIONAL);
-
-        if (dma_mapping_error(pdev, addr)) {
-            while (i--) {
-                dma_unmap_page(pdev, ttm_dma->dma_address[i],
-                               PAGE_SIZE, DMA_BIDIRECTIONAL);
-                ttm_dma->dma_address[i] = 0;
-            }
-            ttm_pool_unpopulate(ttm);
-            return -EFAULT;
-        }
-
-        ttm_dma->dma_address[i] = addr;
-...
2016 Sep 26
0
[PATCH v4 3/3] drm/nouveau/fb/nv50: defer DMA mapping of scratch page to init() hook
...nvkm/subdev/fb/nv50.c
@@ -216,11 +216,23 @@ nv50_fb_init(struct nvkm_fb *base)
     struct nv50_fb *fb = nv50_fb(base);
     struct nvkm_device *device = fb->base.subdev.device;

+    if (!fb->r100c08) {
+        dma_addr_t addr = dma_map_page(device->dev, fb->r100c08_page, 0,
+                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (!dma_mapping_error(device->dev, addr)) {
+            fb->r100c08 = addr;
+        } else {
+            nvkm_warn(&fb->base.subdev,
+                      "dma_map_page() failed on 100c08 page\n");
+        }
+    }
+
     /* Not a clue what this is exactly. Without pointing it at a
      * scratch page, VRAM->GART...
2016 Jul 07
3
[PATCH v3] drm/nouveau/fb/nv50: set DMA mask before mapping scratch page
...+ * problems on systems with no RAM below the 4 GB mark. So set
+         * the streaming DMA mask here as well.
+         */
+        dma_addr_t addr;
+
+        dma_set_mask(device->dev, DMA_BIT_MASK(device->mmu->dma_bits));
+
+        addr = dma_map_page(device->dev, fb->r100c08_page, 0, PAGE_SIZE,
+                            DMA_BIDIRECTIONAL);
+        if (!dma_mapping_error(device->dev, addr)) {
+            fb->r100c08 = addr;
+        } else {
+            nvkm_warn(&fb->base.subdev,
+                      "dma_map_page() failed on 100c08 page\n");
+        }
+    }
+
     /* Not a clue what this is exactly. Without pointing it at a
      * scratch page, VRAM->GART...
2023 Feb 20
1
[PATCH vhost 08/10] virtio_ring: introduce dma sync api for virtio
...+                                  enum dma_data_direction dir)
> > +{
> > +    struct virtio_device *vdev = dev_to_virtio(dev);
> > +
> > +    dma_sync_single_range_for_cpu(vdev->dev.parent, addr, offset,
> > +                                  size, DMA_BIDIRECTIONAL);
> > +}
> > +EXPORT_SYMBOL_GPL(virtio_dma_sync_single_range_for_cpu);
> > +
> > +/**
> > + * virtio_dma_sync_single_range_for_device - dma sync for device
> > + * @dev: virtio device
> > + * @addr: DMA address
> > + * @offset: DMA address offset
> ...
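The excerpt cuts off in the kernel-doc for the _for_device counterpart; presumably it mirrors the quoted _for_cpu helper with the sync direction flipped. A sketch under that assumption (the body is inferred, not shown in the quote; like the quoted helper, it accepts a dir argument but passes DMA_BIDIRECTIONAL through):

/* Inferred counterpart of the quoted helper: flush CPU writes in the
 * given range out to the device behind the virtio transport. */
static void virtio_dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
    struct virtio_device *vdev = dev_to_virtio(dev);

    dma_sync_single_range_for_device(vdev->dev.parent, addr, offset,
                                     size, DMA_BIDIRECTIONAL);
}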
2020 Sep 15
0
[PATCH 10/18] hal2: convert to dma_alloc_noncoherent
...er_dma,
+                      buffer_dir, GFP_KERNEL);
     if (!codec->buffer)
         return -ENOMEM;
-    desc = dma_alloc_attrs(dev, count * sizeof(struct hal2_desc),
-                           &desc_dma, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+    desc = dma_alloc_noncoherent(dev, count * sizeof(struct hal2_desc),
+                                 &desc_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
     if (!desc) {
-        dma_free_attrs(dev, H2_BUF_SIZE, codec->buffer, buffer_dma,
-                       DMA_ATTR_NON_CONSISTENT);
+        dma_free_noncoherent(dev, H2_BUF_SIZE, codec->buffer, buffer_dma,
+                             buffer_dir);
         return -ENOMEM;
     }
     codec->buffer_dma = buffer_dma;
@@ -470,20 +471,2...
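dma_alloc_noncoherent() returns a kernel virtual address plus a DMA handle and takes the transfer direction at allocation time; the matching free must pass the same size, handle, and direction. A minimal sketch for a descriptor ring (struct my_desc and the wrapper names are hypothetical):

#include <linux/types.h>
#include <linux/dma-mapping.h>

struct my_desc { u32 ctrl; u32 buf; };    /* illustrative layout */

static struct my_desc *alloc_ring(struct device *dev, int count,
                                  dma_addr_t *handle)
{
    return dma_alloc_noncoherent(dev, count * sizeof(struct my_desc),
                                 handle, DMA_BIDIRECTIONAL, GFP_KERNEL);
}

static void free_ring(struct device *dev, int count,
                      struct my_desc *ring, dma_addr_t handle)
{
    dma_free_noncoherent(dev, count * sizeof(struct my_desc), ring,
                         handle, DMA_BIDIRECTIONAL);
}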
2020 Sep 15
0
[PATCH 11/18] lib82596: convert to dma_alloc_noncoherent
...>dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
-
-#define DMA_INV(ndev, addr, len) \
-    do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
-
-#define DMA_WBACK_INV(ndev, addr, len) \
-    do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
-
 #define SYSBUS      0x0000006c

 /* big endian CPU, 82596 "big" endian mode */
 #define SWAP32(x)   (((u32)(x)<<16) | ((((u32)(x)))>>16))
 #define SWAP16(x)   (x)

+#define NONCOHERENT_DMA 1
+
 #include "lib82596.c"

 MODULE_AUTHOR("Richard H...
2014 Jul 10
3
[PATCH v4 2/6] drm/nouveau: map pages using DMA API on platform devices
...89,10 @@ nv_device_map_page(struct nouveau_device *device, struct page *page)
>          if (pci_dma_mapping_error(device->pdev, ret))
>              ret = 0;
>      } else {
> -        ret = page_to_phys(page);
> +        ret = dma_map_page(&device->platformdev->dev, page, 0,
> +                           PAGE_SIZE, DMA_BIDIRECTIONAL);
> +        if (dma_mapping_error(&device->platformdev->dev, ret))
> +            ret = 0;
>      }
>
>      return ret;
> @@ -501,6 +504,9 @@ nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
>      if (nv_device_is_pci(device))
>          pci_unmap_page(device->pdev,...
2019 Aug 08
0
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...continue;
-
-        spage = migrate_pfn_to_page(src_pfns[i]);
-        if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
-            dst_pfns[i] = MIGRATE_PFN_ERROR;
-            __free_page(dpage);
-            continue;
-        }
-
-        fault->dma[fault->npages] =
-            dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL,
-                               DMA_ATTR_SKIP_CPU_SYNC);
-        if (dma_mapping_error(dev, fault->dma[fault->npages])) {
-            dst_pfns[i] = MIGRATE_PFN_ERROR;
-            __free_page(dpage);
-            continue;
-        }
+    *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+    if (dma_mapping_error(dev, *dma_addr))
+        g...
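The simplification replaces dma_map_page_attrs(..., PCI_DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC) with a plain dma_map_page(): skipping the CPU sync was only safe because the destination page is immediately overwritten by the device-side copy, and the plain call's sync is normally a no-op anyway. A minimal sketch of the new shape (map_dst_page() is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map a freshly allocated destination page for the copy engine. */
static int map_dst_page(struct device *dev, struct page *dpage,
                        dma_addr_t *dma_addr)
{
    *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE,
                             DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, *dma_addr))
        return -EFAULT;    /* caller frees dpage and reports the error */
    return 0;
}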
2019 Jul 29
0
[PATCH 5/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...continue;
-
-        spage = migrate_pfn_to_page(src_pfns[i]);
-        if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
-            dst_pfns[i] = MIGRATE_PFN_ERROR;
-            __free_page(dpage);
-            continue;
-        }
-
-        fault->dma[fault->npages] =
-            dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL,
-                               DMA_ATTR_SKIP_CPU_SYNC);
-        if (dma_mapping_error(dev, fault->dma[fault->npages])) {
-            dst_pfns[i] = MIGRATE_PFN_ERROR;
-            __free_page(dpage);
-            continue;
-        }
-
-        ret = copy(drm, 1, NOUVEAU_APER_HOST,
-                   fault->dma[fault->npages++],
-                   NOUVEAU_APER_VRAM,
-                   nouvea...
2019 Aug 08
1
[PATCH 6/9] nouveau: simplify nouveau_dmem_migrate_to_ram
...rc_pfns[i]);
> -        if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
> -            dst_pfns[i] = MIGRATE_PFN_ERROR;
> -            __free_page(dpage);
> -            continue;
> -        }
> -
> -        fault->dma[fault->npages] =
> -            dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
> -                               PCI_DMA_BIDIRECTIONAL,
> -                               DMA_ATTR_SKIP_CPU_SYNC);
> -        if (dma_mapping_error(dev, fault->dma[fault->npages])) {
> -            dst_pfns[i] = MIGRATE_PFN_ERROR;
> -            __free_page(dpage);
> -            continue;
> -        }
> +    *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
> +    if...