search for: arch_dma_cache_sync

Displaying 7 results from an estimated 7 matches for "arch_dma_cache_sync".

2020 Sep 15
0
[PATCH 14/18] dma-mapping: remove dma_cache_sync
...- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -620,7 +620,6 @@ const struct dma_map_ops jazz_dma_ops = {
 	.sync_single_for_device	= jazz_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
-	.cache_sync		= arch_dma_cache_sync,
 	.mmap			= dma_common_mmap,
 	.get_sgtable		= dma_common_get_sgtable,
 };
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 97a14adbafc99c..f34ad1f09799f1 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -137,12 +137,6 @@ void ar...
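For context: dma_cache_sync() was the driver-visible call backed by the .cache_sync op deleted above. Once it is gone, a driver that owns a non-coherent buffer is expected to use the ordinary streaming sync calls on the dma_addr_t instead. A minimal sketch of that conversion, with hypothetical dev/buf/handle/len driver state (not code from the patch):

	static void fill_buffer_and_kick_device(struct device *dev, void *buf,
			dma_addr_t handle, size_t len)
	{
		memset(buf, 0, len);		/* CPU writes the buffer */

		/* Previously: dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
		 * now the streaming API makes the writes visible to the device: */
		dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
	}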
2020 Aug 19
0
[PATCH 10/28] MIPS/jazzdma: decouple from dma-direct
...truct page *page,
@@ -608,7 +619,6 @@ const struct dma_map_ops jazz_dma_ops = {
 	.sync_single_for_device	= jazz_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
-	.dma_supported		= dma_direct_supported,
 	.cache_sync		= arch_dma_cache_sync,
 	.mmap			= dma_common_mmap,
 	.get_sgtable		= dma_common_get_sgtable,
--
2.28.0
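Dropping the .dma_supported line works because the DMA core treats a NULL callback as "no restriction", so jazzdma no longer has to borrow dma_direct_supported just to accept a mask. A simplified model of that fallback (an approximation, not the exact code in kernel/dma/mapping.c):

	static int dma_supported_sketch(struct device *dev,
			const struct dma_map_ops *ops, u64 mask)
	{
		if (!ops->dma_supported)
			return 1;	/* no callback: any mask is accepted */
		return ops->dma_supported(dev, mask);
	}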
2020 Aug 19
0
[PATCH 08/28] MIPS: make dma_sync_*_for_cpu a little less overzealous
...rue);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
@@ -119,16 +133,14 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	if (cpu_needs_post_dma_flush())
-		dma_sync_phys(paddr, size, dir);
+		dma_sync_phys(paddr, size, dir, false);
 }
 #endif
 
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
-	dma_sync_virt(vaddr, size, direction);
+	dma_sync_virt_for_device(vaddr, size, direction);
 }
 
 #ifdef CONFIG_DMA_PERDEV_COHERENT
--
2.28.0
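The hunk captures the point of the patch: dma_sync_phys() grows a for_device flag, and the CPU side of a sync stops writing caches back unconditionally. A rough model of the split, assuming typical MIPS write-back cache semantics (the *_cache_range helpers are placeholders standing in for the arch's real cache ops, not actual kernel functions):

	/* Placeholder cache primitives (assumptions, not the real r4k ops): */
	void wb_cache_range(void *addr, size_t size);		/* write back */
	void wb_inv_cache_range(void *addr, size_t size);	/* write back + invalidate */
	void inv_cache_range(void *addr, size_t size);		/* invalidate only */

	static void dma_sync_sketch(void *vaddr, size_t size,
			enum dma_data_direction dir, bool for_device)
	{
		if (for_device) {
			/* before DMA: push CPU writes out to memory */
			if (dir == DMA_TO_DEVICE)
				wb_cache_range(vaddr, size);
			else
				wb_inv_cache_range(vaddr, size);
		} else if (dir != DMA_TO_DEVICE) {
			/* after DMA: only discard possibly stale lines; the
			 * unconditional writeback was the overzealous part */
			inv_cache_range(vaddr, size);
		}
	}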
2020 Sep 14
20
a saner API for allocating DMA addressable pages v2
Hi all, this series replaces the DMA_ATTR_NON_CONSISTENT flag passed to dma_alloc_attrs with a separate new dma_alloc_pages API, which is available on all platforms. In addition to cleaning up the convoluted code path, this ensures that other drivers that have asked for better support for non-coherent DMA to pages without incurring bounce buffering can finally be properly supported. I'm still a
2020 Sep 15
32
a saner API for allocating DMA addressable pages v3
Hi all, this series replaces the DMA_ATTR_NON_CONSISTENT flag passed to dma_alloc_attrs with a separate new dma_alloc_pages API, which is available on all platforms. In addition to cleaning up the convoluted code path, this ensures that other drivers that have asked for better support for non-coherent DMA to pages without incurring bounce buffering can finally be properly supported. As a follow up I
2020 Aug 19
39
a saner API for allocating DMA addressable pages
Hi all, this series replaces the DMA_ATTR_NON_CONSISTENT flag passed to dma_alloc_attrs with a separate new dma_alloc_pages API, which is available on all platforms. In addition to cleaning up the convoluted code path, this ensures that other drivers that have asked for better support for non-coherent DMA to pages without incurring bounce buffering can finally be properly supported. I'm still a
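All three cover letters describe the same end state. A hedged usage sketch of the dma_alloc_pages API as described, for a hypothetical driver buffer (the sync calls follow the documented pattern for non-coherent streaming memory):

	#include <linux/dma-mapping.h>

	static int example_use_dma_alloc_pages(struct device *dev)
	{
		size_t size = PAGE_SIZE;	/* hypothetical buffer size */
		dma_addr_t handle;
		struct page *page;

		page = dma_alloc_pages(dev, size, &handle, DMA_BIDIRECTIONAL,
				       GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		memset(page_address(page), 0, size);	/* CPU fills the buffer */
		dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);

		/* ... device DMA runs here ... */

		dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
		dma_free_pages(dev, size, page, handle, DMA_BIDIRECTIONAL);
		return 0;
	}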
2020 Aug 19
0
[PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc, free}_pages
...dr, dma_addr_t dma_handle, unsigned long attrs)
 {
 	vdma_free(dma_handle);
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		vaddr = __va(vaddr - UNCAC_BASE);
 	__free_pages(virt_to_page(vaddr), get_order(size));
 }
 
@@ -622,5 +617,7 @@ const struct dma_map_ops jazz_dma_ops = {
 	.cache_sync	= arch_dma_cache_sync,
 	.mmap		= dma_common_mmap,
 	.get_sgtable	= dma_common_get_sgtable,
+	.alloc_pages	= dma_common_alloc_pages,
+	.free_pages	= dma_common_free_pages,
 };
 EXPORT_SYMBOL(jazz_dma_ops);
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 569fecd7b5b234..d4e702d74b...
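The two added lines show the pattern the series establishes: an architecture's dma_map_ops can point the new hooks at the common helpers so dma_alloc_pages() works without per-arch allocation code. Sketched in isolation (callbacks other than the four shown are elided):

	static const struct dma_map_ops example_dma_ops = {
		/* ... map/unmap/sync callbacks ... */
		.mmap		= dma_common_mmap,
		.get_sgtable	= dma_common_get_sgtable,
		.alloc_pages	= dma_common_alloc_pages,
		.free_pages	= dma_common_free_pages,
	};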