Displaying 14 results from an estimated 14 matches for "arm_coherent_dma_ops".
2018 Jul 02
1
[PATCH v4 1/2] ARM: dma-mapping: Set proper DMA ops in arm_iommu_detach_device()
...ping.c
> +++ b/arch/arm/mm/dma-mapping.c
> @@ -1151,6 +1151,11 @@ int arm_dma_supported(struct device *dev, u64 mask)
> return __dma_supported(dev, mask, false);
> }
>
> +static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
> +{
> + return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
> +}
> +
> #ifdef CONFIG_ARM_DMA_USE_IOMMU
>
> static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
> @@ -2296,7 +2301,7 @@ void arm_iommu_detach_device(struct device *dev)
> iommu_detach_device(mapping->domain, dev);
>...
2018 May 30
8
[PATCH v4 0/2] drm/nouveau: tegra: Detach from ARM DMA/IOMMU mapping
From: Thierry Reding <treding at nvidia.com>
An unfortunate interaction between the 32-bit ARM DMA/IOMMU mapping code
and Tegra SMMU driver changes to support IOMMU groups introduced a
boot-time regression on Tegra124. This was caught very late because none
of the standard configurations that are tested on Tegra enable the
ARM DMA/IOMMU mapping code, since it is not needed.
The reason for
2018 May 30
0
[PATCH v4 1/2] ARM: dma-mapping: Set proper DMA ops in arm_iommu_detach_device()
...37574e4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1151,6 +1151,11 @@ int arm_dma_supported(struct device *dev, u64 mask)
return __dma_supported(dev, mask, false);
}
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+}
+
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
@@ -2296,7 +2301,7 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, relea...
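The excerpt cuts off before the hunk that uses the new helper. A sketch of the likely end result in arm_iommu_detach_device(), reconstructed from mainline of that era rather than quoted from the patch (the removed set_dma_ops(dev, NULL) line in particular is an assumption):

@@ void arm_iommu_detach_device(struct device *dev)
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
-	set_dma_ops(dev, NULL);
+	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));

This way a device that detaches from an IOMMU mapping falls back to streaming DMA ops that match its coherency flag instead of being left with no ops at all.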
2018 Apr 23
1
[PATCH] drm/nouveau: tegra: Detach from ARM DMA/IOMMU mapping
...map;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_release_mapping(mapping);
+ arm_iommu_detach_device(dev);
+
+ if (dev->archdata.dma_coherent)
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+ else
+ set_dma_ops(dev, &arm_dma_ops);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
--
2.17.0
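The same hunk, annotated for clarity; the comments are editorial, and the surrounding function (cut off in the excerpt) is assumed to be the Tegra probe path:

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		/* Drop the mapping that the ARM core code attached at
		 * probe time; nouveau wants to manage the IOMMU itself. */
		arm_iommu_release_mapping(mapping);
		arm_iommu_detach_device(dev);

		/* Detaching leaves the device without usable DMA ops, so
		 * restore the streaming ops matching its coherency flag. */
		if (dev->archdata.dma_coherent)
			set_dma_ops(dev, &arm_coherent_dma_ops);
		else
			set_dma_ops(dev, &arm_dma_ops);
	}
#endif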
2018 Apr 25
5
[PATCH 1/4] drm/nouveau: tegra: Detach from ARM DMA/IOMMU mapping
...map;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_release_mapping(mapping);
+ arm_iommu_detach_device(dev);
+
+ if (dev->archdata.dma_coherent)
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+ else
+ set_dma_ops(dev, &arm_dma_ops);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
--
2.17.0
2018 Apr 25
11
[PATCH v2 1/5] drm/nouveau: tegra: Detach from ARM DMA/IOMMU mapping
...map;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_release_mapping(mapping);
+ arm_iommu_detach_device(dev);
+
+ if (dev->archdata.dma_coherent)
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+ else
+ set_dma_ops(dev, &arm_dma_ops);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
--
2.17.0
2018 Apr 25
0
[PATCH v2 5/5] ARM: Unconditionally enable ARM_DMA_USE_IOMMU
...mu_ops *iommu)
-{
- return false;
-}
-
-static void arm_teardown_iommu_dma_ops(struct device *dev) { }
-
-#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
-
-#endif /* CONFIG_ARM_DMA_USE_IOMMU */
-
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
@@ -2426,7 +2410,6 @@ void arch_teardown_dma_ops(struct device *dev)
void arch_iommu_detach_device(struct device *dev)
{
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
const struct dma_map_ops *dma_ops;
@@ -2438,5 +2421,...
2020 Sep 15
0
[PATCH 15/18] dma-mapping: add a new dma_alloc_pages API
...c_pages = dma_direct_alloc_pages,
+ .free_pages = dma_direct_free_pages,
.mmap = arm_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_dma_map_page,
@@ -226,6 +228,8 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
const struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
+ .alloc_pages = dma_direct_alloc_pages,
+ .free_pages = dma_direct_free_pages,
.mmap = arm_coherent_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
diff --git a/arch/ia64/hp...
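These hunks wire the generic dma_direct page allocator into both ARM ops tables, so the new API works whether or not the device is coherent. A hedged driver-side sketch of the API this series adds (example_fill_buffer and its ownership hand-offs are illustrative, not from the patch):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int example_fill_buffer(struct device *dev, size_t size)
{
	dma_addr_t dma;
	struct page *page;

	/* Pages may be non-coherent, so ownership is transferred
	 * explicitly with the dma_sync_single_* calls below. */
	page = dma_alloc_pages(dev, size, &dma, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset(page_address(page), 0, size);	/* CPU writes... */
	dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);

	/* ... device DMA happens here ... */

	dma_sync_single_for_cpu(dev, dma, size, DMA_BIDIRECTIONAL);
	dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);
	return 0;
}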
2020 Aug 19
0
[PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc, free}_pages
...c_pages = dma_direct_alloc_pages,
+ .free_pages = dma_direct_free_pages,
.mmap = arm_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_dma_map_page,
@@ -226,6 +228,8 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
const struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
+ .alloc_pages = dma_direct_alloc_pages,
+ .free_pages = dma_direct_free_pages,
.mmap = arm_coherent_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
diff --git a/arch/ia64/hp...
2013 Oct 17
42
[PATCH v8 0/19] enable swiotlb-xen on arm and arm64
Hi all,
this patch series enables xen-swiotlb on arm and arm64.
It has been heavily reworked compared to the previous versions in order
to achieve better performance and to address review comments.
We are not using dma_mark_clean to ensure coherency anymore. We call the
platform implementation of map_page and unmap_page.
We assume that dom0 has been mapped 1:1 (physical address ==
machine
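A sketch of the delegation described above, under the stated 1:1 assumption; the function name is illustrative and the map_page signature is the modern one, not necessarily what the 2013 series used:

/* With dom0 mapped 1:1, physical address == machine address, so the
 * platform's own map_page can perform cache maintenance directly,
 * replacing the old dma_mark_clean() approach. */
static dma_addr_t xen_map_page_1to1(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, page, offset, size, dir, attrs);
}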
2020 Sep 14
20
a saner API for allocating DMA addressable pages v2
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag passed to
dma_alloc_attrs with a separate new dma_alloc_pages API, which is
available on all platforms. In addition to cleaning up the convoluted
code path, this ensures that other drivers that have asked for better
support for non-coherent DMA to pages without incurring bounce buffering
can finally be properly supported.
I'm still a
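For drivers the conversion is largely mechanical; a hedged before/after sketch (the DMA_ATTR_NON_CONSISTENT path is the one this series removes):

static void *old_way(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Before: non-consistent memory requested via an attrs flag. */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_NON_CONSISTENT);
}

static struct page *new_way(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* After: an explicit page-based API, available on all platforms. */
	return dma_alloc_pages(dev, size, dma, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
}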
2020 Sep 15
32
a saner API for allocating DMA addressable pages v3
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag passed to
dma_alloc_attrs with a separate new dma_alloc_pages API, which is
available on all platforms. In addition to cleaning up the convoluted
code path, this ensures that other drivers that have asked for better
support for non-coherent DMA to pages without incurring bounce buffering
can finally be properly supported.
As a follow up I
2020 Aug 19
39
a saner API for allocating DMA addressable pages
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag passed to
dma_alloc_attrs with a separate new dma_alloc_pages API, which is
available on all platforms. In addition to cleaning up the convoluted
code path, this ensures that other drivers that have asked for better
support for non-coherent DMA to pages without incurring bounce buffering
can finally be properly supported.
I'm still a
2016 Jun 02
52
[RFC v3 00/45] dma-mapping: Use unsigned long for dma_attrs
Hi,
This is the third approach (complete this time) to replacing struct
dma_attrs with unsigned long.
The main patch (2/45) doing the change is split into many subpatches
for easier review (3-43). They should be squashed together when
applying.
*Important:* The patchset is *only* build-tested on allyesconfigs: ARM,
ARM64, i386, x86_64 and powerpc. Please provide reviews and tests
for other
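What the conversion looks like at a call site; a hedged sketch, with the old struct dma_attrs idiom taken from pre-4.8 kernels:

	/* Before: attributes carried in a struct that had to be
	 * initialized and populated bit by bit. */
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, &attrs);

	/* After: attributes are plain bits in an unsigned long. */
	buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
			      DMA_ATTR_WRITE_COMBINE);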