Displaying 9 results from an estimated 9 matches for "mm_to_dma_pfn".
2019 Dec 21
0
[PATCH 3/8] iommu/vt-d: Remove IOVA handling code from non-dma_ops path
...t have overlap with physical memory range,
* clear it first
@@ -2760,7 +2751,8 @@ static int __init si_domain_init(int hw)
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
- PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ mm_to_dma_pfn(start_pfn),
+ mm_to_dma_pfn(end_pfn));
if (ret)
return ret;
}
@@ -4593,58 +4585,37 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
struct memory_notify *mhp = v;
- unsigned long long start, end;
- unsigned long start_...
2019 Dec 21
13
[PATCH 0/8] Convert the intel iommu driver to the dma-iommu api
This patchset converts the intel iommu driver to the dma-iommu api.
While converting the driver I exposed a bug in the intel i915 driver which causes a large number of artifacts on the screen of my laptop. You can see a picture of it here:
https://github.com/pippy360/kernelPatches/blob/master/IMG_20191219_225922.jpg
This issue is most likely in the i915 driver and is most likely caused by the
2019 Dec 21
13
[PATCH 0/8] Convert the intel iommu driver to the dma-iommu api
This patchset converts the intel iommu driver to the dma-iommu api.
While converting the driver I exposed a bug in the intel i915 driver which causes a large number of artifacts on the screen of my laptop. You can see a picture of it here:
https://github.com/pippy360/kernelPatches/blob/master/IMG_20191219_225922.jpg
This issue is most likely in the i915 driver and is most likely caused by the
2020 Apr 29
0
[PATCH 1/5] swiotlb: Introduce concept of swiotlb_pool
...}
cfb94a372f2d4e Lu Baolu 2019-09-06 4008 } else {
cfb94a372f2d4e Lu Baolu 2019-09-06 4009 tlb_addr = paddr;
cfb94a372f2d4e Lu Baolu 2019-09-06 4010 }
cfb94a372f2d4e Lu Baolu 2019-09-06 4011
cfb94a372f2d4e Lu Baolu 2019-09-06 4012 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
cfb94a372f2d4e Lu Baolu 2019-09-06 4013 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
cfb94a372f2d4e Lu Baolu 2019-09-06 4014 if (ret)
cfb94a372f2d4e Lu Baolu 2019-09-06 4015 goto mapping_error;
cfb94a372f2d4e Lu Baolu 2019-09-06 4016
cfb94a372f2d4e L...
2020 Aug 18
3
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...igned
+ long iova, size_t size,
+ struct page *freelist)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long start_pfn, last_pfn;
+ unsigned long iova_pfn = IOVA_PFN(iova);
+ unsigned long nrpages;
+ int iommu_id;
+
+ nrpages = aligned_nrpages(iova, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
+ start_pfn, nrpages, !freelist, 0);
dma_free_pagelist(freelist);
-
- if (dmar_domain...
2020 Aug 18
3
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...igned
+ long iova, size_t size,
+ struct page *freelist)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long start_pfn, last_pfn;
+ unsigned long iova_pfn = IOVA_PFN(iova);
+ unsigned long nrpages;
+ int iommu_id;
+
+ nrpages = aligned_nrpages(iova, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
+ start_pfn, nrpages, !freelist, 0);
dma_free_pagelist(freelist);
-
- if (dmar_domain...
2019 Dec 21
0
[PATCH 4/8] iommu: Handle freelists when using deferred flushing in iommu drivers
...unsigned long iova, size_t size,
+ struct page *freelist)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long start_pfn, last_pfn;
+ unsigned long iova_pfn = IOVA_PFN(iova);
+ unsigned long nrpages;
+ int iommu_id;
+
+ nrpages = aligned_nrpages(iova, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
+ start_pfn, nrpages, !freelist, 0);
dma_free_pagelist(freelist);
-
- if (dmar_domain...
2020 Aug 17
1
[PATCH 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...igned
+ long iova, size_t size,
+ struct page *freelist)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long start_pfn, last_pfn;
+ unsigned long iova_pfn = IOVA_PFN(iova);
+ unsigned long nrpages;
+ int iommu_id;
+
+ nrpages = aligned_nrpages(iova, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, npages, !freelist, 0);
+ start_pfn, nrpages, !freelist, 0);
dma_free_pagelist(freelist);
-
- if (dmar_domain...
2020 Aug 18
0
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...ge *freelist)
> +{
> + struct dmar_domain *dmar_domain = to_dmar_domain(domain);
> + unsigned long start_pfn, last_pfn;
> + unsigned long iova_pfn = IOVA_PFN(iova);
> + unsigned long nrpages;
> + int iommu_id;
> +
> + nrpages = aligned_nrpages(iova, size);
> + start_pfn = mm_to_dma_pfn(iova_pfn);
> + last_pfn = start_pfn + nrpages - 1;
>
> for_each_domain_iommu(iommu_id, dmar_domain)
> iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
> - start_pfn, npages, !freelist, 0);
> + start_pfn, nrpages, !freelist, 0);
>
> dma...